diff --git a/castai/provider.go b/castai/provider.go index a843a646..11f9f151 100644 --- a/castai/provider.go +++ b/castai/provider.go @@ -43,48 +43,50 @@ func Provider(version string) *schema.Provider { }, ResourcesMap: map[string]*schema.Resource{ - "castai_eks_cluster": resourceEKSCluster(), - "castai_eks_clusterid": resourceEKSClusterID(), - "castai_gke_cluster": resourceGKECluster(), - "castai_gke_cluster_id": resourceGKEClusterId(), - "castai_aks_cluster": resourceAKSCluster(), - "castai_autoscaler": resourceAutoscaler(), - "castai_evictor_advanced_config": resourceEvictionConfig(), - "castai_node_template": resourceNodeTemplate(), - "castai_rebalancing_schedule": resourceRebalancingSchedule(), - "castai_rebalancing_job": resourceRebalancingJob(), - "castai_node_configuration": resourceNodeConfiguration(), - "castai_node_configuration_default": resourceNodeConfigurationDefault(), - "castai_eks_user_arn": resourceEKSClusterUserARN(), - "castai_reservations": resourceReservations(), - "castai_commitments": resourceCommitments(), - "castai_organization_members": resourceOrganizationMembers(), - "castai_sso_connection": resourceSSOConnection(), - "castai_service_account": resourceServiceAccount(), - "castai_service_account_key": resourceServiceAccountKey(), - "castai_workload_scaling_policy": resourceWorkloadScalingPolicy(), - "castai_workload_scaling_policy_order": resourceWorkloadScalingPolicyOrder(), - "castai_organization_group": resourceOrganizationGroup(), - "castai_role_bindings": resourceRoleBindings(), - "castai_hibernation_schedule": resourceHibernationSchedule(), - "castai_security_runtime_rule": resourceSecurityRuntimeRule(), - "castai_allocation_group": resourceAllocationGroup(), - "castai_enterprise_group": resourceEnterpriseGroup(), - "castai_enterprise_role_binding": resourceEnterpriseRoleBinding(), - "castai_cache_group": resourceCacheGroup(), - "castai_cache_configuration": resourceCacheConfiguration(), - "castai_cache_rule": 
resourceCacheRule(), + "castai_eks_cluster": resourceEKSCluster(), + "castai_eks_clusterid": resourceEKSClusterID(), + "castai_gke_cluster": resourceGKECluster(), + "castai_gke_cluster_id": resourceGKEClusterId(), + "castai_aks_cluster": resourceAKSCluster(), + "castai_autoscaler": resourceAutoscaler(), + "castai_evictor_advanced_config": resourceEvictionConfig(), + "castai_node_template": resourceNodeTemplate(), + "castai_rebalancing_schedule": resourceRebalancingSchedule(), + "castai_rebalancing_job": resourceRebalancingJob(), + "castai_node_configuration": resourceNodeConfiguration(), + "castai_node_configuration_default": resourceNodeConfigurationDefault(), + "castai_eks_user_arn": resourceEKSClusterUserARN(), + "castai_reservations": resourceReservations(), + "castai_commitments": resourceCommitments(), + "castai_organization_members": resourceOrganizationMembers(), + "castai_sso_connection": resourceSSOConnection(), + "castai_service_account": resourceServiceAccount(), + "castai_service_account_key": resourceServiceAccountKey(), + "castai_organization_group": resourceOrganizationGroup(), + "castai_role_bindings": resourceRoleBindings(), + "castai_hibernation_schedule": resourceHibernationSchedule(), + "castai_security_runtime_rule": resourceSecurityRuntimeRule(), + "castai_allocation_group": resourceAllocationGroup(), + "castai_enterprise_group": resourceEnterpriseGroup(), + "castai_enterprise_role_binding": resourceEnterpriseRoleBinding(), + "castai_cache_group": resourceCacheGroup(), + "castai_cache_configuration": resourceCacheConfiguration(), + "castai_cache_rule": resourceCacheRule(), + + "castai_workload_scaling_policy": resourceWorkloadScalingPolicy(), + "castai_workload_scaling_policy_order": resourceWorkloadScalingPolicyOrder(), + "castai_workload_custom_metrics_data_source": resourceWorkloadCustomMetricsDataSource(), }, DataSourcesMap: map[string]*schema.Resource{ - "castai_eks_settings": dataSourceEKSSettings(), - "castai_gke_user_policies": 
dataSourceGKEPolicies(), - "castai_organization": dataSourceOrganization(), - "castai_rebalancing_schedule": dataSourceRebalancingSchedule(), - "castai_hibernation_schedule": dataSourceHibernationSchedule(), - "castai_workload_scaling_policy_order": dataSourceWorkloadScalingPolicyOrder(), - "castai_cache_group": dataSourceCacheGroup(), - "castai_impersonation_service_account": dataSourceImpersonationServiceAccount(), + "castai_eks_settings": dataSourceEKSSettings(), + "castai_gke_user_policies": dataSourceGKEPolicies(), + "castai_organization": dataSourceOrganization(), + "castai_rebalancing_schedule": dataSourceRebalancingSchedule(), + "castai_hibernation_schedule": dataSourceHibernationSchedule(), + "castai_workload_scaling_policy_order": dataSourceWorkloadScalingPolicyOrder(), + "castai_cache_group": dataSourceCacheGroup(), + "castai_impersonation_service_account": dataSourceImpersonationServiceAccount(), }, ConfigureContextFunc: providerConfigure(version), diff --git a/castai/resource_omni_cluster.go b/castai/resource_omni_cluster.go index 2ba525e0..d6ffcbf3 100644 --- a/castai/resource_omni_cluster.go +++ b/castai/resource_omni_cluster.go @@ -10,6 +10,8 @@ import ( "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" "github.com/hashicorp/terraform-plugin-framework/types" + + "github.com/castai/terraform-provider-castai/castai/sdk/omni" ) var ( @@ -93,7 +95,7 @@ func (r *omniClusterResource) Create(ctx context.Context, req resource.CreateReq organizationID := plan.OrganizationID.ValueString() clusterID := plan.ClusterID.ValueString() - apiResp, err := client.ClustersAPIRegisterClusterWithResponse(ctx, organizationID, clusterID) + apiResp, err := client.ClustersAPIRegisterClusterWithResponse(ctx, organizationID, clusterID, omni.ClustersAPIRegisterClusterJSONRequestBody{}) if err != nil { resp.Diagnostics.AddError("Failed to register omni cluster", 
err.Error()) return diff --git a/castai/resource_workload_custom_metrics_data_source.go b/castai/resource_workload_custom_metrics_data_source.go new file mode 100644 index 00000000..23ac9f24 --- /dev/null +++ b/castai/resource_workload_custom_metrics_data_source.go @@ -0,0 +1,431 @@ +package castai + +import ( + "context" + "fmt" + "net/http" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/castai/terraform-provider-castai/castai/sdk" +) + +func resourceWorkloadCustomMetricsDataSource() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceWorkloadCustomMetricsDataSourceCreate, + ReadContext: resourceWorkloadCustomMetricsDataSourceRead, + UpdateContext: resourceWorkloadCustomMetricsDataSourceUpdate, + DeleteContext: resourceWorkloadCustomMetricsDataSourceDelete, + Importer: &schema.ResourceImporter{ + StateContext: workloadCustomMetricsDataSourceImporter, + }, + Description: "Manages a CAST AI workload custom metrics data source. 
" + + "Custom metrics data sources allow CAST AI to collect and use non-standard metrics for workload optimization.", + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(2 * time.Minute), + Read: schema.DefaultTimeout(1 * time.Minute), + Update: schema.DefaultTimeout(2 * time.Minute), + Delete: schema.DefaultTimeout(1 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + FieldClusterID: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "CAST AI cluster ID.", + ValidateDiagFunc: validation.ToDiagFunc(validation.IsUUID), + }, + "name": { + Type: schema.TypeString, + Required: true, + Description: "Name of the custom metrics data source (1-63 characters).", + ValidateDiagFunc: validation.ToDiagFunc(validation.StringLenBetween(1, 63)), + }, + "prometheus": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Description: "Prometheus data source configuration.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "url": { + Type: schema.TypeString, + Required: true, + Description: "URL of the Prometheus server.", + ValidateDiagFunc: validation.ToDiagFunc(validation.IsURLWithHTTPorHTTPS), + }, + "timeout": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Timeout for Prometheus queries (e.g. \"30s\").", + }, + "presets": { + Type: schema.TypeList, + Optional: true, + Description: "List of metric presets managed by CAST AI. Presets provide curated metric definitions " + + "that are kept up to date automatically. This is the recommended approach for most users. " + + "Currently available: \"jvm\".", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "metric": { + Type: schema.TypeSet, + Optional: true, + Description: "Manually defined metrics. Use this for advanced use cases where presets " + + "don't cover your needs. Each entry defines a single metric name and PromQL query. 
" + + "To specify multiple queries for the same metric, use multiple entries with the same name.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: "Name of the metric.", + ValidateDiagFunc: validation.ToDiagFunc(validation.StringIsNotWhiteSpace), + }, + "query": { + Type: schema.TypeString, + Required: true, + Description: "PromQL query for this metric.", + ValidateDiagFunc: validation.ToDiagFunc(validation.StringIsNotWhiteSpace), + }, + }, + }, + }, + }, + }, + }, + "status": { + Type: schema.TypeString, + Computed: true, + Description: "Synchronization status of the data source (CONNECTING, CONNECTED, SYNCING, FAILED).", + }, + "kube_resource_name": { + Type: schema.TypeString, + Computed: true, + Description: "Name of the corresponding Kubernetes resource.", + }, + "managed_by_cast": { + Type: schema.TypeBool, + Computed: true, + Description: "Whether the data source is managed by CAST AI.", + }, + }, + } +} + +func resourceWorkloadCustomMetricsDataSourceCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + client := meta.(*ProviderConfig).api + clusterID := d.Get(FieldClusterID).(string) + + req, err := expandWorkloadCustomMetricsDataSourceCreate(d) + if err != nil { + return diag.FromErr(err) + } + + tflog.Info(ctx, "creating workload custom metrics data source", map[string]interface{}{ + "cluster_id": clusterID, + "name": req.Name, + }) + + resp, err := client.WorkloadOptimizationAPICreateCustomMetricsDataSourceWithResponse(ctx, clusterID, req) + if err := sdk.CheckOKResponse(resp, err); err != nil { + return diag.Errorf("creating workload custom metrics data source: %v", err) + } + + d.SetId(resp.JSON200.Id) + + tflog.Info(ctx, "created workload custom metrics data source", map[string]interface{}{ + "cluster_id": clusterID, + "resource_id": resp.JSON200.Id, + }) + + return resourceWorkloadCustomMetricsDataSourceRead(ctx, d, meta) +} + 
+func resourceWorkloadCustomMetricsDataSourceRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + client := meta.(*ProviderConfig).api + clusterID := d.Get(FieldClusterID).(string) + + if d.Id() == "" { + return diag.Errorf("workload custom metrics data source ID is not set") + } + + tflog.Info(ctx, "reading workload custom metrics data source", map[string]interface{}{ + "cluster_id": clusterID, + "resource_id": d.Id(), + }) + + resp, err := client.WorkloadOptimizationAPIListCustomMetricsDataSourcesWithResponse(ctx, clusterID) + if err != nil { + return diag.Errorf("listing workload custom metrics data sources: %v", err) + } + + if !d.IsNewResource() && resp.StatusCode() == http.StatusNotFound { + tflog.Warn(ctx, "cluster not found, removing custom metrics data source from state", map[string]interface{}{ + "cluster_id": clusterID, + "resource_id": d.Id(), + }) + d.SetId("") + return nil + } + + if err := sdk.CheckOKResponse(resp, err); err != nil { + return diag.Errorf("listing workload custom metrics data sources: %v", err) + } + + var found *sdk.WorkloadoptimizationV1CustomMetricsDataSource + for i, item := range resp.JSON200.Items { + if item.Id == d.Id() { + found = &resp.JSON200.Items[i] + break + } + } + + if found == nil { + if !d.IsNewResource() { + tflog.Warn(ctx, "workload custom metrics data source not found, removing from state", map[string]interface{}{ + "cluster_id": clusterID, + "resource_id": d.Id(), + }) + d.SetId("") + return nil + } + return diag.Errorf("workload custom metrics data source %s not found in cluster %s", d.Id(), clusterID) + } + + return flattenWorkloadCustomMetricsDataSource(d, found) +} + +func resourceWorkloadCustomMetricsDataSourceUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + client := meta.(*ProviderConfig).api + clusterID := d.Get(FieldClusterID).(string) + + req, err := expandWorkloadCustomMetricsDataSourceUpdate(d) + if err != nil { + return 
diag.FromErr(err) + } + + tflog.Info(ctx, "updating workload custom metrics data source", map[string]interface{}{ + "cluster_id": clusterID, + "resource_id": d.Id(), + }) + + resp, err := client.WorkloadOptimizationAPIUpdateCustomMetricsDataSourceWithResponse(ctx, clusterID, d.Id(), req) + if err := sdk.CheckOKResponse(resp, err); err != nil { + return diag.Errorf("updating workload custom metrics data source: %v", err) + } + + tflog.Info(ctx, "updated workload custom metrics data source", map[string]interface{}{ + "cluster_id": clusterID, + "resource_id": d.Id(), + }) + + return resourceWorkloadCustomMetricsDataSourceRead(ctx, d, meta) +} + +func resourceWorkloadCustomMetricsDataSourceDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + client := meta.(*ProviderConfig).api + clusterID := d.Get(FieldClusterID).(string) + + tflog.Info(ctx, "deleting workload custom metrics data source", map[string]interface{}{ + "cluster_id": clusterID, + "resource_id": d.Id(), + }) + + resp, err := client.WorkloadOptimizationAPIDeleteCustomMetricsDataSourceWithResponse(ctx, clusterID, d.Id()) + if err != nil { + return diag.Errorf("deleting workload custom metrics data source: %v", err) + } + + if resp.StatusCode() == http.StatusNotFound { + tflog.Debug(ctx, "workload custom metrics data source already deleted", map[string]interface{}{ + "cluster_id": clusterID, + "resource_id": d.Id(), + }) + return nil + } + + if err := sdk.CheckOKResponse(resp, err); err != nil { + return diag.Errorf("deleting workload custom metrics data source: %v", err) + } + + tflog.Info(ctx, "deleted workload custom metrics data source", map[string]interface{}{ + "cluster_id": clusterID, + "resource_id": d.Id(), + }) + + return nil +} + +func workloadCustomMetricsDataSourceImporter(ctx context.Context, d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + clusterID, id, found := strings.Cut(d.Id(), "/") + if !found { + return nil, 
fmt.Errorf("expected import id with format: <cluster_id>/<id>, got: %q", d.Id()) + } + + if err := d.Set(FieldClusterID, clusterID); err != nil { + return nil, fmt.Errorf("setting cluster_id: %w", err) + } + + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func expandWorkloadCustomMetricsDataSourceCreate(d *schema.ResourceData) (sdk.WorkloadOptimizationAPICreateCustomMetricsDataSourceJSONRequestBody, error) { + name := d.Get("name").(string) + prometheusInput, err := expandPrometheusInputConfig(d) + if err != nil { + return sdk.WorkloadOptimizationAPICreateCustomMetricsDataSourceJSONRequestBody{}, err + } + + return sdk.WorkloadOptimizationAPICreateCustomMetricsDataSourceJSONRequestBody{ + Name: name, + Type: sdk.PROMETHEUS, + Data: sdk.WorkloadoptimizationV1CustomMetricsDataSourceInput{ + Prometheus: prometheusInput, + }, + }, nil +} + +func expandWorkloadCustomMetricsDataSourceUpdate(d *schema.ResourceData) (sdk.WorkloadOptimizationAPIUpdateCustomMetricsDataSourceJSONRequestBody, error) { + name := d.Get("name").(string) + prometheusInput, err := expandPrometheusInputConfig(d) + if err != nil { + return sdk.WorkloadOptimizationAPIUpdateCustomMetricsDataSourceJSONRequestBody{}, err + } + + return sdk.WorkloadOptimizationAPIUpdateCustomMetricsDataSourceJSONRequestBody{ + DataSource: &sdk.WorkloadoptimizationV1UpdateCustomMetricsDataSource{ + Name: &name, + Data: &sdk.WorkloadoptimizationV1CustomMetricsDataSourceInput{ + Prometheus: prometheusInput, + }, + }, + UpdateMask: "name,data", + }, nil +} + +func expandPrometheusInputConfig(d *schema.ResourceData) (*sdk.WorkloadoptimizationV1CustomMetricsDataSourceInputPrometheus, error) { + promList := d.Get("prometheus").([]interface{}) + if len(promList) == 0 { + return nil, fmt.Errorf("prometheus configuration is required") + } + + promMap := promList[0].(map[string]interface{}) + url := promMap["url"].(string) + + dataSource := sdk.WorkloadoptimizationV1CustomMetricsDataSourceInputPrometheusDataSource{ + Url: url, + } + + 
if v, ok := promMap["timeout"].(string); ok && v != "" { + dataSource.Timeout = &v + } + + result := &sdk.WorkloadoptimizationV1CustomMetricsDataSourceInputPrometheus{ + DataSource: dataSource, + } + + var metrics *sdk.WorkloadoptimizationV1CustomMetricsDataSourceInputPrometheusMetrics + + // Expand presets. + if v, ok := promMap["presets"].([]interface{}); ok && len(v) > 0 { + presets := make([]string, len(v)) + for i, p := range v { + presets[i] = p.(string) + } + metrics = &sdk.WorkloadoptimizationV1CustomMetricsDataSourceInputPrometheusMetrics{} + metrics.Presets = &presets + } + + if v, ok := promMap["metric"].(*schema.Set); ok && v.Len() > 0 { + items := v.List() + customMetrics := make([]sdk.WorkloadoptimizationV1CustomMetricsDataSourceInputPrometheusMetric, 0, len(items)) + for _, m := range items { + metricMap := m.(map[string]interface{}) + customMetrics = append(customMetrics, sdk.WorkloadoptimizationV1CustomMetricsDataSourceInputPrometheusMetric{ + Name: metricMap["name"].(string), + Query: metricMap["query"].(string), + }) + } + if metrics == nil { + metrics = &sdk.WorkloadoptimizationV1CustomMetricsDataSourceInputPrometheusMetrics{} + } + metrics.Manual = &customMetrics + } + + result.Metrics = metrics + + return result, nil +} + +func flattenWorkloadCustomMetricsDataSource(d *schema.ResourceData, ds *sdk.WorkloadoptimizationV1CustomMetricsDataSource) diag.Diagnostics { + if err := d.Set("name", ds.Name); err != nil { + return diag.Errorf("setting name: %v", err) + } + if err := d.Set("status", string(ds.Status)); err != nil { + return diag.Errorf("setting status: %v", err) + } + if err := d.Set("kube_resource_name", ds.KubeResourceName); err != nil { + return diag.Errorf("setting kube_resource_name: %v", err) + } + if err := d.Set("managed_by_cast", ds.ManagedByCast); err != nil { + return diag.Errorf("setting managed_by_cast: %v", err) + } + + if ds.Data.Prometheus != nil { + prom := flattenPrometheusConfig(ds.Data.Prometheus) + if err := 
d.Set("prometheus", prom); err != nil { + return diag.Errorf("setting prometheus: %v", err) + } + } + + return nil +} + +func flattenPrometheusConfig(prom *sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheus) []interface{} { + promMap := map[string]interface{}{ + "url": prom.DataSource.Url, + "timeout": "", + } + + if prom.DataSource.Timeout != nil { + promMap["timeout"] = *prom.DataSource.Timeout + } + + if prom.Metrics != nil && prom.Metrics.Presets != nil { + promMap["presets"] = *prom.Metrics.Presets + } else { + promMap["presets"] = []string{} + } + + if prom.Metrics != nil && prom.Metrics.Resolved != nil { + var metrics []interface{} + for _, rm := range *prom.Metrics.Resolved { + for _, q := range rm.Queries { + if q.Origin != sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetricsResolvedMetricQueryOriginMANUAL { + continue + } + metrics = append(metrics, map[string]interface{}{ + "name": rm.Name, + "query": q.Value, + }) + } + } + promMap["metric"] = metrics + } else { + promMap["metric"] = nil + } + + return []interface{}{promMap} +} diff --git a/castai/resource_workload_custom_metrics_data_source_test.go b/castai/resource_workload_custom_metrics_data_source_test.go new file mode 100644 index 00000000..72e49ec7 --- /dev/null +++ b/castai/resource_workload_custom_metrics_data_source_test.go @@ -0,0 +1,774 @@ +package castai + +import ( + "bytes" + "context" + "encoding/json" + "io" + "net/http" + "testing" + + "github.com/golang/mock/gomock" + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/stretchr/testify/require" + + "github.com/castai/terraform-provider-castai/castai/sdk" + mock_sdk "github.com/castai/terraform-provider-castai/castai/sdk/mock" +) + +func TestWorkloadCustomMetricsDataSource_CreateWithPresets(t *testing.T) { + r := require.New(t) + mockctrl := gomock.NewController(t) + mockClient := 
mock_sdk.NewMockClientInterface(mockctrl) + + ctx := context.Background() + provider := &ProviderConfig{ + api: &sdk.ClientWithResponses{ + ClientInterface: mockClient, + }, + } + + res := resourceWorkloadCustomMetricsDataSource() + + clusterID := "b6bfc074-a267-400f-b8f1-db0850c36gk1" + dsID := "ds-123" + + // GIVEN + val := cty.ObjectVal(map[string]cty.Value{ + FieldClusterID: cty.StringVal(clusterID), + "name": cty.StringVal("my-prometheus"), + "prometheus": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "url": cty.StringVal("http://prometheus:9090"), + "timeout": cty.StringVal("30s"), + "presets": cty.ListVal([]cty.Value{cty.StringVal("jvm")}), + "metric": cty.SetValEmpty(cty.Object(map[string]cty.Type{"name": cty.String, "query": cty.String})), + }), + }), + "status": cty.StringVal(""), + "kube_resource_name": cty.StringVal(""), + "managed_by_cast": cty.False, + }) + state := terraform.NewInstanceStateShimmedFromValue(val, 0) + data := res.Data(state) + + createResponse := sdk.WorkloadoptimizationV1CustomMetricsDataSource{ + Id: dsID, + ClusterId: clusterID, + Name: "my-prometheus", + Type: sdk.PROMETHEUS, + Status: sdk.WorkloadoptimizationV1CustomMetricsDataSourceStatusCONNECTING, + KubeResourceName: "my-prometheus", + ManagedByCast: true, + Data: sdk.WorkloadoptimizationV1CustomMetricsDataSourceData{ + Prometheus: &sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheus{ + DataSource: sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusDataSource{ + Url: "http://prometheus:9090", + Timeout: toPtr("30s"), + }, + Metrics: &sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetrics{ + Presets: &[]string{"jvm"}, + }, + }, + }, + } + + createBody, _ := json.Marshal(createResponse) + mockClient.EXPECT(). + WorkloadOptimizationAPICreateCustomMetricsDataSource(ctx, clusterID, gomock.Any()). 
+ Return(&http.Response{ + StatusCode: http.StatusOK, + Header: http.Header{"Content-Type": []string{"application/json"}}, + Body: io.NopCloser(bytes.NewReader(createBody)), + }, nil) + + listResponse := sdk.WorkloadoptimizationV1ListCustomMetricsDataSourcesResponse{ + Items: []sdk.WorkloadoptimizationV1CustomMetricsDataSource{createResponse}, + } + listBody, _ := json.Marshal(listResponse) + mockClient.EXPECT(). + WorkloadOptimizationAPIListCustomMetricsDataSources(ctx, clusterID). + Return(&http.Response{ + StatusCode: http.StatusOK, + Header: http.Header{"Content-Type": []string{"application/json"}}, + Body: io.NopCloser(bytes.NewReader(listBody)), + }, nil) + + // WHEN + diags := resourceWorkloadCustomMetricsDataSourceCreate(ctx, data, provider) + + // THEN + r.Empty(diags) + r.Equal(dsID, data.Id()) + r.Equal("my-prometheus", data.Get("name")) + r.Equal("CONNECTING", data.Get("status")) + r.Equal("my-prometheus", data.Get("kube_resource_name")) + r.Equal(true, data.Get("managed_by_cast")) +} + +func TestWorkloadCustomMetricsDataSource_CreateWithManualMetrics(t *testing.T) { + r := require.New(t) + mockctrl := gomock.NewController(t) + mockClient := mock_sdk.NewMockClientInterface(mockctrl) + + ctx := context.Background() + provider := &ProviderConfig{ + api: &sdk.ClientWithResponses{ + ClientInterface: mockClient, + }, + } + + res := resourceWorkloadCustomMetricsDataSource() + + clusterID := "b6bfc074-a267-400f-b8f1-db0850c36gk1" + dsID := "ds-456" + + // GIVEN + val := cty.ObjectVal(map[string]cty.Value{ + FieldClusterID: cty.StringVal(clusterID), + "name": cty.StringVal("custom-prom"), + "prometheus": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "url": cty.StringVal("http://prometheus:9090"), + "timeout": cty.StringVal(""), + "presets": cty.ListValEmpty(cty.String), + "metric": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("http_requests_total"), + "query": 
cty.StringVal("sum(rate(http_requests_total[5m])) by (pod)"), + }), + cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("queue_depth"), + "query": cty.StringVal("avg(queue_depth) by (pod)"), + }), + }), + }), + }), + "status": cty.StringVal(""), + "kube_resource_name": cty.StringVal(""), + "managed_by_cast": cty.False, + }) + state := terraform.NewInstanceStateShimmedFromValue(val, 0) + data := res.Data(state) + + createResponse := sdk.WorkloadoptimizationV1CustomMetricsDataSource{ + Id: dsID, + ClusterId: clusterID, + Name: "custom-prom", + Type: sdk.PROMETHEUS, + Status: sdk.WorkloadoptimizationV1CustomMetricsDataSourceStatusCONNECTING, + KubeResourceName: "custom-prom", + ManagedByCast: true, + Data: sdk.WorkloadoptimizationV1CustomMetricsDataSourceData{ + Prometheus: &sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheus{ + DataSource: sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusDataSource{ + Url: "http://prometheus:9090", + }, + Metrics: &sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetrics{ + Resolved: &[]sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetricsResolvedMetric{ + {Name: "http_requests_total", Queries: []sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetricsResolvedMetricQuery{ + {Value: "sum(rate(http_requests_total[5m])) by (pod)", Origin: sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetricsResolvedMetricQueryOriginMANUAL}, + }}, + {Name: "queue_depth", Queries: []sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetricsResolvedMetricQuery{ + {Value: "avg(queue_depth) by (pod)", Origin: sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetricsResolvedMetricQueryOriginMANUAL}, + }}, + }, + }, + }, + }, + } + + createBody, _ := json.Marshal(createResponse) + mockClient.EXPECT(). + WorkloadOptimizationAPICreateCustomMetricsDataSource(ctx, clusterID, gomock.Any()). 
+ Return(&http.Response{ + StatusCode: http.StatusOK, + Header: http.Header{"Content-Type": []string{"application/json"}}, + Body: io.NopCloser(bytes.NewReader(createBody)), + }, nil) + + listResponse := sdk.WorkloadoptimizationV1ListCustomMetricsDataSourcesResponse{ + Items: []sdk.WorkloadoptimizationV1CustomMetricsDataSource{createResponse}, + } + listBody, _ := json.Marshal(listResponse) + mockClient.EXPECT(). + WorkloadOptimizationAPIListCustomMetricsDataSources(ctx, clusterID). + Return(&http.Response{ + StatusCode: http.StatusOK, + Header: http.Header{"Content-Type": []string{"application/json"}}, + Body: io.NopCloser(bytes.NewReader(listBody)), + }, nil) + + // WHEN + diags := resourceWorkloadCustomMetricsDataSourceCreate(ctx, data, provider) + + // THEN + r.Empty(diags) + r.Equal(dsID, data.Id()) + r.Equal("custom-prom", data.Get("name")) + + promList := data.Get("prometheus").([]interface{}) + r.Len(promList, 1) + promMap := promList[0].(map[string]interface{}) + r.Equal("http://prometheus:9090", promMap["url"]) + + metricSet := promMap["metric"].(*schema.Set) + r.Equal(2, metricSet.Len()) + + metricList := metricSet.List() + names := map[string]bool{} + for _, m := range metricList { + names[m.(map[string]interface{})["name"].(string)] = true + } + r.True(names["http_requests_total"]) + r.True(names["queue_depth"]) +} + +func TestWorkloadCustomMetricsDataSource_ReadNotFound(t *testing.T) { + r := require.New(t) + mockctrl := gomock.NewController(t) + mockClient := mock_sdk.NewMockClientInterface(mockctrl) + + ctx := context.Background() + provider := &ProviderConfig{ + api: &sdk.ClientWithResponses{ + ClientInterface: mockClient, + }, + } + + res := resourceWorkloadCustomMetricsDataSource() + + clusterID := "b6bfc074-a267-400f-b8f1-db0850c36gk1" + dsID := "ds-not-exist" + + // GIVEN + val := cty.ObjectVal(map[string]cty.Value{ + FieldClusterID: cty.StringVal(clusterID), + "name": cty.StringVal("test"), + "prometheus": cty.ListVal([]cty.Value{ + 
cty.ObjectVal(map[string]cty.Value{ + "url": cty.StringVal("http://prometheus:9090"), + "timeout": cty.StringVal(""), + "presets": cty.ListValEmpty(cty.String), + "metric": cty.SetValEmpty(cty.Object(map[string]cty.Type{"name": cty.String, "query": cty.String})), + }), + }), + "status": cty.StringVal(""), + "kube_resource_name": cty.StringVal(""), + "managed_by_cast": cty.False, + }) + state := terraform.NewInstanceStateShimmedFromValue(val, 0) + state.ID = dsID + data := res.Data(state) + + listResponse := sdk.WorkloadoptimizationV1ListCustomMetricsDataSourcesResponse{ + Items: []sdk.WorkloadoptimizationV1CustomMetricsDataSource{}, + } + listBody, _ := json.Marshal(listResponse) + mockClient.EXPECT(). + WorkloadOptimizationAPIListCustomMetricsDataSources(ctx, clusterID). + Return(&http.Response{ + StatusCode: http.StatusOK, + Header: http.Header{"Content-Type": []string{"application/json"}}, + Body: io.NopCloser(bytes.NewReader(listBody)), + }, nil) + + // WHEN + diags := resourceWorkloadCustomMetricsDataSourceRead(ctx, data, provider) + + // THEN + r.Empty(diags) + r.Empty(data.Id()) +} + +func TestWorkloadCustomMetricsDataSource_Update(t *testing.T) { + r := require.New(t) + mockctrl := gomock.NewController(t) + mockClient := mock_sdk.NewMockClientInterface(mockctrl) + + ctx := context.Background() + provider := &ProviderConfig{ + api: &sdk.ClientWithResponses{ + ClientInterface: mockClient, + }, + } + + res := resourceWorkloadCustomMetricsDataSource() + + clusterID := "b6bfc074-a267-400f-b8f1-db0850c36gk1" + dsID := "ds-123" + + // GIVEN + val := cty.ObjectVal(map[string]cty.Value{ + FieldClusterID: cty.StringVal(clusterID), + "name": cty.StringVal("updated-name"), + "prometheus": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "url": cty.StringVal("http://new-prometheus:9090"), + "timeout": cty.StringVal("60s"), + "presets": cty.ListVal([]cty.Value{cty.StringVal("jvm")}), + "metric": cty.SetValEmpty(cty.Object(map[string]cty.Type{"name": 
cty.String, "query": cty.String})), + }), + }), + "status": cty.StringVal(""), + "kube_resource_name": cty.StringVal(""), + "managed_by_cast": cty.False, + }) + state := terraform.NewInstanceStateShimmedFromValue(val, 0) + state.ID = dsID + data := res.Data(state) + + updatedDS := sdk.WorkloadoptimizationV1CustomMetricsDataSource{ + Id: dsID, + ClusterId: clusterID, + Name: "updated-name", + Type: sdk.PROMETHEUS, + Status: sdk.WorkloadoptimizationV1CustomMetricsDataSourceStatusCONNECTED, + KubeResourceName: "my-prometheus", + ManagedByCast: true, + Data: sdk.WorkloadoptimizationV1CustomMetricsDataSourceData{ + Prometheus: &sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheus{ + DataSource: sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusDataSource{ + Url: "http://new-prometheus:9090", + Timeout: toPtr("60s"), + }, + Metrics: &sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetrics{ + Presets: &[]string{"jvm"}, + }, + }, + }, + } + + updateBody, _ := json.Marshal(updatedDS) + mockClient.EXPECT(). + WorkloadOptimizationAPIUpdateCustomMetricsDataSource(ctx, clusterID, dsID, gomock.Any()). + Return(&http.Response{ + StatusCode: http.StatusOK, + Header: http.Header{"Content-Type": []string{"application/json"}}, + Body: io.NopCloser(bytes.NewReader(updateBody)), + }, nil) + + listResponse := sdk.WorkloadoptimizationV1ListCustomMetricsDataSourcesResponse{ + Items: []sdk.WorkloadoptimizationV1CustomMetricsDataSource{updatedDS}, + } + listBody, _ := json.Marshal(listResponse) + mockClient.EXPECT(). + WorkloadOptimizationAPIListCustomMetricsDataSources(ctx, clusterID). 
+ Return(&http.Response{ + StatusCode: http.StatusOK, + Header: http.Header{"Content-Type": []string{"application/json"}}, + Body: io.NopCloser(bytes.NewReader(listBody)), + }, nil) + + // WHEN + diags := resourceWorkloadCustomMetricsDataSourceUpdate(ctx, data, provider) + + // THEN + r.Empty(diags) + r.Equal(dsID, data.Id()) + r.Equal("updated-name", data.Get("name")) + r.Equal("CONNECTED", data.Get("status")) +} + +func TestWorkloadCustomMetricsDataSource_Delete(t *testing.T) { + r := require.New(t) + mockctrl := gomock.NewController(t) + mockClient := mock_sdk.NewMockClientInterface(mockctrl) + + ctx := context.Background() + provider := &ProviderConfig{ + api: &sdk.ClientWithResponses{ + ClientInterface: mockClient, + }, + } + + res := resourceWorkloadCustomMetricsDataSource() + + clusterID := "b6bfc074-a267-400f-b8f1-db0850c36gk1" + dsID := "ds-123" + + // GIVEN + val := cty.ObjectVal(map[string]cty.Value{ + FieldClusterID: cty.StringVal(clusterID), + "name": cty.StringVal("test"), + "prometheus": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "url": cty.StringVal("http://prometheus:9090"), + "timeout": cty.StringVal(""), + "presets": cty.ListValEmpty(cty.String), + "metric": cty.SetValEmpty(cty.Object(map[string]cty.Type{"name": cty.String, "query": cty.String})), + }), + }), + "status": cty.StringVal(""), + "kube_resource_name": cty.StringVal(""), + "managed_by_cast": cty.False, + }) + state := terraform.NewInstanceStateShimmedFromValue(val, 0) + state.ID = dsID + data := res.Data(state) + + deleteBody, _ := json.Marshal(map[string]interface{}{}) + mockClient.EXPECT(). + WorkloadOptimizationAPIDeleteCustomMetricsDataSource(ctx, clusterID, dsID). 
+ Return(&http.Response{ + StatusCode: http.StatusOK, + Header: http.Header{"Content-Type": []string{"application/json"}}, + Body: io.NopCloser(bytes.NewReader(deleteBody)), + }, nil) + + // WHEN + diags := resourceWorkloadCustomMetricsDataSourceDelete(ctx, data, provider) + + // THEN + r.Empty(diags) +} + +func TestWorkloadCustomMetricsDataSource_DeleteNotFound(t *testing.T) { + r := require.New(t) + mockctrl := gomock.NewController(t) + mockClient := mock_sdk.NewMockClientInterface(mockctrl) + + ctx := context.Background() + provider := &ProviderConfig{ + api: &sdk.ClientWithResponses{ + ClientInterface: mockClient, + }, + } + + res := resourceWorkloadCustomMetricsDataSource() + + clusterID := "b6bfc074-a267-400f-b8f1-db0850c36gk1" + dsID := "ds-gone" + + // GIVEN + val := cty.ObjectVal(map[string]cty.Value{ + FieldClusterID: cty.StringVal(clusterID), + "name": cty.StringVal("test"), + "prometheus": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "url": cty.StringVal("http://prometheus:9090"), + "timeout": cty.StringVal(""), + "presets": cty.ListValEmpty(cty.String), + "metric": cty.SetValEmpty(cty.Object(map[string]cty.Type{"name": cty.String, "query": cty.String})), + }), + }), + "status": cty.StringVal(""), + "kube_resource_name": cty.StringVal(""), + "managed_by_cast": cty.False, + }) + state := terraform.NewInstanceStateShimmedFromValue(val, 0) + state.ID = dsID + data := res.Data(state) + + mockClient.EXPECT(). + WorkloadOptimizationAPIDeleteCustomMetricsDataSource(ctx, clusterID, dsID). 
+ Return(&http.Response{ + StatusCode: http.StatusNotFound, + Body: io.NopCloser(bytes.NewReader([]byte(`{"message":"not found"}`))), + }, nil) + + // WHEN + diags := resourceWorkloadCustomMetricsDataSourceDelete(ctx, data, provider) + + // THEN + r.Empty(diags) +} + +func TestFlattenPrometheusConfig(t *testing.T) { + tests := map[string]struct { + prom *sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheus + expectedURL string + expectedTimeout string + expectedPresets []string + expectedMetric []interface{} // nil means metric key should be nil + }{ + "minimal config with nil metrics": { + prom: &sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheus{ + DataSource: sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusDataSource{ + Url: "http://prometheus:9090", + }, + }, + expectedURL: "http://prometheus:9090", + expectedTimeout: "", + expectedPresets: []string{}, + expectedMetric: nil, + }, + "with timeout": { + prom: &sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheus{ + DataSource: sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusDataSource{ + Url: "http://prometheus:9090", + Timeout: toPtr("30s"), + }, + }, + expectedURL: "http://prometheus:9090", + expectedTimeout: "30s", + expectedPresets: []string{}, + expectedMetric: nil, + }, + "with presets": { + prom: &sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheus{ + DataSource: sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusDataSource{ + Url: "http://prometheus:9090", + }, + Metrics: &sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetrics{ + Presets: &[]string{"jvm", "kafka"}, + }, + }, + expectedURL: "http://prometheus:9090", + expectedTimeout: "", + expectedPresets: []string{"jvm", "kafka"}, + expectedMetric: nil, + }, + "resolved with only preset metrics": { + prom: &sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheus{ + DataSource: 
sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusDataSource{ + Url: "http://prometheus:9090", + }, + Metrics: &sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetrics{ + Presets: &[]string{"jvm"}, + Resolved: &[]sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetricsResolvedMetric{ + {Name: "jvm_threads", Queries: []sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetricsResolvedMetricQuery{ + {Value: "jvm_threads_current", Origin: sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetricsResolvedMetricQueryOriginPRESET}, + }}, + }, + }, + }, + expectedURL: "http://prometheus:9090", + expectedTimeout: "", + expectedPresets: []string{"jvm"}, + expectedMetric: nil, + }, + "resolved with empty list": { + prom: &sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheus{ + DataSource: sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusDataSource{ + Url: "http://prometheus:9090", + }, + Metrics: &sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetrics{ + Resolved: &[]sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetricsResolvedMetric{}, + }, + }, + expectedURL: "http://prometheus:9090", + expectedTimeout: "", + expectedPresets: []string{}, + expectedMetric: nil, + }, + "resolved with manual metrics": { + prom: &sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheus{ + DataSource: sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusDataSource{ + Url: "http://prometheus:9090", + Timeout: toPtr("15s"), + }, + Metrics: &sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetrics{ + Resolved: &[]sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetricsResolvedMetric{ + {Name: "http_requests_total", Queries: []sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetricsResolvedMetricQuery{ + {Value: "sum(rate(http_requests_total[5m])) by (pod)", Origin: 
sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetricsResolvedMetricQueryOriginMANUAL}, + }}, + {Name: "queue_depth", Queries: []sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetricsResolvedMetricQuery{ + {Value: "avg(queue_depth) by (pod)", Origin: sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetricsResolvedMetricQueryOriginMANUAL}, + }}, + }, + }, + }, + expectedURL: "http://prometheus:9090", + expectedTimeout: "15s", + expectedPresets: []string{}, + expectedMetric: []interface{}{ + map[string]interface{}{"name": "http_requests_total", "query": "sum(rate(http_requests_total[5m])) by (pod)"}, + map[string]interface{}{"name": "queue_depth", "query": "avg(queue_depth) by (pod)"}, + }, + }, + "resolved with mixed preset and manual metrics": { + prom: &sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheus{ + DataSource: sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusDataSource{ + Url: "http://prometheus:9090", + }, + Metrics: &sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetrics{ + Presets: &[]string{"jvm"}, + Resolved: &[]sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetricsResolvedMetric{ + {Name: "jvm_threads", Queries: []sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetricsResolvedMetricQuery{ + {Value: "jvm_threads_current", Origin: sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetricsResolvedMetricQueryOriginPRESET}, + }}, + {Name: "custom_metric", Queries: []sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetricsResolvedMetricQuery{ + {Value: "my_custom_query", Origin: sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetricsResolvedMetricQueryOriginMANUAL}, + }}, + }, + }, + }, + expectedURL: "http://prometheus:9090", + expectedTimeout: "", + expectedPresets: []string{"jvm"}, + expectedMetric: []interface{}{ + map[string]interface{}{"name": "custom_metric", "query": "my_custom_query"}, + 
}, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + r := require.New(t) + + result := flattenPrometheusConfig(tc.prom) + r.Len(result, 1) + + promMap := result[0].(map[string]interface{}) + r.Equal(tc.expectedURL, promMap["url"]) + r.Equal(tc.expectedTimeout, promMap["timeout"]) + r.Equal(tc.expectedPresets, promMap["presets"]) + + if tc.expectedMetric == nil { + r.Nil(promMap["metric"]) + } else { + r.Equal(tc.expectedMetric, promMap["metric"]) + } + }) + } +} + +func TestWorkloadCustomMetricsDataSource_Importer(t *testing.T) { + type testCase struct { + importID string + expectError bool + expectedClusterID string + expectedID string + } + + tests := map[string]testCase{ + "valid import ID": { + importID: "b6bfc074-a267-400f-b8f1-db0850c36gk1/ds-123", + expectedClusterID: "b6bfc074-a267-400f-b8f1-db0850c36gk1", + expectedID: "ds-123", + }, + "missing separator": { + importID: "b6bfc074-a267-400f-b8f1-db0850c36gk1", + expectError: true, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + r := require.New(t) + res := resourceWorkloadCustomMetricsDataSource() + + // GIVEN + state := &terraform.InstanceState{ID: tc.importID} + data := res.Data(state) + + // WHEN + result, err := workloadCustomMetricsDataSourceImporter(context.Background(), data, nil) + + // THEN + if tc.expectError { + r.Error(err) + return + } + r.NoError(err) + r.Len(result, 1) + r.Equal(tc.expectedID, result[0].Id()) + r.Equal(tc.expectedClusterID, result[0].Get(FieldClusterID)) + }) + } +} + +func TestWorkloadCustomMetricsDataSource_ImportReadWithManualMetrics(t *testing.T) { + r := require.New(t) + mockctrl := gomock.NewController(t) + mockClient := mock_sdk.NewMockClientInterface(mockctrl) + + ctx := context.Background() + provider := &ProviderConfig{ + api: &sdk.ClientWithResponses{ + ClientInterface: mockClient, + }, + } + + res := resourceWorkloadCustomMetricsDataSource() + + clusterID := "b6bfc074-a267-400f-b8f1-db0850c36gk1" + dsID 
:= "ds-456" + + // GIVEN — simulate post-import state: cluster_id is set but no prometheus config in state. + val := cty.ObjectVal(map[string]cty.Value{ + FieldClusterID: cty.StringVal(clusterID), + "name": cty.StringVal(""), + "prometheus": cty.ListValEmpty(cty.Object(map[string]cty.Type{ + "url": cty.String, + "timeout": cty.String, + "presets": cty.List(cty.String), + "metric": cty.Set(cty.Object(map[string]cty.Type{ + "name": cty.String, + "query": cty.String, + })), + })), + "status": cty.StringVal(""), + "kube_resource_name": cty.StringVal(""), + "managed_by_cast": cty.False, + }) + state := terraform.NewInstanceStateShimmedFromValue(val, 0) + state.ID = dsID + data := res.Data(state) + + listResponse := sdk.WorkloadoptimizationV1ListCustomMetricsDataSourcesResponse{ + Items: []sdk.WorkloadoptimizationV1CustomMetricsDataSource{ + { + Id: dsID, + ClusterId: clusterID, + Name: "custom-prom", + Type: sdk.PROMETHEUS, + Status: sdk.WorkloadoptimizationV1CustomMetricsDataSourceStatusCONNECTED, + KubeResourceName: "custom-prom", + ManagedByCast: true, + Data: sdk.WorkloadoptimizationV1CustomMetricsDataSourceData{ + Prometheus: &sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheus{ + DataSource: sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusDataSource{ + Url: "http://prometheus:9090", + }, + Metrics: &sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetrics{ + Resolved: &[]sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetricsResolvedMetric{ + {Name: "http_requests_total", Queries: []sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetricsResolvedMetricQuery{ + {Value: "sum(rate(http_requests_total[5m])) by (pod)", Origin: sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetricsResolvedMetricQueryOriginMANUAL}, + }}, + {Name: "queue_depth", Queries: []sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetricsResolvedMetricQuery{ + {Value: "avg(queue_depth) by (pod)", 
Origin: sdk.WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetricsResolvedMetricQueryOriginMANUAL}, + }}, + }, + }, + }, + }, + }, + }, + } + listBody, _ := json.Marshal(listResponse) + mockClient.EXPECT(). + WorkloadOptimizationAPIListCustomMetricsDataSources(ctx, clusterID). + Return(&http.Response{ + StatusCode: http.StatusOK, + Header: http.Header{"Content-Type": []string{"application/json"}}, + Body: io.NopCloser(bytes.NewReader(listBody)), + }, nil) + + // WHEN + diags := resourceWorkloadCustomMetricsDataSourceRead(ctx, data, provider) + + // THEN + r.Empty(diags) + r.Equal("custom-prom", data.Get("name")) + + promList := data.Get("prometheus").([]interface{}) + r.Len(promList, 1) + promMap := promList[0].(map[string]interface{}) + r.Equal("http://prometheus:9090", promMap["url"]) + + metricSet := promMap["metric"].(*schema.Set) + r.Equal(2, metricSet.Len()) + + metricsByName := map[string]string{} + for _, m := range metricSet.List() { + mm := m.(map[string]interface{}) + metricsByName[mm["name"].(string)] = mm["query"].(string) + } + r.Equal("sum(rate(http_requests_total[5m])) by (pod)", metricsByName["http_requests_total"]) + r.Equal("avg(queue_depth) by (pod)", metricsByName["queue_depth"]) +} + diff --git a/castai/sdk/api.gen.go b/castai/sdk/api.gen.go index 91f5839c..c300b9f6 100644 --- a/castai/sdk/api.gen.go +++ b/castai/sdk/api.gen.go @@ -712,6 +712,20 @@ const ( WorkloadoptimizationV1CustomMetricUnitTHREADS WorkloadoptimizationV1CustomMetricUnit = "THREADS" ) +// Defines values for WorkloadoptimizationV1CustomMetricsDataSourceDataNodeWorkloadMetricsResolvedMetricOrigin. 
+const ( + WorkloadoptimizationV1CustomMetricsDataSourceDataNodeWorkloadMetricsResolvedMetricOriginMANUAL WorkloadoptimizationV1CustomMetricsDataSourceDataNodeWorkloadMetricsResolvedMetricOrigin = "MANUAL" + WorkloadoptimizationV1CustomMetricsDataSourceDataNodeWorkloadMetricsResolvedMetricOriginORIGINUNSPECIFIED WorkloadoptimizationV1CustomMetricsDataSourceDataNodeWorkloadMetricsResolvedMetricOrigin = "ORIGIN_UNSPECIFIED" + WorkloadoptimizationV1CustomMetricsDataSourceDataNodeWorkloadMetricsResolvedMetricOriginPRESET WorkloadoptimizationV1CustomMetricsDataSourceDataNodeWorkloadMetricsResolvedMetricOrigin = "PRESET" +) + +// Defines values for WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetricsResolvedMetricQueryOrigin. +const ( + WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetricsResolvedMetricQueryOriginMANUAL WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetricsResolvedMetricQueryOrigin = "MANUAL" + WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetricsResolvedMetricQueryOriginORIGINUNSPECIFIED WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetricsResolvedMetricQueryOrigin = "ORIGIN_UNSPECIFIED" + WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetricsResolvedMetricQueryOriginPRESET WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetricsResolvedMetricQueryOrigin = "PRESET" +) + // Defines values for WorkloadoptimizationV1CustomMetricsDataSourceStatus. const ( WorkloadoptimizationV1CustomMetricsDataSourceStatusCONNECTED WorkloadoptimizationV1CustomMetricsDataSourceStatus = "CONNECTED" @@ -8672,17 +8686,37 @@ type WorkloadoptimizationV1CustomMetricsDataSourceDataNodeWorkloadDataSource str // WorkloadoptimizationV1CustomMetricsDataSourceDataNodeWorkloadMetrics defines model for workloadoptimization.v1.CustomMetricsDataSource.Data.NodeWorkload.Metrics. 
type WorkloadoptimizationV1CustomMetricsDataSourceDataNodeWorkloadMetrics struct { - Presets *[]string `json:"presets,omitempty"` + // Presets Preset names used to resolve metrics. + Presets *[]string `json:"presets,omitempty"` + + // Resolved All resolved metrics from presets and manual metrics combined, with origin information. + Resolved *[]WorkloadoptimizationV1CustomMetricsDataSourceDataNodeWorkloadMetricsResolvedMetric `json:"resolved,omitempty"` + + // ResolvedMetrics Deprecated: use `resolved` field instead. + // Deprecated: ResolvedMetrics *[]WorkloadoptimizationV1CustomMetricsDataSourceDataNodeWorkloadMetricsMetric `json:"resolvedMetrics,omitempty"` } -// WorkloadoptimizationV1CustomMetricsDataSourceDataNodeWorkloadMetricsMetric defines model for workloadoptimization.v1.CustomMetricsDataSource.Data.NodeWorkload.Metrics.Metric. +// WorkloadoptimizationV1CustomMetricsDataSourceDataNodeWorkloadMetricsMetric Deprecated: use `resolved` field instead. type WorkloadoptimizationV1CustomMetricsDataSourceDataNodeWorkloadMetricsMetric struct { Errors []string `json:"errors"` Name string `json:"name"` Warnings []string `json:"warnings"` } +// WorkloadoptimizationV1CustomMetricsDataSourceDataNodeWorkloadMetricsResolvedMetric ResolvedMetric represents a fully resolved metric with its origin. +type WorkloadoptimizationV1CustomMetricsDataSourceDataNodeWorkloadMetricsResolvedMetric struct { + Errors *[]string `json:"errors,omitempty"` + Name string `json:"name"` + + // Origin Origin indicates whether this metric was manually defined or resolved from a preset. + Origin WorkloadoptimizationV1CustomMetricsDataSourceDataNodeWorkloadMetricsResolvedMetricOrigin `json:"origin"` + Warnings *[]string `json:"warnings,omitempty"` +} + +// WorkloadoptimizationV1CustomMetricsDataSourceDataNodeWorkloadMetricsResolvedMetricOrigin Origin indicates whether this metric was manually defined or resolved from a preset. 
+type WorkloadoptimizationV1CustomMetricsDataSourceDataNodeWorkloadMetricsResolvedMetricOrigin string + // WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheus defines model for workloadoptimization.v1.CustomMetricsDataSource.Data.Prometheus. type WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheus struct { DataSource WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusDataSource `json:"dataSource"` @@ -8697,11 +8731,18 @@ type WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusDataSource struc // WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetrics defines model for workloadoptimization.v1.CustomMetricsDataSource.Data.Prometheus.Metrics. type WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetrics struct { - Presets *[]string `json:"presets,omitempty"` + // Presets Preset names used to resolve metrics. + Presets *[]string `json:"presets,omitempty"` + + // Resolved All resolved metrics from presets and manual metrics combined, with origin information. + Resolved *[]WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetricsResolvedMetric `json:"resolved,omitempty"` + + // ResolvedMetrics Deprecated: use `resolved` field instead. + // Deprecated: ResolvedMetrics *[]WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetricsMetric `json:"resolvedMetrics,omitempty"` } -// WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetricsMetric defines model for workloadoptimization.v1.CustomMetricsDataSource.Data.Prometheus.Metrics.Metric. +// WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetricsMetric Deprecated: use `ResolvedMetric` instead. 
type WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetricsMetric struct { Errors []string `json:"errors"` Name string `json:"name"` @@ -8709,6 +8750,24 @@ type WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetricsMetric st Warnings []string `json:"warnings"` } +// WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetricsResolvedMetric ResolvedMetric represents a fully resolved metric with origin-tagged queries. +type WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetricsResolvedMetric struct { + Errors *[]string `json:"errors,omitempty"` + Name string `json:"name"` + Queries []WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetricsResolvedMetricQuery `json:"queries"` + Warnings *[]string `json:"warnings,omitempty"` +} + +// WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetricsResolvedMetricQuery Query represents a single Prometheus query with its origin. +type WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetricsResolvedMetricQuery struct { + // Origin Origin indicates whether this query was manually defined or resolved from a preset. + Origin WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetricsResolvedMetricQueryOrigin `json:"origin"` + Value string `json:"value"` +} + +// WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetricsResolvedMetricQueryOrigin Origin indicates whether this query was manually defined or resolved from a preset. +type WorkloadoptimizationV1CustomMetricsDataSourceDataPrometheusMetricsResolvedMetricQueryOrigin string + // WorkloadoptimizationV1CustomMetricsDataSourceStatus Status represents the synchronization status of the custom metrics data source. It indicates whether the data // source is currently being synchronized, has been successfully synchronized, is in the process of syncing, or has // failed to sync. 
@@ -8718,6 +8777,36 @@ type WorkloadoptimizationV1CustomMetricsDataSourceStatus string // For each type, exactly one of the data fields should be populated. type WorkloadoptimizationV1CustomMetricsDataSourceType string +// WorkloadoptimizationV1CustomMetricsDataSourceInput CustomMetricsDataSourceInput is the input data type for Create and Update operations on custom metrics data sources. +type WorkloadoptimizationV1CustomMetricsDataSourceInput struct { + Prometheus *WorkloadoptimizationV1CustomMetricsDataSourceInputPrometheus `json:"prometheus,omitempty"` +} + +// WorkloadoptimizationV1CustomMetricsDataSourceInputPrometheus defines model for workloadoptimization.v1.CustomMetricsDataSourceInput.Prometheus. +type WorkloadoptimizationV1CustomMetricsDataSourceInputPrometheus struct { + DataSource WorkloadoptimizationV1CustomMetricsDataSourceInputPrometheusDataSource `json:"dataSource"` + Metrics *WorkloadoptimizationV1CustomMetricsDataSourceInputPrometheusMetrics `json:"metrics,omitempty"` +} + +// WorkloadoptimizationV1CustomMetricsDataSourceInputPrometheusDataSource defines model for workloadoptimization.v1.CustomMetricsDataSourceInput.Prometheus.DataSource. +type WorkloadoptimizationV1CustomMetricsDataSourceInputPrometheusDataSource struct { + Timeout *string `json:"timeout,omitempty"` + Url string `json:"url"` +} + +// WorkloadoptimizationV1CustomMetricsDataSourceInputPrometheusMetric Metric defines a single Prometheus metric query. Multiple entries with the same name +// represent multiple queries for a single metric. +type WorkloadoptimizationV1CustomMetricsDataSourceInputPrometheusMetric struct { + Name string `json:"name"` + Query string `json:"query"` +} + +// WorkloadoptimizationV1CustomMetricsDataSourceInputPrometheusMetrics defines model for workloadoptimization.v1.CustomMetricsDataSourceInput.Prometheus.Metrics. 
+type WorkloadoptimizationV1CustomMetricsDataSourceInputPrometheusMetrics struct { + Manual *[]WorkloadoptimizationV1CustomMetricsDataSourceInputPrometheusMetric `json:"manual,omitempty"` + Presets *[]string `json:"presets,omitempty"` +} + // WorkloadoptimizationV1DeleteWorkloadScalingPolicyResponse defines model for workloadoptimization.v1.DeleteWorkloadScalingPolicyResponse. type WorkloadoptimizationV1DeleteWorkloadScalingPolicyResponse = map[string]interface{} @@ -9550,8 +9639,9 @@ type WorkloadoptimizationV1NativeVPAStateChangedEvent struct { // WorkloadoptimizationV1NewCustomMetricsDataSource defines model for workloadoptimization.v1.NewCustomMetricsDataSource. type WorkloadoptimizationV1NewCustomMetricsDataSource struct { - Data WorkloadoptimizationV1CustomMetricsDataSourceData `json:"data"` - Name string `json:"name"` + // Data CustomMetricsDataSourceInput is the input data type for Create and Update operations on custom metrics data sources. + Data WorkloadoptimizationV1CustomMetricsDataSourceInput `json:"data"` + Name string `json:"name"` // Type Type defines the type of custom metrics data source. Respective Data field will be populated based on the type. // For each type, exactly one of the data fields should be populated. @@ -10291,8 +10381,9 @@ type WorkloadoptimizationV1UnboundMemoryGrowthEvent = map[string]interface{} // WorkloadoptimizationV1UpdateCustomMetricsDataSource defines model for workloadoptimization.v1.UpdateCustomMetricsDataSource. type WorkloadoptimizationV1UpdateCustomMetricsDataSource struct { - Data *WorkloadoptimizationV1CustomMetricsDataSourceData `json:"data,omitempty"` - Name *string `json:"name,omitempty"` + // Data CustomMetricsDataSourceInput is the input data type for Create and Update operations on custom metrics data sources. 
+ Data *WorkloadoptimizationV1CustomMetricsDataSourceInput `json:"data,omitempty"` + Name *string `json:"name,omitempty"` } // WorkloadoptimizationV1UpdateWorkloadResponseV2 defines model for workloadoptimization.v1.UpdateWorkloadResponseV2. diff --git a/castai/sdk/omni/api.gen.go b/castai/sdk/omni/api.gen.go index a7dfc310..851e46a5 100644 --- a/castai/sdk/omni/api.gen.go +++ b/castai/sdk/omni/api.gen.go @@ -350,6 +350,9 @@ type ListEdgeLocationsResponse struct { // OCIParam Message that represents OCI location specific parameters. type OCIParam struct { + // Client OCI WIF client configuration for token exchange. + Client *OCIParamClient `json:"client,omitempty"` + // CompartmentId OCI compartment id of edge location. CompartmentId *string `json:"compartmentId,omitempty"` @@ -363,6 +366,18 @@ type OCIParam struct { TenancyId *string `json:"tenancyId,omitempty"` } +// OCIParamClient OCI WIF client configuration for token exchange. +type OCIParamClient struct { + // Id ID of the OCI confidential application. + Id string `json:"id"` + + // IdentityDomainUri OCI Identity Domain URL (e.g., "idcs-xxxx.identity.oraclecloud.com"). + IdentityDomainUri string `json:"identityDomainUri"` + + // Secret Secret of the OCI confidential application. + Secret *string `json:"secret,omitempty"` +} + // OCIParamCredentials OCI credentials. type OCIParamCredentials struct { // Fingerprint API key fingerprint; @@ -426,6 +441,21 @@ type OnboardEdgeLocationResponse struct { // RegisterClusterResponse Response message of register cluster. type RegisterClusterResponse = map[string]interface{} +// RegisteredCluster Cluster information to be registered. +type RegisteredCluster struct { + // Status Current status information. + Status *RegisteredClusterStatus `json:"status,omitempty"` +} + +// RegisteredClusterStatus Current status information of the cluster. +type RegisteredClusterStatus struct { + // OmniAgentVersion The version of omni agent running on the cluster. 
+ OmniAgentVersion string `json:"omniAgentVersion"` + + // PodCidr The pod CIDR of the cluster. + PodCidr string `json:"podCidr"` +} + // ReportStatusRequest Message to submit object status. type ReportStatusRequest struct { // Cluster The status of the cluster. @@ -446,6 +476,9 @@ type ReportStatusRequest struct { // ReportStatusRequestCluster Cluster object. type ReportStatusRequestCluster struct { + // AgentVersion The omni-agent version. + AgentVersion *string `json:"agentVersion,omitempty"` + // ExternalCidr The external CIDR. ExternalCidr *string `json:"externalCidr,omitempty"` @@ -539,6 +572,9 @@ type EdgeLocationsAPICreateEdgeLocationJSONRequestBody = EdgeLocation // EdgeLocationsAPIUpdateEdgeLocationJSONRequestBody defines body for EdgeLocationsAPIUpdateEdgeLocation for application/json ContentType. type EdgeLocationsAPIUpdateEdgeLocationJSONRequestBody = EdgeLocationUpdate +// ClustersAPIRegisterClusterJSONRequestBody defines body for ClustersAPIRegisterCluster for application/json ContentType. +type ClustersAPIRegisterClusterJSONRequestBody = RegisteredCluster + // ClustersAPIReportStatusJSONRequestBody defines body for ClustersAPIReportStatus for application/json ContentType. 
type ClustersAPIReportStatusJSONRequestBody = ReportStatusRequest diff --git a/castai/sdk/omni/client.gen.go b/castai/sdk/omni/client.gen.go index 5eea1aa5..828eb668 100644 --- a/castai/sdk/omni/client.gen.go +++ b/castai/sdk/omni/client.gen.go @@ -139,8 +139,10 @@ type ClientInterface interface { // ClustersAPIOnboardClusterScript request ClustersAPIOnboardClusterScript(ctx context.Context, organizationId string, id string, reqEditors ...RequestEditorFn) (*http.Response, error) - // ClustersAPIRegisterCluster request - ClustersAPIRegisterCluster(ctx context.Context, organizationId string, id string, reqEditors ...RequestEditorFn) (*http.Response, error) + // ClustersAPIRegisterClusterWithBody request with any body + ClustersAPIRegisterClusterWithBody(ctx context.Context, organizationId string, id string, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) + + ClustersAPIRegisterCluster(ctx context.Context, organizationId string, id string, body ClustersAPIRegisterClusterJSONRequestBody, reqEditors ...RequestEditorFn) (*http.Response, error) // ClustersAPIReportStatusWithBody request with any body ClustersAPIReportStatusWithBody(ctx context.Context, organizationId string, id string, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) @@ -352,8 +354,20 @@ func (c *Client) ClustersAPIOnboardClusterScript(ctx context.Context, organizati return c.Client.Do(req) } -func (c *Client) ClustersAPIRegisterCluster(ctx context.Context, organizationId string, id string, reqEditors ...RequestEditorFn) (*http.Response, error) { - req, err := NewClustersAPIRegisterClusterRequest(c.Server, organizationId, id) +func (c *Client) ClustersAPIRegisterClusterWithBody(ctx context.Context, organizationId string, id string, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewClustersAPIRegisterClusterRequestWithBody(c.Server, organizationId, id, 
contentType, body) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) ClustersAPIRegisterCluster(ctx context.Context, organizationId string, id string, body ClustersAPIRegisterClusterJSONRequestBody, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewClustersAPIRegisterClusterRequest(c.Server, organizationId, id, body) if err != nil { return nil, err } @@ -1214,8 +1228,19 @@ func NewClustersAPIOnboardClusterScriptRequest(server string, organizationId str return req, nil } -// NewClustersAPIRegisterClusterRequest generates requests for ClustersAPIRegisterCluster -func NewClustersAPIRegisterClusterRequest(server string, organizationId string, id string) (*http.Request, error) { +// NewClustersAPIRegisterClusterRequest calls the generic ClustersAPIRegisterCluster builder with application/json body +func NewClustersAPIRegisterClusterRequest(server string, organizationId string, id string, body ClustersAPIRegisterClusterJSONRequestBody) (*http.Request, error) { + var bodyReader io.Reader + buf, err := json.Marshal(body) + if err != nil { + return nil, err + } + bodyReader = bytes.NewReader(buf) + return NewClustersAPIRegisterClusterRequestWithBody(server, organizationId, id, "application/json", bodyReader) +} + +// NewClustersAPIRegisterClusterRequestWithBody generates requests for ClustersAPIRegisterCluster with any type of body +func NewClustersAPIRegisterClusterRequestWithBody(server string, organizationId string, id string, contentType string, body io.Reader) (*http.Request, error) { var err error var pathParam0 string @@ -1247,11 +1272,13 @@ func NewClustersAPIRegisterClusterRequest(server string, organizationId string, return nil, err } - req, err := http.NewRequest("POST", queryURL.String(), nil) + req, err := http.NewRequest("POST", queryURL.String(), body) if err != nil { return nil, err } 
+ req.Header.Add("Content-Type", contentType) + return req, nil } @@ -1402,8 +1429,10 @@ type ClientWithResponsesInterface interface { // ClustersAPIOnboardClusterScript request ClustersAPIOnboardClusterScriptWithResponse(ctx context.Context, organizationId string, id string) (*ClustersAPIOnboardClusterScriptResponse, error) - // ClustersAPIRegisterCluster request - ClustersAPIRegisterClusterWithResponse(ctx context.Context, organizationId string, id string) (*ClustersAPIRegisterClusterResponse, error) + // ClustersAPIRegisterCluster request with any body + ClustersAPIRegisterClusterWithBodyWithResponse(ctx context.Context, organizationId string, id string, contentType string, body io.Reader) (*ClustersAPIRegisterClusterResponse, error) + + ClustersAPIRegisterClusterWithResponse(ctx context.Context, organizationId string, id string, body ClustersAPIRegisterClusterJSONRequestBody) (*ClustersAPIRegisterClusterResponse, error) // ClustersAPIReportStatus request with any body ClustersAPIReportStatusWithBodyWithResponse(ctx context.Context, organizationId string, id string, contentType string, body io.Reader) (*ClustersAPIReportStatusResponse, error) @@ -2092,9 +2121,17 @@ func (c *ClientWithResponses) ClustersAPIOnboardClusterScriptWithResponse(ctx co return ParseClustersAPIOnboardClusterScriptResponse(rsp) } -// ClustersAPIRegisterClusterWithResponse request returning *ClustersAPIRegisterClusterResponse -func (c *ClientWithResponses) ClustersAPIRegisterClusterWithResponse(ctx context.Context, organizationId string, id string) (*ClustersAPIRegisterClusterResponse, error) { - rsp, err := c.ClustersAPIRegisterCluster(ctx, organizationId, id) +// ClustersAPIRegisterClusterWithBodyWithResponse request with arbitrary body returning *ClustersAPIRegisterClusterResponse +func (c *ClientWithResponses) ClustersAPIRegisterClusterWithBodyWithResponse(ctx context.Context, organizationId string, id string, contentType string, body io.Reader) (*ClustersAPIRegisterClusterResponse, 
error) { + rsp, err := c.ClustersAPIRegisterClusterWithBody(ctx, organizationId, id, contentType, body) + if err != nil { + return nil, err + } + return ParseClustersAPIRegisterClusterResponse(rsp) +} + +func (c *ClientWithResponses) ClustersAPIRegisterClusterWithResponse(ctx context.Context, organizationId string, id string, body ClustersAPIRegisterClusterJSONRequestBody) (*ClustersAPIRegisterClusterResponse, error) { + rsp, err := c.ClustersAPIRegisterCluster(ctx, organizationId, id, body) if err != nil { return nil, err } diff --git a/castai/sdk/omni/mock/client.go b/castai/sdk/omni/mock/client.go index d44083cc..215c8224 100644 --- a/castai/sdk/omni/mock/client.go +++ b/castai/sdk/omni/mock/client.go @@ -176,9 +176,9 @@ func (mr *MockClientInterfaceMockRecorder) ClustersAPIOnboardClusterScript(ctx, } // ClustersAPIRegisterCluster mocks base method. -func (m *MockClientInterface) ClustersAPIRegisterCluster(ctx context.Context, organizationId, id string, reqEditors ...omni.RequestEditorFn) (*http.Response, error) { +func (m *MockClientInterface) ClustersAPIRegisterCluster(ctx context.Context, organizationId, id string, body omni.ClustersAPIRegisterClusterJSONRequestBody, reqEditors ...omni.RequestEditorFn) (*http.Response, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, organizationId, id} + varargs := []interface{}{ctx, organizationId, id, body} for _, a := range reqEditors { varargs = append(varargs, a) } @@ -189,12 +189,32 @@ func (m *MockClientInterface) ClustersAPIRegisterCluster(ctx context.Context, or } // ClustersAPIRegisterCluster indicates an expected call of ClustersAPIRegisterCluster. 
-func (mr *MockClientInterfaceMockRecorder) ClustersAPIRegisterCluster(ctx, organizationId, id interface{}, reqEditors ...interface{}) *gomock.Call { +func (mr *MockClientInterfaceMockRecorder) ClustersAPIRegisterCluster(ctx, organizationId, id, body interface{}, reqEditors ...interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, organizationId, id}, reqEditors...) + varargs := append([]interface{}{ctx, organizationId, id, body}, reqEditors...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClustersAPIRegisterCluster", reflect.TypeOf((*MockClientInterface)(nil).ClustersAPIRegisterCluster), varargs...) } +// ClustersAPIRegisterClusterWithBody mocks base method. +func (m *MockClientInterface) ClustersAPIRegisterClusterWithBody(ctx context.Context, organizationId, id, contentType string, body io.Reader, reqEditors ...omni.RequestEditorFn) (*http.Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, organizationId, id, contentType, body} + for _, a := range reqEditors { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ClustersAPIRegisterClusterWithBody", varargs...) + ret0, _ := ret[0].(*http.Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClustersAPIRegisterClusterWithBody indicates an expected call of ClustersAPIRegisterClusterWithBody. +func (mr *MockClientInterfaceMockRecorder) ClustersAPIRegisterClusterWithBody(ctx, organizationId, id, contentType, body interface{}, reqEditors ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, organizationId, id, contentType, body}, reqEditors...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClustersAPIRegisterClusterWithBody", reflect.TypeOf((*MockClientInterface)(nil).ClustersAPIRegisterClusterWithBody), varargs...) +} + // ClustersAPIReportStatus mocks base method. 
func (m *MockClientInterface) ClustersAPIReportStatus(ctx context.Context, organizationId, id string, body omni.ClustersAPIReportStatusJSONRequestBody, reqEditors ...omni.RequestEditorFn) (*http.Response, error) { m.ctrl.T.Helper() @@ -674,9 +694,9 @@ func (mr *MockClientWithResponsesInterfaceMockRecorder) ClustersAPIOnboardCluste } // ClustersAPIRegisterCluster mocks base method. -func (m *MockClientWithResponsesInterface) ClustersAPIRegisterCluster(ctx context.Context, organizationId, id string, reqEditors ...omni.RequestEditorFn) (*http.Response, error) { +func (m *MockClientWithResponsesInterface) ClustersAPIRegisterCluster(ctx context.Context, organizationId, id string, body omni.ClustersAPIRegisterClusterJSONRequestBody, reqEditors ...omni.RequestEditorFn) (*http.Response, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, organizationId, id} + varargs := []interface{}{ctx, organizationId, id, body} for _, a := range reqEditors { varargs = append(varargs, a) } @@ -687,25 +707,60 @@ func (m *MockClientWithResponsesInterface) ClustersAPIRegisterCluster(ctx contex } // ClustersAPIRegisterCluster indicates an expected call of ClustersAPIRegisterCluster. -func (mr *MockClientWithResponsesInterfaceMockRecorder) ClustersAPIRegisterCluster(ctx, organizationId, id interface{}, reqEditors ...interface{}) *gomock.Call { +func (mr *MockClientWithResponsesInterfaceMockRecorder) ClustersAPIRegisterCluster(ctx, organizationId, id, body interface{}, reqEditors ...interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, organizationId, id}, reqEditors...) + varargs := append([]interface{}{ctx, organizationId, id, body}, reqEditors...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClustersAPIRegisterCluster", reflect.TypeOf((*MockClientWithResponsesInterface)(nil).ClustersAPIRegisterCluster), varargs...) } +// ClustersAPIRegisterClusterWithBody mocks base method. 
+func (m *MockClientWithResponsesInterface) ClustersAPIRegisterClusterWithBody(ctx context.Context, organizationId, id, contentType string, body io.Reader, reqEditors ...omni.RequestEditorFn) (*http.Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, organizationId, id, contentType, body} + for _, a := range reqEditors { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ClustersAPIRegisterClusterWithBody", varargs...) + ret0, _ := ret[0].(*http.Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClustersAPIRegisterClusterWithBody indicates an expected call of ClustersAPIRegisterClusterWithBody. +func (mr *MockClientWithResponsesInterfaceMockRecorder) ClustersAPIRegisterClusterWithBody(ctx, organizationId, id, contentType, body interface{}, reqEditors ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, organizationId, id, contentType, body}, reqEditors...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClustersAPIRegisterClusterWithBody", reflect.TypeOf((*MockClientWithResponsesInterface)(nil).ClustersAPIRegisterClusterWithBody), varargs...) +} + +// ClustersAPIRegisterClusterWithBodyWithResponse mocks base method. +func (m *MockClientWithResponsesInterface) ClustersAPIRegisterClusterWithBodyWithResponse(ctx context.Context, organizationId, id, contentType string, body io.Reader) (*omni.ClustersAPIRegisterClusterResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClustersAPIRegisterClusterWithBodyWithResponse", ctx, organizationId, id, contentType, body) + ret0, _ := ret[0].(*omni.ClustersAPIRegisterClusterResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClustersAPIRegisterClusterWithBodyWithResponse indicates an expected call of ClustersAPIRegisterClusterWithBodyWithResponse. 
+func (mr *MockClientWithResponsesInterfaceMockRecorder) ClustersAPIRegisterClusterWithBodyWithResponse(ctx, organizationId, id, contentType, body interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClustersAPIRegisterClusterWithBodyWithResponse", reflect.TypeOf((*MockClientWithResponsesInterface)(nil).ClustersAPIRegisterClusterWithBodyWithResponse), ctx, organizationId, id, contentType, body) +} + // ClustersAPIRegisterClusterWithResponse mocks base method. -func (m *MockClientWithResponsesInterface) ClustersAPIRegisterClusterWithResponse(ctx context.Context, organizationId, id string) (*omni.ClustersAPIRegisterClusterResponse, error) { +func (m *MockClientWithResponsesInterface) ClustersAPIRegisterClusterWithResponse(ctx context.Context, organizationId, id string, body omni.ClustersAPIRegisterClusterJSONRequestBody) (*omni.ClustersAPIRegisterClusterResponse, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClustersAPIRegisterClusterWithResponse", ctx, organizationId, id) + ret := m.ctrl.Call(m, "ClustersAPIRegisterClusterWithResponse", ctx, organizationId, id, body) ret0, _ := ret[0].(*omni.ClustersAPIRegisterClusterResponse) ret1, _ := ret[1].(error) return ret0, ret1 } // ClustersAPIRegisterClusterWithResponse indicates an expected call of ClustersAPIRegisterClusterWithResponse. 
-func (mr *MockClientWithResponsesInterfaceMockRecorder) ClustersAPIRegisterClusterWithResponse(ctx, organizationId, id interface{}) *gomock.Call { +func (mr *MockClientWithResponsesInterfaceMockRecorder) ClustersAPIRegisterClusterWithResponse(ctx, organizationId, id, body interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClustersAPIRegisterClusterWithResponse", reflect.TypeOf((*MockClientWithResponsesInterface)(nil).ClustersAPIRegisterClusterWithResponse), ctx, organizationId, id) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClustersAPIRegisterClusterWithResponse", reflect.TypeOf((*MockClientWithResponsesInterface)(nil).ClustersAPIRegisterClusterWithResponse), ctx, organizationId, id, body) } // ClustersAPIReportStatus mocks base method. diff --git a/docs/resources/workload_custom_metrics_data_source.md b/docs/resources/workload_custom_metrics_data_source.md new file mode 100644 index 00000000..ae43c7b1 --- /dev/null +++ b/docs/resources/workload_custom_metrics_data_source.md @@ -0,0 +1,108 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "castai_workload_custom_metrics_data_source Resource - terraform-provider-castai" +subcategory: "" +description: |- + Manages a CAST AI workload custom metrics data source. Custom metrics data sources allow CAST AI to collect and use non-standard metrics for workload optimization. +--- + +# castai_workload_custom_metrics_data_source (Resource) + +Manages a CAST AI workload custom metrics data source. Custom metrics data sources allow CAST AI to collect and use non-standard metrics for workload optimization. + +## Example Usage + +```terraform +# Recommended: Use presets for standard metrics (managed by CAST AI). +# Presets provide curated metric definitions that are kept up to date automatically. +# Currently available: "jvm". More presets may be added in the future. 
+resource "castai_workload_custom_metrics_data_source" "prometheus" { + cluster_id = castai_eks_cluster.this.id + + name = "my-prometheus" + + prometheus { + url = "http://prometheus-server.monitoring.svc.cluster.local:9090" + timeout = "30s" + + presets = ["jvm"] + } +} + +# Advanced: Define custom metrics manually with PromQL queries. +# This can be combined with presets if needed. +resource "castai_workload_custom_metrics_data_source" "prometheus_custom" { + cluster_id = castai_eks_cluster.this.id + + name = "my-prometheus-custom" + + prometheus { + url = "http://prometheus-server.monitoring.svc.cluster.local:9090" + + metric { + name = "http_requests_total" + query = "sum(rate(http_requests_total[5m])) by (pod)" + } + + metric { + name = "custom_queue_depth" + query = "avg(queue_depth) by (pod)" + } + } +} +``` + + +## Schema + +### Required + +- `cluster_id` (String) CAST AI cluster ID. +- `name` (String) Name of the custom metrics data source (1-63 characters). +- `prometheus` (Block List, Min: 1, Max: 1) Prometheus data source configuration. (see [below for nested schema](#nestedblock--prometheus)) + +### Optional + +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `id` (String) The ID of this resource. +- `kube_resource_name` (String) Name of the corresponding Kubernetes resource. +- `managed_by_cast` (Boolean) Whether the data source is managed by CAST AI. +- `status` (String) Synchronization status of the data source (CONNECTING, CONNECTED, SYNCING, FAILED). + + +### Nested Schema for `prometheus` + +Required: + +- `url` (String) URL of the Prometheus server. + +Optional: + +- `metric` (Block Set) Manually defined metrics. Use this for advanced use cases where presets don't cover your needs. Each entry defines a single metric name and PromQL query. To specify multiple queries for the same metric, use multiple entries with the same name. 
(see [below for nested schema](#nestedblock--prometheus--metric)) +- `presets` (List of String) List of metric presets managed by CAST AI. Presets provide curated metric definitions that are kept up to date automatically. This is the recommended approach for most users. Currently available: "jvm". +- `timeout` (String) Timeout for Prometheus queries (e.g. "30s"). + + +### Nested Schema for `prometheus.metric` + +Required: + +- `name` (String) Name of the metric. +- `query` (String) PromQL query for this metric. + + + + +### Nested Schema for `timeouts` + +Optional: + +- `create` (String) +- `delete` (String) +- `read` (String) +- `update` (String) + + diff --git a/examples/resources/castai_workload_custom_metrics_data_source/resource.tf b/examples/resources/castai_workload_custom_metrics_data_source/resource.tf new file mode 100644 index 00000000..796600ac --- /dev/null +++ b/examples/resources/castai_workload_custom_metrics_data_source/resource.tf @@ -0,0 +1,37 @@ +# Recommended: Use presets for standard metrics (managed by CAST AI). +# Presets provide curated metric definitions that are kept up to date automatically. +# Currently available: "jvm". More presets may be added in the future. +resource "castai_workload_custom_metrics_data_source" "prometheus" { + cluster_id = castai_eks_cluster.this.id + + name = "my-prometheus" + + prometheus { + url = "http://prometheus-server.monitoring.svc.cluster.local:9090" + timeout = "30s" + + presets = ["jvm"] + } +} + +# Advanced: Define custom metrics manually with PromQL queries. +# This can be combined with presets if needed. 
+resource "castai_workload_custom_metrics_data_source" "prometheus_custom" { + cluster_id = castai_eks_cluster.this.id + + name = "my-prometheus-custom" + + prometheus { + url = "http://prometheus-server.monitoring.svc.cluster.local:9090" + + metric { + name = "http_requests_total" + query = "sum(rate(http_requests_total[5m])) by (pod)" + } + + metric { + name = "custom_queue_depth" + query = "avg(queue_depth) by (pod)" + } + } +}