diff --git a/Makefile b/Makefile index 8cc65ce0e..94b3d40f4 100644 --- a/Makefile +++ b/Makefile @@ -11,7 +11,7 @@ PROJECT_NAME := provider-$(PROVIDER_NAME) PROJECT_REPO := github.com/upbound/$(PROJECT_NAME) export TERRAFORM_VERSION := 1.5.5 -export TERRAFORM_PROVIDER_VERSION := 5.45.0 +export TERRAFORM_PROVIDER_VERSION := 6.16.0 export TERRAFORM_PROVIDER_SOURCE := hashicorp/google export TERRAFORM_PROVIDER_REPO ?= https://github.com/hashicorp/terraform-provider-google export TERRAFORM_DOCS_PATH ?= website/docs/r diff --git a/apis/accesscontextmanager/v1beta2/zz_accesslevel_types.go b/apis/accesscontextmanager/v1beta2/zz_accesslevel_types.go index 9cfb05408..ef2704a1f 100755 --- a/apis/accesscontextmanager/v1beta2/zz_accesslevel_types.go +++ b/apis/accesscontextmanager/v1beta2/zz_accesslevel_types.go @@ -555,7 +555,7 @@ type VPCSubnetworkInitParameters struct { // Required. Network name to be allowed by this Access Level. Networks of foreign organizations requires compute.network.get permission to be granted to caller. Network *string `json:"network,omitempty" tf:"network,omitempty"` - // CIDR block IP subnetwork specification. Must be IPv4. + // A list of CIDR block IP subnetwork specification. Must be IPv4. VPCIPSubnetworks []*string `json:"vpcIpSubnetworks,omitempty" tf:"vpc_ip_subnetworks,omitempty"` } @@ -564,7 +564,7 @@ type VPCSubnetworkObservation struct { // Required. Network name to be allowed by this Access Level. Networks of foreign organizations requires compute.network.get permission to be granted to caller. Network *string `json:"network,omitempty" tf:"network,omitempty"` - // CIDR block IP subnetwork specification. Must be IPv4. + // A list of CIDR block IP subnetwork specification. Must be IPv4. 
VPCIPSubnetworks []*string `json:"vpcIpSubnetworks,omitempty" tf:"vpc_ip_subnetworks,omitempty"` } @@ -574,7 +574,7 @@ type VPCSubnetworkParameters struct { // +kubebuilder:validation:Optional Network *string `json:"network" tf:"network,omitempty"` - // CIDR block IP subnetwork specification. Must be IPv4. + // A list of CIDR block IP subnetwork specification. Must be IPv4. // +kubebuilder:validation:Optional VPCIPSubnetworks []*string `json:"vpcIpSubnetworks,omitempty" tf:"vpc_ip_subnetworks,omitempty"` } diff --git a/apis/accesscontextmanager/v1beta2/zz_serviceperimeter_types.go b/apis/accesscontextmanager/v1beta2/zz_serviceperimeter_types.go index c519a5e1e..bc7485829 100755 --- a/apis/accesscontextmanager/v1beta2/zz_serviceperimeter_types.go +++ b/apis/accesscontextmanager/v1beta2/zz_serviceperimeter_types.go @@ -15,9 +15,11 @@ import ( type EgressFromInitParameters struct { - // A list of identities that are allowed access through this ingress policy. - // Should be in the format of email address. The email address should represent - // individual user or service account only. + // Identities can be an individual user, service account, Google group, + // or third-party identity. For third-party identity, only single identities + // are supported and other identity types are not supported.The v1 identities + // that have the prefix user, group and serviceAccount in + // https://cloud.google.com/iam/docs/principal-identifiers#v1 are supported. // +listType=set Identities []*string `json:"identities,omitempty" tf:"identities,omitempty"` @@ -38,9 +40,11 @@ type EgressFromInitParameters struct { type EgressFromObservation struct { - // A list of identities that are allowed access through this ingress policy. - // Should be in the format of email address. The email address should represent - // individual user or service account only. + // Identities can be an individual user, service account, Google group, + // or third-party identity. 
For third-party identity, only single identities + // are supported and other identity types are not supported.The v1 identities + // that have the prefix user, group and serviceAccount in + // https://cloud.google.com/iam/docs/principal-identifiers#v1 are supported. // +listType=set Identities []*string `json:"identities,omitempty" tf:"identities,omitempty"` @@ -61,9 +65,11 @@ type EgressFromObservation struct { type EgressFromParameters struct { - // A list of identities that are allowed access through this ingress policy. - // Should be in the format of email address. The email address should represent - // individual user or service account only. + // Identities can be an individual user, service account, Google group, + // or third-party identity. For third-party identity, only single identities + // are supported and other identity types are not supported.The v1 identities + // that have the prefix user, group and serviceAccount in + // https://cloud.google.com/iam/docs/principal-identifiers#v1 are supported. // +kubebuilder:validation:Optional // +listType=set Identities []*string `json:"identities,omitempty" tf:"identities,omitempty"` @@ -128,9 +134,11 @@ type EgressFromSourcesParameters struct { type EgressPoliciesEgressFromInitParameters struct { - // A list of identities that are allowed access through this ingress policy. - // Should be in the format of email address. The email address should represent - // individual user or service account only. + // Identities can be an individual user, service account, Google group, + // or third-party identity. For third-party identity, only single identities + // are supported and other identity types are not supported.The v1 identities + // that have the prefix user, group and serviceAccount in + // https://cloud.google.com/iam/docs/principal-identifiers#v1 are supported. 
// +listType=set Identities []*string `json:"identities,omitempty" tf:"identities,omitempty"` @@ -151,9 +159,11 @@ type EgressPoliciesEgressFromInitParameters struct { type EgressPoliciesEgressFromObservation struct { - // A list of identities that are allowed access through this ingress policy. - // Should be in the format of email address. The email address should represent - // individual user or service account only. + // Identities can be an individual user, service account, Google group, + // or third-party identity. For third-party identity, only single identities + // are supported and other identity types are not supported.The v1 identities + // that have the prefix user, group and serviceAccount in + // https://cloud.google.com/iam/docs/principal-identifiers#v1 are supported. // +listType=set Identities []*string `json:"identities,omitempty" tf:"identities,omitempty"` @@ -174,9 +184,11 @@ type EgressPoliciesEgressFromObservation struct { type EgressPoliciesEgressFromParameters struct { - // A list of identities that are allowed access through this ingress policy. - // Should be in the format of email address. The email address should represent - // individual user or service account only. + // Identities can be an individual user, service account, Google group, + // or third-party identity. For third-party identity, only single identities + // are supported and other identity types are not supported.The v1 identities + // that have the prefix user, group and serviceAccount in + // https://cloud.google.com/iam/docs/principal-identifiers#v1 are supported. // +kubebuilder:validation:Optional // +listType=set Identities []*string `json:"identities,omitempty" tf:"identities,omitempty"` @@ -450,9 +462,11 @@ type EgressToParameters struct { type IngressFromInitParameters struct { - // A list of identities that are allowed access through this ingress policy. - // Should be in the format of email address. 
The email address should represent - // individual user or service account only. + // Identities can be an individual user, service account, Google group, + // or third-party identity. For third-party identity, only single identities + // are supported and other identity types are not supported.The v1 identities + // that have the prefix user, group and serviceAccount in + // https://cloud.google.com/iam/docs/principal-identifiers#v1 are supported. // +listType=set Identities []*string `json:"identities,omitempty" tf:"identities,omitempty"` @@ -469,9 +483,11 @@ type IngressFromInitParameters struct { type IngressFromObservation struct { - // A list of identities that are allowed access through this ingress policy. - // Should be in the format of email address. The email address should represent - // individual user or service account only. + // Identities can be an individual user, service account, Google group, + // or third-party identity. For third-party identity, only single identities + // are supported and other identity types are not supported.The v1 identities + // that have the prefix user, group and serviceAccount in + // https://cloud.google.com/iam/docs/principal-identifiers#v1 are supported. // +listType=set Identities []*string `json:"identities,omitempty" tf:"identities,omitempty"` @@ -488,9 +504,11 @@ type IngressFromObservation struct { type IngressFromParameters struct { - // A list of identities that are allowed access through this ingress policy. - // Should be in the format of email address. The email address should represent - // individual user or service account only. + // Identities can be an individual user, service account, Google group, + // or third-party identity. For third-party identity, only single identities + // are supported and other identity types are not supported.The v1 identities + // that have the prefix user, group and serviceAccount in + // https://cloud.google.com/iam/docs/principal-identifiers#v1 are supported. 
// +kubebuilder:validation:Optional // +listType=set Identities []*string `json:"identities,omitempty" tf:"identities,omitempty"` @@ -584,9 +602,11 @@ type IngressFromSourcesParameters struct { type IngressPoliciesIngressFromInitParameters struct { - // A list of identities that are allowed access through this ingress policy. - // Should be in the format of email address. The email address should represent - // individual user or service account only. + // Identities can be an individual user, service account, Google group, + // or third-party identity. For third-party identity, only single identities + // are supported and other identity types are not supported.The v1 identities + // that have the prefix user, group and serviceAccount in + // https://cloud.google.com/iam/docs/principal-identifiers#v1 are supported. // +listType=set Identities []*string `json:"identities,omitempty" tf:"identities,omitempty"` @@ -603,9 +623,11 @@ type IngressPoliciesIngressFromInitParameters struct { type IngressPoliciesIngressFromObservation struct { - // A list of identities that are allowed access through this ingress policy. - // Should be in the format of email address. The email address should represent - // individual user or service account only. + // Identities can be an individual user, service account, Google group, + // or third-party identity. For third-party identity, only single identities + // are supported and other identity types are not supported.The v1 identities + // that have the prefix user, group and serviceAccount in + // https://cloud.google.com/iam/docs/principal-identifiers#v1 are supported. // +listType=set Identities []*string `json:"identities,omitempty" tf:"identities,omitempty"` @@ -622,9 +644,11 @@ type IngressPoliciesIngressFromObservation struct { type IngressPoliciesIngressFromParameters struct { - // A list of identities that are allowed access through this ingress policy. - // Should be in the format of email address. 
The email address should represent - // individual user or service account only. + // Identities can be an individual user, service account, Google group, + // or third-party identity. For third-party identity, only single identities + // are supported and other identity types are not supported.The v1 identities + // that have the prefix user, group and serviceAccount in + // https://cloud.google.com/iam/docs/principal-identifiers#v1 are supported. // +kubebuilder:validation:Optional // +listType=set Identities []*string `json:"identities,omitempty" tf:"identities,omitempty"` diff --git a/apis/activedirectory/v1beta1/zz_domain_types.go b/apis/activedirectory/v1beta1/zz_domain_types.go index cabc07e80..eddfa8559 100755 --- a/apis/activedirectory/v1beta1/zz_domain_types.go +++ b/apis/activedirectory/v1beta1/zz_domain_types.go @@ -58,6 +58,10 @@ type DomainObservation struct { // +listType=set AuthorizedNetworks []*string `json:"authorizedNetworks,omitempty" tf:"authorized_networks,omitempty"` + // Defaults to true. + // When the field is set to false, deleting the domain is allowed. + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + // The fully qualified domain name. e.g. mydomain.myorganization.com, with the restrictions // of https://cloud.google.com/managed-microsoft-ad/reference/rest/v1/projects.locations.global.domains. 
DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` diff --git a/apis/activedirectory/v1beta1/zz_generated.deepcopy.go b/apis/activedirectory/v1beta1/zz_generated.deepcopy.go index a6599e805..9df22b1cc 100644 --- a/apis/activedirectory/v1beta1/zz_generated.deepcopy.go +++ b/apis/activedirectory/v1beta1/zz_generated.deepcopy.go @@ -163,6 +163,11 @@ func (in *DomainObservation) DeepCopyInto(out *DomainObservation) { } } } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } if in.DomainName != nil { in, out := &in.DomainName, &out.DomainName *out = new(string) diff --git a/apis/alloydb/v1beta2/zz_cluster_types.go b/apis/alloydb/v1beta2/zz_cluster_types.go index 0bf45c33e..9b462b484 100755 --- a/apis/alloydb/v1beta2/zz_cluster_types.go +++ b/apis/alloydb/v1beta2/zz_cluster_types.go @@ -214,6 +214,7 @@ type ClusterInitParameters struct { // Policy to determine if the cluster should be deleted forcefully. // Deleting a cluster forcefully, deletes the cluster and all its associated instances within the cluster. // Deleting a Secondary cluster with a secondary instance REQUIRES setting deletion_policy = "FORCE" otherwise an error is returned. This is needed as there is no support to delete just the secondary instance, and the only way to delete secondary instance is to delete the associated secondary cluster forcefully which also deletes the secondary instance. + // Possible values: DEFAULT, FORCE DeletionPolicy *string `json:"deletionPolicy,omitempty" tf:"deletion_policy,omitempty"` // User-settable and human-readable display name for the Cluster. @@ -240,24 +241,10 @@ type ClusterInitParameters struct { // Structure is documented below. 
MaintenanceUpdatePolicy *MaintenanceUpdatePolicyInitParameters `json:"maintenanceUpdatePolicy,omitempty" tf:"maintenance_update_policy,omitempty"` - // The relative resource name of the VPC network on which the instance can be accessed. It is specified in the following form: - // "projects/{projectNumber}/global/networks/{network_id}". - // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/compute/v1beta1.Network - // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() - Network *string `json:"network,omitempty" tf:"network,omitempty"` - // Metadata related to network configuration. // Structure is documented below. NetworkConfig *NetworkConfigInitParameters `json:"networkConfig,omitempty" tf:"network_config,omitempty"` - // Reference to a Network in compute to populate network. - // +kubebuilder:validation:Optional - NetworkRef *v1.Reference `json:"networkRef,omitempty" tf:"-"` - - // Selector for a Network in compute to populate network. - // +kubebuilder:validation:Optional - NetworkSelector *v1.Selector `json:"networkSelector,omitempty" tf:"-"` - // The ID of the project in which the resource belongs. // If it is not provided, the provider project is used. Project *string `json:"project,omitempty" tf:"project,omitempty"` @@ -277,6 +264,10 @@ type ClusterInitParameters struct { // Configuration of the secondary cluster for Cross Region Replication. This should be set if and only if the cluster is of type SECONDARY. // Structure is documented below. SecondaryConfig *SecondaryConfigInitParameters `json:"secondaryConfig,omitempty" tf:"secondary_config,omitempty"` + + // The subscrition type of cluster. + // Possible values are: TRIAL, STANDARD. + SubscriptionType *string `json:"subscriptionType,omitempty" tf:"subscription_type,omitempty"` } type ClusterObservation struct { @@ -314,6 +305,7 @@ type ClusterObservation struct { // Policy to determine if the cluster should be deleted forcefully. 
// Deleting a cluster forcefully, deletes the cluster and all its associated instances within the cluster. // Deleting a Secondary cluster with a secondary instance REQUIRES setting deletion_policy = "FORCE" otherwise an error is returned. This is needed as there is no support to delete just the secondary instance, and the only way to delete secondary instance is to delete the associated secondary cluster forcefully which also deletes the secondary instance. + // Possible values: DEFAULT, FORCE DeletionPolicy *string `json:"deletionPolicy,omitempty" tf:"deletion_policy,omitempty"` // User-settable and human-readable display name for the Cluster. @@ -364,10 +356,6 @@ type ClusterObservation struct { // The name of the cluster resource. Name *string `json:"name,omitempty" tf:"name,omitempty"` - // The relative resource name of the VPC network on which the instance can be accessed. It is specified in the following form: - // "projects/{projectNumber}/global/networks/{network_id}". - Network *string `json:"network,omitempty" tf:"network,omitempty"` - // Metadata related to network configuration. // Structure is documented below. NetworkConfig *NetworkConfigObservation `json:"networkConfig,omitempty" tf:"network_config,omitempty"` @@ -400,11 +388,19 @@ type ClusterObservation struct { // Output only. The current serving state of the cluster. State *string `json:"state,omitempty" tf:"state,omitempty"` + // The subscrition type of cluster. + // Possible values are: TRIAL, STANDARD. + SubscriptionType *string `json:"subscriptionType,omitempty" tf:"subscription_type,omitempty"` + // The combination of labels configured directly on the resource // and default labels configured on the provider. // +mapType=granular TerraformLabels map[string]*string `json:"terraformLabels,omitempty" tf:"terraform_labels,omitempty"` + // Contains information and all metadata related to TRIAL clusters. + // Structure is documented below. 
+ TrialMetadata []TrialMetadataObservation `json:"trialMetadata,omitempty" tf:"trial_metadata,omitempty"` + // The system-generated UID of the resource. UID *string `json:"uid,omitempty" tf:"uid,omitempty"` } @@ -441,6 +437,7 @@ type ClusterParameters struct { // Policy to determine if the cluster should be deleted forcefully. // Deleting a cluster forcefully, deletes the cluster and all its associated instances within the cluster. // Deleting a Secondary cluster with a secondary instance REQUIRES setting deletion_policy = "FORCE" otherwise an error is returned. This is needed as there is no support to delete just the secondary instance, and the only way to delete secondary instance is to delete the associated secondary cluster forcefully which also deletes the secondary instance. + // Possible values: DEFAULT, FORCE // +kubebuilder:validation:Optional DeletionPolicy *string `json:"deletionPolicy,omitempty" tf:"deletion_policy,omitempty"` @@ -478,26 +475,11 @@ type ClusterParameters struct { // +kubebuilder:validation:Optional MaintenanceUpdatePolicy *MaintenanceUpdatePolicyParameters `json:"maintenanceUpdatePolicy,omitempty" tf:"maintenance_update_policy,omitempty"` - // The relative resource name of the VPC network on which the instance can be accessed. It is specified in the following form: - // "projects/{projectNumber}/global/networks/{network_id}". - // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/compute/v1beta1.Network - // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() - // +kubebuilder:validation:Optional - Network *string `json:"network,omitempty" tf:"network,omitempty"` - // Metadata related to network configuration. // Structure is documented below. // +kubebuilder:validation:Optional NetworkConfig *NetworkConfigParameters `json:"networkConfig,omitempty" tf:"network_config,omitempty"` - // Reference to a Network in compute to populate network. 
- // +kubebuilder:validation:Optional - NetworkRef *v1.Reference `json:"networkRef,omitempty" tf:"-"` - - // Selector for a Network in compute to populate network. - // +kubebuilder:validation:Optional - NetworkSelector *v1.Selector `json:"networkSelector,omitempty" tf:"-"` - // The ID of the project in which the resource belongs. // If it is not provided, the provider project is used. // +kubebuilder:validation:Optional @@ -522,6 +504,11 @@ type ClusterParameters struct { // Structure is documented below. // +kubebuilder:validation:Optional SecondaryConfig *SecondaryConfigParameters `json:"secondaryConfig,omitempty" tf:"secondary_config,omitempty"` + + // The subscrition type of cluster. + // Possible values are: TRIAL, STANDARD. + // +kubebuilder:validation:Optional + SubscriptionType *string `json:"subscriptionType,omitempty" tf:"subscription_type,omitempty"` } type ContinuousBackupConfigEncryptionConfigInitParameters struct { @@ -1077,6 +1064,27 @@ type TimeBasedRetentionParameters struct { RetentionPeriod *string `json:"retentionPeriod,omitempty" tf:"retention_period,omitempty"` } +type TrialMetadataInitParameters struct { +} + +type TrialMetadataObservation struct { + + // End time of the trial cluster. + EndTime *string `json:"endTime,omitempty" tf:"end_time,omitempty"` + + // Grace end time of the trial cluster. + GraceEndTime *string `json:"graceEndTime,omitempty" tf:"grace_end_time,omitempty"` + + // Start time of the trial cluster. + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` + + // Upgrade time of the trial cluster to standard cluster. + UpgradeTime *string `json:"upgradeTime,omitempty" tf:"upgrade_time,omitempty"` +} + +type TrialMetadataParameters struct { +} + type WeeklyScheduleInitParameters struct { // The days of the week to perform a backup. At least one day of the week must be provided. 
diff --git a/apis/alloydb/v1beta2/zz_generated.deepcopy.go b/apis/alloydb/v1beta2/zz_generated.deepcopy.go index 799a3ed2f..4ccfc9639 100644 --- a/apis/alloydb/v1beta2/zz_generated.deepcopy.go +++ b/apis/alloydb/v1beta2/zz_generated.deepcopy.go @@ -1161,26 +1161,11 @@ func (in *ClusterInitParameters) DeepCopyInto(out *ClusterInitParameters) { *out = new(MaintenanceUpdatePolicyInitParameters) (*in).DeepCopyInto(*out) } - if in.Network != nil { - in, out := &in.Network, &out.Network - *out = new(string) - **out = **in - } if in.NetworkConfig != nil { in, out := &in.NetworkConfig, &out.NetworkConfig *out = new(NetworkConfigInitParameters) (*in).DeepCopyInto(*out) } - if in.NetworkRef != nil { - in, out := &in.NetworkRef, &out.NetworkRef - *out = new(v1.Reference) - (*in).DeepCopyInto(*out) - } - if in.NetworkSelector != nil { - in, out := &in.NetworkSelector, &out.NetworkSelector - *out = new(v1.Selector) - (*in).DeepCopyInto(*out) - } if in.Project != nil { in, out := &in.Project, &out.Project *out = new(string) @@ -1206,6 +1191,11 @@ func (in *ClusterInitParameters) DeepCopyInto(out *ClusterInitParameters) { *out = new(SecondaryConfigInitParameters) (*in).DeepCopyInto(*out) } + if in.SubscriptionType != nil { + in, out := &in.SubscriptionType, &out.SubscriptionType + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterInitParameters. 
@@ -1410,11 +1400,6 @@ func (in *ClusterObservation) DeepCopyInto(out *ClusterObservation) { *out = new(string) **out = **in } - if in.Network != nil { - in, out := &in.Network, &out.Network - *out = new(string) - **out = **in - } if in.NetworkConfig != nil { in, out := &in.NetworkConfig, &out.NetworkConfig *out = new(NetworkConfigObservation) @@ -1455,6 +1440,11 @@ func (in *ClusterObservation) DeepCopyInto(out *ClusterObservation) { *out = new(string) **out = **in } + if in.SubscriptionType != nil { + in, out := &in.SubscriptionType, &out.SubscriptionType + *out = new(string) + **out = **in + } if in.TerraformLabels != nil { in, out := &in.TerraformLabels, &out.TerraformLabels *out = make(map[string]*string, len(*in)) @@ -1471,6 +1461,13 @@ func (in *ClusterObservation) DeepCopyInto(out *ClusterObservation) { (*out)[key] = outVal } } + if in.TrialMetadata != nil { + in, out := &in.TrialMetadata, &out.TrialMetadata + *out = make([]TrialMetadataObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.UID != nil { in, out := &in.UID, &out.UID *out = new(string) @@ -1578,26 +1575,11 @@ func (in *ClusterParameters) DeepCopyInto(out *ClusterParameters) { *out = new(MaintenanceUpdatePolicyParameters) (*in).DeepCopyInto(*out) } - if in.Network != nil { - in, out := &in.Network, &out.Network - *out = new(string) - **out = **in - } if in.NetworkConfig != nil { in, out := &in.NetworkConfig, &out.NetworkConfig *out = new(NetworkConfigParameters) (*in).DeepCopyInto(*out) } - if in.NetworkRef != nil { - in, out := &in.NetworkRef, &out.NetworkRef - *out = new(v1.Reference) - (*in).DeepCopyInto(*out) - } - if in.NetworkSelector != nil { - in, out := &in.NetworkSelector, &out.NetworkSelector - *out = new(v1.Selector) - (*in).DeepCopyInto(*out) - } if in.Project != nil { in, out := &in.Project, &out.Project *out = new(string) @@ -1623,6 +1605,11 @@ func (in *ClusterParameters) DeepCopyInto(out *ClusterParameters) { *out = 
new(SecondaryConfigParameters) (*in).DeepCopyInto(*out) } + if in.SubscriptionType != nil { + in, out := &in.SubscriptionType, &out.SubscriptionType + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterParameters. @@ -2384,6 +2371,11 @@ func (in *InstanceNetworkConfigInitParameters) DeepCopyInto(out *InstanceNetwork (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.EnableOutboundPublicIP != nil { + in, out := &in.EnableOutboundPublicIP, &out.EnableOutboundPublicIP + *out = new(bool) + **out = **in + } if in.EnablePublicIP != nil { in, out := &in.EnablePublicIP, &out.EnablePublicIP *out = new(bool) @@ -2411,6 +2403,11 @@ func (in *InstanceNetworkConfigObservation) DeepCopyInto(out *InstanceNetworkCon (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.EnableOutboundPublicIP != nil { + in, out := &in.EnableOutboundPublicIP, &out.EnableOutboundPublicIP + *out = new(bool) + **out = **in + } if in.EnablePublicIP != nil { in, out := &in.EnablePublicIP, &out.EnablePublicIP *out = new(bool) @@ -2438,6 +2435,11 @@ func (in *InstanceNetworkConfigParameters) DeepCopyInto(out *InstanceNetworkConf (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.EnableOutboundPublicIP != nil { + in, out := &in.EnableOutboundPublicIP, &out.EnableOutboundPublicIP + *out = new(bool) + **out = **in + } if in.EnablePublicIP != nil { in, out := &in.EnablePublicIP, &out.EnablePublicIP *out = new(bool) @@ -2598,6 +2600,17 @@ func (in *InstanceObservation) DeepCopyInto(out *InstanceObservation) { *out = new(InstanceNetworkConfigObservation) (*in).DeepCopyInto(*out) } + if in.OutboundPublicIPAddresses != nil { + in, out := &in.OutboundPublicIPAddresses, &out.OutboundPublicIPAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.PscInstanceConfig != nil { in, out := &in.PscInstanceConfig, &out.PscInstanceConfig 
*out = new(PscInstanceConfigObservation) @@ -4153,6 +4166,71 @@ func (in *TimeBasedRetentionParameters) DeepCopy() *TimeBasedRetentionParameters return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrialMetadataInitParameters) DeepCopyInto(out *TrialMetadataInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrialMetadataInitParameters. +func (in *TrialMetadataInitParameters) DeepCopy() *TrialMetadataInitParameters { + if in == nil { + return nil + } + out := new(TrialMetadataInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrialMetadataObservation) DeepCopyInto(out *TrialMetadataObservation) { + *out = *in + if in.EndTime != nil { + in, out := &in.EndTime, &out.EndTime + *out = new(string) + **out = **in + } + if in.GraceEndTime != nil { + in, out := &in.GraceEndTime, &out.GraceEndTime + *out = new(string) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } + if in.UpgradeTime != nil { + in, out := &in.UpgradeTime, &out.UpgradeTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrialMetadataObservation. +func (in *TrialMetadataObservation) DeepCopy() *TrialMetadataObservation { + if in == nil { + return nil + } + out := new(TrialMetadataObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrialMetadataParameters) DeepCopyInto(out *TrialMetadataParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrialMetadataParameters. 
+func (in *TrialMetadataParameters) DeepCopy() *TrialMetadataParameters { + if in == nil { + return nil + } + out := new(TrialMetadataParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *WeeklyScheduleInitParameters) DeepCopyInto(out *WeeklyScheduleInitParameters) { *out = *in diff --git a/apis/alloydb/v1beta2/zz_generated.resolvers.go b/apis/alloydb/v1beta2/zz_generated.resolvers.go index 54366ecc3..998e48dc2 100644 --- a/apis/alloydb/v1beta2/zz_generated.resolvers.go +++ b/apis/alloydb/v1beta2/zz_generated.resolvers.go @@ -76,25 +76,6 @@ func (mg *Cluster) ResolveReferences(ctx context.Context, c client.Reader) error var rsp reference.ResolutionResponse var err error - { - m, l, err = apisresolver.GetManagedResource("compute.gcp.upbound.io", "v1beta1", "Network", "NetworkList") - if err != nil { - return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") - } - - rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ - CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Network), - Extract: resource.ExtractResourceID(), - Reference: mg.Spec.ForProvider.NetworkRef, - Selector: mg.Spec.ForProvider.NetworkSelector, - To: reference.To{List: l, Managed: m}, - }) - } - if err != nil { - return errors.Wrap(err, "mg.Spec.ForProvider.Network") - } - mg.Spec.ForProvider.Network = reference.ToPtrValue(rsp.ResolvedValue) - mg.Spec.ForProvider.NetworkRef = rsp.ResolvedReference if mg.Spec.ForProvider.NetworkConfig != nil { { @@ -180,25 +161,6 @@ func (mg *Cluster) ResolveReferences(ctx context.Context, c client.Reader) error mg.Spec.ForProvider.SecondaryConfig.PrimaryClusterNameRef = rsp.ResolvedReference } - { - m, l, err = apisresolver.GetManagedResource("compute.gcp.upbound.io", "v1beta1", "Network", "NetworkList") - if err != nil { - return errors.Wrap(err, "failed to get the reference 
target managed resource and its list for reference resolution") - } - rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ - CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Network), - Extract: resource.ExtractResourceID(), - Reference: mg.Spec.InitProvider.NetworkRef, - Selector: mg.Spec.InitProvider.NetworkSelector, - To: reference.To{List: l, Managed: m}, - }) - } - if err != nil { - return errors.Wrap(err, "mg.Spec.InitProvider.Network") - } - mg.Spec.InitProvider.Network = reference.ToPtrValue(rsp.ResolvedValue) - mg.Spec.InitProvider.NetworkRef = rsp.ResolvedReference - if mg.Spec.InitProvider.NetworkConfig != nil { { m, l, err = apisresolver.GetManagedResource("compute.gcp.upbound.io", "v1beta1", "Network", "NetworkList") diff --git a/apis/alloydb/v1beta2/zz_instance_types.go b/apis/alloydb/v1beta2/zz_instance_types.go index 82299e3d1..13ade47bd 100755 --- a/apis/alloydb/v1beta2/zz_instance_types.go +++ b/apis/alloydb/v1beta2/zz_instance_types.go @@ -148,6 +148,9 @@ type InstanceNetworkConfigInitParameters struct { // Structure is documented below. AuthorizedExternalNetworks []AuthorizedExternalNetworksInitParameters `json:"authorizedExternalNetworks,omitempty" tf:"authorized_external_networks,omitempty"` + // Enabling outbound public ip for the instance. + EnableOutboundPublicIP *bool `json:"enableOutboundPublicIp,omitempty" tf:"enable_outbound_public_ip,omitempty"` + // Enabling public ip for the instance. If a user wishes to disable this, // please also clear the list of the authorized external networks set on // the same instance. @@ -162,6 +165,9 @@ type InstanceNetworkConfigObservation struct { // Structure is documented below. AuthorizedExternalNetworks []AuthorizedExternalNetworksObservation `json:"authorizedExternalNetworks,omitempty" tf:"authorized_external_networks,omitempty"` + // Enabling outbound public ip for the instance. 
+ EnableOutboundPublicIP *bool `json:"enableOutboundPublicIp,omitempty" tf:"enable_outbound_public_ip,omitempty"` + // Enabling public ip for the instance. If a user wishes to disable this, // please also clear the list of the authorized external networks set on // the same instance. @@ -177,6 +183,10 @@ type InstanceNetworkConfigParameters struct { // +kubebuilder:validation:Optional AuthorizedExternalNetworks []AuthorizedExternalNetworksParameters `json:"authorizedExternalNetworks,omitempty" tf:"authorized_external_networks,omitempty"` + // Enabling outbound public ip for the instance. + // +kubebuilder:validation:Optional + EnableOutboundPublicIP *bool `json:"enableOutboundPublicIp,omitempty" tf:"enable_outbound_public_ip,omitempty"` + // Enabling public ip for the instance. If a user wishes to disable this, // please also clear the list of the authorized external networks set on // the same instance. @@ -259,6 +269,11 @@ type InstanceObservation struct { // Structure is documented below. NetworkConfig *InstanceNetworkConfigObservation `json:"networkConfig,omitempty" tf:"network_config,omitempty"` + // The outbound public IP addresses for the instance. This is available ONLY when + // networkConfig.enableOutboundPublicIp is set to true. These IP addresses are used + // for outbound connections. + OutboundPublicIPAddresses []*string `json:"outboundPublicIpAddresses,omitempty" tf:"outbound_public_ip_addresses,omitempty"` + // Configuration for Private Service Connect (PSC) for the instance. // Structure is documented below. 
PscInstanceConfig *PscInstanceConfigObservation `json:"pscInstanceConfig,omitempty" tf:"psc_instance_config,omitempty"` diff --git a/apis/apigee/v1beta1/zz_generated.deepcopy.go b/apis/apigee/v1beta1/zz_generated.deepcopy.go index f8c7380ad..f03fd8b7a 100644 --- a/apis/apigee/v1beta1/zz_generated.deepcopy.go +++ b/apis/apigee/v1beta1/zz_generated.deepcopy.go @@ -2448,6 +2448,11 @@ func (in *NATAddress) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NATAddressInitParameters) DeepCopyInto(out *NATAddressInitParameters) { *out = *in + if in.Activate != nil { + in, out := &in.Activate, &out.Activate + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NATAddressInitParameters. @@ -2495,6 +2500,11 @@ func (in *NATAddressList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NATAddressObservation) DeepCopyInto(out *NATAddressObservation) { *out = *in + if in.Activate != nil { + in, out := &in.Activate, &out.Activate + *out = new(bool) + **out = **in + } if in.ID != nil { in, out := &in.ID, &out.ID *out = new(string) @@ -2530,6 +2540,11 @@ func (in *NATAddressObservation) DeepCopy() *NATAddressObservation { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *NATAddressParameters) DeepCopyInto(out *NATAddressParameters) { *out = *in + if in.Activate != nil { + in, out := &in.Activate, &out.Activate + *out = new(bool) + **out = **in + } if in.InstanceID != nil { in, out := &in.InstanceID, &out.InstanceID *out = new(string) @@ -2562,7 +2577,7 @@ func (in *NATAddressSpec) DeepCopyInto(out *NATAddressSpec) { *out = *in in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) in.ForProvider.DeepCopyInto(&out.ForProvider) - out.InitProvider = in.InitProvider + in.InitProvider.DeepCopyInto(&out.InitProvider) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NATAddressSpec. diff --git a/apis/apigee/v1beta1/zz_nataddress_types.go b/apis/apigee/v1beta1/zz_nataddress_types.go index 848da38ff..0ca6d49e2 100755 --- a/apis/apigee/v1beta1/zz_nataddress_types.go +++ b/apis/apigee/v1beta1/zz_nataddress_types.go @@ -14,10 +14,16 @@ import ( ) type NATAddressInitParameters struct { + + // Flag that specifies whether the reserved NAT address should be activate. + Activate *bool `json:"activate,omitempty" tf:"activate,omitempty"` } type NATAddressObservation struct { + // Flag that specifies whether the reserved NAT address should be activate. + Activate *bool `json:"activate,omitempty" tf:"activate,omitempty"` + // an identifier for the resource with format {{instance_id}}/natAddresses/{{name}} ID *string `json:"id,omitempty" tf:"id,omitempty"` @@ -34,6 +40,10 @@ type NATAddressObservation struct { type NATAddressParameters struct { + // Flag that specifies whether the reserved NAT address should be activate. + // +kubebuilder:validation:Optional + Activate *bool `json:"activate,omitempty" tf:"activate,omitempty"` + // The Apigee instance associated with the Apigee environment, // in the format organizations/{{org_name}}/instances/{{instance_name}}. 
// +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/apigee/v1beta1.Instance diff --git a/apis/artifact/v1beta2/zz_generated.deepcopy.go b/apis/artifact/v1beta2/zz_generated.deepcopy.go index d89abd415..96758239a 100644 --- a/apis/artifact/v1beta2/zz_generated.deepcopy.go +++ b/apis/artifact/v1beta2/zz_generated.deepcopy.go @@ -178,6 +178,86 @@ func (in *CleanupPoliciesParameters) DeepCopy() *CleanupPoliciesParameters { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CommonRepositoryInitParameters) DeepCopyInto(out *CommonRepositoryInitParameters) { + *out = *in + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } + if in.URIRef != nil { + in, out := &in.URIRef, &out.URIRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.URISelector != nil { + in, out := &in.URISelector, &out.URISelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonRepositoryInitParameters. +func (in *CommonRepositoryInitParameters) DeepCopy() *CommonRepositoryInitParameters { + if in == nil { + return nil + } + out := new(CommonRepositoryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CommonRepositoryObservation) DeepCopyInto(out *CommonRepositoryObservation) { + *out = *in + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonRepositoryObservation. 
+func (in *CommonRepositoryObservation) DeepCopy() *CommonRepositoryObservation { + if in == nil { + return nil + } + out := new(CommonRepositoryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CommonRepositoryParameters) DeepCopyInto(out *CommonRepositoryParameters) { + *out = *in + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } + if in.URIRef != nil { + in, out := &in.URIRef, &out.URIRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.URISelector != nil { + in, out := &in.URISelector, &out.URISelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonRepositoryParameters. +func (in *CommonRepositoryParameters) DeepCopy() *CommonRepositoryParameters { + if in == nil { + return nil + } + out := new(CommonRepositoryParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ConditionInitParameters) DeepCopyInto(out *ConditionInitParameters) { *out = *in @@ -1642,6 +1722,11 @@ func (in *RegistryRepositoryInitParameters) DeepCopyInto(out *RegistryRepository *out = new(VirtualRepositoryConfigInitParameters) (*in).DeepCopyInto(*out) } + if in.VulnerabilityScanningConfig != nil { + in, out := &in.VulnerabilityScanningConfig, &out.VulnerabilityScanningConfig + *out = new(VulnerabilityScanningConfigInitParameters) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryRepositoryInitParameters. 
@@ -1819,6 +1904,11 @@ func (in *RegistryRepositoryObservation) DeepCopyInto(out *RegistryRepositoryObs *out = new(VirtualRepositoryConfigObservation) (*in).DeepCopyInto(*out) } + if in.VulnerabilityScanningConfig != nil { + in, out := &in.VulnerabilityScanningConfig, &out.VulnerabilityScanningConfig + *out = new(VulnerabilityScanningConfigObservation) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryRepositoryObservation. @@ -1912,6 +2002,11 @@ func (in *RegistryRepositoryParameters) DeepCopyInto(out *RegistryRepositoryPara *out = new(VirtualRepositoryConfigParameters) (*in).DeepCopyInto(*out) } + if in.VulnerabilityScanningConfig != nil { + in, out := &in.VulnerabilityScanningConfig, &out.VulnerabilityScanningConfig + *out = new(VulnerabilityScanningConfigParameters) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryRepositoryParameters. 
@@ -1967,6 +2062,11 @@ func (in *RemoteRepositoryConfigInitParameters) DeepCopyInto(out *RemoteReposito *out = new(AptRepositoryInitParameters) (*in).DeepCopyInto(*out) } + if in.CommonRepository != nil { + in, out := &in.CommonRepository, &out.CommonRepository + *out = new(CommonRepositoryInitParameters) + (*in).DeepCopyInto(*out) + } if in.Description != nil { in, out := &in.Description, &out.Description *out = new(string) @@ -2027,6 +2127,11 @@ func (in *RemoteRepositoryConfigObservation) DeepCopyInto(out *RemoteRepositoryC *out = new(AptRepositoryObservation) (*in).DeepCopyInto(*out) } + if in.CommonRepository != nil { + in, out := &in.CommonRepository, &out.CommonRepository + *out = new(CommonRepositoryObservation) + (*in).DeepCopyInto(*out) + } if in.Description != nil { in, out := &in.Description, &out.Description *out = new(string) @@ -2087,6 +2192,11 @@ func (in *RemoteRepositoryConfigParameters) DeepCopyInto(out *RemoteRepositoryCo *out = new(AptRepositoryParameters) (*in).DeepCopyInto(*out) } + if in.CommonRepository != nil { + in, out := &in.CommonRepository, &out.CommonRepository + *out = new(CommonRepositoryParameters) + (*in).DeepCopyInto(*out) + } if in.Description != nil { in, out := &in.Description, &out.Description *out = new(string) @@ -2470,6 +2580,76 @@ func (in *VirtualRepositoryConfigParameters) DeepCopy() *VirtualRepositoryConfig return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VulnerabilityScanningConfigInitParameters) DeepCopyInto(out *VulnerabilityScanningConfigInitParameters) { + *out = *in + if in.EnablementConfig != nil { + in, out := &in.EnablementConfig, &out.EnablementConfig + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VulnerabilityScanningConfigInitParameters. 
+func (in *VulnerabilityScanningConfigInitParameters) DeepCopy() *VulnerabilityScanningConfigInitParameters { + if in == nil { + return nil + } + out := new(VulnerabilityScanningConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VulnerabilityScanningConfigObservation) DeepCopyInto(out *VulnerabilityScanningConfigObservation) { + *out = *in + if in.EnablementConfig != nil { + in, out := &in.EnablementConfig, &out.EnablementConfig + *out = new(string) + **out = **in + } + if in.EnablementState != nil { + in, out := &in.EnablementState, &out.EnablementState + *out = new(string) + **out = **in + } + if in.EnablementStateReason != nil { + in, out := &in.EnablementStateReason, &out.EnablementStateReason + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VulnerabilityScanningConfigObservation. +func (in *VulnerabilityScanningConfigObservation) DeepCopy() *VulnerabilityScanningConfigObservation { + if in == nil { + return nil + } + out := new(VulnerabilityScanningConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VulnerabilityScanningConfigParameters) DeepCopyInto(out *VulnerabilityScanningConfigParameters) { + *out = *in + if in.EnablementConfig != nil { + in, out := &in.EnablementConfig, &out.EnablementConfig + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VulnerabilityScanningConfigParameters. 
+func (in *VulnerabilityScanningConfigParameters) DeepCopy() *VulnerabilityScanningConfigParameters { + if in == nil { + return nil + } + out := new(VulnerabilityScanningConfigParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *YumRepositoryInitParameters) DeepCopyInto(out *YumRepositoryInitParameters) { *out = *in diff --git a/apis/artifact/v1beta2/zz_generated.resolvers.go b/apis/artifact/v1beta2/zz_generated.resolvers.go index 2f780f043..a08a16e03 100644 --- a/apis/artifact/v1beta2/zz_generated.resolvers.go +++ b/apis/artifact/v1beta2/zz_generated.resolvers.go @@ -26,6 +26,29 @@ func (mg *RegistryRepository) ResolveReferences( // ResolveReferences of this Re var rsp reference.ResolutionResponse var err error + if mg.Spec.ForProvider.RemoteRepositoryConfig != nil { + if mg.Spec.ForProvider.RemoteRepositoryConfig.CommonRepository != nil { + { + m, l, err = apisresolver.GetManagedResource("artifact.gcp.upbound.io", "v1beta2", "RegistryRepository", "RegistryRepositoryList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RemoteRepositoryConfig.CommonRepository.URI), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.RemoteRepositoryConfig.CommonRepository.URIRef, + Selector: mg.Spec.ForProvider.RemoteRepositoryConfig.CommonRepository.URISelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RemoteRepositoryConfig.CommonRepository.URI") + } + mg.Spec.ForProvider.RemoteRepositoryConfig.CommonRepository.URI = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RemoteRepositoryConfig.CommonRepository.URIRef = rsp.ResolvedReference + + } + } 
if mg.Spec.ForProvider.RemoteRepositoryConfig != nil { if mg.Spec.ForProvider.RemoteRepositoryConfig.UpstreamCredentials != nil { if mg.Spec.ForProvider.RemoteRepositoryConfig.UpstreamCredentials.UsernamePasswordCredentials != nil { @@ -74,6 +97,29 @@ func (mg *RegistryRepository) ResolveReferences( // ResolveReferences of this Re } } + if mg.Spec.InitProvider.RemoteRepositoryConfig != nil { + if mg.Spec.InitProvider.RemoteRepositoryConfig.CommonRepository != nil { + { + m, l, err = apisresolver.GetManagedResource("artifact.gcp.upbound.io", "v1beta2", "RegistryRepository", "RegistryRepositoryList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.RemoteRepositoryConfig.CommonRepository.URI), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.RemoteRepositoryConfig.CommonRepository.URIRef, + Selector: mg.Spec.InitProvider.RemoteRepositoryConfig.CommonRepository.URISelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.RemoteRepositoryConfig.CommonRepository.URI") + } + mg.Spec.InitProvider.RemoteRepositoryConfig.CommonRepository.URI = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.RemoteRepositoryConfig.CommonRepository.URIRef = rsp.ResolvedReference + + } + } if mg.Spec.InitProvider.RemoteRepositoryConfig != nil { if mg.Spec.InitProvider.RemoteRepositoryConfig.UpstreamCredentials != nil { if mg.Spec.InitProvider.RemoteRepositoryConfig.UpstreamCredentials.UsernamePasswordCredentials != nil { diff --git a/apis/artifact/v1beta2/zz_registryrepository_types.go b/apis/artifact/v1beta2/zz_registryrepository_types.go index 8a451e2e5..bed4341c6 100755 --- a/apis/artifact/v1beta2/zz_registryrepository_types.go +++ 
b/apis/artifact/v1beta2/zz_registryrepository_types.go @@ -96,6 +96,45 @@ type CleanupPoliciesParameters struct { MostRecentVersions *MostRecentVersionsParameters `json:"mostRecentVersions,omitempty" tf:"most_recent_versions,omitempty"` } +type CommonRepositoryInitParameters struct { + + // Specific uri to the registry, e.g. "https://registry-1.docker.io" + // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/artifact/v1beta2.RegistryRepository + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` + + // Reference to a RegistryRepository in artifact to populate uri. + // +kubebuilder:validation:Optional + URIRef *v1.Reference `json:"uriRef,omitempty" tf:"-"` + + // Selector for a RegistryRepository in artifact to populate uri. + // +kubebuilder:validation:Optional + URISelector *v1.Selector `json:"uriSelector,omitempty" tf:"-"` +} + +type CommonRepositoryObservation struct { + + // Specific uri to the registry, e.g. "https://registry-1.docker.io" + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type CommonRepositoryParameters struct { + + // Specific uri to the registry, e.g. "https://registry-1.docker.io" + // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/artifact/v1beta2.RegistryRepository + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` + + // Reference to a RegistryRepository in artifact to populate uri. + // +kubebuilder:validation:Optional + URIRef *v1.Reference `json:"uriRef,omitempty" tf:"-"` + + // Selector for a RegistryRepository in artifact to populate uri. 
+ // +kubebuilder:validation:Optional + URISelector *v1.Selector `json:"uriSelector,omitempty" tf:"-"` +} + type ConditionInitParameters struct { // Match versions newer than a duration. @@ -211,7 +250,7 @@ type DockerConfigParameters struct { type DockerRepositoryInitParameters struct { - // Settings for a remote repository with a custom uri. + // [Deprecated, please use commonRepository instead] Settings for a remote repository with a custom uri. // Structure is documented below. CustomRepository *CustomRepositoryInitParameters `json:"customRepository,omitempty" tf:"custom_repository,omitempty"` @@ -222,7 +261,7 @@ type DockerRepositoryInitParameters struct { type DockerRepositoryObservation struct { - // Settings for a remote repository with a custom uri. + // [Deprecated, please use commonRepository instead] Settings for a remote repository with a custom uri. // Structure is documented below. CustomRepository *CustomRepositoryObservation `json:"customRepository,omitempty" tf:"custom_repository,omitempty"` @@ -233,7 +272,7 @@ type DockerRepositoryObservation struct { type DockerRepositoryParameters struct { - // Settings for a remote repository with a custom uri. + // [Deprecated, please use commonRepository instead] Settings for a remote repository with a custom uri. // Structure is documented below. // +kubebuilder:validation:Optional CustomRepository *CustomRepositoryParameters `json:"customRepository,omitempty" tf:"custom_repository,omitempty"` @@ -303,7 +342,7 @@ type MavenRepositoryCustomRepositoryParameters struct { type MavenRepositoryInitParameters struct { - // Settings for a remote repository with a custom uri. + // [Deprecated, please use commonRepository instead] Settings for a remote repository with a custom uri. // Structure is documented below. 
CustomRepository *MavenRepositoryCustomRepositoryInitParameters `json:"customRepository,omitempty" tf:"custom_repository,omitempty"` @@ -314,7 +353,7 @@ type MavenRepositoryInitParameters struct { type MavenRepositoryObservation struct { - // Settings for a remote repository with a custom uri. + // [Deprecated, please use commonRepository instead] Settings for a remote repository with a custom uri. // Structure is documented below. CustomRepository *MavenRepositoryCustomRepositoryObservation `json:"customRepository,omitempty" tf:"custom_repository,omitempty"` @@ -325,7 +364,7 @@ type MavenRepositoryObservation struct { type MavenRepositoryParameters struct { - // Settings for a remote repository with a custom uri. + // [Deprecated, please use commonRepository instead] Settings for a remote repository with a custom uri. // Structure is documented below. // +kubebuilder:validation:Optional CustomRepository *MavenRepositoryCustomRepositoryParameters `json:"customRepository,omitempty" tf:"custom_repository,omitempty"` @@ -386,7 +425,7 @@ type NpmRepositoryCustomRepositoryParameters struct { type NpmRepositoryInitParameters struct { - // Settings for a remote repository with a custom uri. + // [Deprecated, please use commonRepository instead] Settings for a remote repository with a custom uri. // Structure is documented below. CustomRepository *NpmRepositoryCustomRepositoryInitParameters `json:"customRepository,omitempty" tf:"custom_repository,omitempty"` @@ -397,7 +436,7 @@ type NpmRepositoryInitParameters struct { type NpmRepositoryObservation struct { - // Settings for a remote repository with a custom uri. + // [Deprecated, please use commonRepository instead] Settings for a remote repository with a custom uri. // Structure is documented below. 
CustomRepository *NpmRepositoryCustomRepositoryObservation `json:"customRepository,omitempty" tf:"custom_repository,omitempty"` @@ -408,7 +447,7 @@ type NpmRepositoryObservation struct { type NpmRepositoryParameters struct { - // Settings for a remote repository with a custom uri. + // [Deprecated, please use commonRepository instead] Settings for a remote repository with a custom uri. // Structure is documented below. // +kubebuilder:validation:Optional CustomRepository *NpmRepositoryCustomRepositoryParameters `json:"customRepository,omitempty" tf:"custom_repository,omitempty"` @@ -472,7 +511,7 @@ type PythonRepositoryCustomRepositoryParameters struct { type PythonRepositoryInitParameters struct { - // Settings for a remote repository with a custom uri. + // [Deprecated, please use commonRepository instead] Settings for a remote repository with a custom uri. // Structure is documented below. CustomRepository *PythonRepositoryCustomRepositoryInitParameters `json:"customRepository,omitempty" tf:"custom_repository,omitempty"` @@ -483,7 +522,7 @@ type PythonRepositoryInitParameters struct { type PythonRepositoryObservation struct { - // Settings for a remote repository with a custom uri. + // [Deprecated, please use commonRepository instead] Settings for a remote repository with a custom uri. // Structure is documented below. CustomRepository *PythonRepositoryCustomRepositoryObservation `json:"customRepository,omitempty" tf:"custom_repository,omitempty"` @@ -494,7 +533,7 @@ type PythonRepositoryObservation struct { type PythonRepositoryParameters struct { - // Settings for a remote repository with a custom uri. + // [Deprecated, please use commonRepository instead] Settings for a remote repository with a custom uri. // Structure is documented below. 
// +kubebuilder:validation:Optional CustomRepository *PythonRepositoryCustomRepositoryParameters `json:"customRepository,omitempty" tf:"custom_repository,omitempty"` @@ -567,6 +606,10 @@ type RegistryRepositoryInitParameters struct { // Configuration specific for a Virtual Repository. // Structure is documented below. VirtualRepositoryConfig *VirtualRepositoryConfigInitParameters `json:"virtualRepositoryConfig,omitempty" tf:"virtual_repository_config,omitempty"` + + // Configuration for vulnerability scanning of artifacts stored in this repository. + // Structure is documented below. + VulnerabilityScanningConfig *VulnerabilityScanningConfigInitParameters `json:"vulnerabilityScanningConfig,omitempty" tf:"vulnerability_scanning_config,omitempty"` } type RegistryRepositoryObservation struct { @@ -619,7 +662,12 @@ type RegistryRepositoryObservation struct { // +mapType=granular Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` - // The name of the location this repository is located in. + // The name of the repository's location. In addition to specific regions, + // special values for multi-region locations are asia, europe, and us. + // See here, + // or use the + // google_artifact_registry_locations + // data source for possible values. Location *string `json:"location,omitempty" tf:"location,omitempty"` // MavenRepositoryConfig is maven related repository details. @@ -656,6 +704,10 @@ type RegistryRepositoryObservation struct { // Configuration specific for a Virtual Repository. // Structure is documented below. VirtualRepositoryConfig *VirtualRepositoryConfigObservation `json:"virtualRepositoryConfig,omitempty" tf:"virtual_repository_config,omitempty"` + + // Configuration for vulnerability scanning of artifacts stored in this repository. + // Structure is documented below. 
+ VulnerabilityScanningConfig *VulnerabilityScanningConfigObservation `json:"vulnerabilityScanningConfig,omitempty" tf:"vulnerability_scanning_config,omitempty"` } type RegistryRepositoryParameters struct { @@ -705,7 +757,12 @@ type RegistryRepositoryParameters struct { // +mapType=granular Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` - // The name of the location this repository is located in. + // The name of the repository's location. In addition to specific regions, + // special values for multi-region locations are asia, europe, and us. + // See here, + // or use the + // google_artifact_registry_locations + // data source for possible values. // +kubebuilder:validation:Optional Location *string `json:"location,omitempty" tf:"location,omitempty"` @@ -736,6 +793,11 @@ type RegistryRepositoryParameters struct { // Structure is documented below. // +kubebuilder:validation:Optional VirtualRepositoryConfig *VirtualRepositoryConfigParameters `json:"virtualRepositoryConfig,omitempty" tf:"virtual_repository_config,omitempty"` + + // Configuration for vulnerability scanning of artifacts stored in this repository. + // Structure is documented below. + // +kubebuilder:validation:Optional + VulnerabilityScanningConfig *VulnerabilityScanningConfigParameters `json:"vulnerabilityScanningConfig,omitempty" tf:"vulnerability_scanning_config,omitempty"` } type RemoteRepositoryConfigInitParameters struct { @@ -744,6 +806,10 @@ type RemoteRepositoryConfigInitParameters struct { // Structure is documented below. AptRepository *AptRepositoryInitParameters `json:"aptRepository,omitempty" tf:"apt_repository,omitempty"` + // Specific settings for an Artifact Registory remote repository. + // Structure is documented below. + CommonRepository *CommonRepositoryInitParameters `json:"commonRepository,omitempty" tf:"common_repository,omitempty"` + // The description of the remote source. 
Description *string `json:"description,omitempty" tf:"description,omitempty"` @@ -782,6 +848,10 @@ type RemoteRepositoryConfigObservation struct { // Structure is documented below. AptRepository *AptRepositoryObservation `json:"aptRepository,omitempty" tf:"apt_repository,omitempty"` + // Specific settings for an Artifact Registory remote repository. + // Structure is documented below. + CommonRepository *CommonRepositoryObservation `json:"commonRepository,omitempty" tf:"common_repository,omitempty"` + // The description of the remote source. Description *string `json:"description,omitempty" tf:"description,omitempty"` @@ -821,6 +891,11 @@ type RemoteRepositoryConfigParameters struct { // +kubebuilder:validation:Optional AptRepository *AptRepositoryParameters `json:"aptRepository,omitempty" tf:"apt_repository,omitempty"` + // Specific settings for an Artifact Registory remote repository. + // Structure is documented below. + // +kubebuilder:validation:Optional + CommonRepository *CommonRepositoryParameters `json:"commonRepository,omitempty" tf:"common_repository,omitempty"` + // The description of the remote source. // +kubebuilder:validation:Optional Description *string `json:"description,omitempty" tf:"description,omitempty"` @@ -1025,6 +1100,36 @@ type VirtualRepositoryConfigParameters struct { UpstreamPolicies []UpstreamPoliciesParameters `json:"upstreamPolicies,omitempty" tf:"upstream_policies,omitempty"` } +type VulnerabilityScanningConfigInitParameters struct { + + // This configures whether vulnerability scanning is automatically performed for artifacts pushed to this repository. + // Possible values are: INHERITED, DISABLED. + EnablementConfig *string `json:"enablementConfig,omitempty" tf:"enablement_config,omitempty"` +} + +type VulnerabilityScanningConfigObservation struct { + + // This configures whether vulnerability scanning is automatically performed for artifacts pushed to this repository. + // Possible values are: INHERITED, DISABLED. 
+ EnablementConfig *string `json:"enablementConfig,omitempty" tf:"enablement_config,omitempty"` + + // (Output) + // This field returns whether scanning is active for this repository. + EnablementState *string `json:"enablementState,omitempty" tf:"enablement_state,omitempty"` + + // (Output) + // This provides an explanation for the state of scanning on this repository. + EnablementStateReason *string `json:"enablementStateReason,omitempty" tf:"enablement_state_reason,omitempty"` +} + +type VulnerabilityScanningConfigParameters struct { + + // This configures whether vulnerability scanning is automatically performed for artifacts pushed to this repository. + // Possible values are: INHERITED, DISABLED. + // +kubebuilder:validation:Optional + EnablementConfig *string `json:"enablementConfig,omitempty" tf:"enablement_config,omitempty"` +} + type YumRepositoryInitParameters struct { // One of the publicly available Yum repositories supported by Artifact Registry. diff --git a/apis/bigquery/v1beta1/zz_analyticshubdataexchange_types.go b/apis/bigquery/v1beta1/zz_analyticshubdataexchange_types.go index 116152bf0..fe10d7933 100755 --- a/apis/bigquery/v1beta1/zz_analyticshubdataexchange_types.go +++ b/apis/bigquery/v1beta1/zz_analyticshubdataexchange_types.go @@ -39,6 +39,11 @@ type AnalyticsHubDataExchangeInitParameters struct { // The ID of the project in which the resource belongs. // If it is not provided, the provider project is used. Project *string `json:"project,omitempty" tf:"project,omitempty"` + + // Configurable data sharing environment option for a data exchange. + // This field is required for data clean room exchanges. + // Structure is documented below. 
+ SharingEnvironmentConfig *SharingEnvironmentConfigInitParameters `json:"sharingEnvironmentConfig,omitempty" tf:"sharing_environment_config,omitempty"` } type AnalyticsHubDataExchangeObservation struct { @@ -77,6 +82,11 @@ type AnalyticsHubDataExchangeObservation struct { // The ID of the project in which the resource belongs. // If it is not provided, the provider project is used. Project *string `json:"project,omitempty" tf:"project,omitempty"` + + // Configurable data sharing environment option for a data exchange. + // This field is required for data clean room exchanges. + // Structure is documented below. + SharingEnvironmentConfig *SharingEnvironmentConfigObservation `json:"sharingEnvironmentConfig,omitempty" tf:"sharing_environment_config,omitempty"` } type AnalyticsHubDataExchangeParameters struct { @@ -113,6 +123,59 @@ type AnalyticsHubDataExchangeParameters struct { // If it is not provided, the provider project is used. // +kubebuilder:validation:Optional Project *string `json:"project,omitempty" tf:"project,omitempty"` + + // Configurable data sharing environment option for a data exchange. + // This field is required for data clean room exchanges. + // Structure is documented below. + // +kubebuilder:validation:Optional + SharingEnvironmentConfig *SharingEnvironmentConfigParameters `json:"sharingEnvironmentConfig,omitempty" tf:"sharing_environment_config,omitempty"` +} + +type DcrExchangeConfigInitParameters struct { +} + +type DcrExchangeConfigObservation struct { +} + +type DcrExchangeConfigParameters struct { +} + +type DefaultExchangeConfigInitParameters struct { +} + +type DefaultExchangeConfigObservation struct { +} + +type DefaultExchangeConfigParameters struct { +} + +type SharingEnvironmentConfigInitParameters struct { + + // Data Clean Room (DCR), used for privacy-safe and secured data sharing. 
+ DcrExchangeConfig *DcrExchangeConfigInitParameters `json:"dcrExchangeConfig,omitempty" tf:"dcr_exchange_config,omitempty"` + + // Default Analytics Hub data exchange, used for secured data sharing. + DefaultExchangeConfig *DefaultExchangeConfigInitParameters `json:"defaultExchangeConfig,omitempty" tf:"default_exchange_config,omitempty"` +} + +type SharingEnvironmentConfigObservation struct { + + // Data Clean Room (DCR), used for privacy-safe and secured data sharing. + DcrExchangeConfig *DcrExchangeConfigParameters `json:"dcrExchangeConfig,omitempty" tf:"dcr_exchange_config,omitempty"` + + // Default Analytics Hub data exchange, used for secured data sharing. + DefaultExchangeConfig *DefaultExchangeConfigParameters `json:"defaultExchangeConfig,omitempty" tf:"default_exchange_config,omitempty"` +} + +type SharingEnvironmentConfigParameters struct { + + // Data Clean Room (DCR), used for privacy-safe and secured data sharing. + // +kubebuilder:validation:Optional + DcrExchangeConfig *DcrExchangeConfigParameters `json:"dcrExchangeConfig,omitempty" tf:"dcr_exchange_config,omitempty"` + + // Default Analytics Hub data exchange, used for secured data sharing. 
+ // +kubebuilder:validation:Optional + DefaultExchangeConfig *DefaultExchangeConfigParameters `json:"defaultExchangeConfig,omitempty" tf:"default_exchange_config,omitempty"` } // AnalyticsHubDataExchangeSpec defines the desired state of AnalyticsHubDataExchange diff --git a/apis/bigquery/v1beta1/zz_generated.deepcopy.go b/apis/bigquery/v1beta1/zz_generated.deepcopy.go index a7fae3a3d..9e41e76b8 100644 --- a/apis/bigquery/v1beta1/zz_generated.deepcopy.go +++ b/apis/bigquery/v1beta1/zz_generated.deepcopy.go @@ -730,6 +730,11 @@ func (in *AnalyticsHubDataExchangeInitParameters) DeepCopyInto(out *AnalyticsHub *out = new(string) **out = **in } + if in.SharingEnvironmentConfig != nil { + in, out := &in.SharingEnvironmentConfig, &out.SharingEnvironmentConfig + *out = new(SharingEnvironmentConfigInitParameters) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnalyticsHubDataExchangeInitParameters. @@ -832,6 +837,11 @@ func (in *AnalyticsHubDataExchangeObservation) DeepCopyInto(out *AnalyticsHubDat *out = new(string) **out = **in } + if in.SharingEnvironmentConfig != nil { + in, out := &in.SharingEnvironmentConfig, &out.SharingEnvironmentConfig + *out = new(SharingEnvironmentConfigObservation) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnalyticsHubDataExchangeObservation. @@ -887,6 +897,11 @@ func (in *AnalyticsHubDataExchangeParameters) DeepCopyInto(out *AnalyticsHubData *out = new(string) **out = **in } + if in.SharingEnvironmentConfig != nil { + in, out := &in.SharingEnvironmentConfig, &out.SharingEnvironmentConfig + *out = new(SharingEnvironmentConfigParameters) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnalyticsHubDataExchangeParameters. 
@@ -6216,6 +6231,51 @@ func (in *DatasetStatus) DeepCopy() *DatasetStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DcrExchangeConfigInitParameters) DeepCopyInto(out *DcrExchangeConfigInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DcrExchangeConfigInitParameters. +func (in *DcrExchangeConfigInitParameters) DeepCopy() *DcrExchangeConfigInitParameters { + if in == nil { + return nil + } + out := new(DcrExchangeConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DcrExchangeConfigObservation) DeepCopyInto(out *DcrExchangeConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DcrExchangeConfigObservation. +func (in *DcrExchangeConfigObservation) DeepCopy() *DcrExchangeConfigObservation { + if in == nil { + return nil + } + out := new(DcrExchangeConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DcrExchangeConfigParameters) DeepCopyInto(out *DcrExchangeConfigParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DcrExchangeConfigParameters. +func (in *DcrExchangeConfigParameters) DeepCopy() *DcrExchangeConfigParameters { + if in == nil { + return nil + } + out := new(DcrExchangeConfigParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *DefaultDatasetInitParameters) DeepCopyInto(out *DefaultDatasetInitParameters) { *out = *in @@ -6391,6 +6451,51 @@ func (in *DefaultEncryptionConfigurationParameters) DeepCopy() *DefaultEncryptio return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefaultExchangeConfigInitParameters) DeepCopyInto(out *DefaultExchangeConfigInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultExchangeConfigInitParameters. +func (in *DefaultExchangeConfigInitParameters) DeepCopy() *DefaultExchangeConfigInitParameters { + if in == nil { + return nil + } + out := new(DefaultExchangeConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefaultExchangeConfigObservation) DeepCopyInto(out *DefaultExchangeConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultExchangeConfigObservation. +func (in *DefaultExchangeConfigObservation) DeepCopy() *DefaultExchangeConfigObservation { + if in == nil { + return nil + } + out := new(DefaultExchangeConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefaultExchangeConfigParameters) DeepCopyInto(out *DefaultExchangeConfigParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultExchangeConfigParameters. 
+func (in *DefaultExchangeConfigParameters) DeepCopy() *DefaultExchangeConfigParameters { + if in == nil { + return nil + } + out := new(DefaultExchangeConfigParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DestinationEncryptionConfigurationInitParameters) DeepCopyInto(out *DestinationEncryptionConfigurationInitParameters) { *out = *in @@ -11646,6 +11751,81 @@ func (in *SensitiveParamsParameters) DeepCopy() *SensitiveParamsParameters { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SharingEnvironmentConfigInitParameters) DeepCopyInto(out *SharingEnvironmentConfigInitParameters) { + *out = *in + if in.DcrExchangeConfig != nil { + in, out := &in.DcrExchangeConfig, &out.DcrExchangeConfig + *out = new(DcrExchangeConfigInitParameters) + **out = **in + } + if in.DefaultExchangeConfig != nil { + in, out := &in.DefaultExchangeConfig, &out.DefaultExchangeConfig + *out = new(DefaultExchangeConfigInitParameters) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharingEnvironmentConfigInitParameters. +func (in *SharingEnvironmentConfigInitParameters) DeepCopy() *SharingEnvironmentConfigInitParameters { + if in == nil { + return nil + } + out := new(SharingEnvironmentConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SharingEnvironmentConfigObservation) DeepCopyInto(out *SharingEnvironmentConfigObservation) { + *out = *in + if in.DcrExchangeConfig != nil { + in, out := &in.DcrExchangeConfig, &out.DcrExchangeConfig + *out = new(DcrExchangeConfigParameters) + **out = **in + } + if in.DefaultExchangeConfig != nil { + in, out := &in.DefaultExchangeConfig, &out.DefaultExchangeConfig + *out = new(DefaultExchangeConfigParameters) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharingEnvironmentConfigObservation. +func (in *SharingEnvironmentConfigObservation) DeepCopy() *SharingEnvironmentConfigObservation { + if in == nil { + return nil + } + out := new(SharingEnvironmentConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SharingEnvironmentConfigParameters) DeepCopyInto(out *SharingEnvironmentConfigParameters) { + *out = *in + if in.DcrExchangeConfig != nil { + in, out := &in.DcrExchangeConfig, &out.DcrExchangeConfig + *out = new(DcrExchangeConfigParameters) + **out = **in + } + if in.DefaultExchangeConfig != nil { + in, out := &in.DefaultExchangeConfig, &out.DefaultExchangeConfig + *out = new(DefaultExchangeConfigParameters) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharingEnvironmentConfigParameters. +func (in *SharingEnvironmentConfigParameters) DeepCopy() *SharingEnvironmentConfigParameters { + if in == nil { + return nil + } + out := new(SharingEnvironmentConfigParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *SourceModelInitParameters) DeepCopyInto(out *SourceModelInitParameters) { *out = *in diff --git a/apis/bigquery/v1beta2/zz_analyticshublisting_types.go b/apis/bigquery/v1beta2/zz_analyticshublisting_types.go index 6a4161107..2ee7cedaf 100755 --- a/apis/bigquery/v1beta2/zz_analyticshublisting_types.go +++ b/apis/bigquery/v1beta2/zz_analyticshublisting_types.go @@ -38,7 +38,7 @@ type AnalyticsHubListingInitParameters struct { // Base64 encoded image representing the listing. Icon *string `json:"icon,omitempty" tf:"icon,omitempty"` - // Email or URL of the listing publisher. + // Email or URL of the primary point of contact of the listing. PrimaryContact *string `json:"primaryContact,omitempty" tf:"primary_contact,omitempty"` // The ID of the project in which the resource belongs. @@ -94,7 +94,7 @@ type AnalyticsHubListingObservation struct { // The resource name of the listing. e.g. "projects/myproject/locations/US/dataExchanges/123/listings/456" Name *string `json:"name,omitempty" tf:"name,omitempty"` - // Email or URL of the listing publisher. + // Email or URL of the primary point of contact of the listing. PrimaryContact *string `json:"primaryContact,omitempty" tf:"primary_contact,omitempty"` // The ID of the project in which the resource belongs. @@ -163,7 +163,7 @@ type AnalyticsHubListingParameters struct { // +kubebuilder:validation:Required Location *string `json:"location" tf:"location,omitempty"` - // Email or URL of the listing publisher. + // Email or URL of the primary point of contact of the listing. // +kubebuilder:validation:Optional PrimaryContact *string `json:"primaryContact,omitempty" tf:"primary_contact,omitempty"` @@ -201,12 +201,20 @@ type BigqueryDatasetInitParameters struct { // Selector for a Dataset in bigquery to populate dataset. // +kubebuilder:validation:Optional DatasetSelector *v1.Selector `json:"datasetSelector,omitempty" tf:"-"` + + // Resource in this dataset that is selectively shared. 
This field is required for data clean room exchanges. + // Structure is documented below. + SelectedResources []SelectedResourcesInitParameters `json:"selectedResources,omitempty" tf:"selected_resources,omitempty"` } type BigqueryDatasetObservation struct { // Resource name of the dataset source for this listing. e.g. projects/myproject/datasets/123 Dataset *string `json:"dataset,omitempty" tf:"dataset,omitempty"` + + // Resource in this dataset that is selectively shared. This field is required for data clean room exchanges. + // Structure is documented below. + SelectedResources []SelectedResourcesObservation `json:"selectedResources,omitempty" tf:"selected_resources,omitempty"` } type BigqueryDatasetParameters struct { @@ -224,6 +232,11 @@ type BigqueryDatasetParameters struct { // Selector for a Dataset in bigquery to populate dataset. // +kubebuilder:validation:Optional DatasetSelector *v1.Selector `json:"datasetSelector,omitempty" tf:"-"` + + // Resource in this dataset that is selectively shared. This field is required for data clean room exchanges. + // Structure is documented below. + // +kubebuilder:validation:Optional + SelectedResources []SelectedResourcesParameters `json:"selectedResources,omitempty" tf:"selected_resources,omitempty"` } type DataProviderInitParameters struct { @@ -298,6 +311,10 @@ type RestrictedExportConfigObservation struct { // If true, enable restricted export. Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + // (Output) + // If true, restrict direct table access(read api/tabledata.list) on linked table. + RestrictDirectTableAccess *bool `json:"restrictDirectTableAccess,omitempty" tf:"restrict_direct_table_access,omitempty"` + // If true, restrict export of query result derived from restricted linked dataset table. 
RestrictQueryResult *bool `json:"restrictQueryResult,omitempty" tf:"restrict_query_result,omitempty"` } @@ -313,6 +330,45 @@ type RestrictedExportConfigParameters struct { RestrictQueryResult *bool `json:"restrictQueryResult,omitempty" tf:"restrict_query_result,omitempty"` } +type SelectedResourcesInitParameters struct { + + // Format: For table: projects/{projectId}/datasets/{datasetId}/tables/{tableId} Example:"projects/test_project/datasets/test_dataset/tables/test_table" + // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/bigquery/v1beta2.Table + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + Table *string `json:"table,omitempty" tf:"table,omitempty"` + + // Reference to a Table in bigquery to populate table. + // +kubebuilder:validation:Optional + TableRef *v1.Reference `json:"tableRef,omitempty" tf:"-"` + + // Selector for a Table in bigquery to populate table. + // +kubebuilder:validation:Optional + TableSelector *v1.Selector `json:"tableSelector,omitempty" tf:"-"` +} + +type SelectedResourcesObservation struct { + + // Format: For table: projects/{projectId}/datasets/{datasetId}/tables/{tableId} Example:"projects/test_project/datasets/test_dataset/tables/test_table" + Table *string `json:"table,omitempty" tf:"table,omitempty"` +} + +type SelectedResourcesParameters struct { + + // Format: For table: projects/{projectId}/datasets/{datasetId}/tables/{tableId} Example:"projects/test_project/datasets/test_dataset/tables/test_table" + // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/bigquery/v1beta2.Table + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + Table *string `json:"table,omitempty" tf:"table,omitempty"` + + // Reference to a Table in bigquery to populate table. 
+ // +kubebuilder:validation:Optional + TableRef *v1.Reference `json:"tableRef,omitempty" tf:"-"` + + // Selector for a Table in bigquery to populate table. + // +kubebuilder:validation:Optional + TableSelector *v1.Selector `json:"tableSelector,omitempty" tf:"-"` +} + // AnalyticsHubListingSpec defines the desired state of AnalyticsHubListing type AnalyticsHubListingSpec struct { v1.ResourceSpec `json:",inline"` diff --git a/apis/bigquery/v1beta2/zz_dataset_types.go b/apis/bigquery/v1beta2/zz_dataset_types.go index 7e1b2d88c..48855bb5e 100755 --- a/apis/bigquery/v1beta2/zz_dataset_types.go +++ b/apis/bigquery/v1beta2/zz_dataset_types.go @@ -13,6 +13,79 @@ import ( v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" ) +type AccessConditionInitParameters struct { + + // A user-friendly description of the dataset + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Textual representation of an expression in Common Expression Language syntax. + Expression *string `json:"expression,omitempty" tf:"expression,omitempty"` + + // The geographic location where the dataset should reside. + // See official docs. + // There are two types of locations, regional or multi-regional. A regional + // location is a specific geographic place, such as Tokyo, and a multi-regional + // location is a large geographic area, such as the United States, that + // contains at least two geographic places. + // The default value is multi-regional location US. + // Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Title for the expression, i.e. a short string describing its purpose. + // This can be used e.g. in UIs which allow to enter the expression. 
+ Title *string `json:"title,omitempty" tf:"title,omitempty"` +} + +type AccessConditionObservation struct { + + // A user-friendly description of the dataset + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Textual representation of an expression in Common Expression Language syntax. + Expression *string `json:"expression,omitempty" tf:"expression,omitempty"` + + // The geographic location where the dataset should reside. + // See official docs. + // There are two types of locations, regional or multi-regional. A regional + // location is a specific geographic place, such as Tokyo, and a multi-regional + // location is a large geographic area, such as the United States, that + // contains at least two geographic places. + // The default value is multi-regional location US. + // Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Title for the expression, i.e. a short string describing its purpose. + // This can be used e.g. in UIs which allow to enter the expression. + Title *string `json:"title,omitempty" tf:"title,omitempty"` +} + +type AccessConditionParameters struct { + + // A user-friendly description of the dataset + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Textual representation of an expression in Common Expression Language syntax. + // +kubebuilder:validation:Optional + Expression *string `json:"expression" tf:"expression,omitempty"` + + // The geographic location where the dataset should reside. + // See official docs. + // There are two types of locations, regional or multi-regional. A regional + // location is a specific geographic place, such as Tokyo, and a multi-regional + // location is a large geographic area, such as the United States, that + // contains at least two geographic places. + // The default value is multi-regional location US. 
+ // Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Title for the expression, i.e. a short string describing its purpose. + // This can be used e.g. in UIs which allow to enter the expression. + // +kubebuilder:validation:Optional + Title *string `json:"title,omitempty" tf:"title,omitempty"` +} + type AccessDatasetInitParameters struct { // The dataset this entry applies to @@ -50,6 +123,11 @@ type AccessDatasetParameters struct { type AccessInitParameters struct { + // Condition for the binding. If CEL expression in this field is true, this + // access binding will be considered. + // Structure is documented below. + Condition *AccessConditionInitParameters `json:"condition,omitempty" tf:"condition,omitempty"` + // Grants all resources of particular types in a particular dataset read access to the current dataset. // Structure is documented below. Dataset *AccessDatasetInitParameters `json:"dataset,omitempty" tf:"dataset,omitempty"` @@ -108,6 +186,11 @@ type AccessInitParameters struct { type AccessObservation struct { + // Condition for the binding. If CEL expression in this field is true, this + // access binding will be considered. + // Structure is documented below. + Condition *AccessConditionObservation `json:"condition,omitempty" tf:"condition,omitempty"` + // Grants all resources of particular types in a particular dataset read access to the current dataset. // Structure is documented below. Dataset *AccessDatasetObservation `json:"dataset,omitempty" tf:"dataset,omitempty"` @@ -156,6 +239,12 @@ type AccessObservation struct { type AccessParameters struct { + // Condition for the binding. If CEL expression in this field is true, this + // access binding will be considered. + // Structure is documented below. 
+ // +kubebuilder:validation:Optional + Condition *AccessConditionParameters `json:"condition,omitempty" tf:"condition,omitempty"` + // Grants all resources of particular types in a particular dataset read access to the current dataset. // Structure is documented below. // +kubebuilder:validation:Optional diff --git a/apis/bigquery/v1beta2/zz_datasetaccess_types.go b/apis/bigquery/v1beta2/zz_datasetaccess_types.go index 2971b7567..24245c4c5 100755 --- a/apis/bigquery/v1beta2/zz_datasetaccess_types.go +++ b/apis/bigquery/v1beta2/zz_datasetaccess_types.go @@ -13,6 +13,64 @@ import ( v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" ) +type DatasetAccessConditionInitParameters struct { + + // Description of the expression. This is a longer text which describes the expression, + // e.g. when hovered over it in a UI. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Textual representation of an expression in Common Expression Language syntax. + Expression *string `json:"expression,omitempty" tf:"expression,omitempty"` + + // String indicating the location of the expression for error reporting, e.g. a file + // name and a position in the file. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Title for the expression, i.e. a short string describing its purpose. + // This can be used e.g. in UIs which allow to enter the expression. + Title *string `json:"title,omitempty" tf:"title,omitempty"` +} + +type DatasetAccessConditionObservation struct { + + // Description of the expression. This is a longer text which describes the expression, + // e.g. when hovered over it in a UI. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Textual representation of an expression in Common Expression Language syntax. + Expression *string `json:"expression,omitempty" tf:"expression,omitempty"` + + // String indicating the location of the expression for error reporting, e.g. 
a file + // name and a position in the file. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Title for the expression, i.e. a short string describing its purpose. + // This can be used e.g. in UIs which allow to enter the expression. + Title *string `json:"title,omitempty" tf:"title,omitempty"` +} + +type DatasetAccessConditionParameters struct { + + // Description of the expression. This is a longer text which describes the expression, + // e.g. when hovered over it in a UI. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Textual representation of an expression in Common Expression Language syntax. + // +kubebuilder:validation:Optional + Expression *string `json:"expression" tf:"expression,omitempty"` + + // String indicating the location of the expression for error reporting, e.g. a file + // name and a position in the file. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Title for the expression, i.e. a short string describing its purpose. + // This can be used e.g. in UIs which allow to enter the expression. + // +kubebuilder:validation:Optional + Title *string `json:"title,omitempty" tf:"title,omitempty"` +} + type DatasetAccessDatasetDatasetInitParameters struct { // The ID of the dataset containing this table. @@ -97,6 +155,11 @@ type DatasetAccessDatasetParameters struct { type DatasetAccessInitParameters struct { + // Condition for the binding. If CEL expression in this field is true, this + // access binding will be considered. + // Structure is documented below. + Condition *DatasetAccessConditionInitParameters `json:"condition,omitempty" tf:"condition,omitempty"` + // Grants all resources of particular types in a particular dataset read access to the current dataset. // Structure is documented below. 
Dataset *DatasetAccessDatasetInitParameters `json:"dataset,omitempty" tf:"dataset,omitempty"` @@ -175,6 +238,11 @@ type DatasetAccessInitParameters struct { type DatasetAccessObservation struct { APIUpdatedMember *bool `json:"apiUpdatedMember,omitempty" tf:"api_updated_member,omitempty"` + // Condition for the binding. If CEL expression in this field is true, this + // access binding will be considered. + // Structure is documented below. + Condition *DatasetAccessConditionObservation `json:"condition,omitempty" tf:"condition,omitempty"` + // Grants all resources of particular types in a particular dataset read access to the current dataset. // Structure is documented below. Dataset *DatasetAccessDatasetObservation `json:"dataset,omitempty" tf:"dataset,omitempty"` @@ -236,6 +304,12 @@ type DatasetAccessObservation struct { type DatasetAccessParameters struct { + // Condition for the binding. If CEL expression in this field is true, this + // access binding will be considered. + // Structure is documented below. + // +kubebuilder:validation:Optional + Condition *DatasetAccessConditionParameters `json:"condition,omitempty" tf:"condition,omitempty"` + // Grants all resources of particular types in a particular dataset read access to the current dataset. // Structure is documented below. // +kubebuilder:validation:Optional diff --git a/apis/bigquery/v1beta2/zz_datatransferconfig_types.go b/apis/bigquery/v1beta2/zz_datatransferconfig_types.go index 349bc48a2..3ca6bbcdf 100755 --- a/apis/bigquery/v1beta2/zz_datatransferconfig_types.go +++ b/apis/bigquery/v1beta2/zz_datatransferconfig_types.go @@ -48,6 +48,10 @@ type DataTransferConfigInitParameters struct { // Structure is documented below. EmailPreferences *EmailPreferencesInitParameters `json:"emailPreferences,omitempty" tf:"email_preferences,omitempty"` + // Represents the encryption configuration for a transfer. + // Structure is documented below. 
+ EncryptionConfiguration *EncryptionConfigurationInitParameters `json:"encryptionConfiguration,omitempty" tf:"encryption_configuration,omitempty"` + // The geographic location where the transfer config should reside. // Examples: US, EU, asia-northeast1. The default value is US. Location *string `json:"location,omitempty" tf:"location,omitempty"` @@ -123,6 +127,10 @@ type DataTransferConfigObservation struct { // Structure is documented below. EmailPreferences *EmailPreferencesObservation `json:"emailPreferences,omitempty" tf:"email_preferences,omitempty"` + // Represents the encryption configuration for a transfer. + // Structure is documented below. + EncryptionConfiguration *EncryptionConfigurationObservation `json:"encryptionConfiguration,omitempty" tf:"encryption_configuration,omitempty"` + // an identifier for the resource with format {{name}} ID *string `json:"id,omitempty" tf:"id,omitempty"` @@ -223,6 +231,11 @@ type DataTransferConfigParameters struct { // +kubebuilder:validation:Optional EmailPreferences *EmailPreferencesParameters `json:"emailPreferences,omitempty" tf:"email_preferences,omitempty"` + // Represents the encryption configuration for a transfer. + // Structure is documented below. + // +kubebuilder:validation:Optional + EncryptionConfiguration *EncryptionConfigurationParameters `json:"encryptionConfiguration,omitempty" tf:"encryption_configuration,omitempty"` + // The geographic location where the transfer config should reside. // Examples: US, EU, asia-northeast1. The default value is US. // +kubebuilder:validation:Optional @@ -299,6 +312,45 @@ type EmailPreferencesParameters struct { EnableFailureEmail *bool `json:"enableFailureEmail" tf:"enable_failure_email,omitempty"` } +type EncryptionConfigurationInitParameters struct { + + // The name of the KMS key used for encrypting BigQuery data. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/kms/v1beta2.CryptoKey + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + KMSKeyName *string `json:"kmsKeyName,omitempty" tf:"kms_key_name,omitempty"` + + // Reference to a CryptoKey in kms to populate kmsKeyName. + // +kubebuilder:validation:Optional + KMSKeyNameRef *v1.Reference `json:"kmsKeyNameRef,omitempty" tf:"-"` + + // Selector for a CryptoKey in kms to populate kmsKeyName. + // +kubebuilder:validation:Optional + KMSKeyNameSelector *v1.Selector `json:"kmsKeyNameSelector,omitempty" tf:"-"` +} + +type EncryptionConfigurationObservation struct { + + // The name of the KMS key used for encrypting BigQuery data. + KMSKeyName *string `json:"kmsKeyName,omitempty" tf:"kms_key_name,omitempty"` +} + +type EncryptionConfigurationParameters struct { + + // The name of the KMS key used for encrypting BigQuery data. + // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/kms/v1beta2.CryptoKey + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + KMSKeyName *string `json:"kmsKeyName,omitempty" tf:"kms_key_name,omitempty"` + + // Reference to a CryptoKey in kms to populate kmsKeyName. + // +kubebuilder:validation:Optional + KMSKeyNameRef *v1.Reference `json:"kmsKeyNameRef,omitempty" tf:"-"` + + // Selector for a CryptoKey in kms to populate kmsKeyName. 
+ // +kubebuilder:validation:Optional + KMSKeyNameSelector *v1.Selector `json:"kmsKeyNameSelector,omitempty" tf:"-"` +} + type ScheduleOptionsInitParameters struct { // If true, automatic scheduling of data transfer runs for this diff --git a/apis/bigquery/v1beta2/zz_generated.deepcopy.go b/apis/bigquery/v1beta2/zz_generated.deepcopy.go index 808e3387b..fdca51d7d 100644 --- a/apis/bigquery/v1beta2/zz_generated.deepcopy.go +++ b/apis/bigquery/v1beta2/zz_generated.deepcopy.go @@ -13,6 +13,111 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessConditionInitParameters) DeepCopyInto(out *AccessConditionInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Expression != nil { + in, out := &in.Expression, &out.Expression + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessConditionInitParameters. +func (in *AccessConditionInitParameters) DeepCopy() *AccessConditionInitParameters { + if in == nil { + return nil + } + out := new(AccessConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccessConditionObservation) DeepCopyInto(out *AccessConditionObservation) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Expression != nil { + in, out := &in.Expression, &out.Expression + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessConditionObservation. +func (in *AccessConditionObservation) DeepCopy() *AccessConditionObservation { + if in == nil { + return nil + } + out := new(AccessConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessConditionParameters) DeepCopyInto(out *AccessConditionParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Expression != nil { + in, out := &in.Expression, &out.Expression + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessConditionParameters. +func (in *AccessConditionParameters) DeepCopy() *AccessConditionParameters { + if in == nil { + return nil + } + out := new(AccessConditionParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *AccessDatasetInitParameters) DeepCopyInto(out *AccessDatasetInitParameters) { *out = *in @@ -109,6 +214,11 @@ func (in *AccessDatasetParameters) DeepCopy() *AccessDatasetParameters { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AccessInitParameters) DeepCopyInto(out *AccessInitParameters) { *out = *in + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = new(AccessConditionInitParameters) + (*in).DeepCopyInto(*out) + } if in.Dataset != nil { in, out := &in.Dataset, &out.Dataset *out = new(AccessDatasetInitParameters) @@ -179,6 +289,11 @@ func (in *AccessInitParameters) DeepCopy() *AccessInitParameters { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AccessObservation) DeepCopyInto(out *AccessObservation) { *out = *in + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = new(AccessConditionObservation) + (*in).DeepCopyInto(*out) + } if in.Dataset != nil { in, out := &in.Dataset, &out.Dataset *out = new(AccessDatasetObservation) @@ -239,6 +354,11 @@ func (in *AccessObservation) DeepCopy() *AccessObservation { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AccessParameters) DeepCopyInto(out *AccessParameters) { *out = *in + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = new(AccessConditionParameters) + (*in).DeepCopyInto(*out) + } if in.Dataset != nil { in, out := &in.Dataset, &out.Dataset *out = new(AccessDatasetParameters) @@ -1397,6 +1517,111 @@ func (in *AzureParameters) DeepCopy() *AzureParameters { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BiglakeConfigurationInitParameters) DeepCopyInto(out *BiglakeConfigurationInitParameters) { + *out = *in + if in.ConnectionID != nil { + in, out := &in.ConnectionID, &out.ConnectionID + *out = new(string) + **out = **in + } + if in.FileFormat != nil { + in, out := &in.FileFormat, &out.FileFormat + *out = new(string) + **out = **in + } + if in.StorageURI != nil { + in, out := &in.StorageURI, &out.StorageURI + *out = new(string) + **out = **in + } + if in.TableFormat != nil { + in, out := &in.TableFormat, &out.TableFormat + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BiglakeConfigurationInitParameters. +func (in *BiglakeConfigurationInitParameters) DeepCopy() *BiglakeConfigurationInitParameters { + if in == nil { + return nil + } + out := new(BiglakeConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BiglakeConfigurationObservation) DeepCopyInto(out *BiglakeConfigurationObservation) { + *out = *in + if in.ConnectionID != nil { + in, out := &in.ConnectionID, &out.ConnectionID + *out = new(string) + **out = **in + } + if in.FileFormat != nil { + in, out := &in.FileFormat, &out.FileFormat + *out = new(string) + **out = **in + } + if in.StorageURI != nil { + in, out := &in.StorageURI, &out.StorageURI + *out = new(string) + **out = **in + } + if in.TableFormat != nil { + in, out := &in.TableFormat, &out.TableFormat + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BiglakeConfigurationObservation. 
+func (in *BiglakeConfigurationObservation) DeepCopy() *BiglakeConfigurationObservation { + if in == nil { + return nil + } + out := new(BiglakeConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BiglakeConfigurationParameters) DeepCopyInto(out *BiglakeConfigurationParameters) { + *out = *in + if in.ConnectionID != nil { + in, out := &in.ConnectionID, &out.ConnectionID + *out = new(string) + **out = **in + } + if in.FileFormat != nil { + in, out := &in.FileFormat, &out.FileFormat + *out = new(string) + **out = **in + } + if in.StorageURI != nil { + in, out := &in.StorageURI, &out.StorageURI + *out = new(string) + **out = **in + } + if in.TableFormat != nil { + in, out := &in.TableFormat, &out.TableFormat + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BiglakeConfigurationParameters. +func (in *BiglakeConfigurationParameters) DeepCopy() *BiglakeConfigurationParameters { + if in == nil { + return nil + } + out := new(BiglakeConfigurationParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BigqueryDatasetInitParameters) DeepCopyInto(out *BigqueryDatasetInitParameters) { *out = *in @@ -1415,6 +1640,13 @@ func (in *BigqueryDatasetInitParameters) DeepCopyInto(out *BigqueryDatasetInitPa *out = new(v1.Selector) (*in).DeepCopyInto(*out) } + if in.SelectedResources != nil { + in, out := &in.SelectedResources, &out.SelectedResources + *out = make([]SelectedResourcesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BigqueryDatasetInitParameters. 
@@ -1435,6 +1667,13 @@ func (in *BigqueryDatasetObservation) DeepCopyInto(out *BigqueryDatasetObservati *out = new(string) **out = **in } + if in.SelectedResources != nil { + in, out := &in.SelectedResources, &out.SelectedResources + *out = make([]SelectedResourcesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BigqueryDatasetObservation. @@ -1465,6 +1704,13 @@ func (in *BigqueryDatasetParameters) DeepCopyInto(out *BigqueryDatasetParameters *out = new(v1.Selector) (*in).DeepCopyInto(*out) } + if in.SelectedResources != nil { + in, out := &in.SelectedResources, &out.SelectedResources + *out = make([]SelectedResourcesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BigqueryDatasetParameters. @@ -3171,6 +3417,11 @@ func (in *DataTransferConfigInitParameters) DeepCopyInto(out *DataTransferConfig *out = new(EmailPreferencesInitParameters) (*in).DeepCopyInto(*out) } + if in.EncryptionConfiguration != nil { + in, out := &in.EncryptionConfiguration, &out.EncryptionConfiguration + *out = new(EncryptionConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } if in.Location != nil { in, out := &in.Location, &out.Location *out = new(string) @@ -3299,6 +3550,11 @@ func (in *DataTransferConfigObservation) DeepCopyInto(out *DataTransferConfigObs *out = new(EmailPreferencesObservation) (*in).DeepCopyInto(*out) } + if in.EncryptionConfiguration != nil { + in, out := &in.EncryptionConfiguration, &out.EncryptionConfiguration + *out = new(EncryptionConfigurationObservation) + (*in).DeepCopyInto(*out) + } if in.ID != nil { in, out := &in.ID, &out.ID *out = new(string) @@ -3415,6 +3671,11 @@ func (in *DataTransferConfigParameters) DeepCopyInto(out *DataTransferConfigPara *out = new(EmailPreferencesParameters) 
(*in).DeepCopyInto(*out) } + if in.EncryptionConfiguration != nil { + in, out := &in.EncryptionConfiguration, &out.EncryptionConfiguration + *out = new(EncryptionConfigurationParameters) + (*in).DeepCopyInto(*out) + } if in.Location != nil { in, out := &in.Location, &out.Location *out = new(string) @@ -3567,6 +3828,111 @@ func (in *DatasetAccess) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatasetAccessConditionInitParameters) DeepCopyInto(out *DatasetAccessConditionInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Expression != nil { + in, out := &in.Expression, &out.Expression + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatasetAccessConditionInitParameters. +func (in *DatasetAccessConditionInitParameters) DeepCopy() *DatasetAccessConditionInitParameters { + if in == nil { + return nil + } + out := new(DatasetAccessConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DatasetAccessConditionObservation) DeepCopyInto(out *DatasetAccessConditionObservation) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Expression != nil { + in, out := &in.Expression, &out.Expression + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatasetAccessConditionObservation. +func (in *DatasetAccessConditionObservation) DeepCopy() *DatasetAccessConditionObservation { + if in == nil { + return nil + } + out := new(DatasetAccessConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatasetAccessConditionParameters) DeepCopyInto(out *DatasetAccessConditionParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Expression != nil { + in, out := &in.Expression, &out.Expression + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatasetAccessConditionParameters. +func (in *DatasetAccessConditionParameters) DeepCopy() *DatasetAccessConditionParameters { + if in == nil { + return nil + } + out := new(DatasetAccessConditionParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. func (in *DatasetAccessDatasetDatasetInitParameters) DeepCopyInto(out *DatasetAccessDatasetDatasetInitParameters) { *out = *in @@ -3758,6 +4124,11 @@ func (in *DatasetAccessDatasetParameters) DeepCopy() *DatasetAccessDatasetParame // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DatasetAccessInitParameters) DeepCopyInto(out *DatasetAccessInitParameters) { *out = *in + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = new(DatasetAccessConditionInitParameters) + (*in).DeepCopyInto(*out) + } if in.Dataset != nil { in, out := &in.Dataset, &out.Dataset *out = new(DatasetAccessDatasetInitParameters) @@ -3885,6 +4256,11 @@ func (in *DatasetAccessObservation) DeepCopyInto(out *DatasetAccessObservation) *out = new(bool) **out = **in } + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = new(DatasetAccessConditionObservation) + (*in).DeepCopyInto(*out) + } if in.Dataset != nil { in, out := &in.Dataset, &out.Dataset *out = new(DatasetAccessDatasetObservation) @@ -3960,6 +4336,11 @@ func (in *DatasetAccessObservation) DeepCopy() *DatasetAccessObservation { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *DatasetAccessParameters) DeepCopyInto(out *DatasetAccessParameters) { *out = *in + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = new(DatasetAccessConditionParameters) + (*in).DeepCopyInto(*out) + } if in.Dataset != nil { in, out := &in.Dataset, &out.Dataset *out = new(DatasetAccessDatasetParameters) @@ -5757,16 +6138,6 @@ func (in *DestinationEncryptionConfigurationInitParameters) DeepCopyInto(out *De *out = new(string) **out = **in } - if in.KMSKeyNameRef != nil { - in, out := &in.KMSKeyNameRef, &out.KMSKeyNameRef - *out = new(v1.Reference) - (*in).DeepCopyInto(*out) - } - if in.KMSKeyNameSelector != nil { - in, out := &in.KMSKeyNameSelector, &out.KMSKeyNameSelector - *out = new(v1.Selector) - (*in).DeepCopyInto(*out) - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationEncryptionConfigurationInitParameters. @@ -5812,16 +6183,6 @@ func (in *DestinationEncryptionConfigurationParameters) DeepCopyInto(out *Destin *out = new(string) **out = **in } - if in.KMSKeyNameRef != nil { - in, out := &in.KMSKeyNameRef, &out.KMSKeyNameRef - *out = new(v1.Reference) - (*in).DeepCopyInto(*out) - } - if in.KMSKeyNameSelector != nil { - in, out := &in.KMSKeyNameSelector, &out.KMSKeyNameSelector - *out = new(v1.Selector) - (*in).DeepCopyInto(*out) - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationEncryptionConfigurationParameters. 
@@ -6032,6 +6393,16 @@ func (in *EncryptionConfigurationInitParameters) DeepCopyInto(out *EncryptionCon *out = new(string) **out = **in } + if in.KMSKeyNameRef != nil { + in, out := &in.KMSKeyNameRef, &out.KMSKeyNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyNameSelector != nil { + in, out := &in.KMSKeyNameSelector, &out.KMSKeyNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionConfigurationInitParameters. @@ -6052,11 +6423,6 @@ func (in *EncryptionConfigurationObservation) DeepCopyInto(out *EncryptionConfig *out = new(string) **out = **in } - if in.KMSKeyVersion != nil { - in, out := &in.KMSKeyVersion, &out.KMSKeyVersion - *out = new(string) - **out = **in - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionConfigurationObservation. @@ -6077,6 +6443,16 @@ func (in *EncryptionConfigurationParameters) DeepCopyInto(out *EncryptionConfigu *out = new(string) **out = **in } + if in.KMSKeyNameRef != nil { + in, out := &in.KMSKeyNameRef, &out.KMSKeyNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyNameSelector != nil { + in, out := &in.KMSKeyNameSelector, &out.KMSKeyNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionConfigurationParameters. 
@@ -9560,11 +9936,6 @@ func (in *ReservationInitParameters) DeepCopyInto(out *ReservationInitParameters *out = new(bool) **out = **in } - if in.MultiRegionAuxiliary != nil { - in, out := &in.MultiRegionAuxiliary, &out.MultiRegionAuxiliary - *out = new(bool) - **out = **in - } if in.SlotCapacity != nil { in, out := &in.SlotCapacity, &out.SlotCapacity *out = new(float64) @@ -9647,11 +10018,6 @@ func (in *ReservationObservation) DeepCopyInto(out *ReservationObservation) { *out = new(string) **out = **in } - if in.MultiRegionAuxiliary != nil { - in, out := &in.MultiRegionAuxiliary, &out.MultiRegionAuxiliary - *out = new(bool) - **out = **in - } if in.Project != nil { in, out := &in.Project, &out.Project *out = new(string) @@ -9702,11 +10068,6 @@ func (in *ReservationParameters) DeepCopyInto(out *ReservationParameters) { *out = new(string) **out = **in } - if in.MultiRegionAuxiliary != nil { - in, out := &in.MultiRegionAuxiliary, &out.MultiRegionAuxiliary - *out = new(bool) - **out = **in - } if in.Project != nil { in, out := &in.Project, &out.Project *out = new(string) @@ -9797,6 +10158,11 @@ func (in *RestrictedExportConfigObservation) DeepCopyInto(out *RestrictedExportC *out = new(bool) **out = **in } + if in.RestrictDirectTableAccess != nil { + in, out := &in.RestrictDirectTableAccess, &out.RestrictDirectTableAccess + *out = new(bool) + **out = **in + } if in.RestrictQueryResult != nil { in, out := &in.RestrictQueryResult, &out.RestrictQueryResult *out = new(bool) @@ -10562,6 +10928,86 @@ func (in *ScriptOptionsParameters) DeepCopy() *ScriptOptionsParameters { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SelectedResourcesInitParameters) DeepCopyInto(out *SelectedResourcesInitParameters) { + *out = *in + if in.Table != nil { + in, out := &in.Table, &out.Table + *out = new(string) + **out = **in + } + if in.TableRef != nil { + in, out := &in.TableRef, &out.TableRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TableSelector != nil { + in, out := &in.TableSelector, &out.TableSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelectedResourcesInitParameters. +func (in *SelectedResourcesInitParameters) DeepCopy() *SelectedResourcesInitParameters { + if in == nil { + return nil + } + out := new(SelectedResourcesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelectedResourcesObservation) DeepCopyInto(out *SelectedResourcesObservation) { + *out = *in + if in.Table != nil { + in, out := &in.Table, &out.Table + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelectedResourcesObservation. +func (in *SelectedResourcesObservation) DeepCopy() *SelectedResourcesObservation { + if in == nil { + return nil + } + out := new(SelectedResourcesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SelectedResourcesParameters) DeepCopyInto(out *SelectedResourcesParameters) { + *out = *in + if in.Table != nil { + in, out := &in.Table, &out.Table + *out = new(string) + **out = **in + } + if in.TableRef != nil { + in, out := &in.TableRef, &out.TableRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TableSelector != nil { + in, out := &in.TableSelector, &out.TableSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelectedResourcesParameters. +func (in *SelectedResourcesParameters) DeepCopy() *SelectedResourcesParameters { + if in == nil { + return nil + } + out := new(SelectedResourcesParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SensitiveParamsInitParameters) DeepCopyInto(out *SensitiveParamsInitParameters) { *out = *in @@ -11571,6 +12017,71 @@ func (in *TableConstraintsParameters) DeepCopy() *TableConstraintsParameters { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableEncryptionConfigurationInitParameters) DeepCopyInto(out *TableEncryptionConfigurationInitParameters) { + *out = *in + if in.KMSKeyName != nil { + in, out := &in.KMSKeyName, &out.KMSKeyName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableEncryptionConfigurationInitParameters. +func (in *TableEncryptionConfigurationInitParameters) DeepCopy() *TableEncryptionConfigurationInitParameters { + if in == nil { + return nil + } + out := new(TableEncryptionConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TableEncryptionConfigurationObservation) DeepCopyInto(out *TableEncryptionConfigurationObservation) { + *out = *in + if in.KMSKeyName != nil { + in, out := &in.KMSKeyName, &out.KMSKeyName + *out = new(string) + **out = **in + } + if in.KMSKeyVersion != nil { + in, out := &in.KMSKeyVersion, &out.KMSKeyVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableEncryptionConfigurationObservation. +func (in *TableEncryptionConfigurationObservation) DeepCopy() *TableEncryptionConfigurationObservation { + if in == nil { + return nil + } + out := new(TableEncryptionConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableEncryptionConfigurationParameters) DeepCopyInto(out *TableEncryptionConfigurationParameters) { + *out = *in + if in.KMSKeyName != nil { + in, out := &in.KMSKeyName, &out.KMSKeyName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableEncryptionConfigurationParameters. +func (in *TableEncryptionConfigurationParameters) DeepCopy() *TableEncryptionConfigurationParameters { + if in == nil { + return nil + } + out := new(TableEncryptionConfigurationParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TableIAMBinding) DeepCopyInto(out *TableIAMBinding) { *out = *in @@ -12290,10 +12801,10 @@ func (in *TableIAMMemberStatus) DeepCopy() *TableIAMMemberStatus { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *TableInitParameters) DeepCopyInto(out *TableInitParameters) { *out = *in - if in.AllowResourceTagsOnDeletion != nil { - in, out := &in.AllowResourceTagsOnDeletion, &out.AllowResourceTagsOnDeletion - *out = new(bool) - **out = **in + if in.BiglakeConfiguration != nil { + in, out := &in.BiglakeConfiguration, &out.BiglakeConfiguration + *out = new(BiglakeConfigurationInitParameters) + (*in).DeepCopyInto(*out) } if in.Clustering != nil { in, out := &in.Clustering, &out.Clustering @@ -12306,11 +12817,6 @@ func (in *TableInitParameters) DeepCopyInto(out *TableInitParameters) { } } } - if in.DeletionProtection != nil { - in, out := &in.DeletionProtection, &out.DeletionProtection - *out = new(bool) - **out = **in - } if in.Description != nil { in, out := &in.Description, &out.Description *out = new(string) @@ -12318,7 +12824,7 @@ func (in *TableInitParameters) DeepCopyInto(out *TableInitParameters) { } if in.EncryptionConfiguration != nil { in, out := &in.EncryptionConfiguration, &out.EncryptionConfiguration - *out = new(EncryptionConfigurationInitParameters) + *out = new(TableEncryptionConfigurationInitParameters) (*in).DeepCopyInto(*out) } if in.ExpirationTime != nil { @@ -12460,10 +12966,10 @@ func (in *TableList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *TableObservation) DeepCopyInto(out *TableObservation) { *out = *in - if in.AllowResourceTagsOnDeletion != nil { - in, out := &in.AllowResourceTagsOnDeletion, &out.AllowResourceTagsOnDeletion - *out = new(bool) - **out = **in + if in.BiglakeConfiguration != nil { + in, out := &in.BiglakeConfiguration, &out.BiglakeConfiguration + *out = new(BiglakeConfigurationObservation) + (*in).DeepCopyInto(*out) } if in.Clustering != nil { in, out := &in.Clustering, &out.Clustering @@ -12514,7 +13020,7 @@ func (in *TableObservation) DeepCopyInto(out *TableObservation) { } if in.EncryptionConfiguration != nil { in, out := &in.EncryptionConfiguration, &out.EncryptionConfiguration - *out = new(EncryptionConfigurationObservation) + *out = new(TableEncryptionConfigurationObservation) (*in).DeepCopyInto(*out) } if in.Etag != nil { @@ -12690,10 +13196,10 @@ func (in *TableObservation) DeepCopy() *TableObservation { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *TableParameters) DeepCopyInto(out *TableParameters) { *out = *in - if in.AllowResourceTagsOnDeletion != nil { - in, out := &in.AllowResourceTagsOnDeletion, &out.AllowResourceTagsOnDeletion - *out = new(bool) - **out = **in + if in.BiglakeConfiguration != nil { + in, out := &in.BiglakeConfiguration, &out.BiglakeConfiguration + *out = new(BiglakeConfigurationParameters) + (*in).DeepCopyInto(*out) } if in.Clustering != nil { in, out := &in.Clustering, &out.Clustering @@ -12721,11 +13227,6 @@ func (in *TableParameters) DeepCopyInto(out *TableParameters) { *out = new(v1.Selector) (*in).DeepCopyInto(*out) } - if in.DeletionProtection != nil { - in, out := &in.DeletionProtection, &out.DeletionProtection - *out = new(bool) - **out = **in - } if in.Description != nil { in, out := &in.Description, &out.Description *out = new(string) @@ -12733,7 +13234,7 @@ func (in *TableParameters) DeepCopyInto(out *TableParameters) { } if in.EncryptionConfiguration != nil { in, out := &in.EncryptionConfiguration, &out.EncryptionConfiguration - *out = new(EncryptionConfigurationParameters) + *out = new(TableEncryptionConfigurationParameters) (*in).DeepCopyInto(*out) } if in.ExpirationTime != nil { diff --git a/apis/bigquery/v1beta2/zz_generated.resolvers.go b/apis/bigquery/v1beta2/zz_generated.resolvers.go index 65851bc01..34f9726a0 100644 --- a/apis/bigquery/v1beta2/zz_generated.resolvers.go +++ b/apis/bigquery/v1beta2/zz_generated.resolvers.go @@ -97,6 +97,29 @@ func (mg *AnalyticsHubListing) ResolveReferences(ctx context.Context, c client.R mg.Spec.ForProvider.BigqueryDataset.DatasetRef = rsp.ResolvedReference } + if mg.Spec.ForProvider.BigqueryDataset != nil { + for i4 := 0; i4 < len(mg.Spec.ForProvider.BigqueryDataset.SelectedResources); i4++ { + { + m, l, err = apisresolver.GetManagedResource("bigquery.gcp.upbound.io", "v1beta2", "Table", "TableList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference 
resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.BigqueryDataset.SelectedResources[i4].Table), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.BigqueryDataset.SelectedResources[i4].TableRef, + Selector: mg.Spec.ForProvider.BigqueryDataset.SelectedResources[i4].TableSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.BigqueryDataset.SelectedResources[i4].Table") + } + mg.Spec.ForProvider.BigqueryDataset.SelectedResources[i4].Table = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.BigqueryDataset.SelectedResources[i4].TableRef = rsp.ResolvedReference + + } + } { m, l, err = apisresolver.GetManagedResource("bigquery.gcp.upbound.io", "v1beta1", "AnalyticsHubDataExchange", "AnalyticsHubDataExchangeList") if err != nil { @@ -137,6 +160,29 @@ func (mg *AnalyticsHubListing) ResolveReferences(ctx context.Context, c client.R mg.Spec.InitProvider.BigqueryDataset.DatasetRef = rsp.ResolvedReference } + if mg.Spec.InitProvider.BigqueryDataset != nil { + for i4 := 0; i4 < len(mg.Spec.InitProvider.BigqueryDataset.SelectedResources); i4++ { + { + m, l, err = apisresolver.GetManagedResource("bigquery.gcp.upbound.io", "v1beta2", "Table", "TableList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.BigqueryDataset.SelectedResources[i4].Table), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.BigqueryDataset.SelectedResources[i4].TableRef, + Selector: mg.Spec.InitProvider.BigqueryDataset.SelectedResources[i4].TableSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, 
"mg.Spec.InitProvider.BigqueryDataset.SelectedResources[i4].Table") + } + mg.Spec.InitProvider.BigqueryDataset.SelectedResources[i4].Table = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.BigqueryDataset.SelectedResources[i4].TableRef = rsp.ResolvedReference + + } + } return nil } @@ -357,12 +403,33 @@ func (mg *DataTransferConfig) ResolveReferences(ctx context.Context, c client.Re } mg.Spec.ForProvider.DestinationDatasetID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.DestinationDatasetIDRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.EncryptionConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("kms.gcp.upbound.io", "v1beta2", "CryptoKey", "CryptoKeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.EncryptionConfiguration.KMSKeyName), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.EncryptionConfiguration.KMSKeyNameRef, + Selector: mg.Spec.ForProvider.EncryptionConfiguration.KMSKeyNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.EncryptionConfiguration.KMSKeyName") + } + mg.Spec.ForProvider.EncryptionConfiguration.KMSKeyName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.EncryptionConfiguration.KMSKeyNameRef = rsp.ResolvedReference + + } { m, l, err = apisresolver.GetManagedResource("bigquery.gcp.upbound.io", "v1beta2", "Dataset", "DatasetList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } - rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DestinationDatasetID), Extract: reference.ExternalName(), @@ -377,6 +444,28 @@ func 
(mg *DataTransferConfig) ResolveReferences(ctx context.Context, c client.Re mg.Spec.InitProvider.DestinationDatasetID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.DestinationDatasetIDRef = rsp.ResolvedReference + if mg.Spec.InitProvider.EncryptionConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("kms.gcp.upbound.io", "v1beta2", "CryptoKey", "CryptoKeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.EncryptionConfiguration.KMSKeyName), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.EncryptionConfiguration.KMSKeyNameRef, + Selector: mg.Spec.InitProvider.EncryptionConfiguration.KMSKeyNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.EncryptionConfiguration.KMSKeyName") + } + mg.Spec.InitProvider.EncryptionConfiguration.KMSKeyName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.EncryptionConfiguration.KMSKeyNameRef = rsp.ResolvedReference + + } + return nil } @@ -1081,29 +1170,6 @@ func (mg *Job) ResolveReferences(ctx context.Context, c client.Reader) error { var rsp reference.ResolutionResponse var err error - if mg.Spec.ForProvider.Copy != nil { - if mg.Spec.ForProvider.Copy.DestinationEncryptionConfiguration != nil { - { - m, l, err = apisresolver.GetManagedResource("kms.gcp.upbound.io", "v1beta2", "CryptoKey", "CryptoKeyList") - if err != nil { - return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") - } - rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ - CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Copy.DestinationEncryptionConfiguration.KMSKeyName), - Extract: resource.ExtractResourceID(), - Reference: 
mg.Spec.ForProvider.Copy.DestinationEncryptionConfiguration.KMSKeyNameRef, - Selector: mg.Spec.ForProvider.Copy.DestinationEncryptionConfiguration.KMSKeyNameSelector, - To: reference.To{List: l, Managed: m}, - }) - } - if err != nil { - return errors.Wrap(err, "mg.Spec.ForProvider.Copy.DestinationEncryptionConfiguration.KMSKeyName") - } - mg.Spec.ForProvider.Copy.DestinationEncryptionConfiguration.KMSKeyName = reference.ToPtrValue(rsp.ResolvedValue) - mg.Spec.ForProvider.Copy.DestinationEncryptionConfiguration.KMSKeyNameRef = rsp.ResolvedReference - - } - } if mg.Spec.ForProvider.Copy != nil { if mg.Spec.ForProvider.Copy.DestinationTable != nil { { @@ -1311,29 +1377,6 @@ func (mg *Job) ResolveReferences(ctx context.Context, c client.Reader) error { } } - if mg.Spec.InitProvider.Copy != nil { - if mg.Spec.InitProvider.Copy.DestinationEncryptionConfiguration != nil { - { - m, l, err = apisresolver.GetManagedResource("kms.gcp.upbound.io", "v1beta2", "CryptoKey", "CryptoKeyList") - if err != nil { - return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") - } - rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ - CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Copy.DestinationEncryptionConfiguration.KMSKeyName), - Extract: resource.ExtractResourceID(), - Reference: mg.Spec.InitProvider.Copy.DestinationEncryptionConfiguration.KMSKeyNameRef, - Selector: mg.Spec.InitProvider.Copy.DestinationEncryptionConfiguration.KMSKeyNameSelector, - To: reference.To{List: l, Managed: m}, - }) - } - if err != nil { - return errors.Wrap(err, "mg.Spec.InitProvider.Copy.DestinationEncryptionConfiguration.KMSKeyName") - } - mg.Spec.InitProvider.Copy.DestinationEncryptionConfiguration.KMSKeyName = reference.ToPtrValue(rsp.ResolvedValue) - mg.Spec.InitProvider.Copy.DestinationEncryptionConfiguration.KMSKeyNameRef = rsp.ResolvedReference - - } - } if mg.Spec.InitProvider.Copy != nil { if 
mg.Spec.InitProvider.Copy.DestinationTable != nil { { diff --git a/apis/bigquery/v1beta2/zz_job_types.go b/apis/bigquery/v1beta2/zz_job_types.go index cc4193698..e49adb116 100755 --- a/apis/bigquery/v1beta2/zz_job_types.go +++ b/apis/bigquery/v1beta2/zz_job_types.go @@ -170,17 +170,7 @@ type DestinationEncryptionConfigurationInitParameters struct { // Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. // The BigQuery Service Account associated with your project requires access to this encryption key. - // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/kms/v1beta2.CryptoKey - // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() KMSKeyName *string `json:"kmsKeyName,omitempty" tf:"kms_key_name,omitempty"` - - // Reference to a CryptoKey in kms to populate kmsKeyName. - // +kubebuilder:validation:Optional - KMSKeyNameRef *v1.Reference `json:"kmsKeyNameRef,omitempty" tf:"-"` - - // Selector for a CryptoKey in kms to populate kmsKeyName. - // +kubebuilder:validation:Optional - KMSKeyNameSelector *v1.Selector `json:"kmsKeyNameSelector,omitempty" tf:"-"` } type DestinationEncryptionConfigurationObservation struct { @@ -198,18 +188,8 @@ type DestinationEncryptionConfigurationParameters struct { // Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. // The BigQuery Service Account associated with your project requires access to this encryption key. - // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/kms/v1beta2.CryptoKey - // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional - KMSKeyName *string `json:"kmsKeyName,omitempty" tf:"kms_key_name,omitempty"` - - // Reference to a CryptoKey in kms to populate kmsKeyName. 
- // +kubebuilder:validation:Optional - KMSKeyNameRef *v1.Reference `json:"kmsKeyNameRef,omitempty" tf:"-"` - - // Selector for a CryptoKey in kms to populate kmsKeyName. - // +kubebuilder:validation:Optional - KMSKeyNameSelector *v1.Selector `json:"kmsKeyNameSelector,omitempty" tf:"-"` + KMSKeyName *string `json:"kmsKeyName" tf:"kms_key_name,omitempty"` } type DestinationTableInitParameters struct { diff --git a/apis/bigquery/v1beta2/zz_reservation_types.go b/apis/bigquery/v1beta2/zz_reservation_types.go index 49f2cadf0..edd29df64 100755 --- a/apis/bigquery/v1beta2/zz_reservation_types.go +++ b/apis/bigquery/v1beta2/zz_reservation_types.go @@ -53,10 +53,6 @@ type ReservationInitParameters struct { // capacity specified above at most. IgnoreIdleSlots *bool `json:"ignoreIdleSlots,omitempty" tf:"ignore_idle_slots,omitempty"` - // Applicable only for reservations located within one of the BigQuery multi-regions (US or EU). - // If set to true, this reservation is placed in the organization's secondary region which is designated for disaster recovery purposes. If false, this reservation is placed in the organization's default region. - MultiRegionAuxiliary *bool `json:"multiRegionAuxiliary,omitempty" tf:"multi_region_auxiliary,omitempty"` - // Minimum slots available to this reservation. A slot is a unit of computational power in BigQuery, and serves as the // unit of parallelism. Queries using this reservation might use more slots during runtime if ignoreIdleSlots is set to false. SlotCapacity *float64 `json:"slotCapacity,omitempty" tf:"slot_capacity,omitempty"` @@ -86,10 +82,6 @@ type ReservationObservation struct { // Examples: US, EU, asia-northeast1. The default value is US. Location *string `json:"location,omitempty" tf:"location,omitempty"` - // Applicable only for reservations located within one of the BigQuery multi-regions (US or EU). 
- // If set to true, this reservation is placed in the organization's secondary region which is designated for disaster recovery purposes. If false, this reservation is placed in the organization's default region. - MultiRegionAuxiliary *bool `json:"multiRegionAuxiliary,omitempty" tf:"multi_region_auxiliary,omitempty"` - // The ID of the project in which the resource belongs. // If it is not provided, the provider project is used. Project *string `json:"project,omitempty" tf:"project,omitempty"` @@ -125,11 +117,6 @@ type ReservationParameters struct { // +kubebuilder:validation:Optional Location *string `json:"location,omitempty" tf:"location,omitempty"` - // Applicable only for reservations located within one of the BigQuery multi-regions (US or EU). - // If set to true, this reservation is placed in the organization's secondary region which is designated for disaster recovery purposes. If false, this reservation is placed in the organization's default region. - // +kubebuilder:validation:Optional - MultiRegionAuxiliary *bool `json:"multiRegionAuxiliary,omitempty" tf:"multi_region_auxiliary,omitempty"` - // The ID of the project in which the resource belongs. // If it is not provided, the provider project is used. // +kubebuilder:validation:Optional diff --git a/apis/bigquery/v1beta2/zz_table_types.go b/apis/bigquery/v1beta2/zz_table_types.go index 7aa16427d..b10af6252 100755 --- a/apis/bigquery/v1beta2/zz_table_types.go +++ b/apis/bigquery/v1beta2/zz_table_types.go @@ -38,6 +38,67 @@ type AvroOptionsParameters struct { UseAvroLogicalTypes *bool `json:"useAvroLogicalTypes" tf:"use_avro_logical_types,omitempty"` } +type BiglakeConfigurationInitParameters struct { + + // The connection specifying the credentials to be used to + // read and write to external storage, such as Cloud Storage. The connection_id can + // have the form ".." or + // projects//locations//connections/". 
+ ConnectionID *string `json:"connectionId,omitempty" tf:"connection_id,omitempty"` + + // The file format the table data is stored in. + FileFormat *string `json:"fileFormat,omitempty" tf:"file_format,omitempty"` + + // The fully qualified location prefix of the external folder where table data + // is stored. The '*' wildcard character is not allowed. The URI should be in the format "gs://bucket/path_to_table/" + StorageURI *string `json:"storageUri,omitempty" tf:"storage_uri,omitempty"` + + // The table format the metadata only snapshots are stored in. + TableFormat *string `json:"tableFormat,omitempty" tf:"table_format,omitempty"` +} + +type BiglakeConfigurationObservation struct { + + // The connection specifying the credentials to be used to + // read and write to external storage, such as Cloud Storage. The connection_id can + // have the form ".." or + // projects//locations//connections/". + ConnectionID *string `json:"connectionId,omitempty" tf:"connection_id,omitempty"` + + // The file format the table data is stored in. + FileFormat *string `json:"fileFormat,omitempty" tf:"file_format,omitempty"` + + // The fully qualified location prefix of the external folder where table data + // is stored. The '*' wildcard character is not allowed. The URI should be in the format "gs://bucket/path_to_table/" + StorageURI *string `json:"storageUri,omitempty" tf:"storage_uri,omitempty"` + + // The table format the metadata only snapshots are stored in. + TableFormat *string `json:"tableFormat,omitempty" tf:"table_format,omitempty"` +} + +type BiglakeConfigurationParameters struct { + + // The connection specifying the credentials to be used to + // read and write to external storage, such as Cloud Storage. The connection_id can + // have the form ".." or + // projects//locations//connections/". + // +kubebuilder:validation:Optional + ConnectionID *string `json:"connectionId" tf:"connection_id,omitempty"` + + // The file format the table data is stored in. 
+ // +kubebuilder:validation:Optional + FileFormat *string `json:"fileFormat" tf:"file_format,omitempty"` + + // The fully qualified location prefix of the external folder where table data + // is stored. The '*' wildcard character is not allowed. The URI should be in the format "gs://bucket/path_to_table/" + // +kubebuilder:validation:Optional + StorageURI *string `json:"storageUri" tf:"storage_uri,omitempty"` + + // The table format the metadata only snapshots are stored in. + // +kubebuilder:validation:Optional + TableFormat *string `json:"tableFormat" tf:"table_format,omitempty"` +} + type BigtableOptionsInitParameters struct { // A list of column families to expose in the table schema along with their types. This list restricts the column families that can be referenced in queries and specifies their value types. You can use this list to do type conversions - see the 'type' field for more details. If you leave this list empty, all column families are present in the table schema and their values are read as BYTES. During a query only the column families referenced in that query are read from Bigtable. Structure is documented below. @@ -217,32 +278,32 @@ type ColumnParameters struct { type ColumnReferencesInitParameters struct { - // : The column in the primary key that are + // The column in the primary key that are // referenced by the referencingColumn ReferencedColumn *string `json:"referencedColumn,omitempty" tf:"referenced_column,omitempty"` - // : The column that composes the foreign key. + // The column that composes the foreign key. ReferencingColumn *string `json:"referencingColumn,omitempty" tf:"referencing_column,omitempty"` } type ColumnReferencesObservation struct { - // : The column in the primary key that are + // The column in the primary key that are // referenced by the referencingColumn ReferencedColumn *string `json:"referencedColumn,omitempty" tf:"referenced_column,omitempty"` - // : The column that composes the foreign key. 
+ // The column that composes the foreign key. ReferencingColumn *string `json:"referencingColumn,omitempty" tf:"referencing_column,omitempty"` } type ColumnReferencesParameters struct { - // : The column in the primary key that are + // The column in the primary key that are // referenced by the referencingColumn // +kubebuilder:validation:Optional ReferencedColumn *string `json:"referencedColumn" tf:"referenced_column,omitempty"` - // : The column that composes the foreign key. + // The column that composes the foreign key. // +kubebuilder:validation:Optional ReferencingColumn *string `json:"referencingColumn" tf:"referencing_column,omitempty"` } @@ -340,43 +401,9 @@ type CsvOptionsParameters struct { SkipLeadingRows *float64 `json:"skipLeadingRows,omitempty" tf:"skip_leading_rows,omitempty"` } -type EncryptionConfigurationInitParameters struct { - - // The self link or full name of a key which should be used to - // encrypt this table. Note that the default bigquery service account will need to have - // encrypt/decrypt permissions on this key - you may want to see the - // google_bigquery_default_service_account datasource and the - // google_kms_crypto_key_iam_binding resource. - KMSKeyName *string `json:"kmsKeyName,omitempty" tf:"kms_key_name,omitempty"` -} - -type EncryptionConfigurationObservation struct { - - // The self link or full name of a key which should be used to - // encrypt this table. Note that the default bigquery service account will need to have - // encrypt/decrypt permissions on this key - you may want to see the - // google_bigquery_default_service_account datasource and the - // google_kms_crypto_key_iam_binding resource. - KMSKeyName *string `json:"kmsKeyName,omitempty" tf:"kms_key_name,omitempty"` - - // The self link or full name of the kms key version used to encrypt this table. 
- KMSKeyVersion *string `json:"kmsKeyVersion,omitempty" tf:"kms_key_version,omitempty"` -} - -type EncryptionConfigurationParameters struct { - - // The self link or full name of a key which should be used to - // encrypt this table. Note that the default bigquery service account will need to have - // encrypt/decrypt permissions on this key - you may want to see the - // google_bigquery_default_service_account datasource and the - // google_kms_crypto_key_iam_binding resource. - // +kubebuilder:validation:Optional - KMSKeyName *string `json:"kmsKeyName" tf:"kms_key_name,omitempty"` -} - type ExternalDataConfigurationInitParameters struct { - // - Let BigQuery try to autodetect the schema + // Let BigQuery try to autodetect the schema // and format of the table. Autodetect *bool `json:"autodetect,omitempty" tf:"autodetect,omitempty"` @@ -476,7 +503,7 @@ type ExternalDataConfigurationInitParameters struct { type ExternalDataConfigurationObservation struct { - // - Let BigQuery try to autodetect the schema + // Let BigQuery try to autodetect the schema // and format of the table. Autodetect *bool `json:"autodetect,omitempty" tf:"autodetect,omitempty"` @@ -576,7 +603,7 @@ type ExternalDataConfigurationObservation struct { type ExternalDataConfigurationParameters struct { - // - Let BigQuery try to autodetect the schema + // Let BigQuery try to autodetect the schema // and format of the table. // +kubebuilder:validation:Optional Autodetect *bool `json:"autodetect" tf:"autodetect,omitempty"` @@ -725,14 +752,16 @@ type ExternalDataConfigurationParquetOptionsParameters struct { type ForeignKeysInitParameters struct { - // : The pair of the foreign key column and primary key column. + // The pair of the foreign key column and primary key column. // Structure is documented below. ColumnReferences *ColumnReferencesInitParameters `json:"columnReferences,omitempty" tf:"column_references,omitempty"` - // : Set only if the foreign key constraint is named. 
+ // ) + // Name of the SerDe. + // The maximum length is 256 characters. Name *string `json:"name,omitempty" tf:"name,omitempty"` - // : The table that holds the primary key + // The table that holds the primary key // and is referenced by this foreign key. // Structure is documented below. ReferencedTable *ReferencedTableInitParameters `json:"referencedTable,omitempty" tf:"referenced_table,omitempty"` @@ -740,14 +769,16 @@ type ForeignKeysInitParameters struct { type ForeignKeysObservation struct { - // : The pair of the foreign key column and primary key column. + // The pair of the foreign key column and primary key column. // Structure is documented below. ColumnReferences *ColumnReferencesObservation `json:"columnReferences,omitempty" tf:"column_references,omitempty"` - // : Set only if the foreign key constraint is named. + // ) + // Name of the SerDe. + // The maximum length is 256 characters. Name *string `json:"name,omitempty" tf:"name,omitempty"` - // : The table that holds the primary key + // The table that holds the primary key // and is referenced by this foreign key. // Structure is documented below. ReferencedTable *ReferencedTableObservation `json:"referencedTable,omitempty" tf:"referenced_table,omitempty"` @@ -755,16 +786,18 @@ type ForeignKeysObservation struct { type ForeignKeysParameters struct { - // : The pair of the foreign key column and primary key column. + // The pair of the foreign key column and primary key column. // Structure is documented below. // +kubebuilder:validation:Optional ColumnReferences *ColumnReferencesParameters `json:"columnReferences" tf:"column_references,omitempty"` - // : Set only if the foreign key constraint is named. + // ) + // Name of the SerDe. + // The maximum length is 256 characters. // +kubebuilder:validation:Optional Name *string `json:"name,omitempty" tf:"name,omitempty"` - // : The table that holds the primary key + // The table that holds the primary key // and is referenced by this foreign key. 
// Structure is documented below. // +kubebuilder:validation:Optional @@ -960,19 +993,19 @@ type MaterializedViewParameters struct { type PrimaryKeyInitParameters struct { - // : The columns that are composed of the primary key constraint. + // The columns that are composed of the primary key constraint. Columns []*string `json:"columns,omitempty" tf:"columns,omitempty"` } type PrimaryKeyObservation struct { - // : The columns that are composed of the primary key constraint. + // The columns that are composed of the primary key constraint. Columns []*string `json:"columns,omitempty" tf:"columns,omitempty"` } type PrimaryKeyParameters struct { - // : The columns that are composed of the primary key constraint. + // The columns that are composed of the primary key constraint. // +kubebuilder:validation:Optional Columns []*string `json:"columns" tf:"columns,omitempty"` } @@ -1053,7 +1086,7 @@ type RangePartitioningParameters struct { type ReferencedTableInitParameters struct { - // : The ID of the project containing this table. + // The ID of the project containing this table. ProjectID *string `json:"projectId,omitempty" tf:"project_id,omitempty"` // A unique ID for the resource. @@ -1063,10 +1096,10 @@ type ReferencedTableInitParameters struct { type ReferencedTableObservation struct { - // : The ID of the dataset containing this table. + // The ID of the dataset containing this table. DatasetID *string `json:"datasetId,omitempty" tf:"dataset_id,omitempty"` - // : The ID of the project containing this table. + // The ID of the project containing this table. ProjectID *string `json:"projectId,omitempty" tf:"project_id,omitempty"` // A unique ID for the resource. @@ -1076,11 +1109,11 @@ type ReferencedTableObservation struct { type ReferencedTableParameters struct { - // : The ID of the dataset containing this table. + // The ID of the dataset containing this table. 
// +kubebuilder:validation:Required DatasetID *string `json:"datasetId" tf:"dataset_id,omitempty"` - // : The ID of the project containing this table. + // The ID of the project containing this table. // +kubebuilder:validation:Optional ProjectID *string `json:"projectId" tf:"project_id,omitempty"` @@ -1134,28 +1167,57 @@ type TableConstraintsParameters struct { PrimaryKey *PrimaryKeyParameters `json:"primaryKey,omitempty" tf:"primary_key,omitempty"` } +type TableEncryptionConfigurationInitParameters struct { + + // The self link or full name of a key which should be used to + // encrypt this table. Note that the default bigquery service account will need to have + // encrypt/decrypt permissions on this key - you may want to see the + // google_bigquery_default_service_account datasource and the + // google_kms_crypto_key_iam_binding resource. + KMSKeyName *string `json:"kmsKeyName,omitempty" tf:"kms_key_name,omitempty"` +} + +type TableEncryptionConfigurationObservation struct { + + // The self link or full name of a key which should be used to + // encrypt this table. Note that the default bigquery service account will need to have + // encrypt/decrypt permissions on this key - you may want to see the + // google_bigquery_default_service_account datasource and the + // google_kms_crypto_key_iam_binding resource. + KMSKeyName *string `json:"kmsKeyName,omitempty" tf:"kms_key_name,omitempty"` + + // The self link or full name of the kms key version used to encrypt this table. + KMSKeyVersion *string `json:"kmsKeyVersion,omitempty" tf:"kms_key_version,omitempty"` +} + +type TableEncryptionConfigurationParameters struct { + + // The self link or full name of a key which should be used to + // encrypt this table. Note that the default bigquery service account will need to have + // encrypt/decrypt permissions on this key - you may want to see the + // google_bigquery_default_service_account datasource and the + // google_kms_crypto_key_iam_binding resource. 
+ // +kubebuilder:validation:Optional + KMSKeyName *string `json:"kmsKeyName" tf:"kms_key_name,omitempty"` +} + type TableInitParameters struct { - // If set to true, it allows table - // deletion when there are still resource tags attached. The default value is - // false. - AllowResourceTagsOnDeletion *bool `json:"allowResourceTagsOnDeletion,omitempty" tf:"allow_resource_tags_on_deletion,omitempty"` + // Specifies the configuration of a BigLake managed table. Structure is documented below + BiglakeConfiguration *BiglakeConfigurationInitParameters `json:"biglakeConfiguration,omitempty" tf:"biglake_configuration,omitempty"` // Specifies column names to use for data clustering. // Up to four top-level columns are allowed, and should be specified in // descending priority order. Clustering []*string `json:"clustering,omitempty" tf:"clustering,omitempty"` - // When the field is set to false, deleting the table is allowed.. - DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` - // The field description. Description *string `json:"description,omitempty" tf:"description,omitempty"` // Specifies how the table should be encrypted. // If left blank, the table will be encrypted with a Google-managed key; that process // is transparent to the user. Structure is documented below. - EncryptionConfiguration *EncryptionConfigurationInitParameters `json:"encryptionConfiguration,omitempty" tf:"encryption_configuration,omitempty"` + EncryptionConfiguration *TableEncryptionConfigurationInitParameters `json:"encryptionConfiguration,omitempty" tf:"encryption_configuration,omitempty"` // The time when this table expires, in // milliseconds since the epoch. If not present, the table will persist @@ -1180,7 +1242,7 @@ type TableInitParameters struct { // Structure is documented below. 
MaterializedView *MaterializedViewInitParameters `json:"materializedView,omitempty" tf:"materialized_view,omitempty"` - // : The maximum staleness of data that could be + // The maximum staleness of data that could be // returned when the table (or stale MV) is queried. Staleness encoded as a // string encoding of SQL IntervalValue // type. @@ -1227,10 +1289,8 @@ type TableInitParameters struct { type TableObservation struct { - // If set to true, it allows table - // deletion when there are still resource tags attached. The default value is - // false. - AllowResourceTagsOnDeletion *bool `json:"allowResourceTagsOnDeletion,omitempty" tf:"allow_resource_tags_on_deletion,omitempty"` + // Specifies the configuration of a BigLake managed table. Structure is documented below + BiglakeConfiguration *BiglakeConfigurationObservation `json:"biglakeConfiguration,omitempty" tf:"biglake_configuration,omitempty"` // Specifies column names to use for data clustering. // Up to four top-level columns are allowed, and should be specified in @@ -1256,7 +1316,7 @@ type TableObservation struct { // Specifies how the table should be encrypted. // If left blank, the table will be encrypted with a Google-managed key; that process // is transparent to the user. Structure is documented below. - EncryptionConfiguration *EncryptionConfigurationObservation `json:"encryptionConfiguration,omitempty" tf:"encryption_configuration,omitempty"` + EncryptionConfiguration *TableEncryptionConfigurationObservation `json:"encryptionConfiguration,omitempty" tf:"encryption_configuration,omitempty"` // A hash of the resource. Etag *string `json:"etag,omitempty" tf:"etag,omitempty"` @@ -1276,7 +1336,7 @@ type TableObservation struct { // A descriptive name for the table. 
FriendlyName *string `json:"friendlyName,omitempty" tf:"friendly_name,omitempty"` - // an identifier for the resource with format projects/{{project}}/datasets/{{dataset}}/tables/{{name}} + // An identifier for the resource with format projects/{{project}}/datasets/{{dataset}}/tables/{{name}} ID *string `json:"id,omitempty" tf:"id,omitempty"` // A mapping of labels to assign to the resource. @@ -1293,7 +1353,7 @@ type TableObservation struct { // Structure is documented below. MaterializedView *MaterializedViewObservation `json:"materializedView,omitempty" tf:"materialized_view,omitempty"` - // : The maximum staleness of data that could be + // The maximum staleness of data that could be // returned when the table (or stale MV) is queried. Staleness encoded as a // string encoding of SQL IntervalValue // type. @@ -1363,11 +1423,9 @@ type TableObservation struct { type TableParameters struct { - // If set to true, it allows table - // deletion when there are still resource tags attached. The default value is - // false. + // Specifies the configuration of a BigLake managed table. Structure is documented below // +kubebuilder:validation:Optional - AllowResourceTagsOnDeletion *bool `json:"allowResourceTagsOnDeletion,omitempty" tf:"allow_resource_tags_on_deletion,omitempty"` + BiglakeConfiguration *BiglakeConfigurationParameters `json:"biglakeConfiguration,omitempty" tf:"biglake_configuration,omitempty"` // Specifies column names to use for data clustering. // Up to four top-level columns are allowed, and should be specified in @@ -1389,10 +1447,6 @@ type TableParameters struct { // +kubebuilder:validation:Optional DatasetIDSelector *v1.Selector `json:"datasetIdSelector,omitempty" tf:"-"` - // When the field is set to false, deleting the table is allowed.. - // +kubebuilder:validation:Optional - DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` - // The field description. 
// +kubebuilder:validation:Optional Description *string `json:"description,omitempty" tf:"description,omitempty"` @@ -1401,7 +1455,7 @@ type TableParameters struct { // If left blank, the table will be encrypted with a Google-managed key; that process // is transparent to the user. Structure is documented below. // +kubebuilder:validation:Optional - EncryptionConfiguration *EncryptionConfigurationParameters `json:"encryptionConfiguration,omitempty" tf:"encryption_configuration,omitempty"` + EncryptionConfiguration *TableEncryptionConfigurationParameters `json:"encryptionConfiguration,omitempty" tf:"encryption_configuration,omitempty"` // The time when this table expires, in // milliseconds since the epoch. If not present, the table will persist @@ -1431,7 +1485,7 @@ type TableParameters struct { // +kubebuilder:validation:Optional MaterializedView *MaterializedViewParameters `json:"materializedView,omitempty" tf:"materialized_view,omitempty"` - // : The maximum staleness of data that could be + // The maximum staleness of data that could be // returned when the table (or stale MV) is queried. Staleness encoded as a // string encoding of SQL IntervalValue // type. diff --git a/apis/bigtable/v1beta1/zz_generated.deepcopy.go b/apis/bigtable/v1beta1/zz_generated.deepcopy.go index a00f457a2..55a1fe7b0 100644 --- a/apis/bigtable/v1beta1/zz_generated.deepcopy.go +++ b/apis/bigtable/v1beta1/zz_generated.deepcopy.go @@ -672,6 +672,11 @@ func (in *ColumnFamilyInitParameters) DeepCopyInto(out *ColumnFamilyInitParamete *out = new(string) **out = **in } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ColumnFamilyInitParameters. 
@@ -692,6 +697,11 @@ func (in *ColumnFamilyObservation) DeepCopyInto(out *ColumnFamilyObservation) { *out = new(string) **out = **in } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ColumnFamilyObservation. @@ -712,6 +722,11 @@ func (in *ColumnFamilyParameters) DeepCopyInto(out *ColumnFamilyParameters) { *out = new(string) **out = **in } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ColumnFamilyParameters. @@ -3456,11 +3471,6 @@ func (in *TableInitParameters) DeepCopyInto(out *TableInitParameters) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.DeletionProtection != nil { - in, out := &in.DeletionProtection, &out.DeletionProtection - *out = new(string) - **out = **in - } if in.Project != nil { in, out := &in.Project, &out.Project *out = new(string) @@ -3604,11 +3614,6 @@ func (in *TableParameters) DeepCopyInto(out *TableParameters) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.DeletionProtection != nil { - in, out := &in.DeletionProtection, &out.DeletionProtection - *out = new(string) - **out = **in - } if in.InstanceName != nil { in, out := &in.InstanceName, &out.InstanceName *out = new(string) diff --git a/apis/bigtable/v1beta1/zz_table_types.go b/apis/bigtable/v1beta1/zz_table_types.go index 87fcc0653..c3e380715 100755 --- a/apis/bigtable/v1beta1/zz_table_types.go +++ b/apis/bigtable/v1beta1/zz_table_types.go @@ -38,12 +38,18 @@ type ColumnFamilyInitParameters struct { // The name of the column family. Family *string `json:"family,omitempty" tf:"family,omitempty"` + + // The type of the column family. + Type *string `json:"type,omitempty" tf:"type,omitempty"` } type ColumnFamilyObservation struct { // The name of the column family. 
Family *string `json:"family,omitempty" tf:"family,omitempty"` + + // The type of the column family. + Type *string `json:"type,omitempty" tf:"type,omitempty"` } type ColumnFamilyParameters struct { @@ -51,6 +57,10 @@ type ColumnFamilyParameters struct { // The name of the column family. // +kubebuilder:validation:Optional Family *string `json:"family" tf:"family,omitempty"` + + // The type of the column family. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` } type TableInitParameters struct { @@ -64,9 +74,6 @@ type TableInitParameters struct { // A group of columns within a table which share a common configuration. This can be specified multiple times. Structure is documented below. ColumnFamily []ColumnFamilyInitParameters `json:"columnFamily,omitempty" tf:"column_family,omitempty"` - // A field to make the table protected against data loss i.e. when set to PROTECTED, deleting the table, the column families in the table, and the instance containing the table would be prohibited. If not provided, deletion protection will be set to UNPROTECTED. - DeletionProtection *string `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` - // The ID of the project in which the resource belongs. If it // is not provided, the provider project is used. Project *string `json:"project,omitempty" tf:"project,omitempty"` @@ -117,10 +124,6 @@ type TableParameters struct { // +kubebuilder:validation:Optional ColumnFamily []ColumnFamilyParameters `json:"columnFamily,omitempty" tf:"column_family,omitempty"` - // A field to make the table protected against data loss i.e. when set to PROTECTED, deleting the table, the column families in the table, and the instance containing the table would be prohibited. If not provided, deletion protection will be set to UNPROTECTED. 
- // +kubebuilder:validation:Optional - DeletionProtection *string `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` - // The name of the Bigtable instance. // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/bigtable/v1beta2.Instance // +kubebuilder:validation:Optional diff --git a/apis/bigtable/v1beta2/zz_appprofile_types.go b/apis/bigtable/v1beta2/zz_appprofile_types.go index a07c41b9d..153f4913e 100755 --- a/apis/bigtable/v1beta2/zz_appprofile_types.go +++ b/apis/bigtable/v1beta2/zz_appprofile_types.go @@ -36,6 +36,8 @@ type AppProfileInitParameters struct { // If it is not provided, the provider project is used. Project *string `json:"project,omitempty" tf:"project,omitempty"` + RowAffinity *bool `json:"rowAffinity,omitempty" tf:"row_affinity,omitempty"` + // Use a single-cluster routing policy. // Structure is documented below. SingleClusterRouting *SingleClusterRoutingInitParameters `json:"singleClusterRouting,omitempty" tf:"single_cluster_routing,omitempty"` @@ -77,6 +79,8 @@ type AppProfileObservation struct { // If it is not provided, the provider project is used. Project *string `json:"project,omitempty" tf:"project,omitempty"` + RowAffinity *bool `json:"rowAffinity,omitempty" tf:"row_affinity,omitempty"` + // Use a single-cluster routing policy. // Structure is documented below. SingleClusterRouting *SingleClusterRoutingObservation `json:"singleClusterRouting,omitempty" tf:"single_cluster_routing,omitempty"` @@ -128,6 +132,9 @@ type AppProfileParameters struct { // +kubebuilder:validation:Optional Project *string `json:"project,omitempty" tf:"project,omitempty"` + // +kubebuilder:validation:Optional + RowAffinity *bool `json:"rowAffinity,omitempty" tf:"row_affinity,omitempty"` + // Use a single-cluster routing policy. // Structure is documented below. 
// +kubebuilder:validation:Optional diff --git a/apis/bigtable/v1beta2/zz_generated.deepcopy.go b/apis/bigtable/v1beta2/zz_generated.deepcopy.go index eb273897e..efc768eab 100644 --- a/apis/bigtable/v1beta2/zz_generated.deepcopy.go +++ b/apis/bigtable/v1beta2/zz_generated.deepcopy.go @@ -79,6 +79,11 @@ func (in *AppProfileInitParameters) DeepCopyInto(out *AppProfileInitParameters) *out = new(string) **out = **in } + if in.RowAffinity != nil { + in, out := &in.RowAffinity, &out.RowAffinity + *out = new(bool) + **out = **in + } if in.SingleClusterRouting != nil { in, out := &in.SingleClusterRouting, &out.SingleClusterRouting *out = new(SingleClusterRoutingInitParameters) @@ -187,6 +192,11 @@ func (in *AppProfileObservation) DeepCopyInto(out *AppProfileObservation) { *out = new(string) **out = **in } + if in.RowAffinity != nil { + in, out := &in.RowAffinity, &out.RowAffinity + *out = new(bool) + **out = **in + } if in.SingleClusterRouting != nil { in, out := &in.SingleClusterRouting, &out.SingleClusterRouting *out = new(SingleClusterRoutingObservation) @@ -263,6 +273,11 @@ func (in *AppProfileParameters) DeepCopyInto(out *AppProfileParameters) { *out = new(string) **out = **in } + if in.RowAffinity != nil { + in, out := &in.RowAffinity, &out.RowAffinity + *out = new(bool) + **out = **in + } if in.SingleClusterRouting != nil { in, out := &in.SingleClusterRouting, &out.SingleClusterRouting *out = new(SingleClusterRoutingParameters) @@ -1623,11 +1638,6 @@ func (in *InstanceInitParameters) DeepCopyInto(out *InstanceInitParameters) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.DeletionProtection != nil { - in, out := &in.DeletionProtection, &out.DeletionProtection - *out = new(bool) - **out = **in - } if in.DisplayName != nil { in, out := &in.DisplayName, &out.DisplayName *out = new(string) @@ -1818,11 +1828,6 @@ func (in *InstanceParameters) DeepCopyInto(out *InstanceParameters) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.DeletionProtection != nil { - in, out := 
&in.DeletionProtection, &out.DeletionProtection - *out = new(bool) - **out = **in - } if in.DisplayName != nil { in, out := &in.DisplayName, &out.DisplayName *out = new(string) diff --git a/apis/bigtable/v1beta2/zz_instance_types.go b/apis/bigtable/v1beta2/zz_instance_types.go index 3aec4142a..339eb7cdb 100755 --- a/apis/bigtable/v1beta2/zz_instance_types.go +++ b/apis/bigtable/v1beta2/zz_instance_types.go @@ -153,9 +153,6 @@ type InstanceInitParameters struct { // to default to the backend value. See structure below. Cluster []ClusterInitParameters `json:"cluster,omitempty" tf:"cluster,omitempty"` - // When the field is set to false, deleting the instance is allowed. - DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` - // The human-readable display name of the Bigtable instance. Defaults to the instance name. DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` @@ -228,10 +225,6 @@ type InstanceParameters struct { // +kubebuilder:validation:Optional Cluster []ClusterParameters `json:"cluster,omitempty" tf:"cluster,omitempty"` - // When the field is set to false, deleting the instance is allowed. - // +kubebuilder:validation:Optional - DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` - // The human-readable display name of the Bigtable instance. Defaults to the instance name. // +kubebuilder:validation:Optional DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` diff --git a/apis/certificatemanager/v1beta2/zz_certificate_types.go b/apis/certificatemanager/v1beta2/zz_certificate_types.go index 5d374a52f..330d397fe 100755 --- a/apis/certificatemanager/v1beta2/zz_certificate_types.go +++ b/apis/certificatemanager/v1beta2/zz_certificate_types.go @@ -107,6 +107,9 @@ type CertificateObservation struct { // If it is not provided, the provider project is used. 
Project *string `json:"project,omitempty" tf:"project,omitempty"` + // The list of Subject Alternative Names of dnsName type defined in the certificate (see RFC 5280 4.2.1.6) + SanDnsnames []*string `json:"sanDnsnames,omitempty" tf:"san_dnsnames,omitempty"` + // The scope of the certificate. // DEFAULT: Certificates with default scope are served from core Google data centers. // If unsure, choose this option. diff --git a/apis/certificatemanager/v1beta2/zz_generated.deepcopy.go b/apis/certificatemanager/v1beta2/zz_generated.deepcopy.go index c80f0d917..c5077962d 100644 --- a/apis/certificatemanager/v1beta2/zz_generated.deepcopy.go +++ b/apis/certificatemanager/v1beta2/zz_generated.deepcopy.go @@ -253,6 +253,17 @@ func (in *CertificateObservation) DeepCopyInto(out *CertificateObservation) { *out = new(string) **out = **in } + if in.SanDnsnames != nil { + in, out := &in.SanDnsnames, &out.SanDnsnames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.Scope != nil { in, out := &in.Scope, &out.Scope *out = new(string) diff --git a/apis/cloudbuild/v1beta2/zz_generated.deepcopy.go b/apis/cloudbuild/v1beta2/zz_generated.deepcopy.go index 5736a326d..4708fb77c 100644 --- a/apis/cloudbuild/v1beta2/zz_generated.deepcopy.go +++ b/apis/cloudbuild/v1beta2/zz_generated.deepcopy.go @@ -1846,6 +1846,81 @@ func (in *OptionsParameters) DeepCopy() *OptionsParameters { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PrivateServiceConnectInitParameters) DeepCopyInto(out *PrivateServiceConnectInitParameters) { + *out = *in + if in.NetworkAttachment != nil { + in, out := &in.NetworkAttachment, &out.NetworkAttachment + *out = new(string) + **out = **in + } + if in.RouteAllTraffic != nil { + in, out := &in.RouteAllTraffic, &out.RouteAllTraffic + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateServiceConnectInitParameters. +func (in *PrivateServiceConnectInitParameters) DeepCopy() *PrivateServiceConnectInitParameters { + if in == nil { + return nil + } + out := new(PrivateServiceConnectInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrivateServiceConnectObservation) DeepCopyInto(out *PrivateServiceConnectObservation) { + *out = *in + if in.NetworkAttachment != nil { + in, out := &in.NetworkAttachment, &out.NetworkAttachment + *out = new(string) + **out = **in + } + if in.RouteAllTraffic != nil { + in, out := &in.RouteAllTraffic, &out.RouteAllTraffic + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateServiceConnectObservation. +func (in *PrivateServiceConnectObservation) DeepCopy() *PrivateServiceConnectObservation { + if in == nil { + return nil + } + out := new(PrivateServiceConnectObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PrivateServiceConnectParameters) DeepCopyInto(out *PrivateServiceConnectParameters) { + *out = *in + if in.NetworkAttachment != nil { + in, out := &in.NetworkAttachment, &out.NetworkAttachment + *out = new(string) + **out = **in + } + if in.RouteAllTraffic != nil { + in, out := &in.RouteAllTraffic, &out.RouteAllTraffic + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateServiceConnectParameters. +func (in *PrivateServiceConnectParameters) DeepCopy() *PrivateServiceConnectParameters { + if in == nil { + return nil + } + out := new(PrivateServiceConnectParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PubsubConfigInitParameters) DeepCopyInto(out *PubsubConfigInitParameters) { *out = *in @@ -4718,6 +4793,11 @@ func (in *WorkerPoolInitParameters) DeepCopyInto(out *WorkerPoolInitParameters) *out = new(NetworkConfigInitParameters) (*in).DeepCopyInto(*out) } + if in.PrivateServiceConnect != nil { + in, out := &in.PrivateServiceConnect, &out.PrivateServiceConnect + *out = new(PrivateServiceConnectInitParameters) + (*in).DeepCopyInto(*out) + } if in.Project != nil { in, out := &in.Project, &out.Project *out = new(string) @@ -4837,6 +4917,11 @@ func (in *WorkerPoolObservation) DeepCopyInto(out *WorkerPoolObservation) { *out = new(NetworkConfigObservation) (*in).DeepCopyInto(*out) } + if in.PrivateServiceConnect != nil { + in, out := &in.PrivateServiceConnect, &out.PrivateServiceConnect + *out = new(PrivateServiceConnectObservation) + (*in).DeepCopyInto(*out) + } if in.Project != nil { in, out := &in.Project, &out.Project *out = new(string) @@ -4908,6 +4993,11 @@ func (in *WorkerPoolParameters) DeepCopyInto(out *WorkerPoolParameters) { *out = new(NetworkConfigParameters) (*in).DeepCopyInto(*out) } + if in.PrivateServiceConnect != nil { + in, out := 
&in.PrivateServiceConnect, &out.PrivateServiceConnect + *out = new(PrivateServiceConnectParameters) + (*in).DeepCopyInto(*out) + } if in.Project != nil { in, out := &in.Project, &out.Project *out = new(string) diff --git a/apis/cloudbuild/v1beta2/zz_workerpool_types.go b/apis/cloudbuild/v1beta2/zz_workerpool_types.go index ec1b57014..407941d86 100755 --- a/apis/cloudbuild/v1beta2/zz_workerpool_types.go +++ b/apis/cloudbuild/v1beta2/zz_workerpool_types.go @@ -62,12 +62,33 @@ type NetworkConfigParameters struct { PeeredNetworkSelector *v1.Selector `json:"peeredNetworkSelector,omitempty" tf:"-"` } +type PrivateServiceConnectInitParameters struct { + NetworkAttachment *string `json:"networkAttachment,omitempty" tf:"network_attachment,omitempty"` + + RouteAllTraffic *bool `json:"routeAllTraffic,omitempty" tf:"route_all_traffic,omitempty"` +} + +type PrivateServiceConnectObservation struct { + NetworkAttachment *string `json:"networkAttachment,omitempty" tf:"network_attachment,omitempty"` + + RouteAllTraffic *bool `json:"routeAllTraffic,omitempty" tf:"route_all_traffic,omitempty"` +} + +type PrivateServiceConnectParameters struct { + + // +kubebuilder:validation:Optional + NetworkAttachment *string `json:"networkAttachment" tf:"network_attachment,omitempty"` + + // +kubebuilder:validation:Optional + RouteAllTraffic *bool `json:"routeAllTraffic,omitempty" tf:"route_all_traffic,omitempty"` +} + type WorkerConfigInitParameters struct { - // Size of the disk attached to the worker, in GB. See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). Specify a value of up to 1000. If 0 is specified, Cloud Build will use a standard disk size. + // Size of the disk attached to the worker, in GB. See diskSizeGb. Specify a value of up to 1000. If 0 is specified, Cloud Build will use a standard disk size. DiskSizeGb *float64 `json:"diskSizeGb,omitempty" tf:"disk_size_gb,omitempty"` - // Machine type of a worker, such as n1-standard-1. 
See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). If left blank, Cloud Build will use n1-standard-1. + // Machine type of a worker, such as n1-standard-1. See machineType. If left blank, Cloud Build will use n1-standard-1. MachineType *string `json:"machineType,omitempty" tf:"machine_type,omitempty"` // If true, workers are created without any public address, which prevents network egress to public IPs. @@ -76,10 +97,10 @@ type WorkerConfigInitParameters struct { type WorkerConfigObservation struct { - // Size of the disk attached to the worker, in GB. See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). Specify a value of up to 1000. If 0 is specified, Cloud Build will use a standard disk size. + // Size of the disk attached to the worker, in GB. See diskSizeGb. Specify a value of up to 1000. If 0 is specified, Cloud Build will use a standard disk size. DiskSizeGb *float64 `json:"diskSizeGb,omitempty" tf:"disk_size_gb,omitempty"` - // Machine type of a worker, such as n1-standard-1. See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). If left blank, Cloud Build will use n1-standard-1. + // Machine type of a worker, such as n1-standard-1. See machineType. If left blank, Cloud Build will use n1-standard-1. MachineType *string `json:"machineType,omitempty" tf:"machine_type,omitempty"` // If true, workers are created without any public address, which prevents network egress to public IPs. @@ -88,11 +109,11 @@ type WorkerConfigObservation struct { type WorkerConfigParameters struct { - // Size of the disk attached to the worker, in GB. See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). Specify a value of up to 1000. If 0 is specified, Cloud Build will use a standard disk size. + // Size of the disk attached to the worker, in GB. See diskSizeGb. Specify a value of up to 1000. 
If 0 is specified, Cloud Build will use a standard disk size. // +kubebuilder:validation:Optional DiskSizeGb *float64 `json:"diskSizeGb,omitempty" tf:"disk_size_gb,omitempty"` - // Machine type of a worker, such as n1-standard-1. See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). If left blank, Cloud Build will use n1-standard-1. + // Machine type of a worker, such as n1-standard-1. See machineType. If left blank, Cloud Build will use n1-standard-1. // +kubebuilder:validation:Optional MachineType *string `json:"machineType,omitempty" tf:"machine_type,omitempty"` @@ -112,6 +133,8 @@ type WorkerPoolInitParameters struct { // Network configuration for the WorkerPool. Structure is documented below. NetworkConfig *NetworkConfigInitParameters `json:"networkConfig,omitempty" tf:"network_config,omitempty"` + PrivateServiceConnect *PrivateServiceConnectInitParameters `json:"privateServiceConnect,omitempty" tf:"private_service_connect,omitempty"` + // The project for the resource Project *string `json:"project,omitempty" tf:"project,omitempty"` @@ -145,6 +168,8 @@ type WorkerPoolObservation struct { // Network configuration for the WorkerPool. Structure is documented below. 
NetworkConfig *NetworkConfigObservation `json:"networkConfig,omitempty" tf:"network_config,omitempty"` + PrivateServiceConnect *PrivateServiceConnectObservation `json:"privateServiceConnect,omitempty" tf:"private_service_connect,omitempty"` + // The project for the resource Project *string `json:"project,omitempty" tf:"project,omitempty"` @@ -179,6 +204,9 @@ type WorkerPoolParameters struct { // +kubebuilder:validation:Optional NetworkConfig *NetworkConfigParameters `json:"networkConfig,omitempty" tf:"network_config,omitempty"` + // +kubebuilder:validation:Optional + PrivateServiceConnect *PrivateServiceConnectParameters `json:"privateServiceConnect,omitempty" tf:"private_service_connect,omitempty"` + // The project for the resource // +kubebuilder:validation:Optional Project *string `json:"project,omitempty" tf:"project,omitempty"` diff --git a/apis/cloudfunctions2/v1beta2/zz_function_types.go b/apis/cloudfunctions2/v1beta2/zz_function_types.go index 4c558753b..c8f985f40 100755 --- a/apis/cloudfunctions2/v1beta2/zz_function_types.go +++ b/apis/cloudfunctions2/v1beta2/zz_function_types.go @@ -593,7 +593,7 @@ type RepoSourceInitParameters struct { // NOT match the revision regex. InvertRegex *bool `json:"invertRegex,omitempty" tf:"invert_regex,omitempty"` - // Project identifier (preferrably project number but can also be the project ID) of the project that contains the secret. If not set, it will be populated with the function's project assuming that the secret exists in the same project as of the function. + // Project identifier (preferably project number but can also be the project ID) of the project that contains the secret. If not set, it will be populated with the function's project assuming that the secret exists in the same project as of the function. ProjectID *string `json:"projectId,omitempty" tf:"project_id,omitempty"` // Name of the Cloud Source Repository. @@ -618,7 +618,7 @@ type RepoSourceObservation struct { // NOT match the revision regex. 
InvertRegex *bool `json:"invertRegex,omitempty" tf:"invert_regex,omitempty"` - // Project identifier (preferrably project number but can also be the project ID) of the project that contains the secret. If not set, it will be populated with the function's project assuming that the secret exists in the same project as of the function. + // Project identifier (preferably project number but can also be the project ID) of the project that contains the secret. If not set, it will be populated with the function's project assuming that the secret exists in the same project as of the function. ProjectID *string `json:"projectId,omitempty" tf:"project_id,omitempty"` // Name of the Cloud Source Repository. @@ -647,7 +647,7 @@ type RepoSourceParameters struct { // +kubebuilder:validation:Optional InvertRegex *bool `json:"invertRegex,omitempty" tf:"invert_regex,omitempty"` - // Project identifier (preferrably project number but can also be the project ID) of the project that contains the secret. If not set, it will be populated with the function's project assuming that the secret exists in the same project as of the function. + // Project identifier (preferably project number but can also be the project ID) of the project that contains the secret. If not set, it will be populated with the function's project assuming that the secret exists in the same project as of the function. // +kubebuilder:validation:Optional ProjectID *string `json:"projectId,omitempty" tf:"project_id,omitempty"` @@ -665,7 +665,7 @@ type SecretEnvironmentVariablesInitParameters struct { // Name of the environment variable. Key *string `json:"key,omitempty" tf:"key,omitempty"` - // Project identifier (preferrably project number but can also be the project ID) of the project that contains the secret. If not set, it will be populated with the function's project assuming that the secret exists in the same project as of the function. 
+ // Project identifier (preferably project number but can also be the project ID) of the project that contains the secret. If not set, it will be populated with the function's project assuming that the secret exists in the same project as of the function. ProjectID *string `json:"projectId,omitempty" tf:"project_id,omitempty"` // Name of the secret in secret manager (not the full resource name). @@ -689,7 +689,7 @@ type SecretEnvironmentVariablesObservation struct { // Name of the environment variable. Key *string `json:"key,omitempty" tf:"key,omitempty"` - // Project identifier (preferrably project number but can also be the project ID) of the project that contains the secret. If not set, it will be populated with the function's project assuming that the secret exists in the same project as of the function. + // Project identifier (preferably project number but can also be the project ID) of the project that contains the secret. If not set, it will be populated with the function's project assuming that the secret exists in the same project as of the function. ProjectID *string `json:"projectId,omitempty" tf:"project_id,omitempty"` // Name of the secret in secret manager (not the full resource name). @@ -705,7 +705,7 @@ type SecretEnvironmentVariablesParameters struct { // +kubebuilder:validation:Optional Key *string `json:"key" tf:"key,omitempty"` - // Project identifier (preferrably project number but can also be the project ID) of the project that contains the secret. If not set, it will be populated with the function's project assuming that the secret exists in the same project as of the function. + // Project identifier (preferably project number but can also be the project ID) of the project that contains the secret. If not set, it will be populated with the function's project assuming that the secret exists in the same project as of the function. 
// +kubebuilder:validation:Optional ProjectID *string `json:"projectId" tf:"project_id,omitempty"` @@ -732,7 +732,7 @@ type SecretVolumesInitParameters struct { // The path within the container to mount the secret volume. For example, setting the mountPath as /etc/secrets would mount the secret value files under the /etc/secrets directory. This directory will also be completely shadowed and unavailable to mount any other secrets. Recommended mount path: /etc/secrets MountPath *string `json:"mountPath,omitempty" tf:"mount_path,omitempty"` - // Project identifier (preferrably project number but can also be the project ID) of the project that contains the secret. If not set, it will be populated with the function's project assuming that the secret exists in the same project as of the function. + // Project identifier (preferably project number but can also be the project ID) of the project that contains the secret. If not set, it will be populated with the function's project assuming that the secret exists in the same project as of the function. ProjectID *string `json:"projectId,omitempty" tf:"project_id,omitempty"` // Name of the secret in secret manager (not the full resource name). @@ -757,7 +757,7 @@ type SecretVolumesObservation struct { // The path within the container to mount the secret volume. For example, setting the mountPath as /etc/secrets would mount the secret value files under the /etc/secrets directory. This directory will also be completely shadowed and unavailable to mount any other secrets. Recommended mount path: /etc/secrets MountPath *string `json:"mountPath,omitempty" tf:"mount_path,omitempty"` - // Project identifier (preferrably project number but can also be the project ID) of the project that contains the secret. If not set, it will be populated with the function's project assuming that the secret exists in the same project as of the function. 
+ // Project identifier (preferably project number but can also be the project ID) of the project that contains the secret. If not set, it will be populated with the function's project assuming that the secret exists in the same project as of the function. ProjectID *string `json:"projectId,omitempty" tf:"project_id,omitempty"` // Name of the secret in secret manager (not the full resource name). @@ -774,7 +774,7 @@ type SecretVolumesParameters struct { // +kubebuilder:validation:Optional MountPath *string `json:"mountPath" tf:"mount_path,omitempty"` - // Project identifier (preferrably project number but can also be the project ID) of the project that contains the secret. If not set, it will be populated with the function's project assuming that the secret exists in the same project as of the function. + // Project identifier (preferably project number but can also be the project ID) of the project that contains the secret. If not set, it will be populated with the function's project assuming that the secret exists in the same project as of the function. // +kubebuilder:validation:Optional ProjectID *string `json:"projectId" tf:"project_id,omitempty"` diff --git a/apis/cloudplatform/v1beta1/zz_folder_types.go b/apis/cloudplatform/v1beta1/zz_folder_types.go index b4f48eea4..9ee5ab60c 100755 --- a/apis/cloudplatform/v1beta1/zz_folder_types.go +++ b/apis/cloudplatform/v1beta1/zz_folder_types.go @@ -32,6 +32,10 @@ type FolderInitParameters struct { // Selector for a Folder in cloudplatform to populate parent. // +kubebuilder:validation:Optional ParentSelector *v1.Selector `json:"parentSelector,omitempty" tf:"-"` + + // A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored when empty. The field is immutable and causes resource replacement when mutated. 
This field is only set at create time and modifying this field after creation will trigger recreation. To apply tags to an existing resource, see the google_tags_tag_value resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` } type FolderObservation struct { @@ -40,6 +44,8 @@ type FolderObservation struct { // A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds. Example: "2014-10-02T15:01:23.045123456Z". CreateTime *string `json:"createTime,omitempty" tf:"create_time,omitempty"` + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + // The folder’s display name. // A folder’s display name must be unique amongst its siblings, e.g. no two folders with the same parent can share the same display name. The display name must start and end with a letter or digit, may contain letters, digits, spaces, hyphens and underscores and can be no longer than 30 characters. DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` @@ -58,6 +64,10 @@ type FolderObservation struct { // The resource name of the parent Folder or Organization. // Must be of the form folders/{folder_id} or organizations/{org_id}. Parent *string `json:"parent,omitempty" tf:"parent,omitempty"` + + // A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored when empty. The field is immutable and causes resource replacement when mutated. This field is only set at create time and modifying this field after creation will trigger recreation. To apply tags to an existing resource, see the google_tags_tag_value resource. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` } type FolderParameters struct { @@ -81,6 +91,11 @@ type FolderParameters struct { // Selector for a Folder in cloudplatform to populate parent. // +kubebuilder:validation:Optional ParentSelector *v1.Selector `json:"parentSelector,omitempty" tf:"-"` + + // A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored when empty. The field is immutable and causes resource replacement when mutated. This field is only set at create time and modifying this field after creation will trigger recreation. To apply tags to an existing resource, see the google_tags_tag_value resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` } // FolderSpec defines the desired state of Folder diff --git a/apis/cloudplatform/v1beta1/zz_generated.deepcopy.go b/apis/cloudplatform/v1beta1/zz_generated.deepcopy.go index c20197320..ce824c6d7 100644 --- a/apis/cloudplatform/v1beta1/zz_generated.deepcopy.go +++ b/apis/cloudplatform/v1beta1/zz_generated.deepcopy.go @@ -481,6 +481,22 @@ func (in *FolderInitParameters) DeepCopyInto(out *FolderInitParameters) { *out = new(v1.Selector) (*in).DeepCopyInto(*out) } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FolderInitParameters. 
@@ -533,6 +549,11 @@ func (in *FolderObservation) DeepCopyInto(out *FolderObservation) { *out = new(string) **out = **in } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } if in.DisplayName != nil { in, out := &in.DisplayName, &out.DisplayName *out = new(string) @@ -563,6 +584,22 @@ func (in *FolderObservation) DeepCopyInto(out *FolderObservation) { *out = new(string) **out = **in } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FolderObservation. @@ -598,6 +635,22 @@ func (in *FolderParameters) DeepCopyInto(out *FolderParameters) { *out = new(v1.Selector) (*in).DeepCopyInto(*out) } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FolderParameters. 
@@ -2604,10 +2657,21 @@ func (in *ProjectInitParameters) DeepCopyInto(out *ProjectInitParameters) { *out = new(string) **out = **in } - if in.SkipDelete != nil { - in, out := &in.SkipDelete, &out.SkipDelete - *out = new(bool) - **out = **in + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } } } @@ -2733,10 +2797,21 @@ func (in *ProjectObservation) DeepCopyInto(out *ProjectObservation) { *out = new(string) **out = **in } - if in.SkipDelete != nil { - in, out := &in.SkipDelete, &out.SkipDelete - *out = new(bool) - **out = **in + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } } if in.TerraformLabels != nil { in, out := &in.TerraformLabels, &out.TerraformLabels @@ -2830,10 +2905,21 @@ func (in *ProjectParameters) DeepCopyInto(out *ProjectParameters) { *out = new(string) **out = **in } - if in.SkipDelete != nil { - in, out := &in.SkipDelete, &out.SkipDelete - *out = new(bool) - **out = **in + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } } } diff --git a/apis/cloudplatform/v1beta1/zz_project_types.go b/apis/cloudplatform/v1beta1/zz_project_types.go index 63d546ff2..4ff8fba33 100755 --- a/apis/cloudplatform/v1beta1/zz_project_types.go +++ 
b/apis/cloudplatform/v1beta1/zz_project_types.go @@ -29,7 +29,7 @@ type ProjectInitParameters struct { BillingAccount *string `json:"billingAccount,omitempty" tf:"billing_account,omitempty"` // The deletion policy for the Project. Setting ABANDON allows the resource - // to be abandoned rather than deleted, i.e. Possible values are: "PREVENT", "ABANDON", "DELETE". Default value is DELETE. + // to be abandoned rather than deleted, i.e. Possible values are: "PREVENT", "ABANDON", "DELETE". Default value is PREVENT. DeletionPolicy *string `json:"deletionPolicy,omitempty" tf:"deletion_policy,omitempty"` // The numeric ID of the folder this project should be @@ -70,11 +70,9 @@ type ProjectInitParameters struct { // The project ID. Changing this forces a new project to be created. ProjectID *string `json:"projectId,omitempty" tf:"project_id,omitempty"` - // skip_delete is deprecated and will be - // removed in 6.0.0. Please use deletion_policy instead. A skip_delete value of false - // can be changed to a deletion_policy value of DELETE and a skip_delete value of true - // to a deletion_policy value of ABANDON for equivalent behavior. - SkipDelete *bool `json:"skipDelete,omitempty" tf:"skip_delete,omitempty"` + // A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored when empty. The field is immutable and causes resource replacement when mutated. This field is only set at create time and modifying this field after creation will trigger recreation. To apply tags to an existing resource, see the google_tags_tag_value resource. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` } type ProjectObservation struct { @@ -93,7 +91,7 @@ type ProjectObservation struct { BillingAccount *string `json:"billingAccount,omitempty" tf:"billing_account,omitempty"` // The deletion policy for the Project. Setting ABANDON allows the resource - // to be abandoned rather than deleted, i.e. Possible values are: "PREVENT", "ABANDON", "DELETE". Default value is DELETE. + // to be abandoned rather than deleted, i.e. Possible values are: "PREVENT", "ABANDON", "DELETE". Default value is PREVENT. DeletionPolicy *string `json:"deletionPolicy,omitempty" tf:"deletion_policy,omitempty"` // +mapType=granular @@ -133,11 +131,9 @@ type ProjectObservation struct { // The project ID. Changing this forces a new project to be created. ProjectID *string `json:"projectId,omitempty" tf:"project_id,omitempty"` - // skip_delete is deprecated and will be - // removed in 6.0.0. Please use deletion_policy instead. A skip_delete value of false - // can be changed to a deletion_policy value of DELETE and a skip_delete value of true - // to a deletion_policy value of ABANDON for equivalent behavior. - SkipDelete *bool `json:"skipDelete,omitempty" tf:"skip_delete,omitempty"` + // A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored when empty. The field is immutable and causes resource replacement when mutated. This field is only set at create time and modifying this field after creation will trigger recreation. To apply tags to an existing resource, see the google_tags_tag_value resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` // The combination of labels configured directly on the resource and default labels configured on the provider. 
// +mapType=granular @@ -162,7 +158,7 @@ type ProjectParameters struct { BillingAccount *string `json:"billingAccount,omitempty" tf:"billing_account,omitempty"` // The deletion policy for the Project. Setting ABANDON allows the resource - // to be abandoned rather than deleted, i.e. Possible values are: "PREVENT", "ABANDON", "DELETE". Default value is DELETE. + // to be abandoned rather than deleted, i.e. Possible values are: "PREVENT", "ABANDON", "DELETE". Default value is PREVENT. // +kubebuilder:validation:Optional DeletionPolicy *string `json:"deletionPolicy,omitempty" tf:"deletion_policy,omitempty"` @@ -209,12 +205,10 @@ type ProjectParameters struct { // +kubebuilder:validation:Optional ProjectID *string `json:"projectId,omitempty" tf:"project_id,omitempty"` - // skip_delete is deprecated and will be - // removed in 6.0.0. Please use deletion_policy instead. A skip_delete value of false - // can be changed to a deletion_policy value of DELETE and a skip_delete value of true - // to a deletion_policy value of ABANDON for equivalent behavior. + // A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored when empty. The field is immutable and causes resource replacement when mutated. This field is only set at create time and modifying this field after creation will trigger recreation. To apply tags to an existing resource, see the google_tags_tag_value resource. 
// +kubebuilder:validation:Optional - SkipDelete *bool `json:"skipDelete,omitempty" tf:"skip_delete,omitempty"` + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` } // ProjectSpec defines the desired state of Project diff --git a/apis/cloudrun/v1beta2/zz_generated.deepcopy.go b/apis/cloudrun/v1beta2/zz_generated.deepcopy.go index dac166684..7d4f0219b 100644 --- a/apis/cloudrun/v1beta2/zz_generated.deepcopy.go +++ b/apis/cloudrun/v1beta2/zz_generated.deepcopy.go @@ -1524,6 +1524,129 @@ func (in *ContainersVolumeMountsParameters) DeepCopy() *ContainersVolumeMountsPa return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CsiInitParameters) DeepCopyInto(out *CsiInitParameters) { + *out = *in + if in.Driver != nil { + in, out := &in.Driver, &out.Driver + *out = new(string) + **out = **in + } + if in.ReadOnly != nil { + in, out := &in.ReadOnly, &out.ReadOnly + *out = new(bool) + **out = **in + } + if in.VolumeAttributes != nil { + in, out := &in.VolumeAttributes, &out.VolumeAttributes + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CsiInitParameters. +func (in *CsiInitParameters) DeepCopy() *CsiInitParameters { + if in == nil { + return nil + } + out := new(CsiInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CsiObservation) DeepCopyInto(out *CsiObservation) { + *out = *in + if in.Driver != nil { + in, out := &in.Driver, &out.Driver + *out = new(string) + **out = **in + } + if in.ReadOnly != nil { + in, out := &in.ReadOnly, &out.ReadOnly + *out = new(bool) + **out = **in + } + if in.VolumeAttributes != nil { + in, out := &in.VolumeAttributes, &out.VolumeAttributes + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CsiObservation. +func (in *CsiObservation) DeepCopy() *CsiObservation { + if in == nil { + return nil + } + out := new(CsiObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CsiParameters) DeepCopyInto(out *CsiParameters) { + *out = *in + if in.Driver != nil { + in, out := &in.Driver, &out.Driver + *out = new(string) + **out = **in + } + if in.ReadOnly != nil { + in, out := &in.ReadOnly, &out.ReadOnly + *out = new(bool) + **out = **in + } + if in.VolumeAttributes != nil { + in, out := &in.VolumeAttributes, &out.VolumeAttributes + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CsiParameters. 
+func (in *CsiParameters) DeepCopy() *CsiParameters { + if in == nil { + return nil + } + out := new(CsiParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DomainMapping) DeepCopyInto(out *DomainMapping) { *out = *in @@ -1750,6 +1873,81 @@ func (in *DomainMappingStatus) DeepCopy() *DomainMappingStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EmptyDirInitParameters) DeepCopyInto(out *EmptyDirInitParameters) { + *out = *in + if in.Medium != nil { + in, out := &in.Medium, &out.Medium + *out = new(string) + **out = **in + } + if in.SizeLimit != nil { + in, out := &in.SizeLimit, &out.SizeLimit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmptyDirInitParameters. +func (in *EmptyDirInitParameters) DeepCopy() *EmptyDirInitParameters { + if in == nil { + return nil + } + out := new(EmptyDirInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EmptyDirObservation) DeepCopyInto(out *EmptyDirObservation) { + *out = *in + if in.Medium != nil { + in, out := &in.Medium, &out.Medium + *out = new(string) + **out = **in + } + if in.SizeLimit != nil { + in, out := &in.SizeLimit, &out.SizeLimit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmptyDirObservation. +func (in *EmptyDirObservation) DeepCopy() *EmptyDirObservation { + if in == nil { + return nil + } + out := new(EmptyDirObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EmptyDirParameters) DeepCopyInto(out *EmptyDirParameters) { + *out = *in + if in.Medium != nil { + in, out := &in.Medium, &out.Medium + *out = new(string) + **out = **in + } + if in.SizeLimit != nil { + in, out := &in.SizeLimit, &out.SizeLimit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmptyDirParameters. +func (in *EmptyDirParameters) DeepCopy() *EmptyDirParameters { + if in == nil { + return nil + } + out := new(EmptyDirParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EnvFromInitParameters) DeepCopyInto(out *EnvFromInitParameters) { *out = *in @@ -2168,16 +2366,6 @@ func (in *GcsInitParameters) DeepCopyInto(out *GcsInitParameters) { *out = new(string) **out = **in } - if in.BucketRef != nil { - in, out := &in.BucketRef, &out.BucketRef - *out = new(v1.Reference) - (*in).DeepCopyInto(*out) - } - if in.BucketSelector != nil { - in, out := &in.BucketSelector, &out.BucketSelector - *out = new(v1.Selector) - (*in).DeepCopyInto(*out) - } if in.ReadOnly != nil { in, out := &in.ReadOnly, &out.ReadOnly *out = new(bool) @@ -2228,16 +2416,6 @@ func (in *GcsParameters) DeepCopyInto(out *GcsParameters) { *out = new(string) **out = **in } - if in.BucketRef != nil { - in, out := &in.BucketRef, &out.BucketRef - *out = new(v1.Reference) - (*in).DeepCopyInto(*out) - } - if in.BucketSelector != nil { - in, out := &in.BucketSelector, &out.BucketSelector - *out = new(v1.Selector) - (*in).DeepCopyInto(*out) - } if in.ReadOnly != nil { in, out := &in.ReadOnly, &out.ReadOnly *out = new(bool) @@ -3888,11 +4066,6 @@ func (in *ResourcesParameters) DeepCopy() *ResourcesParameters { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ScalingInitParameters) DeepCopyInto(out *ScalingInitParameters) { *out = *in - if in.MaxInstanceCount != nil { - in, out := &in.MaxInstanceCount, &out.MaxInstanceCount - *out = new(float64) - **out = **in - } if in.MinInstanceCount != nil { in, out := &in.MinInstanceCount, &out.MinInstanceCount *out = new(float64) @@ -3913,11 +4086,6 @@ func (in *ScalingInitParameters) DeepCopy() *ScalingInitParameters { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ScalingObservation) DeepCopyInto(out *ScalingObservation) { *out = *in - if in.MaxInstanceCount != nil { - in, out := &in.MaxInstanceCount, &out.MaxInstanceCount - *out = new(float64) - **out = **in - } if in.MinInstanceCount != nil { in, out := &in.MinInstanceCount, &out.MinInstanceCount *out = new(float64) @@ -3938,11 +4106,6 @@ func (in *ScalingObservation) DeepCopy() *ScalingObservation { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ScalingParameters) DeepCopyInto(out *ScalingParameters) { *out = *in - if in.MaxInstanceCount != nil { - in, out := &in.MaxInstanceCount, &out.MaxInstanceCount - *out = new(float64) - **out = **in - } if in.MinInstanceCount != nil { in, out := &in.MinInstanceCount, &out.MinInstanceCount *out = new(float64) @@ -6973,6 +7136,81 @@ func (in *TemplateParameters) DeepCopy() *TemplateParameters { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TemplateScalingInitParameters) DeepCopyInto(out *TemplateScalingInitParameters) { + *out = *in + if in.MaxInstanceCount != nil { + in, out := &in.MaxInstanceCount, &out.MaxInstanceCount + *out = new(float64) + **out = **in + } + if in.MinInstanceCount != nil { + in, out := &in.MinInstanceCount, &out.MinInstanceCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateScalingInitParameters. +func (in *TemplateScalingInitParameters) DeepCopy() *TemplateScalingInitParameters { + if in == nil { + return nil + } + out := new(TemplateScalingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemplateScalingObservation) DeepCopyInto(out *TemplateScalingObservation) { + *out = *in + if in.MaxInstanceCount != nil { + in, out := &in.MaxInstanceCount, &out.MaxInstanceCount + *out = new(float64) + **out = **in + } + if in.MinInstanceCount != nil { + in, out := &in.MinInstanceCount, &out.MinInstanceCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateScalingObservation. +func (in *TemplateScalingObservation) DeepCopy() *TemplateScalingObservation { + if in == nil { + return nil + } + out := new(TemplateScalingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TemplateScalingParameters) DeepCopyInto(out *TemplateScalingParameters) { + *out = *in + if in.MaxInstanceCount != nil { + in, out := &in.MaxInstanceCount, &out.MaxInstanceCount + *out = new(float64) + **out = **in + } + if in.MinInstanceCount != nil { + in, out := &in.MinInstanceCount, &out.MinInstanceCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateScalingParameters. +func (in *TemplateScalingParameters) DeepCopy() *TemplateScalingParameters { + if in == nil { + return nil + } + out := new(TemplateScalingParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TemplateSpecInitParameters) DeepCopyInto(out *TemplateSpecInitParameters) { *out = *in @@ -7384,15 +7622,105 @@ func (in *TemplateVPCAccessParameters) DeepCopy() *TemplateVPCAccessParameters { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TemplateVolumesInitParameters) DeepCopyInto(out *TemplateVolumesInitParameters) { +func (in *TemplateVolumesEmptyDirInitParameters) DeepCopyInto(out *TemplateVolumesEmptyDirInitParameters) { *out = *in - if in.CloudSQLInstance != nil { - in, out := &in.CloudSQLInstance, &out.CloudSQLInstance - *out = new(CloudSQLInstanceInitParameters) - (*in).DeepCopyInto(*out) - } - if in.Name != nil { - in, out := &in.Name, &out.Name + if in.Medium != nil { + in, out := &in.Medium, &out.Medium + *out = new(string) + **out = **in + } + if in.SizeLimit != nil { + in, out := &in.SizeLimit, &out.SizeLimit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateVolumesEmptyDirInitParameters. 
+func (in *TemplateVolumesEmptyDirInitParameters) DeepCopy() *TemplateVolumesEmptyDirInitParameters { + if in == nil { + return nil + } + out := new(TemplateVolumesEmptyDirInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemplateVolumesEmptyDirObservation) DeepCopyInto(out *TemplateVolumesEmptyDirObservation) { + *out = *in + if in.Medium != nil { + in, out := &in.Medium, &out.Medium + *out = new(string) + **out = **in + } + if in.SizeLimit != nil { + in, out := &in.SizeLimit, &out.SizeLimit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateVolumesEmptyDirObservation. +func (in *TemplateVolumesEmptyDirObservation) DeepCopy() *TemplateVolumesEmptyDirObservation { + if in == nil { + return nil + } + out := new(TemplateVolumesEmptyDirObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemplateVolumesEmptyDirParameters) DeepCopyInto(out *TemplateVolumesEmptyDirParameters) { + *out = *in + if in.Medium != nil { + in, out := &in.Medium, &out.Medium + *out = new(string) + **out = **in + } + if in.SizeLimit != nil { + in, out := &in.SizeLimit, &out.SizeLimit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateVolumesEmptyDirParameters. +func (in *TemplateVolumesEmptyDirParameters) DeepCopy() *TemplateVolumesEmptyDirParameters { + if in == nil { + return nil + } + out := new(TemplateVolumesEmptyDirParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TemplateVolumesInitParameters) DeepCopyInto(out *TemplateVolumesInitParameters) { + *out = *in + if in.CloudSQLInstance != nil { + in, out := &in.CloudSQLInstance, &out.CloudSQLInstance + *out = new(CloudSQLInstanceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EmptyDir != nil { + in, out := &in.EmptyDir, &out.EmptyDir + *out = new(VolumesEmptyDirInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Gcs != nil { + in, out := &in.Gcs, &out.Gcs + *out = new(GcsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.NFS != nil { + in, out := &in.NFS, &out.NFS + *out = new(VolumesNFSInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name *out = new(string) **out = **in } @@ -7413,6 +7741,96 @@ func (in *TemplateVolumesInitParameters) DeepCopy() *TemplateVolumesInitParamete return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemplateVolumesNFSInitParameters) DeepCopyInto(out *TemplateVolumesNFSInitParameters) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.ReadOnly != nil { + in, out := &in.ReadOnly, &out.ReadOnly + *out = new(bool) + **out = **in + } + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateVolumesNFSInitParameters. +func (in *TemplateVolumesNFSInitParameters) DeepCopy() *TemplateVolumesNFSInitParameters { + if in == nil { + return nil + } + out := new(TemplateVolumesNFSInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TemplateVolumesNFSObservation) DeepCopyInto(out *TemplateVolumesNFSObservation) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.ReadOnly != nil { + in, out := &in.ReadOnly, &out.ReadOnly + *out = new(bool) + **out = **in + } + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateVolumesNFSObservation. +func (in *TemplateVolumesNFSObservation) DeepCopy() *TemplateVolumesNFSObservation { + if in == nil { + return nil + } + out := new(TemplateVolumesNFSObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemplateVolumesNFSParameters) DeepCopyInto(out *TemplateVolumesNFSParameters) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.ReadOnly != nil { + in, out := &in.ReadOnly, &out.ReadOnly + *out = new(bool) + **out = **in + } + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateVolumesNFSParameters. +func (in *TemplateVolumesNFSParameters) DeepCopy() *TemplateVolumesNFSParameters { + if in == nil { + return nil + } + out := new(TemplateVolumesNFSParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *TemplateVolumesObservation) DeepCopyInto(out *TemplateVolumesObservation) { *out = *in @@ -7421,6 +7839,21 @@ func (in *TemplateVolumesObservation) DeepCopyInto(out *TemplateVolumesObservati *out = new(CloudSQLInstanceObservation) (*in).DeepCopyInto(*out) } + if in.EmptyDir != nil { + in, out := &in.EmptyDir, &out.EmptyDir + *out = new(VolumesEmptyDirObservation) + (*in).DeepCopyInto(*out) + } + if in.Gcs != nil { + in, out := &in.Gcs, &out.Gcs + *out = new(GcsObservation) + (*in).DeepCopyInto(*out) + } + if in.NFS != nil { + in, out := &in.NFS, &out.NFS + *out = new(VolumesNFSObservation) + (*in).DeepCopyInto(*out) + } if in.Name != nil { in, out := &in.Name, &out.Name *out = new(string) @@ -7451,6 +7884,21 @@ func (in *TemplateVolumesParameters) DeepCopyInto(out *TemplateVolumesParameters *out = new(CloudSQLInstanceParameters) (*in).DeepCopyInto(*out) } + if in.EmptyDir != nil { + in, out := &in.EmptyDir, &out.EmptyDir + *out = new(VolumesEmptyDirParameters) + (*in).DeepCopyInto(*out) + } + if in.Gcs != nil { + in, out := &in.Gcs, &out.Gcs + *out = new(GcsParameters) + (*in).DeepCopyInto(*out) + } + if in.NFS != nil { + in, out := &in.NFS, &out.NFS + *out = new(VolumesNFSParameters) + (*in).DeepCopyInto(*out) + } if in.Name != nil { in, out := &in.Name, &out.Name *out = new(string) @@ -8091,6 +8539,11 @@ func (in *V2JobObservation) DeepCopyInto(out *V2JobObservation) { *out = new(string) **out = **in } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } if in.EffectiveAnnotations != nil { in, out := &in.EffectiveAnnotations, &out.EffectiveAnnotations *out = make(map[string]*string, len(*in)) @@ -8811,6 +9264,11 @@ func (in *V2ServiceInitParameters) DeepCopyInto(out *V2ServiceInitParameters) { *out = new(string) **out = **in } + if in.InvokerIAMDisabled != nil { + in, out := &in.InvokerIAMDisabled, &out.InvokerIAMDisabled + *out = new(bool) + **out = **in + } if in.Labels 
!= nil { in, out := &in.Labels, &out.Labels *out = make(map[string]*string, len(*in)) @@ -8837,6 +9295,11 @@ func (in *V2ServiceInitParameters) DeepCopyInto(out *V2ServiceInitParameters) { *out = new(string) **out = **in } + if in.Scaling != nil { + in, out := &in.Scaling, &out.Scaling + *out = new(ScalingInitParameters) + (*in).DeepCopyInto(*out) + } if in.Template != nil { in, out := &in.Template, &out.Template *out = new(V2ServiceTemplateInitParameters) @@ -8960,6 +9423,11 @@ func (in *V2ServiceObservation) DeepCopyInto(out *V2ServiceObservation) { *out = new(string) **out = **in } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } if in.Description != nil { in, out := &in.Description, &out.Description *out = new(string) @@ -9022,6 +9490,11 @@ func (in *V2ServiceObservation) DeepCopyInto(out *V2ServiceObservation) { *out = new(string) **out = **in } + if in.InvokerIAMDisabled != nil { + in, out := &in.InvokerIAMDisabled, &out.InvokerIAMDisabled + *out = new(bool) + **out = **in + } if in.Labels != nil { in, out := &in.Labels, &out.Labels *out = make(map[string]*string, len(*in)) @@ -9078,6 +9551,11 @@ func (in *V2ServiceObservation) DeepCopyInto(out *V2ServiceObservation) { *out = new(bool) **out = **in } + if in.Scaling != nil { + in, out := &in.Scaling, &out.Scaling + *out = new(ScalingObservation) + (*in).DeepCopyInto(*out) + } if in.Template != nil { in, out := &in.Template, &out.Template *out = new(V2ServiceTemplateObservation) @@ -9135,6 +9613,17 @@ func (in *V2ServiceObservation) DeepCopyInto(out *V2ServiceObservation) { *out = new(string) **out = **in } + if in.Urls != nil { + in, out := &in.Urls, &out.Urls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
V2ServiceObservation. @@ -9202,6 +9691,11 @@ func (in *V2ServiceParameters) DeepCopyInto(out *V2ServiceParameters) { *out = new(string) **out = **in } + if in.InvokerIAMDisabled != nil { + in, out := &in.InvokerIAMDisabled, &out.InvokerIAMDisabled + *out = new(bool) + **out = **in + } if in.Labels != nil { in, out := &in.Labels, &out.Labels *out = make(map[string]*string, len(*in)) @@ -9233,6 +9727,11 @@ func (in *V2ServiceParameters) DeepCopyInto(out *V2ServiceParameters) { *out = new(string) **out = **in } + if in.Scaling != nil { + in, out := &in.Scaling, &out.Scaling + *out = new(ScalingParameters) + (*in).DeepCopyInto(*out) + } if in.Template != nil { in, out := &in.Template, &out.Template *out = new(V2ServiceTemplateParameters) @@ -9647,7 +10146,7 @@ func (in *V2ServiceTemplateInitParameters) DeepCopyInto(out *V2ServiceTemplateIn } if in.Scaling != nil { in, out := &in.Scaling, &out.Scaling - *out = new(ScalingInitParameters) + *out = new(TemplateScalingInitParameters) (*in).DeepCopyInto(*out) } if in.ServiceAccount != nil { @@ -9753,7 +10252,7 @@ func (in *V2ServiceTemplateObservation) DeepCopyInto(out *V2ServiceTemplateObser } if in.Scaling != nil { in, out := &in.Scaling, &out.Scaling - *out = new(ScalingObservation) + *out = new(TemplateScalingObservation) (*in).DeepCopyInto(*out) } if in.ServiceAccount != nil { @@ -9859,7 +10358,7 @@ func (in *V2ServiceTemplateParameters) DeepCopyInto(out *V2ServiceTemplateParame } if in.Scaling != nil { in, out := &in.Scaling, &out.Scaling - *out = new(ScalingParameters) + *out = new(TemplateScalingParameters) (*in).DeepCopyInto(*out) } if in.ServiceAccount != nil { @@ -9909,14 +10408,19 @@ func (in *V2ServiceTemplateVolumesInitParameters) DeepCopyInto(out *V2ServiceTem *out = new(VolumesCloudSQLInstanceInitParameters) (*in).DeepCopyInto(*out) } + if in.EmptyDir != nil { + in, out := &in.EmptyDir, &out.EmptyDir + *out = new(TemplateVolumesEmptyDirInitParameters) + (*in).DeepCopyInto(*out) + } if in.Gcs != nil { in, out 
:= &in.Gcs, &out.Gcs - *out = new(GcsInitParameters) + *out = new(VolumesGcsInitParameters) (*in).DeepCopyInto(*out) } if in.NFS != nil { in, out := &in.NFS, &out.NFS - *out = new(NFSInitParameters) + *out = new(TemplateVolumesNFSInitParameters) (*in).DeepCopyInto(*out) } if in.Name != nil { @@ -9949,14 +10453,19 @@ func (in *V2ServiceTemplateVolumesObservation) DeepCopyInto(out *V2ServiceTempla *out = new(VolumesCloudSQLInstanceObservation) (*in).DeepCopyInto(*out) } + if in.EmptyDir != nil { + in, out := &in.EmptyDir, &out.EmptyDir + *out = new(TemplateVolumesEmptyDirObservation) + (*in).DeepCopyInto(*out) + } if in.Gcs != nil { in, out := &in.Gcs, &out.Gcs - *out = new(GcsObservation) + *out = new(VolumesGcsObservation) (*in).DeepCopyInto(*out) } if in.NFS != nil { in, out := &in.NFS, &out.NFS - *out = new(NFSObservation) + *out = new(TemplateVolumesNFSObservation) (*in).DeepCopyInto(*out) } if in.Name != nil { @@ -9989,14 +10498,19 @@ func (in *V2ServiceTemplateVolumesParameters) DeepCopyInto(out *V2ServiceTemplat *out = new(VolumesCloudSQLInstanceParameters) (*in).DeepCopyInto(*out) } + if in.EmptyDir != nil { + in, out := &in.EmptyDir, &out.EmptyDir + *out = new(TemplateVolumesEmptyDirParameters) + (*in).DeepCopyInto(*out) + } if in.Gcs != nil { in, out := &in.Gcs, &out.Gcs - *out = new(GcsParameters) + *out = new(VolumesGcsParameters) (*in).DeepCopyInto(*out) } if in.NFS != nil { in, out := &in.NFS, &out.NFS - *out = new(NFSParameters) + *out = new(TemplateVolumesNFSParameters) (*in).DeepCopyInto(*out) } if in.Name != nil { @@ -10807,9 +11321,194 @@ func (in *VolumesCloudSQLInstanceParameters) DeepCopy() *VolumesCloudSQLInstance return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VolumesEmptyDirInitParameters) DeepCopyInto(out *VolumesEmptyDirInitParameters) { + *out = *in + if in.Medium != nil { + in, out := &in.Medium, &out.Medium + *out = new(string) + **out = **in + } + if in.SizeLimit != nil { + in, out := &in.SizeLimit, &out.SizeLimit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumesEmptyDirInitParameters. +func (in *VolumesEmptyDirInitParameters) DeepCopy() *VolumesEmptyDirInitParameters { + if in == nil { + return nil + } + out := new(VolumesEmptyDirInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumesEmptyDirObservation) DeepCopyInto(out *VolumesEmptyDirObservation) { + *out = *in + if in.Medium != nil { + in, out := &in.Medium, &out.Medium + *out = new(string) + **out = **in + } + if in.SizeLimit != nil { + in, out := &in.SizeLimit, &out.SizeLimit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumesEmptyDirObservation. +func (in *VolumesEmptyDirObservation) DeepCopy() *VolumesEmptyDirObservation { + if in == nil { + return nil + } + out := new(VolumesEmptyDirObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumesEmptyDirParameters) DeepCopyInto(out *VolumesEmptyDirParameters) { + *out = *in + if in.Medium != nil { + in, out := &in.Medium, &out.Medium + *out = new(string) + **out = **in + } + if in.SizeLimit != nil { + in, out := &in.SizeLimit, &out.SizeLimit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumesEmptyDirParameters. 
+func (in *VolumesEmptyDirParameters) DeepCopy() *VolumesEmptyDirParameters { + if in == nil { + return nil + } + out := new(VolumesEmptyDirParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumesGcsInitParameters) DeepCopyInto(out *VolumesGcsInitParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ReadOnly != nil { + in, out := &in.ReadOnly, &out.ReadOnly + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumesGcsInitParameters. +func (in *VolumesGcsInitParameters) DeepCopy() *VolumesGcsInitParameters { + if in == nil { + return nil + } + out := new(VolumesGcsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumesGcsObservation) DeepCopyInto(out *VolumesGcsObservation) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.ReadOnly != nil { + in, out := &in.ReadOnly, &out.ReadOnly + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumesGcsObservation. +func (in *VolumesGcsObservation) DeepCopy() *VolumesGcsObservation { + if in == nil { + return nil + } + out := new(VolumesGcsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *VolumesGcsParameters) DeepCopyInto(out *VolumesGcsParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ReadOnly != nil { + in, out := &in.ReadOnly, &out.ReadOnly + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumesGcsParameters. +func (in *VolumesGcsParameters) DeepCopy() *VolumesGcsParameters { + if in == nil { + return nil + } + out := new(VolumesGcsParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VolumesInitParameters) DeepCopyInto(out *VolumesInitParameters) { *out = *in + if in.Csi != nil { + in, out := &in.Csi, &out.Csi + *out = new(CsiInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EmptyDir != nil { + in, out := &in.EmptyDir, &out.EmptyDir + *out = new(EmptyDirInitParameters) + (*in).DeepCopyInto(*out) + } + if in.NFS != nil { + in, out := &in.NFS, &out.NFS + *out = new(NFSInitParameters) + (*in).DeepCopyInto(*out) + } if in.Name != nil { in, out := &in.Name, &out.Name *out = new(string) @@ -10832,9 +11531,114 @@ func (in *VolumesInitParameters) DeepCopy() *VolumesInitParameters { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VolumesNFSInitParameters) DeepCopyInto(out *VolumesNFSInitParameters) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.ReadOnly != nil { + in, out := &in.ReadOnly, &out.ReadOnly + *out = new(bool) + **out = **in + } + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumesNFSInitParameters. +func (in *VolumesNFSInitParameters) DeepCopy() *VolumesNFSInitParameters { + if in == nil { + return nil + } + out := new(VolumesNFSInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumesNFSObservation) DeepCopyInto(out *VolumesNFSObservation) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.ReadOnly != nil { + in, out := &in.ReadOnly, &out.ReadOnly + *out = new(bool) + **out = **in + } + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumesNFSObservation. +func (in *VolumesNFSObservation) DeepCopy() *VolumesNFSObservation { + if in == nil { + return nil + } + out := new(VolumesNFSObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VolumesNFSParameters) DeepCopyInto(out *VolumesNFSParameters) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.ReadOnly != nil { + in, out := &in.ReadOnly, &out.ReadOnly + *out = new(bool) + **out = **in + } + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumesNFSParameters. +func (in *VolumesNFSParameters) DeepCopy() *VolumesNFSParameters { + if in == nil { + return nil + } + out := new(VolumesNFSParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VolumesObservation) DeepCopyInto(out *VolumesObservation) { *out = *in + if in.Csi != nil { + in, out := &in.Csi, &out.Csi + *out = new(CsiObservation) + (*in).DeepCopyInto(*out) + } + if in.EmptyDir != nil { + in, out := &in.EmptyDir, &out.EmptyDir + *out = new(EmptyDirObservation) + (*in).DeepCopyInto(*out) + } + if in.NFS != nil { + in, out := &in.NFS, &out.NFS + *out = new(NFSObservation) + (*in).DeepCopyInto(*out) + } if in.Name != nil { in, out := &in.Name, &out.Name *out = new(string) @@ -10860,6 +11664,21 @@ func (in *VolumesObservation) DeepCopy() *VolumesObservation { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *VolumesParameters) DeepCopyInto(out *VolumesParameters) { *out = *in + if in.Csi != nil { + in, out := &in.Csi, &out.Csi + *out = new(CsiParameters) + (*in).DeepCopyInto(*out) + } + if in.EmptyDir != nil { + in, out := &in.EmptyDir, &out.EmptyDir + *out = new(EmptyDirParameters) + (*in).DeepCopyInto(*out) + } + if in.NFS != nil { + in, out := &in.NFS, &out.NFS + *out = new(NFSParameters) + (*in).DeepCopyInto(*out) + } if in.Name != nil { in, out := &in.Name, &out.Name *out = new(string) diff --git a/apis/cloudrun/v1beta2/zz_service_types.go b/apis/cloudrun/v1beta2/zz_service_types.go index 4bdd29aec..d59339a34 100755 --- a/apis/cloudrun/v1beta2/zz_service_types.go +++ b/apis/cloudrun/v1beta2/zz_service_types.go @@ -231,6 +231,77 @@ type ContainersParameters struct { WorkingDir *string `json:"workingDir,omitempty" tf:"working_dir,omitempty"` } +type CsiInitParameters struct { + + // Unique name representing the type of file system to be created. Cloud Run supports the following values: + Driver *string `json:"driver,omitempty" tf:"driver,omitempty"` + + // If true, mount the NFS volume as read only in all mounts. Defaults to false. + ReadOnly *bool `json:"readOnly,omitempty" tf:"read_only,omitempty"` + + // Driver-specific attributes. The following options are supported for available drivers: + // +mapType=granular + VolumeAttributes map[string]*string `json:"volumeAttributes,omitempty" tf:"volume_attributes,omitempty"` +} + +type CsiObservation struct { + + // Unique name representing the type of file system to be created. Cloud Run supports the following values: + Driver *string `json:"driver,omitempty" tf:"driver,omitempty"` + + // If true, mount the NFS volume as read only in all mounts. Defaults to false. + ReadOnly *bool `json:"readOnly,omitempty" tf:"read_only,omitempty"` + + // Driver-specific attributes. 
The following options are supported for available drivers: + // +mapType=granular + VolumeAttributes map[string]*string `json:"volumeAttributes,omitempty" tf:"volume_attributes,omitempty"` +} + +type CsiParameters struct { + + // Unique name representing the type of file system to be created. Cloud Run supports the following values: + // +kubebuilder:validation:Optional + Driver *string `json:"driver" tf:"driver,omitempty"` + + // If true, mount the NFS volume as read only in all mounts. Defaults to false. + // +kubebuilder:validation:Optional + ReadOnly *bool `json:"readOnly,omitempty" tf:"read_only,omitempty"` + + // Driver-specific attributes. The following options are supported for available drivers: + // +kubebuilder:validation:Optional + // +mapType=granular + VolumeAttributes map[string]*string `json:"volumeAttributes,omitempty" tf:"volume_attributes,omitempty"` +} + +type EmptyDirInitParameters struct { + + // The medium on which the data is stored. The default is "" which means to use the node's default medium. Must be an empty string (default) or Memory. + Medium *string `json:"medium,omitempty" tf:"medium,omitempty"` + + // Limit on the storage usable by this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. This field's values are of the 'Quantity' k8s type: https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/quantity/. The default is nil which means that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir. + SizeLimit *string `json:"sizeLimit,omitempty" tf:"size_limit,omitempty"` +} + +type EmptyDirObservation struct { + + // The medium on which the data is stored. The default is "" which means to use the node's default medium. Must be an empty string (default) or Memory. 
+ Medium *string `json:"medium,omitempty" tf:"medium,omitempty"` + + // Limit on the storage usable by this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. This field's values are of the 'Quantity' k8s type: https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/quantity/. The default is nil which means that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir. + SizeLimit *string `json:"sizeLimit,omitempty" tf:"size_limit,omitempty"` +} + +type EmptyDirParameters struct { + + // The medium on which the data is stored. The default is "" which means to use the node's default medium. Must be an empty string (default) or Memory. + // +kubebuilder:validation:Optional + Medium *string `json:"medium,omitempty" tf:"medium,omitempty"` + + // Limit on the storage usable by this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. This field's values are of the 'Quantity' k8s type: https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/quantity/. The default is nil which means that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir. + // +kubebuilder:validation:Optional + SizeLimit *string `json:"sizeLimit,omitempty" tf:"size_limit,omitempty"` +} + type EnvFromInitParameters struct { // The ConfigMap to select from. 
@@ -622,6 +693,45 @@ type LocalObjectReferenceParameters struct { Name *string `json:"name" tf:"name,omitempty"` } +type NFSInitParameters struct { + + // Path exported by the NFS server + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // If true, mount the NFS volume as read only in all mounts. Defaults to false. + ReadOnly *bool `json:"readOnly,omitempty" tf:"read_only,omitempty"` + + // IP address or hostname of the NFS server + Server *string `json:"server,omitempty" tf:"server,omitempty"` +} + +type NFSObservation struct { + + // Path exported by the NFS server + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // If true, mount the NFS volume as read only in all mounts. Defaults to false. + ReadOnly *bool `json:"readOnly,omitempty" tf:"read_only,omitempty"` + + // IP address or hostname of the NFS server + Server *string `json:"server,omitempty" tf:"server,omitempty"` +} + +type NFSParameters struct { + + // Path exported by the NFS server + // +kubebuilder:validation:Optional + Path *string `json:"path" tf:"path,omitempty"` + + // If true, mount the NFS volume as read only in all mounts. Defaults to false. + // +kubebuilder:validation:Optional + ReadOnly *bool `json:"readOnly,omitempty" tf:"read_only,omitempty"` + + // IP address or hostname of the NFS server + // +kubebuilder:validation:Optional + Server *string `json:"server" tf:"server,omitempty"` +} + type PortsInitParameters struct { // Port number the container listens on. This must be a valid port number (between 1 and 65535). Defaults to "8080". @@ -1856,6 +1966,19 @@ type VolumeMountsParameters struct { type VolumesInitParameters struct { + // A filesystem specified by the Container Storage Interface (CSI). + // Structure is documented below. + Csi *CsiInitParameters `json:"csi,omitempty" tf:"csi,omitempty"` + + // Ephemeral storage which can be backed by real disks (HD, SSD), network storage or memory (i.e. tmpfs). For now only in memory (tmpfs) is supported. 
It is ephemeral in the sense that when the sandbox is taken down, the data is destroyed with it (it does not persist across sandbox runs). + // Structure is documented below. + EmptyDir *EmptyDirInitParameters `json:"emptyDir,omitempty" tf:"empty_dir,omitempty"` + + // A filesystem backed by a Network File System share. This filesystem requires the + // run.googleapis.com/execution-environment annotation to be unset or set to "gen2" + // Structure is documented below. + NFS *NFSInitParameters `json:"nfs,omitempty" tf:"nfs,omitempty"` + // Volume's name. Name *string `json:"name,omitempty" tf:"name,omitempty"` @@ -1868,6 +1991,19 @@ type VolumesInitParameters struct { type VolumesObservation struct { + // A filesystem specified by the Container Storage Interface (CSI). + // Structure is documented below. + Csi *CsiObservation `json:"csi,omitempty" tf:"csi,omitempty"` + + // Ephemeral storage which can be backed by real disks (HD, SSD), network storage or memory (i.e. tmpfs). For now only in memory (tmpfs) is supported. It is ephemeral in the sense that when the sandbox is taken down, the data is destroyed with it (it does not persist across sandbox runs). + // Structure is documented below. + EmptyDir *EmptyDirObservation `json:"emptyDir,omitempty" tf:"empty_dir,omitempty"` + + // A filesystem backed by a Network File System share. This filesystem requires the + // run.googleapis.com/execution-environment annotation to be unset or set to "gen2" + // Structure is documented below. + NFS *NFSObservation `json:"nfs,omitempty" tf:"nfs,omitempty"` + // Volume's name. Name *string `json:"name,omitempty" tf:"name,omitempty"` @@ -1880,6 +2016,22 @@ type VolumesObservation struct { type VolumesParameters struct { + // A filesystem specified by the Container Storage Interface (CSI). + // Structure is documented below. 
+ // +kubebuilder:validation:Optional + Csi *CsiParameters `json:"csi,omitempty" tf:"csi,omitempty"` + + // Ephemeral storage which can be backed by real disks (HD, SSD), network storage or memory (i.e. tmpfs). For now only in memory (tmpfs) is supported. It is ephemeral in the sense that when the sandbox is taken down, the data is destroyed with it (it does not persist across sandbox runs). + // Structure is documented below. + // +kubebuilder:validation:Optional + EmptyDir *EmptyDirParameters `json:"emptyDir,omitempty" tf:"empty_dir,omitempty"` + + // A filesystem backed by a Network File System share. This filesystem requires the + // run.googleapis.com/execution-environment annotation to be unset or set to "gen2" + // Structure is documented below. + // +kubebuilder:validation:Optional + NFS *NFSParameters `json:"nfs,omitempty" tf:"nfs,omitempty"` + // Volume's name. // +kubebuilder:validation:Optional Name *string `json:"name" tf:"name,omitempty"` diff --git a/apis/cloudrun/v1beta2/zz_v2job_types.go b/apis/cloudrun/v1beta2/zz_v2job_types.go index cae5ab573..5b8050e21 100755 --- a/apis/cloudrun/v1beta2/zz_v2job_types.go +++ b/apis/cloudrun/v1beta2/zz_v2job_types.go @@ -213,6 +213,35 @@ type ContainersVolumeMountsParameters struct { Name *string `json:"name" tf:"name,omitempty"` } +type GcsInitParameters struct { + + // Name of the cloud storage bucket to back the volume. The resource service account must have permission to access the bucket. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // If true, mount this volume as read-only in all mounts. + ReadOnly *bool `json:"readOnly,omitempty" tf:"read_only,omitempty"` +} + +type GcsObservation struct { + + // Name of the cloud storage bucket to back the volume. The resource service account must have permission to access the bucket. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // If true, mount this volume as read-only in all mounts. 
+ ReadOnly *bool `json:"readOnly,omitempty" tf:"read_only,omitempty"` +} + +type GcsParameters struct { + + // Name of the cloud storage bucket to back the volume. The resource service account must have permission to access the bucket. + // +kubebuilder:validation:Optional + Bucket *string `json:"bucket" tf:"bucket,omitempty"` + + // If true, mount this volume as read-only in all mounts. + // +kubebuilder:validation:Optional + ReadOnly *bool `json:"readOnly,omitempty" tf:"read_only,omitempty"` +} + type LatestCreatedExecutionInitParameters struct { } @@ -550,6 +579,18 @@ type TemplateVolumesInitParameters struct { // Structure is documented below. CloudSQLInstance *CloudSQLInstanceInitParameters `json:"cloudSqlInstance,omitempty" tf:"cloud_sql_instance,omitempty"` + // Ephemeral storage used as a shared volume. + // Structure is documented below. + EmptyDir *VolumesEmptyDirInitParameters `json:"emptyDir,omitempty" tf:"empty_dir,omitempty"` + + // Cloud Storage bucket mounted as a volume using GCSFuse. + // Structure is documented below. + Gcs *GcsInitParameters `json:"gcs,omitempty" tf:"gcs,omitempty"` + + // NFS share mounted as a volume. + // Structure is documented below. + NFS *VolumesNFSInitParameters `json:"nfs,omitempty" tf:"nfs,omitempty"` + // Volume's name. Name *string `json:"name,omitempty" tf:"name,omitempty"` @@ -564,6 +605,18 @@ type TemplateVolumesObservation struct { // Structure is documented below. CloudSQLInstance *CloudSQLInstanceObservation `json:"cloudSqlInstance,omitempty" tf:"cloud_sql_instance,omitempty"` + // Ephemeral storage used as a shared volume. + // Structure is documented below. + EmptyDir *VolumesEmptyDirObservation `json:"emptyDir,omitempty" tf:"empty_dir,omitempty"` + + // Cloud Storage bucket mounted as a volume using GCSFuse. + // Structure is documented below. + Gcs *GcsObservation `json:"gcs,omitempty" tf:"gcs,omitempty"` + + // NFS share mounted as a volume. + // Structure is documented below. 
+ NFS *VolumesNFSObservation `json:"nfs,omitempty" tf:"nfs,omitempty"` + // Volume's name. Name *string `json:"name,omitempty" tf:"name,omitempty"` @@ -579,6 +632,21 @@ type TemplateVolumesParameters struct { // +kubebuilder:validation:Optional CloudSQLInstance *CloudSQLInstanceParameters `json:"cloudSqlInstance,omitempty" tf:"cloud_sql_instance,omitempty"` + // Ephemeral storage used as a shared volume. + // Structure is documented below. + // +kubebuilder:validation:Optional + EmptyDir *VolumesEmptyDirParameters `json:"emptyDir,omitempty" tf:"empty_dir,omitempty"` + + // Cloud Storage bucket mounted as a volume using GCSFuse. + // Structure is documented below. + // +kubebuilder:validation:Optional + Gcs *GcsParameters `json:"gcs,omitempty" tf:"gcs,omitempty"` + + // NFS share mounted as a volume. + // Structure is documented below. + // +kubebuilder:validation:Optional + NFS *VolumesNFSParameters `json:"nfs,omitempty" tf:"nfs,omitempty"` + // Volume's name. // +kubebuilder:validation:Optional Name *string `json:"name" tf:"name,omitempty"` @@ -747,6 +815,10 @@ type V2JobObservation struct { // The deletion time. DeleteTime *string `json:"deleteTime,omitempty" tf:"delete_time,omitempty"` + // Defaults to true. + // When the field is set to false, deleting the job is allowed. + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + // +mapType=granular EffectiveAnnotations map[string]*string `json:"effectiveAnnotations,omitempty" tf:"effective_annotations,omitempty"` @@ -759,7 +831,7 @@ type V2JobObservation struct { // Number of executions created for this job. ExecutionCount *float64 `json:"executionCount,omitempty" tf:"execution_count,omitempty"` - // For a deleted resource, the time after which it will be permamently deleted. + // For a deleted resource, the time after which it will be permanently deleted. 
ExpireTime *string `json:"expireTime,omitempty" tf:"expire_time,omitempty"` // A number that monotonically increases every time the user modifies the desired state. @@ -1085,6 +1157,80 @@ type ValueSourceSecretKeyRefParameters struct { Version *string `json:"version" tf:"version,omitempty"` } +type VolumesEmptyDirInitParameters struct { + + // The different types of medium supported for EmptyDir. + // Default value is MEMORY. + // Possible values are: MEMORY. + Medium *string `json:"medium,omitempty" tf:"medium,omitempty"` + + // Limit on the storage usable by this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. This field's values are of the 'Quantity' k8s type: https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/quantity/. The default is nil which means that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir. + SizeLimit *string `json:"sizeLimit,omitempty" tf:"size_limit,omitempty"` +} + +type VolumesEmptyDirObservation struct { + + // The different types of medium supported for EmptyDir. + // Default value is MEMORY. + // Possible values are: MEMORY. + Medium *string `json:"medium,omitempty" tf:"medium,omitempty"` + + // Limit on the storage usable by this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. This field's values are of the 'Quantity' k8s type: https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/quantity/. The default is nil which means that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir. 
+ SizeLimit *string `json:"sizeLimit,omitempty" tf:"size_limit,omitempty"` +} + +type VolumesEmptyDirParameters struct { + + // The different types of medium supported for EmptyDir. + // Default value is MEMORY. + // Possible values are: MEMORY. + // +kubebuilder:validation:Optional + Medium *string `json:"medium,omitempty" tf:"medium,omitempty"` + + // Limit on the storage usable by this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. This field's values are of the 'Quantity' k8s type: https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/quantity/. The default is nil which means that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir. + // +kubebuilder:validation:Optional + SizeLimit *string `json:"sizeLimit,omitempty" tf:"size_limit,omitempty"` +} + +type VolumesNFSInitParameters struct { + + // Path that is exported by the NFS server. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // If true, mount this volume as read-only in all mounts. + ReadOnly *bool `json:"readOnly,omitempty" tf:"read_only,omitempty"` + + // Hostname or IP address of the NFS server. + Server *string `json:"server,omitempty" tf:"server,omitempty"` +} + +type VolumesNFSObservation struct { + + // Path that is exported by the NFS server. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // If true, mount this volume as read-only in all mounts. + ReadOnly *bool `json:"readOnly,omitempty" tf:"read_only,omitempty"` + + // Hostname or IP address of the NFS server. + Server *string `json:"server,omitempty" tf:"server,omitempty"` +} + +type VolumesNFSParameters struct { + + // Path that is exported by the NFS server. 
+ // +kubebuilder:validation:Optional + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // If true, mount this volume as read-only in all mounts. + // +kubebuilder:validation:Optional + ReadOnly *bool `json:"readOnly,omitempty" tf:"read_only,omitempty"` + + // Hostname or IP address of the NFS server. + // +kubebuilder:validation:Optional + Server *string `json:"server" tf:"server,omitempty"` +} + type VolumesSecretInitParameters struct { // Integer representation of mode bits to use on created files by default. Must be a value between 0000 and 0777 (octal), defaulting to 0444. Directories within the path are not affected by this setting. diff --git a/apis/cloudrun/v1beta2/zz_v2service_types.go b/apis/cloudrun/v1beta2/zz_v2service_types.go index 2c04f178f..520d69222 100755 --- a/apis/cloudrun/v1beta2/zz_v2service_types.go +++ b/apis/cloudrun/v1beta2/zz_v2service_types.go @@ -350,53 +350,6 @@ type EnvValueSourceSecretKeyRefParameters struct { Version *string `json:"version,omitempty" tf:"version,omitempty"` } -type GcsInitParameters struct { - - // GCS Bucket name - // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/storage/v1beta2.Bucket - Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` - - // Reference to a Bucket in storage to populate bucket. - // +kubebuilder:validation:Optional - BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` - - // Selector for a Bucket in storage to populate bucket. 
- // +kubebuilder:validation:Optional - BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` - - // If true, mount the NFS volume as read only - ReadOnly *bool `json:"readOnly,omitempty" tf:"read_only,omitempty"` -} - -type GcsObservation struct { - - // GCS Bucket name - Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` - - // If true, mount the NFS volume as read only - ReadOnly *bool `json:"readOnly,omitempty" tf:"read_only,omitempty"` -} - -type GcsParameters struct { - - // GCS Bucket name - // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/storage/v1beta2.Bucket - // +kubebuilder:validation:Optional - Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` - - // Reference to a Bucket in storage to populate bucket. - // +kubebuilder:validation:Optional - BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` - - // Selector for a Bucket in storage to populate bucket. - // +kubebuilder:validation:Optional - BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` - - // If true, mount the NFS volume as read only - // +kubebuilder:validation:Optional - ReadOnly *bool `json:"readOnly,omitempty" tf:"read_only,omitempty"` -} - type LivenessProbeGRPCInitParameters struct { // Port number to access on the container. Must be in the range 1 to 65535. @@ -540,70 +493,21 @@ type LivenessProbeTCPSocketParameters struct { Port *float64 `json:"port" tf:"port,omitempty"` } -type NFSInitParameters struct { - - // Path that is exported by the NFS server. - Path *string `json:"path,omitempty" tf:"path,omitempty"` - - // If true, mount the NFS volume as read only - ReadOnly *bool `json:"readOnly,omitempty" tf:"read_only,omitempty"` - - // Hostname or IP address of the NFS server - Server *string `json:"server,omitempty" tf:"server,omitempty"` -} - -type NFSObservation struct { - - // Path that is exported by the NFS server. 
- Path *string `json:"path,omitempty" tf:"path,omitempty"` - - // If true, mount the NFS volume as read only - ReadOnly *bool `json:"readOnly,omitempty" tf:"read_only,omitempty"` - - // Hostname or IP address of the NFS server - Server *string `json:"server,omitempty" tf:"server,omitempty"` -} - -type NFSParameters struct { - - // Path that is exported by the NFS server. - // +kubebuilder:validation:Optional - Path *string `json:"path" tf:"path,omitempty"` - - // If true, mount the NFS volume as read only - // +kubebuilder:validation:Optional - ReadOnly *bool `json:"readOnly,omitempty" tf:"read_only,omitempty"` - - // Hostname or IP address of the NFS server - // +kubebuilder:validation:Optional - Server *string `json:"server" tf:"server,omitempty"` -} - type ScalingInitParameters struct { - // Maximum number of serving instances that this resource should have. - MaxInstanceCount *float64 `json:"maxInstanceCount,omitempty" tf:"max_instance_count,omitempty"` - - // Minimum number of serving instances that this resource should have. + // Minimum number of serving instances that this resource should have. Defaults to 0. Must not be greater than maximum instance count. MinInstanceCount *float64 `json:"minInstanceCount,omitempty" tf:"min_instance_count,omitempty"` } type ScalingObservation struct { - // Maximum number of serving instances that this resource should have. - MaxInstanceCount *float64 `json:"maxInstanceCount,omitempty" tf:"max_instance_count,omitempty"` - - // Minimum number of serving instances that this resource should have. + // Minimum number of serving instances that this resource should have. Defaults to 0. Must not be greater than maximum instance count. MinInstanceCount *float64 `json:"minInstanceCount,omitempty" tf:"min_instance_count,omitempty"` } type ScalingParameters struct { - // Maximum number of serving instances that this resource should have. 
- // +kubebuilder:validation:Optional - MaxInstanceCount *float64 `json:"maxInstanceCount,omitempty" tf:"max_instance_count,omitempty"` - - // Minimum number of serving instances that this resource should have. + // Minimum number of serving instances that this resource should have. Defaults to 0. Must not be greater than maximum instance count. // +kubebuilder:validation:Optional MinInstanceCount *float64 `json:"minInstanceCount,omitempty" tf:"min_instance_count,omitempty"` } @@ -739,7 +643,7 @@ type TemplateContainersResourcesInitParameters struct { // resources is set, this field must be explicitly set to true to preserve the default behavior. CPUIdle *bool `json:"cpuIdle,omitempty" tf:"cpu_idle,omitempty"` - // Only memory and CPU are supported. Use key cpu for CPU limit and memory for memory limit. Note: The only supported values for CPU are '1', '2', '4', and '8'. Setting 4 CPU requires at least 2Gi of memory. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go + // Only memory, CPU, and nvidia.com/gpu are supported. Use key cpu for CPU limit, memory for memory limit, nvidia.com/gpu for gpu limit. Note: The only supported values for CPU are '1', '2', '4', and '8'. Setting 4 CPU requires at least 2Gi of memory. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go // +mapType=granular Limits map[string]*string `json:"limits,omitempty" tf:"limits,omitempty"` @@ -753,7 +657,7 @@ type TemplateContainersResourcesObservation struct { // resources is set, this field must be explicitly set to true to preserve the default behavior. CPUIdle *bool `json:"cpuIdle,omitempty" tf:"cpu_idle,omitempty"` - // Only memory and CPU are supported. Use key cpu for CPU limit and memory for memory limit. 
Note: The only supported values for CPU are '1', '2', '4', and '8'. Setting 4 CPU requires at least 2Gi of memory. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go + // Only memory, CPU, and nvidia.com/gpu are supported. Use key cpu for CPU limit, memory for memory limit, nvidia.com/gpu for gpu limit. Note: The only supported values for CPU are '1', '2', '4', and '8'. Setting 4 CPU requires at least 2Gi of memory. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go // +mapType=granular Limits map[string]*string `json:"limits,omitempty" tf:"limits,omitempty"` @@ -768,7 +672,7 @@ type TemplateContainersResourcesParameters struct { // +kubebuilder:validation:Optional CPUIdle *bool `json:"cpuIdle,omitempty" tf:"cpu_idle,omitempty"` - // Only memory and CPU are supported. Use key cpu for CPU limit and memory for memory limit. Note: The only supported values for CPU are '1', '2', '4', and '8'. Setting 4 CPU requires at least 2Gi of memory. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go + // Only memory, CPU, and nvidia.com/gpu are supported. Use key cpu for CPU limit, memory for memory limit, nvidia.com/gpu for gpu limit. Note: The only supported values for CPU are '1', '2', '4', and '8'. Setting 4 CPU requires at least 2Gi of memory. 
The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go // +kubebuilder:validation:Optional // +mapType=granular Limits map[string]*string `json:"limits,omitempty" tf:"limits,omitempty"` @@ -807,6 +711,38 @@ type TemplateContainersVolumeMountsParameters struct { Name *string `json:"name" tf:"name,omitempty"` } +type TemplateScalingInitParameters struct { + + // Maximum number of serving instances that this resource should have. Must not be less than minimum instance count. If absent, Cloud Run will calculate + // a default value based on the project's available container instances quota in the region and specified instance size. + MaxInstanceCount *float64 `json:"maxInstanceCount,omitempty" tf:"max_instance_count,omitempty"` + + // Minimum number of serving instances that this resource should have. Defaults to 0. Must not be greater than maximum instance count. + MinInstanceCount *float64 `json:"minInstanceCount,omitempty" tf:"min_instance_count,omitempty"` +} + +type TemplateScalingObservation struct { + + // Maximum number of serving instances that this resource should have. Must not be less than minimum instance count. If absent, Cloud Run will calculate + // a default value based on the project's available container instances quota in the region and specified instance size. + MaxInstanceCount *float64 `json:"maxInstanceCount,omitempty" tf:"max_instance_count,omitempty"` + + // Minimum number of serving instances that this resource should have. Defaults to 0. Must not be greater than maximum instance count. + MinInstanceCount *float64 `json:"minInstanceCount,omitempty" tf:"min_instance_count,omitempty"` +} + +type TemplateScalingParameters struct { + + // Maximum number of serving instances that this resource should have. Must not be less than minimum instance count. 
If absent, Cloud Run will calculate + // a default value based on the project's available container instances quota in the region and specified instance size. + // +kubebuilder:validation:Optional + MaxInstanceCount *float64 `json:"maxInstanceCount,omitempty" tf:"max_instance_count,omitempty"` + + // Minimum number of serving instances that this resource should have. Defaults to 0. Must not be greater than maximum instance count. + // +kubebuilder:validation:Optional + MinInstanceCount *float64 `json:"minInstanceCount,omitempty" tf:"min_instance_count,omitempty"` +} + type TemplateVPCAccessInitParameters struct { // VPC Access connector name. Format: projects/{project}/locations/{location}/connectors/{connector}, where {project} can be project id or number. @@ -852,6 +788,80 @@ type TemplateVPCAccessParameters struct { NetworkInterfaces []VPCAccessNetworkInterfacesParameters `json:"networkInterfaces,omitempty" tf:"network_interfaces,omitempty"` } +type TemplateVolumesEmptyDirInitParameters struct { + + // The different types of medium supported for EmptyDir. + // Default value is MEMORY. + // Possible values are: MEMORY. + Medium *string `json:"medium,omitempty" tf:"medium,omitempty"` + + // Limit on the storage usable by this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. This field's values are of the 'Quantity' k8s type: https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/quantity/. The default is nil which means that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir. + SizeLimit *string `json:"sizeLimit,omitempty" tf:"size_limit,omitempty"` +} + +type TemplateVolumesEmptyDirObservation struct { + + // The different types of medium supported for EmptyDir. + // Default value is MEMORY. 
+ // Possible values are: MEMORY. + Medium *string `json:"medium,omitempty" tf:"medium,omitempty"` + + // Limit on the storage usable by this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. This field's values are of the 'Quantity' k8s type: https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/quantity/. The default is nil which means that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir. + SizeLimit *string `json:"sizeLimit,omitempty" tf:"size_limit,omitempty"` +} + +type TemplateVolumesEmptyDirParameters struct { + + // The different types of medium supported for EmptyDir. + // Default value is MEMORY. + // Possible values are: MEMORY. + // +kubebuilder:validation:Optional + Medium *string `json:"medium,omitempty" tf:"medium,omitempty"` + + // Limit on the storage usable by this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. This field's values are of the 'Quantity' k8s type: https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/quantity/. The default is nil which means that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir. + // +kubebuilder:validation:Optional + SizeLimit *string `json:"sizeLimit,omitempty" tf:"size_limit,omitempty"` +} + +type TemplateVolumesNFSInitParameters struct { + + // Path that is exported by the NFS server. 
+ Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // If true, mount the NFS volume as read only + ReadOnly *bool `json:"readOnly,omitempty" tf:"read_only,omitempty"` + + // Hostname or IP address of the NFS server + Server *string `json:"server,omitempty" tf:"server,omitempty"` +} + +type TemplateVolumesNFSObservation struct { + + // Path that is exported by the NFS server. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // If true, mount the NFS volume as read only + ReadOnly *bool `json:"readOnly,omitempty" tf:"read_only,omitempty"` + + // Hostname or IP address of the NFS server + Server *string `json:"server,omitempty" tf:"server,omitempty"` +} + +type TemplateVolumesNFSParameters struct { + + // Path that is exported by the NFS server. + // +kubebuilder:validation:Optional + Path *string `json:"path" tf:"path,omitempty"` + + // If true, mount the NFS volume as read only + // +kubebuilder:validation:Optional + ReadOnly *bool `json:"readOnly,omitempty" tf:"read_only,omitempty"` + + // Hostname or IP address of the NFS server + // +kubebuilder:validation:Optional + Server *string `json:"server" tf:"server,omitempty"` +} + type TemplateVolumesSecretInitParameters struct { // Integer representation of mode bits to use on created files by default. Must be a value between 0000 and 0777 (octal), defaulting to 0444. Directories within the path are not affected by this setting. @@ -1055,6 +1065,9 @@ type V2ServiceInitParameters struct { // Possible values are: INGRESS_TRAFFIC_ALL, INGRESS_TRAFFIC_INTERNAL_ONLY, INGRESS_TRAFFIC_INTERNAL_LOAD_BALANCER. Ingress *string `json:"ingress,omitempty" tf:"ingress,omitempty"` + // Disables IAM permission check for run.routes.invoke for callers of this service. This feature is available by invitation only. For more information, visit https://cloud.google.com/run/docs/securing/managing-access#invoker_check. 
+ InvokerIAMDisabled *bool `json:"invokerIamDisabled,omitempty" tf:"invoker_iam_disabled,omitempty"` + // Unstructured key value map that can be used to organize and categorize objects. User-provided labels are shared with Google's billing system, so they can be used to filter, or break down billing charges by team, component, environment, state, etc. // For more information, visit https://cloud.google.com/resource-manager/docs/creating-managing-labels or https://cloud.google.com/run/docs/configuring/labels. // Cloud Run API v2 does not support labels with run.googleapis.com, cloud.googleapis.com, serving.knative.dev, or autoscaling.knative.dev namespaces, and they will be rejected. @@ -1072,6 +1085,10 @@ type V2ServiceInitParameters struct { // If it is not provided, the provider project is used. Project *string `json:"project,omitempty" tf:"project,omitempty"` + // Scaling settings for this Revision. + // Structure is documented below. + Scaling *ScalingInitParameters `json:"scaling,omitempty" tf:"scaling,omitempty"` + // The template used to create revisions for this Service. // Structure is documented below. Template *V2ServiceTemplateInitParameters `json:"template,omitempty" tf:"template,omitempty"` @@ -1117,6 +1134,10 @@ type V2ServiceObservation struct { // The deletion time. DeleteTime *string `json:"deleteTime,omitempty" tf:"delete_time,omitempty"` + // Defaults to true. + // When the field is set to false, deleting the service is allowed. + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + // User-provided description of the Service. This field currently has a 512-character limit. Description *string `json:"description,omitempty" tf:"description,omitempty"` @@ -1129,7 +1150,7 @@ type V2ServiceObservation struct { // A system-generated fingerprint for this version of the resource. May be used to detect modification conflict during updates. 
Etag *string `json:"etag,omitempty" tf:"etag,omitempty"` - // For a deleted resource, the time after which it will be permamently deleted. + // For a deleted resource, the time after which it will be permanently deleted. ExpireTime *string `json:"expireTime,omitempty" tf:"expire_time,omitempty"` // A number that monotonically increases every time the user modifies the desired state. Please note that unlike v1, this is an int64 value. As with most Google APIs, its JSON representation will be a string instead of an integer. @@ -1142,6 +1163,9 @@ type V2ServiceObservation struct { // Possible values are: INGRESS_TRAFFIC_ALL, INGRESS_TRAFFIC_INTERNAL_ONLY, INGRESS_TRAFFIC_INTERNAL_LOAD_BALANCER. Ingress *string `json:"ingress,omitempty" tf:"ingress,omitempty"` + // Disables IAM permission check for run.routes.invoke for callers of this service. This feature is available by invitation only. For more information, visit https://cloud.google.com/run/docs/securing/managing-access#invoker_check. + InvokerIAMDisabled *bool `json:"invokerIamDisabled,omitempty" tf:"invoker_iam_disabled,omitempty"` + // Unstructured key value map that can be used to organize and categorize objects. User-provided labels are shared with Google's billing system, so they can be used to filter, or break down billing charges by team, component, environment, state, etc. // For more information, visit https://cloud.google.com/resource-manager/docs/creating-managing-labels or https://cloud.google.com/run/docs/configuring/labels. // Cloud Run API v2 does not support labels with run.googleapis.com, cloud.googleapis.com, serving.knative.dev, or autoscaling.knative.dev namespaces, and they will be rejected. @@ -1180,6 +1204,10 @@ type V2ServiceObservation struct { // If reconciliation failed, trafficStatuses, observedGeneration, and latestReadyRevision will have the state of the last serving revision, or empty for newly created Services. 
Additional information on the failure can be found in terminalCondition and conditions. Reconciling *bool `json:"reconciling,omitempty" tf:"reconciling,omitempty"` + // Scaling settings for this Revision. + // Structure is documented below. + Scaling *ScalingObservation `json:"scaling,omitempty" tf:"scaling,omitempty"` + // The template used to create revisions for this Service. // Structure is documented below. Template *V2ServiceTemplateObservation `json:"template,omitempty" tf:"template,omitempty"` @@ -1209,6 +1237,9 @@ type V2ServiceObservation struct { // The last-modified time. UpdateTime *string `json:"updateTime,omitempty" tf:"update_time,omitempty"` + + // All URLs serving traffic for this Service. + Urls []*string `json:"urls,omitempty" tf:"urls,omitempty"` } type V2ServiceParameters struct { @@ -1248,6 +1279,10 @@ type V2ServiceParameters struct { // +kubebuilder:validation:Optional Ingress *string `json:"ingress,omitempty" tf:"ingress,omitempty"` + // Disables IAM permission check for run.routes.invoke for callers of this service. This feature is available by invitation only. For more information, visit https://cloud.google.com/run/docs/securing/managing-access#invoker_check. + // +kubebuilder:validation:Optional + InvokerIAMDisabled *bool `json:"invokerIamDisabled,omitempty" tf:"invoker_iam_disabled,omitempty"` + // Unstructured key value map that can be used to organize and categorize objects. User-provided labels are shared with Google's billing system, so they can be used to filter, or break down billing charges by team, component, environment, state, etc. // For more information, visit https://cloud.google.com/resource-manager/docs/creating-managing-labels or https://cloud.google.com/run/docs/configuring/labels. // Cloud Run API v2 does not support labels with run.googleapis.com, cloud.googleapis.com, serving.knative.dev, or autoscaling.knative.dev namespaces, and they will be rejected. 
@@ -1272,6 +1307,11 @@ type V2ServiceParameters struct { // +kubebuilder:validation:Optional Project *string `json:"project,omitempty" tf:"project,omitempty"` + // Scaling settings for this Revision. + // Structure is documented below. + // +kubebuilder:validation:Optional + Scaling *ScalingParameters `json:"scaling,omitempty" tf:"scaling,omitempty"` + // The template used to create revisions for this Service. // Structure is documented below. // +kubebuilder:validation:Optional @@ -1469,7 +1509,7 @@ type V2ServiceTemplateInitParameters struct { // Scaling settings for this Revision. // Structure is documented below. - Scaling *ScalingInitParameters `json:"scaling,omitempty" tf:"scaling,omitempty"` + Scaling *TemplateScalingInitParameters `json:"scaling,omitempty" tf:"scaling,omitempty"` // Email address of the IAM service account associated with the revision of the service. The service account represents the identity of the running revision, and determines what permissions the revision has. If not provided, the revision will use the project's default service account. ServiceAccount *string `json:"serviceAccount,omitempty" tf:"service_account,omitempty"` @@ -1526,7 +1566,7 @@ type V2ServiceTemplateObservation struct { // Scaling settings for this Revision. // Structure is documented below. - Scaling *ScalingObservation `json:"scaling,omitempty" tf:"scaling,omitempty"` + Scaling *TemplateScalingObservation `json:"scaling,omitempty" tf:"scaling,omitempty"` // Email address of the IAM service account associated with the revision of the service. The service account represents the identity of the running revision, and determines what permissions the revision has. If not provided, the revision will use the project's default service account. ServiceAccount *string `json:"serviceAccount,omitempty" tf:"service_account,omitempty"` @@ -1591,7 +1631,7 @@ type V2ServiceTemplateParameters struct { // Scaling settings for this Revision. // Structure is documented below. 
// +kubebuilder:validation:Optional - Scaling *ScalingParameters `json:"scaling,omitempty" tf:"scaling,omitempty"` + Scaling *TemplateScalingParameters `json:"scaling,omitempty" tf:"scaling,omitempty"` // Email address of the IAM service account associated with the revision of the service. The service account represents the identity of the running revision, and determines what permissions the revision has. If not provided, the revision will use the project's default service account. // +kubebuilder:validation:Optional @@ -1623,13 +1663,17 @@ type V2ServiceTemplateVolumesInitParameters struct { // Structure is documented below. CloudSQLInstance *VolumesCloudSQLInstanceInitParameters `json:"cloudSqlInstance,omitempty" tf:"cloud_sql_instance,omitempty"` - // Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment and requires launch-stage to be set to ALPHA or BETA. + // Ephemeral storage used as a shared volume. + // Structure is documented below. + EmptyDir *TemplateVolumesEmptyDirInitParameters `json:"emptyDir,omitempty" tf:"empty_dir,omitempty"` + + // Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment. // Structure is documented below. - Gcs *GcsInitParameters `json:"gcs,omitempty" tf:"gcs,omitempty"` + Gcs *VolumesGcsInitParameters `json:"gcs,omitempty" tf:"gcs,omitempty"` // Represents an NFS mount. // Structure is documented below. - NFS *NFSInitParameters `json:"nfs,omitempty" tf:"nfs,omitempty"` + NFS *TemplateVolumesNFSInitParameters `json:"nfs,omitempty" tf:"nfs,omitempty"` // Volume's name. Name *string `json:"name,omitempty" tf:"name,omitempty"` @@ -1645,13 +1689,17 @@ type V2ServiceTemplateVolumesObservation struct { // Structure is documented below. 
CloudSQLInstance *VolumesCloudSQLInstanceObservation `json:"cloudSqlInstance,omitempty" tf:"cloud_sql_instance,omitempty"` - // Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment and requires launch-stage to be set to ALPHA or BETA. + // Ephemeral storage used as a shared volume. // Structure is documented below. - Gcs *GcsObservation `json:"gcs,omitempty" tf:"gcs,omitempty"` + EmptyDir *TemplateVolumesEmptyDirObservation `json:"emptyDir,omitempty" tf:"empty_dir,omitempty"` + + // Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment. + // Structure is documented below. + Gcs *VolumesGcsObservation `json:"gcs,omitempty" tf:"gcs,omitempty"` // Represents an NFS mount. // Structure is documented below. - NFS *NFSObservation `json:"nfs,omitempty" tf:"nfs,omitempty"` + NFS *TemplateVolumesNFSObservation `json:"nfs,omitempty" tf:"nfs,omitempty"` // Volume's name. Name *string `json:"name,omitempty" tf:"name,omitempty"` @@ -1668,15 +1716,20 @@ type V2ServiceTemplateVolumesParameters struct { // +kubebuilder:validation:Optional CloudSQLInstance *VolumesCloudSQLInstanceParameters `json:"cloudSqlInstance,omitempty" tf:"cloud_sql_instance,omitempty"` - // Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment and requires launch-stage to be set to ALPHA or BETA. + // Ephemeral storage used as a shared volume. // Structure is documented below. // +kubebuilder:validation:Optional - Gcs *GcsParameters `json:"gcs,omitempty" tf:"gcs,omitempty"` + EmptyDir *TemplateVolumesEmptyDirParameters `json:"emptyDir,omitempty" tf:"empty_dir,omitempty"` + + // Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment. + // Structure is documented below. 
+ // +kubebuilder:validation:Optional + Gcs *VolumesGcsParameters `json:"gcs,omitempty" tf:"gcs,omitempty"` // Represents an NFS mount. // Structure is documented below. // +kubebuilder:validation:Optional - NFS *NFSParameters `json:"nfs,omitempty" tf:"nfs,omitempty"` + NFS *TemplateVolumesNFSParameters `json:"nfs,omitempty" tf:"nfs,omitempty"` // Volume's name. // +kubebuilder:validation:Optional @@ -1874,6 +1927,53 @@ type VolumesCloudSQLInstanceParameters struct { InstancesSelector *v1.Selector `json:"instancesSelector,omitempty" tf:"-"` } +type VolumesGcsInitParameters struct { + + // GCS Bucket name + // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/storage/v1beta2.Bucket + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Reference to a Bucket in storage to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in storage to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // If true, mount the NFS volume as read only + ReadOnly *bool `json:"readOnly,omitempty" tf:"read_only,omitempty"` +} + +type VolumesGcsObservation struct { + + // GCS Bucket name + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // If true, mount the NFS volume as read only + ReadOnly *bool `json:"readOnly,omitempty" tf:"read_only,omitempty"` +} + +type VolumesGcsParameters struct { + + // GCS Bucket name + // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/storage/v1beta2.Bucket + // +kubebuilder:validation:Optional + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Reference to a Bucket in storage to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in storage to populate bucket. 
+ // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // If true, mount the NFS volume as read only + // +kubebuilder:validation:Optional + ReadOnly *bool `json:"readOnly,omitempty" tf:"read_only,omitempty"` +} + type VolumesSecretItemsInitParameters struct { // Integer octal mode bits to use on this file, must be a value between 01 and 0777 (octal). If 0 or not set, the Volume's default mode will be used. diff --git a/apis/cloudtasks/v1beta2/zz_generated.deepcopy.go b/apis/cloudtasks/v1beta2/zz_generated.deepcopy.go index 6de75b1c3..4ed3aa1b8 100644 --- a/apis/cloudtasks/v1beta2/zz_generated.deepcopy.go +++ b/apis/cloudtasks/v1beta2/zz_generated.deepcopy.go @@ -108,6 +108,577 @@ func (in *AppEngineRoutingOverrideParameters) DeepCopy() *AppEngineRoutingOverri return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPTargetInitParameters) DeepCopyInto(out *HTTPTargetInitParameters) { + *out = *in + if in.HTTPMethod != nil { + in, out := &in.HTTPMethod, &out.HTTPMethod + *out = new(string) + **out = **in + } + if in.HeaderOverrides != nil { + in, out := &in.HeaderOverrides, &out.HeaderOverrides + *out = make([]HeaderOverridesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OAuthToken != nil { + in, out := &in.OAuthToken, &out.OAuthToken + *out = new(OAuthTokenInitParameters) + (*in).DeepCopyInto(*out) + } + if in.OidcToken != nil { + in, out := &in.OidcToken, &out.OidcToken + *out = new(OidcTokenInitParameters) + (*in).DeepCopyInto(*out) + } + if in.URIOverride != nil { + in, out := &in.URIOverride, &out.URIOverride + *out = new(URIOverrideInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPTargetInitParameters. 
+func (in *HTTPTargetInitParameters) DeepCopy() *HTTPTargetInitParameters { + if in == nil { + return nil + } + out := new(HTTPTargetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPTargetObservation) DeepCopyInto(out *HTTPTargetObservation) { + *out = *in + if in.HTTPMethod != nil { + in, out := &in.HTTPMethod, &out.HTTPMethod + *out = new(string) + **out = **in + } + if in.HeaderOverrides != nil { + in, out := &in.HeaderOverrides, &out.HeaderOverrides + *out = make([]HeaderOverridesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OAuthToken != nil { + in, out := &in.OAuthToken, &out.OAuthToken + *out = new(OAuthTokenObservation) + (*in).DeepCopyInto(*out) + } + if in.OidcToken != nil { + in, out := &in.OidcToken, &out.OidcToken + *out = new(OidcTokenObservation) + (*in).DeepCopyInto(*out) + } + if in.URIOverride != nil { + in, out := &in.URIOverride, &out.URIOverride + *out = new(URIOverrideObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPTargetObservation. +func (in *HTTPTargetObservation) DeepCopy() *HTTPTargetObservation { + if in == nil { + return nil + } + out := new(HTTPTargetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPTargetParameters) DeepCopyInto(out *HTTPTargetParameters) { + *out = *in + if in.HTTPMethod != nil { + in, out := &in.HTTPMethod, &out.HTTPMethod + *out = new(string) + **out = **in + } + if in.HeaderOverrides != nil { + in, out := &in.HeaderOverrides, &out.HeaderOverrides + *out = make([]HeaderOverridesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OAuthToken != nil { + in, out := &in.OAuthToken, &out.OAuthToken + *out = new(OAuthTokenParameters) + (*in).DeepCopyInto(*out) + } + if in.OidcToken != nil { + in, out := &in.OidcToken, &out.OidcToken + *out = new(OidcTokenParameters) + (*in).DeepCopyInto(*out) + } + if in.URIOverride != nil { + in, out := &in.URIOverride, &out.URIOverride + *out = new(URIOverrideParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPTargetParameters. +func (in *HTTPTargetParameters) DeepCopy() *HTTPTargetParameters { + if in == nil { + return nil + } + out := new(HTTPTargetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HeaderInitParameters) DeepCopyInto(out *HeaderInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderInitParameters. +func (in *HeaderInitParameters) DeepCopy() *HeaderInitParameters { + if in == nil { + return nil + } + out := new(HeaderInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HeaderObservation) DeepCopyInto(out *HeaderObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderObservation. +func (in *HeaderObservation) DeepCopy() *HeaderObservation { + if in == nil { + return nil + } + out := new(HeaderObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HeaderOverridesInitParameters) DeepCopyInto(out *HeaderOverridesInitParameters) { + *out = *in + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = new(HeaderInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderOverridesInitParameters. +func (in *HeaderOverridesInitParameters) DeepCopy() *HeaderOverridesInitParameters { + if in == nil { + return nil + } + out := new(HeaderOverridesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HeaderOverridesObservation) DeepCopyInto(out *HeaderOverridesObservation) { + *out = *in + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = new(HeaderObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderOverridesObservation. +func (in *HeaderOverridesObservation) DeepCopy() *HeaderOverridesObservation { + if in == nil { + return nil + } + out := new(HeaderOverridesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *HeaderOverridesParameters) DeepCopyInto(out *HeaderOverridesParameters) { + *out = *in + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = new(HeaderParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderOverridesParameters. +func (in *HeaderOverridesParameters) DeepCopy() *HeaderOverridesParameters { + if in == nil { + return nil + } + out := new(HeaderOverridesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HeaderParameters) DeepCopyInto(out *HeaderParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderParameters. +func (in *HeaderParameters) DeepCopy() *HeaderParameters { + if in == nil { + return nil + } + out := new(HeaderParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OAuthTokenInitParameters) DeepCopyInto(out *OAuthTokenInitParameters) { + *out = *in + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(string) + **out = **in + } + if in.ServiceAccountEmail != nil { + in, out := &in.ServiceAccountEmail, &out.ServiceAccountEmail + *out = new(string) + **out = **in + } + if in.ServiceAccountEmailRef != nil { + in, out := &in.ServiceAccountEmailRef, &out.ServiceAccountEmailRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceAccountEmailSelector != nil { + in, out := &in.ServiceAccountEmailSelector, &out.ServiceAccountEmailSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthTokenInitParameters. +func (in *OAuthTokenInitParameters) DeepCopy() *OAuthTokenInitParameters { + if in == nil { + return nil + } + out := new(OAuthTokenInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthTokenObservation) DeepCopyInto(out *OAuthTokenObservation) { + *out = *in + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(string) + **out = **in + } + if in.ServiceAccountEmail != nil { + in, out := &in.ServiceAccountEmail, &out.ServiceAccountEmail + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthTokenObservation. +func (in *OAuthTokenObservation) DeepCopy() *OAuthTokenObservation { + if in == nil { + return nil + } + out := new(OAuthTokenObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OAuthTokenParameters) DeepCopyInto(out *OAuthTokenParameters) { + *out = *in + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(string) + **out = **in + } + if in.ServiceAccountEmail != nil { + in, out := &in.ServiceAccountEmail, &out.ServiceAccountEmail + *out = new(string) + **out = **in + } + if in.ServiceAccountEmailRef != nil { + in, out := &in.ServiceAccountEmailRef, &out.ServiceAccountEmailRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceAccountEmailSelector != nil { + in, out := &in.ServiceAccountEmailSelector, &out.ServiceAccountEmailSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthTokenParameters. +func (in *OAuthTokenParameters) DeepCopy() *OAuthTokenParameters { + if in == nil { + return nil + } + out := new(OAuthTokenParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OidcTokenInitParameters) DeepCopyInto(out *OidcTokenInitParameters) { + *out = *in + if in.Audience != nil { + in, out := &in.Audience, &out.Audience + *out = new(string) + **out = **in + } + if in.ServiceAccountEmail != nil { + in, out := &in.ServiceAccountEmail, &out.ServiceAccountEmail + *out = new(string) + **out = **in + } + if in.ServiceAccountEmailRef != nil { + in, out := &in.ServiceAccountEmailRef, &out.ServiceAccountEmailRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceAccountEmailSelector != nil { + in, out := &in.ServiceAccountEmailSelector, &out.ServiceAccountEmailSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OidcTokenInitParameters. 
+func (in *OidcTokenInitParameters) DeepCopy() *OidcTokenInitParameters { + if in == nil { + return nil + } + out := new(OidcTokenInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OidcTokenObservation) DeepCopyInto(out *OidcTokenObservation) { + *out = *in + if in.Audience != nil { + in, out := &in.Audience, &out.Audience + *out = new(string) + **out = **in + } + if in.ServiceAccountEmail != nil { + in, out := &in.ServiceAccountEmail, &out.ServiceAccountEmail + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OidcTokenObservation. +func (in *OidcTokenObservation) DeepCopy() *OidcTokenObservation { + if in == nil { + return nil + } + out := new(OidcTokenObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OidcTokenParameters) DeepCopyInto(out *OidcTokenParameters) { + *out = *in + if in.Audience != nil { + in, out := &in.Audience, &out.Audience + *out = new(string) + **out = **in + } + if in.ServiceAccountEmail != nil { + in, out := &in.ServiceAccountEmail, &out.ServiceAccountEmail + *out = new(string) + **out = **in + } + if in.ServiceAccountEmailRef != nil { + in, out := &in.ServiceAccountEmailRef, &out.ServiceAccountEmailRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceAccountEmailSelector != nil { + in, out := &in.ServiceAccountEmailSelector, &out.ServiceAccountEmailSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OidcTokenParameters. 
+func (in *OidcTokenParameters) DeepCopy() *OidcTokenParameters { + if in == nil { + return nil + } + out := new(OidcTokenParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PathOverrideInitParameters) DeepCopyInto(out *PathOverrideInitParameters) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PathOverrideInitParameters. +func (in *PathOverrideInitParameters) DeepCopy() *PathOverrideInitParameters { + if in == nil { + return nil + } + out := new(PathOverrideInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PathOverrideObservation) DeepCopyInto(out *PathOverrideObservation) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PathOverrideObservation. +func (in *PathOverrideObservation) DeepCopy() *PathOverrideObservation { + if in == nil { + return nil + } + out := new(PathOverrideObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PathOverrideParameters) DeepCopyInto(out *PathOverrideParameters) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PathOverrideParameters. 
+func (in *PathOverrideParameters) DeepCopy() *PathOverrideParameters { + if in == nil { + return nil + } + out := new(PathOverrideParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueryOverrideInitParameters) DeepCopyInto(out *QueryOverrideInitParameters) { + *out = *in + if in.QueryParams != nil { + in, out := &in.QueryParams, &out.QueryParams + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryOverrideInitParameters. +func (in *QueryOverrideInitParameters) DeepCopy() *QueryOverrideInitParameters { + if in == nil { + return nil + } + out := new(QueryOverrideInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueryOverrideObservation) DeepCopyInto(out *QueryOverrideObservation) { + *out = *in + if in.QueryParams != nil { + in, out := &in.QueryParams, &out.QueryParams + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryOverrideObservation. +func (in *QueryOverrideObservation) DeepCopy() *QueryOverrideObservation { + if in == nil { + return nil + } + out := new(QueryOverrideObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueryOverrideParameters) DeepCopyInto(out *QueryOverrideParameters) { + *out = *in + if in.QueryParams != nil { + in, out := &in.QueryParams, &out.QueryParams + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryOverrideParameters. 
+func (in *QueryOverrideParameters) DeepCopy() *QueryOverrideParameters { + if in == nil { + return nil + } + out := new(QueryOverrideParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Queue) DeepCopyInto(out *Queue) { *out = *in @@ -143,6 +714,11 @@ func (in *QueueInitParameters) DeepCopyInto(out *QueueInitParameters) { *out = new(AppEngineRoutingOverrideInitParameters) (*in).DeepCopyInto(*out) } + if in.HTTPTarget != nil { + in, out := &in.HTTPTarget, &out.HTTPTarget + *out = new(HTTPTargetInitParameters) + (*in).DeepCopyInto(*out) + } if in.Project != nil { in, out := &in.Project, &out.Project *out = new(string) @@ -225,6 +801,11 @@ func (in *QueueObservation) DeepCopyInto(out *QueueObservation) { *out = new(AppEngineRoutingOverrideObservation) (*in).DeepCopyInto(*out) } + if in.HTTPTarget != nil { + in, out := &in.HTTPTarget, &out.HTTPTarget + *out = new(HTTPTargetObservation) + (*in).DeepCopyInto(*out) + } if in.ID != nil { in, out := &in.ID, &out.ID *out = new(string) @@ -275,6 +856,11 @@ func (in *QueueParameters) DeepCopyInto(out *QueueParameters) { *out = new(AppEngineRoutingOverrideParameters) (*in).DeepCopyInto(*out) } + if in.HTTPTarget != nil { + in, out := &in.HTTPTarget, &out.HTTPTarget + *out = new(HTTPTargetParameters) + (*in).DeepCopyInto(*out) + } if in.Location != nil { in, out := &in.Location, &out.Location *out = new(string) @@ -616,3 +1202,138 @@ func (in *StackdriverLoggingConfigParameters) DeepCopy() *StackdriverLoggingConf in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *URIOverrideInitParameters) DeepCopyInto(out *URIOverrideInitParameters) { + *out = *in + if in.Host != nil { + in, out := &in.Host, &out.Host + *out = new(string) + **out = **in + } + if in.PathOverride != nil { + in, out := &in.PathOverride, &out.PathOverride + *out = new(PathOverrideInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(string) + **out = **in + } + if in.QueryOverride != nil { + in, out := &in.QueryOverride, &out.QueryOverride + *out = new(QueryOverrideInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Scheme != nil { + in, out := &in.Scheme, &out.Scheme + *out = new(string) + **out = **in + } + if in.URIOverrideEnforceMode != nil { + in, out := &in.URIOverrideEnforceMode, &out.URIOverrideEnforceMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new URIOverrideInitParameters. +func (in *URIOverrideInitParameters) DeepCopy() *URIOverrideInitParameters { + if in == nil { + return nil + } + out := new(URIOverrideInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *URIOverrideObservation) DeepCopyInto(out *URIOverrideObservation) { + *out = *in + if in.Host != nil { + in, out := &in.Host, &out.Host + *out = new(string) + **out = **in + } + if in.PathOverride != nil { + in, out := &in.PathOverride, &out.PathOverride + *out = new(PathOverrideObservation) + (*in).DeepCopyInto(*out) + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(string) + **out = **in + } + if in.QueryOverride != nil { + in, out := &in.QueryOverride, &out.QueryOverride + *out = new(QueryOverrideObservation) + (*in).DeepCopyInto(*out) + } + if in.Scheme != nil { + in, out := &in.Scheme, &out.Scheme + *out = new(string) + **out = **in + } + if in.URIOverrideEnforceMode != nil { + in, out := &in.URIOverrideEnforceMode, &out.URIOverrideEnforceMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new URIOverrideObservation. +func (in *URIOverrideObservation) DeepCopy() *URIOverrideObservation { + if in == nil { + return nil + } + out := new(URIOverrideObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *URIOverrideParameters) DeepCopyInto(out *URIOverrideParameters) { + *out = *in + if in.Host != nil { + in, out := &in.Host, &out.Host + *out = new(string) + **out = **in + } + if in.PathOverride != nil { + in, out := &in.PathOverride, &out.PathOverride + *out = new(PathOverrideParameters) + (*in).DeepCopyInto(*out) + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(string) + **out = **in + } + if in.QueryOverride != nil { + in, out := &in.QueryOverride, &out.QueryOverride + *out = new(QueryOverrideParameters) + (*in).DeepCopyInto(*out) + } + if in.Scheme != nil { + in, out := &in.Scheme, &out.Scheme + *out = new(string) + **out = **in + } + if in.URIOverrideEnforceMode != nil { + in, out := &in.URIOverrideEnforceMode, &out.URIOverrideEnforceMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new URIOverrideParameters. +func (in *URIOverrideParameters) DeepCopy() *URIOverrideParameters { + if in == nil { + return nil + } + out := new(URIOverrideParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/cloudtasks/v1beta2/zz_generated.resolvers.go b/apis/cloudtasks/v1beta2/zz_generated.resolvers.go index e3ddf56e0..977604906 100644 --- a/apis/cloudtasks/v1beta2/zz_generated.resolvers.go +++ b/apis/cloudtasks/v1beta2/zz_generated.resolvers.go @@ -9,6 +9,7 @@ package v1beta2 import ( "context" reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" errors "github.com/pkg/errors" xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" @@ -25,12 +26,58 @@ func (mg *Queue) ResolveReferences(ctx context.Context, c client.Reader) error { var rsp reference.ResolutionResponse var err error + + if mg.Spec.ForProvider.HTTPTarget != nil { + if mg.Spec.ForProvider.HTTPTarget.OAuthToken != nil { + { + m, l, err = apisresolver.GetManagedResource("cloudplatform.gcp.upbound.io", 
"v1beta1", "ServiceAccount", "ServiceAccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.HTTPTarget.OAuthToken.ServiceAccountEmail), + Extract: resource.ExtractParamPath("email", true), + Reference: mg.Spec.ForProvider.HTTPTarget.OAuthToken.ServiceAccountEmailRef, + Selector: mg.Spec.ForProvider.HTTPTarget.OAuthToken.ServiceAccountEmailSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.HTTPTarget.OAuthToken.ServiceAccountEmail") + } + mg.Spec.ForProvider.HTTPTarget.OAuthToken.ServiceAccountEmail = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.HTTPTarget.OAuthToken.ServiceAccountEmailRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.HTTPTarget != nil { + if mg.Spec.ForProvider.HTTPTarget.OidcToken != nil { + { + m, l, err = apisresolver.GetManagedResource("cloudplatform.gcp.upbound.io", "v1beta1", "ServiceAccount", "ServiceAccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.HTTPTarget.OidcToken.ServiceAccountEmail), + Extract: resource.ExtractParamPath("email", true), + Reference: mg.Spec.ForProvider.HTTPTarget.OidcToken.ServiceAccountEmailRef, + Selector: mg.Spec.ForProvider.HTTPTarget.OidcToken.ServiceAccountEmailSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.HTTPTarget.OidcToken.ServiceAccountEmail") + } + mg.Spec.ForProvider.HTTPTarget.OidcToken.ServiceAccountEmail = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.ForProvider.HTTPTarget.OidcToken.ServiceAccountEmailRef = rsp.ResolvedReference + + } + } { m, l, err = apisresolver.GetManagedResource("cloudplatform.gcp.upbound.io", "v1beta1", "Project", "ProjectList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } - rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Project), Extract: reference.ExternalName(), @@ -44,12 +91,58 @@ func (mg *Queue) ResolveReferences(ctx context.Context, c client.Reader) error { } mg.Spec.ForProvider.Project = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ProjectRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.HTTPTarget != nil { + if mg.Spec.InitProvider.HTTPTarget.OAuthToken != nil { + { + m, l, err = apisresolver.GetManagedResource("cloudplatform.gcp.upbound.io", "v1beta1", "ServiceAccount", "ServiceAccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.HTTPTarget.OAuthToken.ServiceAccountEmail), + Extract: resource.ExtractParamPath("email", true), + Reference: mg.Spec.InitProvider.HTTPTarget.OAuthToken.ServiceAccountEmailRef, + Selector: mg.Spec.InitProvider.HTTPTarget.OAuthToken.ServiceAccountEmailSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.HTTPTarget.OAuthToken.ServiceAccountEmail") + } + mg.Spec.InitProvider.HTTPTarget.OAuthToken.ServiceAccountEmail = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.HTTPTarget.OAuthToken.ServiceAccountEmailRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.HTTPTarget != nil { + if mg.Spec.InitProvider.HTTPTarget.OidcToken != nil { + { + m, l, err = 
apisresolver.GetManagedResource("cloudplatform.gcp.upbound.io", "v1beta1", "ServiceAccount", "ServiceAccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.HTTPTarget.OidcToken.ServiceAccountEmail), + Extract: resource.ExtractParamPath("email", true), + Reference: mg.Spec.InitProvider.HTTPTarget.OidcToken.ServiceAccountEmailRef, + Selector: mg.Spec.InitProvider.HTTPTarget.OidcToken.ServiceAccountEmailSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.HTTPTarget.OidcToken.ServiceAccountEmail") + } + mg.Spec.InitProvider.HTTPTarget.OidcToken.ServiceAccountEmail = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.HTTPTarget.OidcToken.ServiceAccountEmailRef = rsp.ResolvedReference + + } + } { m, l, err = apisresolver.GetManagedResource("cloudplatform.gcp.upbound.io", "v1beta1", "Project", "ProjectList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } - rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Project), Extract: reference.ExternalName(), diff --git a/apis/cloudtasks/v1beta2/zz_queue_types.go b/apis/cloudtasks/v1beta2/zz_queue_types.go index 60dbb4b7a..515c49506 100755 --- a/apis/cloudtasks/v1beta2/zz_queue_types.go +++ b/apis/cloudtasks/v1beta2/zz_queue_types.go @@ -65,6 +65,321 @@ type AppEngineRoutingOverrideParameters struct { Version *string `json:"version,omitempty" tf:"version,omitempty"` } +type HTTPTargetInitParameters struct { + + // The HTTP method to use for the request. + // When specified, it overrides HttpRequest for the task. 
+ // Note that if the value is set to GET the body of the task will be ignored at execution time. + // Possible values are: HTTP_METHOD_UNSPECIFIED, POST, GET, HEAD, PUT, DELETE, PATCH, OPTIONS. + HTTPMethod *string `json:"httpMethod,omitempty" tf:"http_method,omitempty"` + + // HTTP target headers. + // This map contains the header field names and values. + // Headers will be set when running the CreateTask and/or BufferTask. + // These headers represent a subset of the headers that will be configured for the task's HTTP request. + // Some HTTP request headers will be ignored or replaced. + // Headers which can have multiple values (according to RFC2616) can be specified using comma-separated values. + // The size of the headers must be less than 80KB. Queue-level headers to override headers of all the tasks in the queue. + // Structure is documented below. + HeaderOverrides []HeaderOverridesInitParameters `json:"headerOverrides,omitempty" tf:"header_overrides,omitempty"` + + // If specified, an OAuth token is generated and attached as the Authorization header in the HTTP request. + // This type of authorization should generally be used only when calling Google APIs hosted on *.googleapis.com. + // Note that both the service account email and the scope MUST be specified when using the queue-level authorization override. + // Structure is documented below. + OAuthToken *OAuthTokenInitParameters `json:"oauthToken,omitempty" tf:"oauth_token,omitempty"` + + // If specified, an OIDC token is generated and attached as an Authorization header in the HTTP request. + // This type of authorization can be used for many scenarios, including calling Cloud Run, or endpoints where you intend to validate the token yourself. + // Note that both the service account email and the audience MUST be specified when using the queue-level authorization override. + // Structure is documented below. 
+ OidcToken *OidcTokenInitParameters `json:"oidcToken,omitempty" tf:"oidc_token,omitempty"` + + // URI override. + // When specified, overrides the execution URI for all the tasks in the queue. + // Structure is documented below. + URIOverride *URIOverrideInitParameters `json:"uriOverride,omitempty" tf:"uri_override,omitempty"` +} + +type HTTPTargetObservation struct { + + // The HTTP method to use for the request. + // When specified, it overrides HttpRequest for the task. + // Note that if the value is set to GET the body of the task will be ignored at execution time. + // Possible values are: HTTP_METHOD_UNSPECIFIED, POST, GET, HEAD, PUT, DELETE, PATCH, OPTIONS. + HTTPMethod *string `json:"httpMethod,omitempty" tf:"http_method,omitempty"` + + // HTTP target headers. + // This map contains the header field names and values. + // Headers will be set when running the CreateTask and/or BufferTask. + // These headers represent a subset of the headers that will be configured for the task's HTTP request. + // Some HTTP request headers will be ignored or replaced. + // Headers which can have multiple values (according to RFC2616) can be specified using comma-separated values. + // The size of the headers must be less than 80KB. Queue-level headers to override headers of all the tasks in the queue. + // Structure is documented below. + HeaderOverrides []HeaderOverridesObservation `json:"headerOverrides,omitempty" tf:"header_overrides,omitempty"` + + // If specified, an OAuth token is generated and attached as the Authorization header in the HTTP request. + // This type of authorization should generally be used only when calling Google APIs hosted on *.googleapis.com. + // Note that both the service account email and the scope MUST be specified when using the queue-level authorization override. + // Structure is documented below. 
+ OAuthToken *OAuthTokenObservation `json:"oauthToken,omitempty" tf:"oauth_token,omitempty"` + + // If specified, an OIDC token is generated and attached as an Authorization header in the HTTP request. + // This type of authorization can be used for many scenarios, including calling Cloud Run, or endpoints where you intend to validate the token yourself. + // Note that both the service account email and the audience MUST be specified when using the queue-level authorization override. + // Structure is documented below. + OidcToken *OidcTokenObservation `json:"oidcToken,omitempty" tf:"oidc_token,omitempty"` + + // URI override. + // When specified, overrides the execution URI for all the tasks in the queue. + // Structure is documented below. + URIOverride *URIOverrideObservation `json:"uriOverride,omitempty" tf:"uri_override,omitempty"` +} + +type HTTPTargetParameters struct { + + // The HTTP method to use for the request. + // When specified, it overrides HttpRequest for the task. + // Note that if the value is set to GET the body of the task will be ignored at execution time. + // Possible values are: HTTP_METHOD_UNSPECIFIED, POST, GET, HEAD, PUT, DELETE, PATCH, OPTIONS. + // +kubebuilder:validation:Optional + HTTPMethod *string `json:"httpMethod,omitempty" tf:"http_method,omitempty"` + + // HTTP target headers. + // This map contains the header field names and values. + // Headers will be set when running the CreateTask and/or BufferTask. + // These headers represent a subset of the headers that will be configured for the task's HTTP request. + // Some HTTP request headers will be ignored or replaced. + // Headers which can have multiple values (according to RFC2616) can be specified using comma-separated values. + // The size of the headers must be less than 80KB. Queue-level headers to override headers of all the tasks in the queue. + // Structure is documented below. 
+ // +kubebuilder:validation:Optional + HeaderOverrides []HeaderOverridesParameters `json:"headerOverrides,omitempty" tf:"header_overrides,omitempty"` + + // If specified, an OAuth token is generated and attached as the Authorization header in the HTTP request. + // This type of authorization should generally be used only when calling Google APIs hosted on *.googleapis.com. + // Note that both the service account email and the scope MUST be specified when using the queue-level authorization override. + // Structure is documented below. + // +kubebuilder:validation:Optional + OAuthToken *OAuthTokenParameters `json:"oauthToken,omitempty" tf:"oauth_token,omitempty"` + + // If specified, an OIDC token is generated and attached as an Authorization header in the HTTP request. + // This type of authorization can be used for many scenarios, including calling Cloud Run, or endpoints where you intend to validate the token yourself. + // Note that both the service account email and the audience MUST be specified when using the queue-level authorization override. + // Structure is documented below. + // +kubebuilder:validation:Optional + OidcToken *OidcTokenParameters `json:"oidcToken,omitempty" tf:"oidc_token,omitempty"` + + // URI override. + // When specified, overrides the execution URI for all the tasks in the queue. + // Structure is documented below. + // +kubebuilder:validation:Optional + URIOverride *URIOverrideParameters `json:"uriOverride,omitempty" tf:"uri_override,omitempty"` +} + +type HeaderInitParameters struct { + + // The Key of the header. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The Value of the header. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type HeaderObservation struct { + + // The Key of the header. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The Value of the header. 
+	Value *string `json:"value,omitempty" tf:"value,omitempty"`
+}
+
+type HeaderOverridesInitParameters struct {
+
+	// Header embodying a key and a value.
+	// Structure is documented below.
+	Header *HeaderInitParameters `json:"header,omitempty" tf:"header,omitempty"`
+}
+
+type HeaderOverridesObservation struct {
+
+	// Header embodying a key and a value.
+	// Structure is documented below.
+	Header *HeaderObservation `json:"header,omitempty" tf:"header,omitempty"`
+}
+
+type HeaderOverridesParameters struct {
+
+	// Header embodying a key and a value.
+	// Structure is documented below.
+	// +kubebuilder:validation:Optional
+	Header *HeaderParameters `json:"header" tf:"header,omitempty"`
+}
+
+type HeaderParameters struct {
+
+	// The Key of the header.
+	// +kubebuilder:validation:Optional
+	Key *string `json:"key" tf:"key,omitempty"`
+
+	// The Value of the header.
+	// +kubebuilder:validation:Optional
+	Value *string `json:"value" tf:"value,omitempty"`
+}
+
+type OAuthTokenInitParameters struct {
+
+	// OAuth scope to be used for generating OAuth access token.
+	// If not specified, "https://www.googleapis.com/auth/cloud-platform" will be used.
+	Scope *string `json:"scope,omitempty" tf:"scope,omitempty"`
+
+	// Service account email to be used for generating OAuth access token.
+	// The service account must be within the same project as the queue.
+	// The caller must have iam.serviceAccounts.actAs permission for the service account.
+	// +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/cloudplatform/v1beta1.ServiceAccount
+	// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("email",true)
+	ServiceAccountEmail *string `json:"serviceAccountEmail,omitempty" tf:"service_account_email,omitempty"`
+
+	// Reference to a ServiceAccount in cloudplatform to populate serviceAccountEmail.
+	// +kubebuilder:validation:Optional
+	ServiceAccountEmailRef *v1.Reference `json:"serviceAccountEmailRef,omitempty" tf:"-"`
+
+	// Selector for a ServiceAccount in cloudplatform to populate serviceAccountEmail.
+	// +kubebuilder:validation:Optional
+	ServiceAccountEmailSelector *v1.Selector `json:"serviceAccountEmailSelector,omitempty" tf:"-"`
+}
+
+type OAuthTokenObservation struct {
+
+	// OAuth scope to be used for generating OAuth access token.
+	// If not specified, "https://www.googleapis.com/auth/cloud-platform" will be used.
+	Scope *string `json:"scope,omitempty" tf:"scope,omitempty"`
+
+	// Service account email to be used for generating OAuth access token.
+	// The service account must be within the same project as the queue.
+	// The caller must have iam.serviceAccounts.actAs permission for the service account.
+	ServiceAccountEmail *string `json:"serviceAccountEmail,omitempty" tf:"service_account_email,omitempty"`
+}
+
+type OAuthTokenParameters struct {
+
+	// OAuth scope to be used for generating OAuth access token.
+	// If not specified, "https://www.googleapis.com/auth/cloud-platform" will be used.
+	// +kubebuilder:validation:Optional
+	Scope *string `json:"scope,omitempty" tf:"scope,omitempty"`
+
+	// Service account email to be used for generating OAuth access token.
+	// The service account must be within the same project as the queue.
+	// The caller must have iam.serviceAccounts.actAs permission for the service account.
+	// +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/cloudplatform/v1beta1.ServiceAccount
+	// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("email",true)
+	// +kubebuilder:validation:Optional
+	ServiceAccountEmail *string `json:"serviceAccountEmail,omitempty" tf:"service_account_email,omitempty"`
+
+	// Reference to a ServiceAccount in cloudplatform to populate serviceAccountEmail.
+ // +kubebuilder:validation:Optional + ServiceAccountEmailRef *v1.Reference `json:"serviceAccountEmailRef,omitempty" tf:"-"` + + // Selector for a ServiceAccount in cloudplatform to populate serviceAccountEmail. + // +kubebuilder:validation:Optional + ServiceAccountEmailSelector *v1.Selector `json:"serviceAccountEmailSelector,omitempty" tf:"-"` +} + +type OidcTokenInitParameters struct { + + // Audience to be used when generating OIDC token. If not specified, the URI specified in target will be used. + Audience *string `json:"audience,omitempty" tf:"audience,omitempty"` + + // Service account email to be used for generating OIDC token. + // The service account must be within the same project as the queue. + // The caller must have iam.serviceAccounts.actAs permission for the service account. + // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/cloudplatform/v1beta1.ServiceAccount + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("email",true) + ServiceAccountEmail *string `json:"serviceAccountEmail,omitempty" tf:"service_account_email,omitempty"` + + // Reference to a ServiceAccount in cloudplatform to populate serviceAccountEmail. + // +kubebuilder:validation:Optional + ServiceAccountEmailRef *v1.Reference `json:"serviceAccountEmailRef,omitempty" tf:"-"` + + // Selector for a ServiceAccount in cloudplatform to populate serviceAccountEmail. + // +kubebuilder:validation:Optional + ServiceAccountEmailSelector *v1.Selector `json:"serviceAccountEmailSelector,omitempty" tf:"-"` +} + +type OidcTokenObservation struct { + + // Audience to be used when generating OIDC token. If not specified, the URI specified in target will be used. + Audience *string `json:"audience,omitempty" tf:"audience,omitempty"` + + // Service account email to be used for generating OIDC token. + // The service account must be within the same project as the queue. 
+ // The caller must have iam.serviceAccounts.actAs permission for the service account. + ServiceAccountEmail *string `json:"serviceAccountEmail,omitempty" tf:"service_account_email,omitempty"` +} + +type OidcTokenParameters struct { + + // Audience to be used when generating OIDC token. If not specified, the URI specified in target will be used. + // +kubebuilder:validation:Optional + Audience *string `json:"audience,omitempty" tf:"audience,omitempty"` + + // Service account email to be used for generating OIDC token. + // The service account must be within the same project as the queue. + // The caller must have iam.serviceAccounts.actAs permission for the service account. + // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/cloudplatform/v1beta1.ServiceAccount + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("email",true) + // +kubebuilder:validation:Optional + ServiceAccountEmail *string `json:"serviceAccountEmail,omitempty" tf:"service_account_email,omitempty"` + + // Reference to a ServiceAccount in cloudplatform to populate serviceAccountEmail. + // +kubebuilder:validation:Optional + ServiceAccountEmailRef *v1.Reference `json:"serviceAccountEmailRef,omitempty" tf:"-"` + + // Selector for a ServiceAccount in cloudplatform to populate serviceAccountEmail. + // +kubebuilder:validation:Optional + ServiceAccountEmailSelector *v1.Selector `json:"serviceAccountEmailSelector,omitempty" tf:"-"` +} + +type PathOverrideInitParameters struct { + + // The URI path (e.g., /users/1234). Default is an empty string. + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type PathOverrideObservation struct { + + // The URI path (e.g., /users/1234). Default is an empty string. + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type PathOverrideParameters struct { + + // The URI path (e.g., /users/1234). Default is an empty string. 
+ // +kubebuilder:validation:Optional + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type QueryOverrideInitParameters struct { + + // The query parameters (e.g., qparam1=123&qparam2=456). Default is an empty string. + QueryParams *string `json:"queryParams,omitempty" tf:"query_params,omitempty"` +} + +type QueryOverrideObservation struct { + + // The query parameters (e.g., qparam1=123&qparam2=456). Default is an empty string. + QueryParams *string `json:"queryParams,omitempty" tf:"query_params,omitempty"` +} + +type QueryOverrideParameters struct { + + // The query parameters (e.g., qparam1=123&qparam2=456). Default is an empty string. + // +kubebuilder:validation:Optional + QueryParams *string `json:"queryParams,omitempty" tf:"query_params,omitempty"` +} + type QueueInitParameters struct { // Overrides for task-level appEngineRouting. These settings apply only @@ -72,6 +387,10 @@ type QueueInitParameters struct { // Structure is documented below. AppEngineRoutingOverride *AppEngineRoutingOverrideInitParameters `json:"appEngineRoutingOverride,omitempty" tf:"app_engine_routing_override,omitempty"` + // Modifies HTTP target for HTTP tasks. + // Structure is documented below. + HTTPTarget *HTTPTargetInitParameters `json:"httpTarget,omitempty" tf:"http_target,omitempty"` + // The ID of the project in which the resource belongs. // If it is not provided, the provider project is used. // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/cloudplatform/v1beta1.Project @@ -105,6 +424,10 @@ type QueueObservation struct { // Structure is documented below. AppEngineRoutingOverride *AppEngineRoutingOverrideObservation `json:"appEngineRoutingOverride,omitempty" tf:"app_engine_routing_override,omitempty"` + // Modifies HTTP target for HTTP tasks. + // Structure is documented below. 
+ HTTPTarget *HTTPTargetObservation `json:"httpTarget,omitempty" tf:"http_target,omitempty"` + // an identifier for the resource with format projects/{{project}}/locations/{{location}}/queues/{{name}} ID *string `json:"id,omitempty" tf:"id,omitempty"` @@ -136,6 +459,11 @@ type QueueParameters struct { // +kubebuilder:validation:Optional AppEngineRoutingOverride *AppEngineRoutingOverrideParameters `json:"appEngineRoutingOverride,omitempty" tf:"app_engine_routing_override,omitempty"` + // Modifies HTTP target for HTTP tasks. + // Structure is documented below. + // +kubebuilder:validation:Optional + HTTPTarget *HTTPTargetParameters `json:"httpTarget,omitempty" tf:"http_target,omitempty"` + // The location of the queue // +kubebuilder:validation:Required Location *string `json:"location" tf:"location,omitempty"` @@ -357,6 +685,126 @@ type StackdriverLoggingConfigParameters struct { SamplingRatio *float64 `json:"samplingRatio" tf:"sampling_ratio,omitempty"` } +type URIOverrideInitParameters struct { + + // Host override. + // When specified, replaces the host part of the task URL. + // For example, if the task URL is "https://www.google.com", and host value + // is set to "example.net", the overridden URI will be changed to "https://example.net". + // Host value cannot be an empty string (INVALID_ARGUMENT). + Host *string `json:"host,omitempty" tf:"host,omitempty"` + + // URI path. + // When specified, replaces the existing path of the task URL. + // Setting the path value to an empty string clears the URI path segment. + // Structure is documented below. + PathOverride *PathOverrideInitParameters `json:"pathOverride,omitempty" tf:"path_override,omitempty"` + + // Port override. + // When specified, replaces the port part of the task URI. + // For instance, for a URI http://www.google.com/foo and port=123, the overridden URI becomes http://www.google.com:123/foo. + // Note that the port value must be a positive integer. 
+ // Setting the port to 0 (Zero) clears the URI port. + Port *string `json:"port,omitempty" tf:"port,omitempty"` + + // URI query. + // When specified, replaces the query part of the task URI. Setting the query value to an empty string clears the URI query segment. + // Structure is documented below. + QueryOverride *QueryOverrideInitParameters `json:"queryOverride,omitempty" tf:"query_override,omitempty"` + + // Scheme override. + // When specified, the task URI scheme is replaced by the provided value (HTTP or HTTPS). + // Possible values are: HTTP, HTTPS. + Scheme *string `json:"scheme,omitempty" tf:"scheme,omitempty"` + + // URI Override Enforce Mode + // When specified, determines the Target UriOverride mode. If not specified, it defaults to ALWAYS. + // Possible values are: ALWAYS, IF_NOT_EXISTS. + URIOverrideEnforceMode *string `json:"uriOverrideEnforceMode,omitempty" tf:"uri_override_enforce_mode,omitempty"` +} + +type URIOverrideObservation struct { + + // Host override. + // When specified, replaces the host part of the task URL. + // For example, if the task URL is "https://www.google.com", and host value + // is set to "example.net", the overridden URI will be changed to "https://example.net". + // Host value cannot be an empty string (INVALID_ARGUMENT). + Host *string `json:"host,omitempty" tf:"host,omitempty"` + + // URI path. + // When specified, replaces the existing path of the task URL. + // Setting the path value to an empty string clears the URI path segment. + // Structure is documented below. + PathOverride *PathOverrideObservation `json:"pathOverride,omitempty" tf:"path_override,omitempty"` + + // Port override. + // When specified, replaces the port part of the task URI. + // For instance, for a URI http://www.google.com/foo and port=123, the overridden URI becomes http://www.google.com:123/foo. + // Note that the port value must be a positive integer. + // Setting the port to 0 (Zero) clears the URI port. 
+ Port *string `json:"port,omitempty" tf:"port,omitempty"` + + // URI query. + // When specified, replaces the query part of the task URI. Setting the query value to an empty string clears the URI query segment. + // Structure is documented below. + QueryOverride *QueryOverrideObservation `json:"queryOverride,omitempty" tf:"query_override,omitempty"` + + // Scheme override. + // When specified, the task URI scheme is replaced by the provided value (HTTP or HTTPS). + // Possible values are: HTTP, HTTPS. + Scheme *string `json:"scheme,omitempty" tf:"scheme,omitempty"` + + // URI Override Enforce Mode + // When specified, determines the Target UriOverride mode. If not specified, it defaults to ALWAYS. + // Possible values are: ALWAYS, IF_NOT_EXISTS. + URIOverrideEnforceMode *string `json:"uriOverrideEnforceMode,omitempty" tf:"uri_override_enforce_mode,omitempty"` +} + +type URIOverrideParameters struct { + + // Host override. + // When specified, replaces the host part of the task URL. + // For example, if the task URL is "https://www.google.com", and host value + // is set to "example.net", the overridden URI will be changed to "https://example.net". + // Host value cannot be an empty string (INVALID_ARGUMENT). + // +kubebuilder:validation:Optional + Host *string `json:"host,omitempty" tf:"host,omitempty"` + + // URI path. + // When specified, replaces the existing path of the task URL. + // Setting the path value to an empty string clears the URI path segment. + // Structure is documented below. + // +kubebuilder:validation:Optional + PathOverride *PathOverrideParameters `json:"pathOverride,omitempty" tf:"path_override,omitempty"` + + // Port override. + // When specified, replaces the port part of the task URI. + // For instance, for a URI http://www.google.com/foo and port=123, the overridden URI becomes http://www.google.com:123/foo. + // Note that the port value must be a positive integer. + // Setting the port to 0 (Zero) clears the URI port. 
+ // +kubebuilder:validation:Optional + Port *string `json:"port,omitempty" tf:"port,omitempty"` + + // URI query. + // When specified, replaces the query part of the task URI. Setting the query value to an empty string clears the URI query segment. + // Structure is documented below. + // +kubebuilder:validation:Optional + QueryOverride *QueryOverrideParameters `json:"queryOverride,omitempty" tf:"query_override,omitempty"` + + // Scheme override. + // When specified, the task URI scheme is replaced by the provided value (HTTP or HTTPS). + // Possible values are: HTTP, HTTPS. + // +kubebuilder:validation:Optional + Scheme *string `json:"scheme,omitempty" tf:"scheme,omitempty"` + + // URI Override Enforce Mode + // When specified, determines the Target UriOverride mode. If not specified, it defaults to ALWAYS. + // Possible values are: ALWAYS, IF_NOT_EXISTS. + // +kubebuilder:validation:Optional + URIOverrideEnforceMode *string `json:"uriOverrideEnforceMode,omitempty" tf:"uri_override_enforce_mode,omitempty"` +} + // QueueSpec defines the desired state of Queue type QueueSpec struct { v1.ResourceSpec `json:",inline"` diff --git a/apis/composer/v1beta2/zz_environment_types.go b/apis/composer/v1beta2/zz_environment_types.go index f9bb543db..f1a4ed43f 100755 --- a/apis/composer/v1beta2/zz_environment_types.go +++ b/apis/composer/v1beta2/zz_environment_types.go @@ -13,6 +13,27 @@ import ( v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" ) +type AirflowMetadataRetentionConfigInitParameters struct { + RetentionDays *float64 `json:"retentionDays,omitempty" tf:"retention_days,omitempty"` + + RetentionMode *string `json:"retentionMode,omitempty" tf:"retention_mode,omitempty"` +} + +type AirflowMetadataRetentionConfigObservation struct { + RetentionDays *float64 `json:"retentionDays,omitempty" tf:"retention_days,omitempty"` + + RetentionMode *string `json:"retentionMode,omitempty" tf:"retention_mode,omitempty"` +} + +type AirflowMetadataRetentionConfigParameters 
struct { + + // +kubebuilder:validation:Optional + RetentionDays *float64 `json:"retentionDays,omitempty" tf:"retention_days,omitempty"` + + // +kubebuilder:validation:Optional + RetentionMode *string `json:"retentionMode,omitempty" tf:"retention_mode,omitempty"` +} + type AllowedIPRangeInitParameters struct { // A description of this ip range. @@ -80,9 +101,28 @@ type CidrBlocksParameters struct { DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` } +type CloudDataLineageIntegrationInitParameters struct { + + // When enabled, Cloud Composer periodically saves snapshots of your environment to a Cloud Storage bucket. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type CloudDataLineageIntegrationObservation struct { + + // When enabled, Cloud Composer periodically saves snapshots of your environment to a Cloud Storage bucket. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type CloudDataLineageIntegrationParameters struct { + + // When enabled, Cloud Composer periodically saves snapshots of your environment to a Cloud Storage bucket. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` +} + type ConfigInitParameters struct { - // Configuration setting for Airflow database retention mechanism. Structure is + // Configuration setting for airflow data retention mechanism. Structure is // documented below. DataRetentionConfig *DataRetentionConfigInitParameters `json:"dataRetentionConfig,omitempty" tf:"data_retention_config,omitempty"` @@ -90,6 +130,13 @@ type ConfigInitParameters struct { // by Apache Airflow software. DatabaseConfig *DatabaseConfigInitParameters `json:"databaseConfig,omitempty" tf:"database_config,omitempty"` + // If true, builds performed during operations that install Python packages have only private connectivity to Google services. + // If false, the builds also have access to the internet. 
+ EnablePrivateBuildsOnly *bool `json:"enablePrivateBuildsOnly,omitempty" tf:"enable_private_builds_only,omitempty"` + + // If true, a private Composer environment will be created. + EnablePrivateEnvironment *bool `json:"enablePrivateEnvironment,omitempty" tf:"enable_private_environment,omitempty"` + // The encryption options for the Cloud Composer environment and its // dependencies. EncryptionConfig *EncryptionConfigInitParameters `json:"encryptionConfig,omitempty" tf:"encryption_config,omitempty"` @@ -156,7 +203,7 @@ type ConfigObservation struct { // reside in a simulated directory with this prefix. DagGcsPrefix *string `json:"dagGcsPrefix,omitempty" tf:"dag_gcs_prefix,omitempty"` - // Configuration setting for Airflow database retention mechanism. Structure is + // Configuration setting for airflow data retention mechanism. Structure is // documented below. DataRetentionConfig *DataRetentionConfigObservation `json:"dataRetentionConfig,omitempty" tf:"data_retention_config,omitempty"` @@ -164,6 +211,13 @@ type ConfigObservation struct { // by Apache Airflow software. DatabaseConfig *DatabaseConfigObservation `json:"databaseConfig,omitempty" tf:"database_config,omitempty"` + // If true, builds performed during operations that install Python packages have only private connectivity to Google services. + // If false, the builds also have access to the internet. + EnablePrivateBuildsOnly *bool `json:"enablePrivateBuildsOnly,omitempty" tf:"enable_private_builds_only,omitempty"` + + // If true, a private Composer environment will be created. + EnablePrivateEnvironment *bool `json:"enablePrivateEnvironment,omitempty" tf:"enable_private_environment,omitempty"` + // The encryption options for the Cloud Composer environment and its // dependencies. 
EncryptionConfig *EncryptionConfigObservation `json:"encryptionConfig,omitempty" tf:"encryption_config,omitempty"` @@ -222,7 +276,7 @@ type ConfigObservation struct { type ConfigParameters struct { - // Configuration setting for Airflow database retention mechanism. Structure is + // Configuration setting for airflow data retention mechanism. Structure is // documented below. // +kubebuilder:validation:Optional DataRetentionConfig *DataRetentionConfigParameters `json:"dataRetentionConfig,omitempty" tf:"data_retention_config,omitempty"` @@ -232,6 +286,15 @@ type ConfigParameters struct { // +kubebuilder:validation:Optional DatabaseConfig *DatabaseConfigParameters `json:"databaseConfig,omitempty" tf:"database_config,omitempty"` + // If true, builds performed during operations that install Python packages have only private connectivity to Google services. + // If false, the builds also have access to the internet. + // +kubebuilder:validation:Optional + EnablePrivateBuildsOnly *bool `json:"enablePrivateBuildsOnly,omitempty" tf:"enable_private_builds_only,omitempty"` + + // If true, a private Composer environment will be created. + // +kubebuilder:validation:Optional + EnablePrivateEnvironment *bool `json:"enablePrivateEnvironment,omitempty" tf:"enable_private_environment,omitempty"` + // The encryption options for the Cloud Composer environment and its // dependencies. // +kubebuilder:validation:Optional @@ -298,8 +361,60 @@ type ConfigParameters struct { WorkloadsConfig *WorkloadsConfigParameters `json:"workloadsConfig,omitempty" tf:"workloads_config,omitempty"` } +type DagProcessorInitParameters struct { + + // The number of CPUs for a single Airflow worker. + CPU *float64 `json:"cpu,omitempty" tf:"cpu,omitempty"` + + // The number of Airflow triggerers. + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // The amount of memory (GB) for a single Airflow worker. 
+ MemoryGb *float64 `json:"memoryGb,omitempty" tf:"memory_gb,omitempty"` + + // The amount of storage (GB) for a single Airflow worker. + StorageGb *float64 `json:"storageGb,omitempty" tf:"storage_gb,omitempty"` +} + +type DagProcessorObservation struct { + + // The number of CPUs for a single Airflow worker. + CPU *float64 `json:"cpu,omitempty" tf:"cpu,omitempty"` + + // The number of Airflow triggerers. + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // The amount of memory (GB) for a single Airflow worker. + MemoryGb *float64 `json:"memoryGb,omitempty" tf:"memory_gb,omitempty"` + + // The amount of storage (GB) for a single Airflow worker. + StorageGb *float64 `json:"storageGb,omitempty" tf:"storage_gb,omitempty"` +} + +type DagProcessorParameters struct { + + // The number of CPUs for a single Airflow worker. + // +kubebuilder:validation:Optional + CPU *float64 `json:"cpu,omitempty" tf:"cpu,omitempty"` + + // The number of Airflow triggerers. + // +kubebuilder:validation:Optional + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // The amount of memory (GB) for a single Airflow worker. + // +kubebuilder:validation:Optional + MemoryGb *float64 `json:"memoryGb,omitempty" tf:"memory_gb,omitempty"` + + // The amount of storage (GB) for a single Airflow worker. + // +kubebuilder:validation:Optional + StorageGb *float64 `json:"storageGb,omitempty" tf:"storage_gb,omitempty"` +} + type DataRetentionConfigInitParameters struct { + // Configuration parameters for this environment Structure is documented below. + AirflowMetadataRetentionConfig []AirflowMetadataRetentionConfigInitParameters `json:"airflowMetadataRetentionConfig,omitempty" tf:"airflow_metadata_retention_config,omitempty"` + // The configuration setting for Task Logs. Structure is // documented below. 
TaskLogsRetentionConfig []TaskLogsRetentionConfigInitParameters `json:"taskLogsRetentionConfig,omitempty" tf:"task_logs_retention_config,omitempty"` @@ -307,6 +422,9 @@ type DataRetentionConfigInitParameters struct { type DataRetentionConfigObservation struct { + // Configuration parameters for this environment Structure is documented below. + AirflowMetadataRetentionConfig []AirflowMetadataRetentionConfigObservation `json:"airflowMetadataRetentionConfig,omitempty" tf:"airflow_metadata_retention_config,omitempty"` + // The configuration setting for Task Logs. Structure is // documented below. TaskLogsRetentionConfig []TaskLogsRetentionConfigObservation `json:"taskLogsRetentionConfig,omitempty" tf:"task_logs_retention_config,omitempty"` @@ -314,10 +432,14 @@ type DataRetentionConfigObservation struct { type DataRetentionConfigParameters struct { + // Configuration parameters for this environment Structure is documented below. + // +kubebuilder:validation:Optional + AirflowMetadataRetentionConfig []AirflowMetadataRetentionConfigParameters `json:"airflowMetadataRetentionConfig,omitempty" tf:"airflow_metadata_retention_config,omitempty"` + // The configuration setting for Task Logs. Structure is // documented below. // +kubebuilder:validation:Optional - TaskLogsRetentionConfig []TaskLogsRetentionConfigParameters `json:"taskLogsRetentionConfig" tf:"task_logs_retention_config,omitempty"` + TaskLogsRetentionConfig []TaskLogsRetentionConfigParameters `json:"taskLogsRetentionConfig,omitempty" tf:"task_logs_retention_config,omitempty"` } type DatabaseConfigInitParameters struct { @@ -515,12 +637,12 @@ type IPAllocationPolicyInitParameters struct { // Set to a CIDR notation (e.g. 10.96.0.0/14) from the RFC-1918 private networks // (e.g. 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16) to pick a specific range to use. // Specify either cluster_secondary_range_name or cluster_ipv4_cidr_block but not both. 
- ClusterIPv4CidrBlock *string `json:"clusterIpv4CidrBlock,omitempty" tf:"cluster_ipv4_cidr_block"` + ClusterIPv4CidrBlock *string `json:"clusterIpv4CidrBlock,omitempty" tf:"cluster_ipv4_cidr_block,omitempty"` // The name of the cluster's secondary range used to allocate IP addresses to pods. // Specify either cluster_secondary_range_name or cluster_ipv4_cidr_block but not both. // For Cloud Composer 1 environments, this field is applicable only when use_ip_aliases is true. - ClusterSecondaryRangeName *string `json:"clusterSecondaryRangeName,omitempty" tf:"cluster_secondary_range_name"` + ClusterSecondaryRangeName *string `json:"clusterSecondaryRangeName,omitempty" tf:"cluster_secondary_range_name,omitempty"` // The IP address range used to allocate IP addresses in this cluster. // For Cloud Composer 1 environments, this field is applicable only when use_ip_aliases is true. @@ -529,16 +651,16 @@ type IPAllocationPolicyInitParameters struct { // Set to a CIDR notation (e.g. 10.96.0.0/14) from the RFC-1918 private networks // (e.g. 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16) to pick a specific range to use. // Specify either services_secondary_range_name or services_ipv4_cidr_block but not both. - ServicesIPv4CidrBlock *string `json:"servicesIpv4CidrBlock,omitempty" tf:"services_ipv4_cidr_block"` + ServicesIPv4CidrBlock *string `json:"servicesIpv4CidrBlock,omitempty" tf:"services_ipv4_cidr_block,omitempty"` // The name of the services' secondary range used to allocate IP addresses to the cluster. // Specify either services_secondary_range_name or services_ipv4_cidr_block but not both. // For Cloud Composer 1 environments, this field is applicable only when use_ip_aliases is true. 
- ServicesSecondaryRangeName *string `json:"servicesSecondaryRangeName,omitempty" tf:"services_secondary_range_name"` + ServicesSecondaryRangeName *string `json:"servicesSecondaryRangeName,omitempty" tf:"services_secondary_range_name,omitempty"` // Whether or not to enable Alias IPs in the GKE cluster. If true, a VPC-native cluster is created. // Defaults to true if the ip_allocation_policy block is present in config. - UseIPAliases *bool `json:"useIpAliases,omitempty" tf:"use_ip_aliases"` + UseIPAliases *bool `json:"useIpAliases,omitempty" tf:"use_ip_aliases,omitempty"` } type IPAllocationPolicyObservation struct { @@ -586,13 +708,13 @@ type IPAllocationPolicyParameters struct { // (e.g. 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16) to pick a specific range to use. // Specify either cluster_secondary_range_name or cluster_ipv4_cidr_block but not both. // +kubebuilder:validation:Optional - ClusterIPv4CidrBlock *string `json:"clusterIpv4CidrBlock,omitempty" tf:"cluster_ipv4_cidr_block"` + ClusterIPv4CidrBlock *string `json:"clusterIpv4CidrBlock,omitempty" tf:"cluster_ipv4_cidr_block,omitempty"` // The name of the cluster's secondary range used to allocate IP addresses to pods. // Specify either cluster_secondary_range_name or cluster_ipv4_cidr_block but not both. // For Cloud Composer 1 environments, this field is applicable only when use_ip_aliases is true. // +kubebuilder:validation:Optional - ClusterSecondaryRangeName *string `json:"clusterSecondaryRangeName,omitempty" tf:"cluster_secondary_range_name"` + ClusterSecondaryRangeName *string `json:"clusterSecondaryRangeName,omitempty" tf:"cluster_secondary_range_name,omitempty"` // The IP address range used to allocate IP addresses in this cluster. // For Cloud Composer 1 environments, this field is applicable only when use_ip_aliases is true. @@ -602,18 +724,18 @@ type IPAllocationPolicyParameters struct { // (e.g. 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16) to pick a specific range to use. 
// Specify either services_secondary_range_name or services_ipv4_cidr_block but not both. // +kubebuilder:validation:Optional - ServicesIPv4CidrBlock *string `json:"servicesIpv4CidrBlock,omitempty" tf:"services_ipv4_cidr_block"` + ServicesIPv4CidrBlock *string `json:"servicesIpv4CidrBlock,omitempty" tf:"services_ipv4_cidr_block,omitempty"` // The name of the services' secondary range used to allocate IP addresses to the cluster. // Specify either services_secondary_range_name or services_ipv4_cidr_block but not both. // For Cloud Composer 1 environments, this field is applicable only when use_ip_aliases is true. // +kubebuilder:validation:Optional - ServicesSecondaryRangeName *string `json:"servicesSecondaryRangeName,omitempty" tf:"services_secondary_range_name"` + ServicesSecondaryRangeName *string `json:"servicesSecondaryRangeName,omitempty" tf:"services_secondary_range_name,omitempty"` // Whether or not to enable Alias IPs in the GKE cluster. If true, a VPC-native cluster is created. // Defaults to true if the ip_allocation_policy block is present in config. // +kubebuilder:validation:Optional - UseIPAliases *bool `json:"useIpAliases,omitempty" tf:"use_ip_aliases"` + UseIPAliases *bool `json:"useIpAliases,omitempty" tf:"use_ip_aliases,omitempty"` } type MaintenanceWindowInitParameters struct { @@ -695,6 +817,15 @@ type MasterAuthorizedNetworksConfigParameters struct { type NodeConfigInitParameters struct { + // /20 IPv4 cidr range that will be used by Composer internal components. + // Cannot be updated. + ComposerInternalIPv4CidrBlock *string `json:"composerInternalIpv4CidrBlock,omitempty" tf:"composer_internal_ipv4_cidr_block,omitempty"` + + // PSC (Private Service Connect) Network entry point. Customers can pre-create the Network Attachment + // and point Cloud Composer environment to use. It is possible to share network attachment among many environments, + // provided enough IP addresses are available. 
+ ComposerNetworkAttachment *string `json:"composerNetworkAttachment,omitempty" tf:"composer_network_attachment,omitempty"` + // The disk size in GB used for node VMs. Minimum size is 20GB. // If unspecified, defaults to 100GB. Cannot be updated. DiskSizeGb *float64 `json:"diskSizeGb,omitempty" tf:"disk_size_gb,omitempty"` @@ -786,6 +917,15 @@ type NodeConfigInitParameters struct { type NodeConfigObservation struct { + // /20 IPv4 cidr range that will be used by Composer internal components. + // Cannot be updated. + ComposerInternalIPv4CidrBlock *string `json:"composerInternalIpv4CidrBlock,omitempty" tf:"composer_internal_ipv4_cidr_block,omitempty"` + + // PSC (Private Service Connect) Network entry point. Customers can pre-create the Network Attachment + // and point Cloud Composer environment to use. It is possible to share network attachment among many environments, + // provided enough IP addresses are available. + ComposerNetworkAttachment *string `json:"composerNetworkAttachment,omitempty" tf:"composer_network_attachment,omitempty"` + // The disk size in GB used for node VMs. Minimum size is 20GB. // If unspecified, defaults to 100GB. Cannot be updated. DiskSizeGb *float64 `json:"diskSizeGb,omitempty" tf:"disk_size_gb,omitempty"` @@ -847,6 +987,17 @@ type NodeConfigObservation struct { type NodeConfigParameters struct { + // /20 IPv4 cidr range that will be used by Composer internal components. + // Cannot be updated. + // +kubebuilder:validation:Optional + ComposerInternalIPv4CidrBlock *string `json:"composerInternalIpv4CidrBlock,omitempty" tf:"composer_internal_ipv4_cidr_block,omitempty"` + + // PSC (Private Service Connect) Network entry point. Customers can pre-create the Network Attachment + // and point Cloud Composer environment to use. It is possible to share network attachment among many environments, + // provided enough IP addresses are available. 
+ // +kubebuilder:validation:Optional + ComposerNetworkAttachment *string `json:"composerNetworkAttachment,omitempty" tf:"composer_network_attachment,omitempty"` + // The disk size in GB used for node VMs. Minimum size is 20GB. // If unspecified, defaults to 100GB. Cannot be updated. // +kubebuilder:validation:Optional @@ -1189,6 +1340,10 @@ type SoftwareConfigInitParameters struct { // +mapType=granular AirflowConfigOverrides map[string]*string `json:"airflowConfigOverrides,omitempty" tf:"airflow_config_overrides,omitempty"` + // The configuration for Cloud Data Lineage integration. Structure is + // documented below. + CloudDataLineageIntegration *CloudDataLineageIntegrationInitParameters `json:"cloudDataLineageIntegration,omitempty" tf:"cloud_data_lineage_integration,omitempty"` + // Additional environment variables to provide to the Apache Airflow scheduler, worker, and webserver processes. // Environment variable names must match the regular expression [a-zA-Z_][a-zA-Z0-9_]*. // They cannot specify Apache Airflow software configuration overrides (they cannot match the regular expression @@ -1212,6 +1367,9 @@ type SoftwareConfigInitParameters struct { // The number of schedulers for Airflow. SchedulerCount *float64 `json:"schedulerCount,omitempty" tf:"scheduler_count,omitempty"` + + // Web server plugins configuration. Can be either 'ENABLED' or 'DISABLED'. Defaults to 'ENABLED'. + WebServerPluginsMode *string `json:"webServerPluginsMode,omitempty" tf:"web_server_plugins_mode,omitempty"` } type SoftwareConfigObservation struct { @@ -1221,6 +1379,10 @@ type SoftwareConfigObservation struct { // +mapType=granular AirflowConfigOverrides map[string]*string `json:"airflowConfigOverrides,omitempty" tf:"airflow_config_overrides,omitempty"` + // The configuration for Cloud Data Lineage integration. Structure is + // documented below. 
+ CloudDataLineageIntegration *CloudDataLineageIntegrationObservation `json:"cloudDataLineageIntegration,omitempty" tf:"cloud_data_lineage_integration,omitempty"` + // Additional environment variables to provide to the Apache Airflow scheduler, worker, and webserver processes. // Environment variable names must match the regular expression [a-zA-Z_][a-zA-Z0-9_]*. // They cannot specify Apache Airflow software configuration overrides (they cannot match the regular expression @@ -1244,6 +1406,9 @@ type SoftwareConfigObservation struct { // The number of schedulers for Airflow. SchedulerCount *float64 `json:"schedulerCount,omitempty" tf:"scheduler_count,omitempty"` + + // Web server plugins configuration. Can be either 'ENABLED' or 'DISABLED'. Defaults to 'ENABLED'. + WebServerPluginsMode *string `json:"webServerPluginsMode,omitempty" tf:"web_server_plugins_mode,omitempty"` } type SoftwareConfigParameters struct { @@ -1254,6 +1419,11 @@ type SoftwareConfigParameters struct { // +mapType=granular AirflowConfigOverrides map[string]*string `json:"airflowConfigOverrides,omitempty" tf:"airflow_config_overrides,omitempty"` + // The configuration for Cloud Data Lineage integration. Structure is + // documented below. + // +kubebuilder:validation:Optional + CloudDataLineageIntegration *CloudDataLineageIntegrationParameters `json:"cloudDataLineageIntegration,omitempty" tf:"cloud_data_lineage_integration,omitempty"` + // Additional environment variables to provide to the Apache Airflow scheduler, worker, and webserver processes. // Environment variable names must match the regular expression [a-zA-Z_][a-zA-Z0-9_]*. // They cannot specify Apache Airflow software configuration overrides (they cannot match the regular expression @@ -1282,6 +1452,10 @@ type SoftwareConfigParameters struct { // The number of schedulers for Airflow. 
// +kubebuilder:validation:Optional SchedulerCount *float64 `json:"schedulerCount,omitempty" tf:"scheduler_count,omitempty"` + + // Web server plugins configuration. Can be either 'ENABLED' or 'DISABLED'. Defaults to 'ENABLED'. + // +kubebuilder:validation:Optional + WebServerPluginsMode *string `json:"webServerPluginsMode,omitempty" tf:"web_server_plugins_mode,omitempty"` } type StorageConfigInitParameters struct { @@ -1523,6 +1697,9 @@ type WorkerParameters struct { type WorkloadsConfigInitParameters struct { + // Configuration for resources used by DAG processor. + DagProcessor *DagProcessorInitParameters `json:"dagProcessor,omitempty" tf:"dag_processor,omitempty"` + // Configuration for resources used by Airflow schedulers. Scheduler *SchedulerInitParameters `json:"scheduler,omitempty" tf:"scheduler,omitempty"` @@ -1538,6 +1715,9 @@ type WorkloadsConfigInitParameters struct { type WorkloadsConfigObservation struct { + // Configuration for resources used by DAG processor. + DagProcessor *DagProcessorObservation `json:"dagProcessor,omitempty" tf:"dag_processor,omitempty"` + // Configuration for resources used by Airflow schedulers. Scheduler *SchedulerObservation `json:"scheduler,omitempty" tf:"scheduler,omitempty"` @@ -1553,6 +1733,10 @@ type WorkloadsConfigObservation struct { type WorkloadsConfigParameters struct { + // Configuration for resources used by DAG processor. + // +kubebuilder:validation:Optional + DagProcessor *DagProcessorParameters `json:"dagProcessor,omitempty" tf:"dag_processor,omitempty"` + // Configuration for resources used by Airflow schedulers. 
// +kubebuilder:validation:Optional Scheduler *SchedulerParameters `json:"scheduler,omitempty" tf:"scheduler,omitempty"` diff --git a/apis/composer/v1beta2/zz_generated.deepcopy.go b/apis/composer/v1beta2/zz_generated.deepcopy.go index e7b087171..3481c9209 100644 --- a/apis/composer/v1beta2/zz_generated.deepcopy.go +++ b/apis/composer/v1beta2/zz_generated.deepcopy.go @@ -13,6 +13,81 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AirflowMetadataRetentionConfigInitParameters) DeepCopyInto(out *AirflowMetadataRetentionConfigInitParameters) { + *out = *in + if in.RetentionDays != nil { + in, out := &in.RetentionDays, &out.RetentionDays + *out = new(float64) + **out = **in + } + if in.RetentionMode != nil { + in, out := &in.RetentionMode, &out.RetentionMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AirflowMetadataRetentionConfigInitParameters. +func (in *AirflowMetadataRetentionConfigInitParameters) DeepCopy() *AirflowMetadataRetentionConfigInitParameters { + if in == nil { + return nil + } + out := new(AirflowMetadataRetentionConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AirflowMetadataRetentionConfigObservation) DeepCopyInto(out *AirflowMetadataRetentionConfigObservation) { + *out = *in + if in.RetentionDays != nil { + in, out := &in.RetentionDays, &out.RetentionDays + *out = new(float64) + **out = **in + } + if in.RetentionMode != nil { + in, out := &in.RetentionMode, &out.RetentionMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AirflowMetadataRetentionConfigObservation. 
+func (in *AirflowMetadataRetentionConfigObservation) DeepCopy() *AirflowMetadataRetentionConfigObservation { + if in == nil { + return nil + } + out := new(AirflowMetadataRetentionConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AirflowMetadataRetentionConfigParameters) DeepCopyInto(out *AirflowMetadataRetentionConfigParameters) { + *out = *in + if in.RetentionDays != nil { + in, out := &in.RetentionDays, &out.RetentionDays + *out = new(float64) + **out = **in + } + if in.RetentionMode != nil { + in, out := &in.RetentionMode, &out.RetentionMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AirflowMetadataRetentionConfigParameters. +func (in *AirflowMetadataRetentionConfigParameters) DeepCopy() *AirflowMetadataRetentionConfigParameters { + if in == nil { + return nil + } + out := new(AirflowMetadataRetentionConfigParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AllowedIPRangeInitParameters) DeepCopyInto(out *AllowedIPRangeInitParameters) { *out = *in @@ -163,6 +238,66 @@ func (in *CidrBlocksParameters) DeepCopy() *CidrBlocksParameters { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudDataLineageIntegrationInitParameters) DeepCopyInto(out *CloudDataLineageIntegrationInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudDataLineageIntegrationInitParameters. 
+func (in *CloudDataLineageIntegrationInitParameters) DeepCopy() *CloudDataLineageIntegrationInitParameters { + if in == nil { + return nil + } + out := new(CloudDataLineageIntegrationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudDataLineageIntegrationObservation) DeepCopyInto(out *CloudDataLineageIntegrationObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudDataLineageIntegrationObservation. +func (in *CloudDataLineageIntegrationObservation) DeepCopy() *CloudDataLineageIntegrationObservation { + if in == nil { + return nil + } + out := new(CloudDataLineageIntegrationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudDataLineageIntegrationParameters) DeepCopyInto(out *CloudDataLineageIntegrationParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudDataLineageIntegrationParameters. +func (in *CloudDataLineageIntegrationParameters) DeepCopy() *CloudDataLineageIntegrationParameters { + if in == nil { + return nil + } + out := new(CloudDataLineageIntegrationParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ConfigInitParameters) DeepCopyInto(out *ConfigInitParameters) { *out = *in @@ -176,6 +311,16 @@ func (in *ConfigInitParameters) DeepCopyInto(out *ConfigInitParameters) { *out = new(DatabaseConfigInitParameters) (*in).DeepCopyInto(*out) } + if in.EnablePrivateBuildsOnly != nil { + in, out := &in.EnablePrivateBuildsOnly, &out.EnablePrivateBuildsOnly + *out = new(bool) + **out = **in + } + if in.EnablePrivateEnvironment != nil { + in, out := &in.EnablePrivateEnvironment, &out.EnablePrivateEnvironment + *out = new(bool) + **out = **in + } if in.EncryptionConfig != nil { in, out := &in.EncryptionConfig, &out.EncryptionConfig *out = new(EncryptionConfigInitParameters) @@ -276,6 +421,16 @@ func (in *ConfigObservation) DeepCopyInto(out *ConfigObservation) { *out = new(DatabaseConfigObservation) (*in).DeepCopyInto(*out) } + if in.EnablePrivateBuildsOnly != nil { + in, out := &in.EnablePrivateBuildsOnly, &out.EnablePrivateBuildsOnly + *out = new(bool) + **out = **in + } + if in.EnablePrivateEnvironment != nil { + in, out := &in.EnablePrivateEnvironment, &out.EnablePrivateEnvironment + *out = new(bool) + **out = **in + } if in.EncryptionConfig != nil { in, out := &in.EncryptionConfig, &out.EncryptionConfig *out = new(EncryptionConfigObservation) @@ -371,6 +526,16 @@ func (in *ConfigParameters) DeepCopyInto(out *ConfigParameters) { *out = new(DatabaseConfigParameters) (*in).DeepCopyInto(*out) } + if in.EnablePrivateBuildsOnly != nil { + in, out := &in.EnablePrivateBuildsOnly, &out.EnablePrivateBuildsOnly + *out = new(bool) + **out = **in + } + if in.EnablePrivateEnvironment != nil { + in, out := &in.EnablePrivateEnvironment, &out.EnablePrivateEnvironment + *out = new(bool) + **out = **in + } if in.EncryptionConfig != nil { in, out := &in.EncryptionConfig, &out.EncryptionConfig *out = new(EncryptionConfigParameters) @@ -448,9 +613,121 @@ func (in *ConfigParameters) DeepCopy() *ConfigParameters { return out } +// DeepCopyInto is an autogenerated deepcopy function, 
copying the receiver, writing into out. in must be non-nil. +func (in *DagProcessorInitParameters) DeepCopyInto(out *DagProcessorInitParameters) { + *out = *in + if in.CPU != nil { + in, out := &in.CPU, &out.CPU + *out = new(float64) + **out = **in + } + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.MemoryGb != nil { + in, out := &in.MemoryGb, &out.MemoryGb + *out = new(float64) + **out = **in + } + if in.StorageGb != nil { + in, out := &in.StorageGb, &out.StorageGb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DagProcessorInitParameters. +func (in *DagProcessorInitParameters) DeepCopy() *DagProcessorInitParameters { + if in == nil { + return nil + } + out := new(DagProcessorInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DagProcessorObservation) DeepCopyInto(out *DagProcessorObservation) { + *out = *in + if in.CPU != nil { + in, out := &in.CPU, &out.CPU + *out = new(float64) + **out = **in + } + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.MemoryGb != nil { + in, out := &in.MemoryGb, &out.MemoryGb + *out = new(float64) + **out = **in + } + if in.StorageGb != nil { + in, out := &in.StorageGb, &out.StorageGb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DagProcessorObservation. +func (in *DagProcessorObservation) DeepCopy() *DagProcessorObservation { + if in == nil { + return nil + } + out := new(DagProcessorObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DagProcessorParameters) DeepCopyInto(out *DagProcessorParameters) { + *out = *in + if in.CPU != nil { + in, out := &in.CPU, &out.CPU + *out = new(float64) + **out = **in + } + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.MemoryGb != nil { + in, out := &in.MemoryGb, &out.MemoryGb + *out = new(float64) + **out = **in + } + if in.StorageGb != nil { + in, out := &in.StorageGb, &out.StorageGb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DagProcessorParameters. +func (in *DagProcessorParameters) DeepCopy() *DagProcessorParameters { + if in == nil { + return nil + } + out := new(DagProcessorParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DataRetentionConfigInitParameters) DeepCopyInto(out *DataRetentionConfigInitParameters) { *out = *in + if in.AirflowMetadataRetentionConfig != nil { + in, out := &in.AirflowMetadataRetentionConfig, &out.AirflowMetadataRetentionConfig + *out = make([]AirflowMetadataRetentionConfigInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.TaskLogsRetentionConfig != nil { in, out := &in.TaskLogsRetentionConfig, &out.TaskLogsRetentionConfig *out = make([]TaskLogsRetentionConfigInitParameters, len(*in)) @@ -473,6 +750,13 @@ func (in *DataRetentionConfigInitParameters) DeepCopy() *DataRetentionConfigInit // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *DataRetentionConfigObservation) DeepCopyInto(out *DataRetentionConfigObservation) { *out = *in + if in.AirflowMetadataRetentionConfig != nil { + in, out := &in.AirflowMetadataRetentionConfig, &out.AirflowMetadataRetentionConfig + *out = make([]AirflowMetadataRetentionConfigObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.TaskLogsRetentionConfig != nil { in, out := &in.TaskLogsRetentionConfig, &out.TaskLogsRetentionConfig *out = make([]TaskLogsRetentionConfigObservation, len(*in)) @@ -495,6 +779,13 @@ func (in *DataRetentionConfigObservation) DeepCopy() *DataRetentionConfigObserva // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DataRetentionConfigParameters) DeepCopyInto(out *DataRetentionConfigParameters) { *out = *in + if in.AirflowMetadataRetentionConfig != nil { + in, out := &in.AirflowMetadataRetentionConfig, &out.AirflowMetadataRetentionConfig + *out = make([]AirflowMetadataRetentionConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.TaskLogsRetentionConfig != nil { in, out := &in.TaskLogsRetentionConfig, &out.TaskLogsRetentionConfig *out = make([]TaskLogsRetentionConfigParameters, len(*in)) @@ -1242,6 +1533,16 @@ func (in *MasterAuthorizedNetworksConfigParameters) DeepCopy() *MasterAuthorized // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *NodeConfigInitParameters) DeepCopyInto(out *NodeConfigInitParameters) { *out = *in + if in.ComposerInternalIPv4CidrBlock != nil { + in, out := &in.ComposerInternalIPv4CidrBlock, &out.ComposerInternalIPv4CidrBlock + *out = new(string) + **out = **in + } + if in.ComposerNetworkAttachment != nil { + in, out := &in.ComposerNetworkAttachment, &out.ComposerNetworkAttachment + *out = new(string) + **out = **in + } if in.DiskSizeGb != nil { in, out := &in.DiskSizeGb, &out.DiskSizeGb *out = new(float64) @@ -1349,6 +1650,16 @@ func (in *NodeConfigInitParameters) DeepCopy() *NodeConfigInitParameters { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NodeConfigObservation) DeepCopyInto(out *NodeConfigObservation) { *out = *in + if in.ComposerInternalIPv4CidrBlock != nil { + in, out := &in.ComposerInternalIPv4CidrBlock, &out.ComposerInternalIPv4CidrBlock + *out = new(string) + **out = **in + } + if in.ComposerNetworkAttachment != nil { + in, out := &in.ComposerNetworkAttachment, &out.ComposerNetworkAttachment + *out = new(string) + **out = **in + } if in.DiskSizeGb != nil { in, out := &in.DiskSizeGb, &out.DiskSizeGb *out = new(float64) @@ -1426,6 +1737,16 @@ func (in *NodeConfigObservation) DeepCopy() *NodeConfigObservation { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *NodeConfigParameters) DeepCopyInto(out *NodeConfigParameters) { *out = *in + if in.ComposerInternalIPv4CidrBlock != nil { + in, out := &in.ComposerInternalIPv4CidrBlock, &out.ComposerInternalIPv4CidrBlock + *out = new(string) + **out = **in + } + if in.ComposerNetworkAttachment != nil { + in, out := &in.ComposerNetworkAttachment, &out.ComposerNetworkAttachment + *out = new(string) + **out = **in + } if in.DiskSizeGb != nil { in, out := &in.DiskSizeGb, &out.DiskSizeGb *out = new(float64) @@ -1984,6 +2305,11 @@ func (in *SoftwareConfigInitParameters) DeepCopyInto(out *SoftwareConfigInitPara (*out)[key] = outVal } } + if in.CloudDataLineageIntegration != nil { + in, out := &in.CloudDataLineageIntegration, &out.CloudDataLineageIntegration + *out = new(CloudDataLineageIntegrationInitParameters) + (*in).DeepCopyInto(*out) + } if in.EnvVariables != nil { in, out := &in.EnvVariables, &out.EnvVariables *out = make(map[string]*string, len(*in)) @@ -2031,6 +2357,11 @@ func (in *SoftwareConfigInitParameters) DeepCopyInto(out *SoftwareConfigInitPara *out = new(float64) **out = **in } + if in.WebServerPluginsMode != nil { + in, out := &in.WebServerPluginsMode, &out.WebServerPluginsMode + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SoftwareConfigInitParameters. 
@@ -2062,6 +2393,11 @@ func (in *SoftwareConfigObservation) DeepCopyInto(out *SoftwareConfigObservation (*out)[key] = outVal } } + if in.CloudDataLineageIntegration != nil { + in, out := &in.CloudDataLineageIntegration, &out.CloudDataLineageIntegration + *out = new(CloudDataLineageIntegrationObservation) + (*in).DeepCopyInto(*out) + } if in.EnvVariables != nil { in, out := &in.EnvVariables, &out.EnvVariables *out = make(map[string]*string, len(*in)) @@ -2109,6 +2445,11 @@ func (in *SoftwareConfigObservation) DeepCopyInto(out *SoftwareConfigObservation *out = new(float64) **out = **in } + if in.WebServerPluginsMode != nil { + in, out := &in.WebServerPluginsMode, &out.WebServerPluginsMode + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SoftwareConfigObservation. @@ -2140,6 +2481,11 @@ func (in *SoftwareConfigParameters) DeepCopyInto(out *SoftwareConfigParameters) (*out)[key] = outVal } } + if in.CloudDataLineageIntegration != nil { + in, out := &in.CloudDataLineageIntegration, &out.CloudDataLineageIntegration + *out = new(CloudDataLineageIntegrationParameters) + (*in).DeepCopyInto(*out) + } if in.EnvVariables != nil { in, out := &in.EnvVariables, &out.EnvVariables *out = make(map[string]*string, len(*in)) @@ -2187,6 +2533,11 @@ func (in *SoftwareConfigParameters) DeepCopyInto(out *SoftwareConfigParameters) *out = new(float64) **out = **in } + if in.WebServerPluginsMode != nil { + in, out := &in.WebServerPluginsMode, &out.WebServerPluginsMode + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SoftwareConfigParameters. @@ -2748,6 +3099,11 @@ func (in *WorkerParameters) DeepCopy() *WorkerParameters { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *WorkloadsConfigInitParameters) DeepCopyInto(out *WorkloadsConfigInitParameters) { *out = *in + if in.DagProcessor != nil { + in, out := &in.DagProcessor, &out.DagProcessor + *out = new(DagProcessorInitParameters) + (*in).DeepCopyInto(*out) + } if in.Scheduler != nil { in, out := &in.Scheduler, &out.Scheduler *out = new(SchedulerInitParameters) @@ -2783,6 +3139,11 @@ func (in *WorkloadsConfigInitParameters) DeepCopy() *WorkloadsConfigInitParamete // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *WorkloadsConfigObservation) DeepCopyInto(out *WorkloadsConfigObservation) { *out = *in + if in.DagProcessor != nil { + in, out := &in.DagProcessor, &out.DagProcessor + *out = new(DagProcessorObservation) + (*in).DeepCopyInto(*out) + } if in.Scheduler != nil { in, out := &in.Scheduler, &out.Scheduler *out = new(SchedulerObservation) @@ -2818,6 +3179,11 @@ func (in *WorkloadsConfigObservation) DeepCopy() *WorkloadsConfigObservation { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *WorkloadsConfigParameters) DeepCopyInto(out *WorkloadsConfigParameters) { *out = *in + if in.DagProcessor != nil { + in, out := &in.DagProcessor, &out.DagProcessor + *out = new(DagProcessorParameters) + (*in).DeepCopyInto(*out) + } if in.Scheduler != nil { in, out := &in.Scheduler, &out.Scheduler *out = new(SchedulerParameters) diff --git a/apis/compute/v1beta1/zz_attacheddisk_types.go b/apis/compute/v1beta1/zz_attacheddisk_types.go index edbe86e91..adeb57526 100755 --- a/apis/compute/v1beta1/zz_attacheddisk_types.go +++ b/apis/compute/v1beta1/zz_attacheddisk_types.go @@ -51,6 +51,9 @@ type AttachedDiskInitParameters struct { // +kubebuilder:validation:Optional InstanceSelector *v1.Selector `json:"instanceSelector,omitempty" tf:"-"` + // The disk interface used for attaching this disk. 
+ Interface *string `json:"interface,omitempty" tf:"interface,omitempty"` + // The mode in which to attach this disk, either READ_WRITE or // READ_ONLY. If not specified, the default is to attach the disk in // READ_WRITE mode. @@ -86,6 +89,9 @@ type AttachedDiskObservation struct { // as properties on the resource or provider. Instance *string `json:"instance,omitempty" tf:"instance,omitempty"` + // The disk interface used for attaching this disk. + Interface *string `json:"interface,omitempty" tf:"interface,omitempty"` + // The mode in which to attach this disk, either READ_WRITE or // READ_ONLY. If not specified, the default is to attach the disk in // READ_WRITE mode. @@ -141,6 +147,10 @@ type AttachedDiskParameters struct { // +kubebuilder:validation:Optional InstanceSelector *v1.Selector `json:"instanceSelector,omitempty" tf:"-"` + // The disk interface used for attaching this disk. + // +kubebuilder:validation:Optional + Interface *string `json:"interface,omitempty" tf:"interface,omitempty"` + // The mode in which to attach this disk, either READ_WRITE or // READ_ONLY. If not specified, the default is to attach the disk in // READ_WRITE mode. diff --git a/apis/compute/v1beta1/zz_externalvpngateway_types.go b/apis/compute/v1beta1/zz_externalvpngateway_types.go index 2bfc33e85..8dddd2138 100755 --- a/apis/compute/v1beta1/zz_externalvpngateway_types.go +++ b/apis/compute/v1beta1/zz_externalvpngateway_types.go @@ -119,6 +119,14 @@ type InterfaceInitParameters struct { // your on-premise gateway or another Cloud provider's VPN gateway, // it cannot be an IP address from Google Compute Engine. IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // IPv6 address of the interface in the external VPN gateway. This IPv6 + // address can be either from your on-premise gateway or another Cloud + // provider's VPN gateway, it cannot be an IP address from Google Compute + // Engine. 
Must specify an IPv6 address (not IPV4-mapped) using any format + // described in RFC 4291 (e.g. 2001:db8:0:0:2d9:51:0:0). The output format + // is RFC 5952 format (e.g. 2001:db8::2d9:51:0:0). + IPv6Address *string `json:"ipv6Address,omitempty" tf:"ipv6_address,omitempty"` } type InterfaceObservation struct { @@ -132,6 +140,14 @@ type InterfaceObservation struct { // your on-premise gateway or another Cloud provider's VPN gateway, // it cannot be an IP address from Google Compute Engine. IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // IPv6 address of the interface in the external VPN gateway. This IPv6 + // address can be either from your on-premise gateway or another Cloud + // provider's VPN gateway, it cannot be an IP address from Google Compute + // Engine. Must specify an IPv6 address (not IPV4-mapped) using any format + // described in RFC 4291 (e.g. 2001:db8:0:0:2d9:51:0:0). The output format + // is RFC 5952 format (e.g. 2001:db8::2d9:51:0:0). + IPv6Address *string `json:"ipv6Address,omitempty" tf:"ipv6_address,omitempty"` } type InterfaceParameters struct { @@ -147,6 +163,15 @@ type InterfaceParameters struct { // it cannot be an IP address from Google Compute Engine. // +kubebuilder:validation:Optional IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // IPv6 address of the interface in the external VPN gateway. This IPv6 + // address can be either from your on-premise gateway or another Cloud + // provider's VPN gateway, it cannot be an IP address from Google Compute + // Engine. Must specify an IPv6 address (not IPV4-mapped) using any format + // described in RFC 4291 (e.g. 2001:db8:0:0:2d9:51:0:0). The output format + // is RFC 5952 format (e.g. 2001:db8::2d9:51:0:0). 
+ // +kubebuilder:validation:Optional + IPv6Address *string `json:"ipv6Address,omitempty" tf:"ipv6_address,omitempty"` } // ExternalVPNGatewaySpec defines the desired state of ExternalVPNGateway diff --git a/apis/compute/v1beta1/zz_firewallpolicyassociation_types.go b/apis/compute/v1beta1/zz_firewallpolicyassociation_types.go index ddf909aa8..5526a4992 100755 --- a/apis/compute/v1beta1/zz_firewallpolicyassociation_types.go +++ b/apis/compute/v1beta1/zz_firewallpolicyassociation_types.go @@ -28,7 +28,7 @@ type FirewallPolicyAssociationInitParameters struct { // +kubebuilder:validation:Optional AttachmentTargetSelector *v1.Selector `json:"attachmentTargetSelector,omitempty" tf:"-"` - // The firewall policy ID of the association. + // The firewall policy of the resource. // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/compute/v1beta1.FirewallPolicy // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() FirewallPolicy *string `json:"firewallPolicy,omitempty" tf:"firewall_policy,omitempty"` @@ -50,7 +50,7 @@ type FirewallPolicyAssociationObservation struct { // The target that the firewall policy is attached to. AttachmentTarget *string `json:"attachmentTarget,omitempty" tf:"attachment_target,omitempty"` - // The firewall policy ID of the association. + // The firewall policy of the resource. FirewallPolicy *string `json:"firewallPolicy,omitempty" tf:"firewall_policy,omitempty"` // an identifier for the resource with format locations/global/firewallPolicies/{{firewall_policy}}/associations/{{name}} @@ -79,7 +79,7 @@ type FirewallPolicyAssociationParameters struct { // +kubebuilder:validation:Optional AttachmentTargetSelector *v1.Selector `json:"attachmentTargetSelector,omitempty" tf:"-"` - // The firewall policy ID of the association. + // The firewall policy of the resource. 
// +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/compute/v1beta1.FirewallPolicy // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional @@ -125,7 +125,7 @@ type FirewallPolicyAssociationStatus struct { // +kubebuilder:subresource:status // +kubebuilder:storageversion -// FirewallPolicyAssociation is the Schema for the FirewallPolicyAssociations API. Applies a hierarchical firewall policy to a target resource +// FirewallPolicyAssociation is the Schema for the FirewallPolicyAssociations API. Allows associating hierarchical firewall policies with the target where they are applied. // +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" // +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" diff --git a/apis/compute/v1beta1/zz_generated.deepcopy.go b/apis/compute/v1beta1/zz_generated.deepcopy.go index 8f60ae361..0c4a8d4d7 100644 --- a/apis/compute/v1beta1/zz_generated.deepcopy.go +++ b/apis/compute/v1beta1/zz_generated.deepcopy.go @@ -1682,6 +1682,11 @@ func (in *AttachedDiskInitParameters) DeepCopyInto(out *AttachedDiskInitParamete *out = new(v1.Selector) (*in).DeepCopyInto(*out) } + if in.Interface != nil { + in, out := &in.Interface, &out.Interface + *out = new(string) + **out = **in + } if in.Mode != nil { in, out := &in.Mode, &out.Mode *out = new(string) @@ -1764,6 +1769,11 @@ func (in *AttachedDiskObservation) DeepCopyInto(out *AttachedDiskObservation) { *out = new(string) **out = **in } + if in.Interface != nil { + in, out := &in.Interface, &out.Interface + *out = new(string) + **out = **in + } if in.Mode != nil { in, out := &in.Mode, &out.Mode *out = new(string) @@ -1829,6 +1839,11 @@ func (in 
*AttachedDiskParameters) DeepCopyInto(out *AttachedDiskParameters) { *out = new(v1.Selector) (*in).DeepCopyInto(*out) } + if in.Interface != nil { + in, out := &in.Interface, &out.Interface + *out = new(string) + **out = **in + } if in.Mode != nil { in, out := &in.Mode, &out.Mode *out = new(string) @@ -7222,11 +7237,26 @@ func (in *ConnectedEndpointsInitParameters) DeepCopy() *ConnectedEndpointsInitPa // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ConnectedEndpointsObservation) DeepCopyInto(out *ConnectedEndpointsObservation) { *out = *in + if in.ConsumerNetwork != nil { + in, out := &in.ConsumerNetwork, &out.ConsumerNetwork + *out = new(string) + **out = **in + } if in.Endpoint != nil { in, out := &in.Endpoint, &out.Endpoint *out = new(string) **out = **in } + if in.PropagatedConnectionCount != nil { + in, out := &in.PropagatedConnectionCount, &out.PropagatedConnectionCount + *out = new(float64) + **out = **in + } + if in.PscConnectionID != nil { + in, out := &in.PscConnectionID, &out.PscConnectionID + *out = new(string) + **out = **in + } if in.Status != nil { in, out := &in.Status, &out.Status *out = new(string) @@ -29893,6 +29923,11 @@ func (in *InterfaceInitParameters) DeepCopyInto(out *InterfaceInitParameters) { *out = new(string) **out = **in } + if in.IPv6Address != nil { + in, out := &in.IPv6Address, &out.IPv6Address + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InterfaceInitParameters. @@ -29918,6 +29953,11 @@ func (in *InterfaceObservation) DeepCopyInto(out *InterfaceObservation) { *out = new(string) **out = **in } + if in.IPv6Address != nil { + in, out := &in.IPv6Address, &out.IPv6Address + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InterfaceObservation. 
@@ -29943,6 +29983,11 @@ func (in *InterfaceParameters) DeepCopyInto(out *InterfaceParameters) { *out = new(string) **out = **in } + if in.IPv6Address != nil { + in, out := &in.IPv6Address, &out.IPv6Address + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InterfaceParameters. @@ -34598,6 +34643,11 @@ func (in *NetworkFirewallPolicyRuleObservation) DeepCopyInto(out *NetworkFirewal *out = new(string) **out = **in } + if in.CreationTimestamp != nil { + in, out := &in.CreationTimestamp, &out.CreationTimestamp + *out = new(string) + **out = **in + } if in.Description != nil { in, out := &in.Description, &out.Description *out = new(string) @@ -35589,6 +35639,11 @@ func (in *NetworkObservation) DeepCopyInto(out *NetworkObservation) { *out = new(string) **out = **in } + if in.NetworkID != nil { + in, out := &in.NetworkID, &out.NetworkID + *out = new(string) + **out = **in + } if in.NumericID != nil { in, out := &in.NumericID, &out.NumericID *out = new(string) @@ -53512,6 +53567,11 @@ func (in *RegionNetworkEndpointObservation) DeepCopyInto(out *RegionNetworkEndpo *out = new(string) **out = **in } + if in.NetworkEndpointID != nil { + in, out := &in.NetworkEndpointID, &out.NetworkEndpointID + *out = new(float64) + **out = **in + } if in.Port != nil { in, out := &in.Port, &out.Port *out = new(float64) @@ -55199,6 +55259,11 @@ func (in *RegionTargetHTTPProxyInitParameters) DeepCopyInto(out *RegionTargetHTT *out = new(string) **out = **in } + if in.HTTPKeepAliveTimeoutSec != nil { + in, out := &in.HTTPKeepAliveTimeoutSec, &out.HTTPKeepAliveTimeoutSec + *out = new(float64) + **out = **in + } if in.Project != nil { in, out := &in.Project, &out.Project *out = new(string) @@ -55276,6 +55341,11 @@ func (in *RegionTargetHTTPProxyObservation) DeepCopyInto(out *RegionTargetHTTPPr *out = new(string) **out = **in } + if in.HTTPKeepAliveTimeoutSec != nil { + in, out := &in.HTTPKeepAliveTimeoutSec, 
&out.HTTPKeepAliveTimeoutSec + *out = new(float64) + **out = **in + } if in.ID != nil { in, out := &in.ID, &out.ID *out = new(string) @@ -55326,6 +55396,11 @@ func (in *RegionTargetHTTPProxyParameters) DeepCopyInto(out *RegionTargetHTTPPro *out = new(string) **out = **in } + if in.HTTPKeepAliveTimeoutSec != nil { + in, out := &in.HTTPKeepAliveTimeoutSec, &out.HTTPKeepAliveTimeoutSec + *out = new(float64) + **out = **in + } if in.Project != nil { in, out := &in.Project, &out.Project *out = new(string) @@ -55444,6 +55519,11 @@ func (in *RegionTargetHTTPSProxyInitParameters) DeepCopyInto(out *RegionTargetHT *out = new(string) **out = **in } + if in.HTTPKeepAliveTimeoutSec != nil { + in, out := &in.HTTPKeepAliveTimeoutSec, &out.HTTPKeepAliveTimeoutSec + *out = new(float64) + **out = **in + } if in.Project != nil { in, out := &in.Project, &out.Project *out = new(string) @@ -55565,6 +55645,11 @@ func (in *RegionTargetHTTPSProxyObservation) DeepCopyInto(out *RegionTargetHTTPS *out = new(string) **out = **in } + if in.HTTPKeepAliveTimeoutSec != nil { + in, out := &in.HTTPKeepAliveTimeoutSec, &out.HTTPKeepAliveTimeoutSec + *out = new(float64) + **out = **in + } if in.ID != nil { in, out := &in.ID, &out.ID *out = new(string) @@ -55647,6 +55732,11 @@ func (in *RegionTargetHTTPSProxyParameters) DeepCopyInto(out *RegionTargetHTTPSP *out = new(string) **out = **in } + if in.HTTPKeepAliveTimeoutSec != nil { + in, out := &in.HTTPKeepAliveTimeoutSec, &out.HTTPKeepAliveTimeoutSec + *out = new(float64) + **out = **in + } if in.Project != nil { in, out := &in.Project, &out.Project *out = new(string) @@ -67211,6 +67301,11 @@ func (in *ServiceAttachmentInitParameters) DeepCopyInto(out *ServiceAttachmentIn *out = new(string) **out = **in } + if in.PropagatedConnectionLimit != nil { + in, out := &in.PropagatedConnectionLimit, &out.PropagatedConnectionLimit + *out = new(float64) + **out = **in + } if in.ReconcileConnections != nil { in, out := &in.ReconcileConnections, 
&out.ReconcileConnections *out = new(bool) @@ -67355,6 +67450,11 @@ func (in *ServiceAttachmentObservation) DeepCopyInto(out *ServiceAttachmentObser *out = new(string) **out = **in } + if in.PropagatedConnectionLimit != nil { + in, out := &in.PropagatedConnectionLimit, &out.PropagatedConnectionLimit + *out = new(float64) + **out = **in + } if in.ReconcileConnections != nil { in, out := &in.ReconcileConnections, &out.ReconcileConnections *out = new(bool) @@ -67462,6 +67562,11 @@ func (in *ServiceAttachmentParameters) DeepCopyInto(out *ServiceAttachmentParame *out = new(string) **out = **in } + if in.PropagatedConnectionLimit != nil { + in, out := &in.PropagatedConnectionLimit, &out.PropagatedConnectionLimit + *out = new(float64) + **out = **in + } if in.ReconcileConnections != nil { in, out := &in.ReconcileConnections, &out.ReconcileConnections *out = new(bool) diff --git a/apis/compute/v1beta1/zz_generated.resolvers.go b/apis/compute/v1beta1/zz_generated.resolvers.go index 4fede4185..bf43fc7cb 100644 --- a/apis/compute/v1beta1/zz_generated.resolvers.go +++ b/apis/compute/v1beta1/zz_generated.resolvers.go @@ -2673,7 +2673,7 @@ func (mg *NetworkFirewallPolicyAssociation) ResolveReferences(ctx context.Contex rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FirewallPolicy), - Extract: reference.ExternalName(), + Extract: resource.ExtractResourceID(), Reference: mg.Spec.ForProvider.FirewallPolicyRef, Selector: mg.Spec.ForProvider.FirewallPolicySelector, To: reference.To{List: l, Managed: m}, @@ -4209,7 +4209,7 @@ func (mg *RegionNetworkFirewallPolicyAssociation) ResolveReferences(ctx context. 
rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FirewallPolicy), - Extract: reference.ExternalName(), + Extract: resource.ExtractResourceID(), Reference: mg.Spec.ForProvider.FirewallPolicyRef, Selector: mg.Spec.ForProvider.FirewallPolicySelector, To: reference.To{List: l, Managed: m}, diff --git a/apis/compute/v1beta1/zz_havpngateway_terraformed.go b/apis/compute/v1beta1/zz_havpngateway_terraformed.go index 249f787be..513b0b0c2 100755 --- a/apis/compute/v1beta1/zz_havpngateway_terraformed.go +++ b/apis/compute/v1beta1/zz_havpngateway_terraformed.go @@ -125,5 +125,5 @@ func (tr *HaVPNGateway) LateInitialize(attrs []byte) (bool, error) { // GetTerraformSchemaVersion returns the associated Terraform schema version func (tr *HaVPNGateway) GetTerraformSchemaVersion() int { - return 0 + return 1 } diff --git a/apis/compute/v1beta1/zz_network_types.go b/apis/compute/v1beta1/zz_network_types.go index a5778f1cb..089898190 100755 --- a/apis/compute/v1beta1/zz_network_types.go +++ b/apis/compute/v1beta1/zz_network_types.go @@ -113,6 +113,10 @@ type NetworkObservation struct { // Possible values are: BEFORE_CLASSIC_FIREWALL, AFTER_CLASSIC_FIREWALL. NetworkFirewallPolicyEnforcementOrder *string `json:"networkFirewallPolicyEnforcementOrder,omitempty" tf:"network_firewall_policy_enforcement_order,omitempty"` + // The unique identifier for the resource. This identifier is defined by the server. + NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` + + // (Deprecated) // The unique identifier for the resource. This identifier is defined by the server. 
NumericID *string `json:"numericId,omitempty" tf:"numeric_id,omitempty"` diff --git a/apis/compute/v1beta1/zz_networkfirewallpolicyassociation_types.go b/apis/compute/v1beta1/zz_networkfirewallpolicyassociation_types.go index 2f35721dc..db757f613 100755 --- a/apis/compute/v1beta1/zz_networkfirewallpolicyassociation_types.go +++ b/apis/compute/v1beta1/zz_networkfirewallpolicyassociation_types.go @@ -28,7 +28,8 @@ type NetworkFirewallPolicyAssociationInitParameters struct { // +kubebuilder:validation:Optional AttachmentTargetSelector *v1.Selector `json:"attachmentTargetSelector,omitempty" tf:"-"` - // The project for the resource + // The ID of the project in which the resource belongs. + // If it is not provided, the provider project is used. Project *string `json:"project,omitempty" tf:"project,omitempty"` } @@ -37,13 +38,14 @@ type NetworkFirewallPolicyAssociationObservation struct { // The target that the firewall policy is attached to. AttachmentTarget *string `json:"attachmentTarget,omitempty" tf:"attachment_target,omitempty"` - // The firewall policy ID of the association. + // The firewall policy of the resource. FirewallPolicy *string `json:"firewallPolicy,omitempty" tf:"firewall_policy,omitempty"` // an identifier for the resource with format projects/{{project}}/global/firewallPolicies/{{firewall_policy}}/associations/{{name}} ID *string `json:"id,omitempty" tf:"id,omitempty"` - // The project for the resource + // The ID of the project in which the resource belongs. + // If it is not provided, the provider project is used. Project *string `json:"project,omitempty" tf:"project,omitempty"` // The short name of the firewall policy of the association. @@ -66,8 +68,9 @@ type NetworkFirewallPolicyAssociationParameters struct { // +kubebuilder:validation:Optional AttachmentTargetSelector *v1.Selector `json:"attachmentTargetSelector,omitempty" tf:"-"` - // The firewall policy ID of the association. + // The firewall policy of the resource. 
// +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/compute/v1beta1.NetworkFirewallPolicy + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional FirewallPolicy *string `json:"firewallPolicy,omitempty" tf:"firewall_policy,omitempty"` @@ -79,7 +82,8 @@ type NetworkFirewallPolicyAssociationParameters struct { // +kubebuilder:validation:Optional FirewallPolicySelector *v1.Selector `json:"firewallPolicySelector,omitempty" tf:"-"` - // The project for the resource + // The ID of the project in which the resource belongs. + // If it is not provided, the provider project is used. // +kubebuilder:validation:Optional Project *string `json:"project,omitempty" tf:"project,omitempty"` } diff --git a/apis/compute/v1beta1/zz_networkfirewallpolicyrule_types.go b/apis/compute/v1beta1/zz_networkfirewallpolicyrule_types.go index 2d03fa296..5c24a8ae6 100755 --- a/apis/compute/v1beta1/zz_networkfirewallpolicyrule_types.go +++ b/apis/compute/v1beta1/zz_networkfirewallpolicyrule_types.go @@ -15,29 +15,35 @@ import ( type MatchLayer4ConfigsInitParameters struct { - // The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number. + // The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. + // This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number. IPProtocol *string `json:"ipProtocol,omitempty" tf:"ip_protocol,omitempty"` - // An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. 
Example inputs include: “. + // An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. + // Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. Ports []*string `json:"ports,omitempty" tf:"ports,omitempty"` } type MatchLayer4ConfigsObservation struct { - // The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number. + // The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. + // This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number. IPProtocol *string `json:"ipProtocol,omitempty" tf:"ip_protocol,omitempty"` - // An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. Example inputs include: “. + // An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. + // Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. Ports []*string `json:"ports,omitempty" tf:"ports,omitempty"` } type MatchLayer4ConfigsParameters struct { - // The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number. + // The IP protocol to which this rule applies. 
The protocol type is required when creating a firewall rule. + // This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number. // +kubebuilder:validation:Optional IPProtocol *string `json:"ipProtocol" tf:"ip_protocol,omitempty"` - // An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. Example inputs include: “. + // An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. + // Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. // +kubebuilder:validation:Optional Ports []*string `json:"ports,omitempty" tf:"ports,omitempty"` } @@ -50,31 +56,45 @@ type NetworkFirewallPolicyRuleInitParameters struct { // An optional description for this resource. Description *string `json:"description,omitempty" tf:"description,omitempty"` - // The direction in which this rule applies. Possible values: INGRESS, EGRESS + // The direction in which this rule applies. + // Possible values are: INGRESS, EGRESS. Direction *string `json:"direction,omitempty" tf:"direction,omitempty"` - // Denotes whether the firewall policy rule is disabled. When set to true, the firewall policy rule is not enforced and traffic behaves as if it did not exist. If this is unspecified, the firewall policy rule will be enabled. + // Denotes whether the firewall policy rule is disabled. + // When set to true, the firewall policy rule is not enforced and traffic behaves as if it did not exist. + // If this is unspecified, the firewall policy rule will be enabled. 
Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` - // Denotes whether to enable logging for a particular rule. If logging is enabled, logs will be exported to the configured export destination in Stackdriver. Logs may be exported to BigQuery or Pub/Sub. Note: you cannot enable logging on "goto_next" rules. + // Denotes whether to enable logging for a particular rule. + // If logging is enabled, logs will be exported to the configured export destination in Stackdriver. + // Logs may be exported to BigQuery or Pub/Sub. + // Note: you cannot enable logging on "goto_next" rules. EnableLogging *bool `json:"enableLogging,omitempty" tf:"enable_logging,omitempty"` // A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced. + // Structure is documented below. Match *NetworkFirewallPolicyRuleMatchInitParameters `json:"match,omitempty" tf:"match,omitempty"` - // The project for the resource + // The ID of the project in which the resource belongs. + // If it is not provided, the provider project is used. Project *string `json:"project,omitempty" tf:"project,omitempty"` // An optional name for the rule. This field is not a unique identifier and can be updated. RuleName *string `json:"ruleName,omitempty" tf:"rule_name,omitempty"` - // A fully-qualified URL of a SecurityProfileGroup resource. Example: https://networksecurity.googleapis.com/v1/organizations/{organizationId}/locations/global/securityProfileGroups/my-security-profile-group. It must be specified if action = 'apply_security_profile_group' and cannot be specified for other actions. + // A fully-qualified URL of a SecurityProfile resource instance. + // Example: https://networksecurity.googleapis.com/v1/projects/{project}/locations/{location}/securityProfileGroups/my-security-profile-group + // Must be specified if action = 'apply_security_profile_group' and cannot be specified for other actions. 
SecurityProfileGroup *string `json:"securityProfileGroup,omitempty" tf:"security_profile_group,omitempty"` - // Boolean flag indicating if the traffic should be TLS decrypted. It can be set only if action = 'apply_security_profile_group' and cannot be set for other actions. + // Boolean flag indicating if the traffic should be TLS decrypted. + // Can be set only if action = 'apply_security_profile_group' and cannot be set for other actions. TLSInspect *bool `json:"tlsInspect,omitempty" tf:"tls_inspect,omitempty"` - // A list of secure tags that controls which instances the firewall rule applies to. If targetSecureTag are specified, then the firewall rule applies only to instances in the VPC network that have one of those EFFECTIVE secure tags, if all the target_secure_tag are in INEFFECTIVE state, then this rule will be ignored. targetSecureTag may not be set at the same time as targetServiceAccounts. If neither targetServiceAccounts nor targetSecureTag are specified, the firewall rule applies to all instances on the specified network. Maximum number of target label tags allowed is 256. + // A list of secure tags that controls which instances the firewall rule applies to. + // If targetSecureTag are specified, then the firewall rule applies only to instances in the VPC network that have one of those EFFECTIVE secure tags, if all the targetSecureTag are in INEFFECTIVE state, then this rule will be ignored. + // targetSecureTag may not be set at the same time as targetServiceAccounts. If neither targetServiceAccounts nor targetSecureTag are specified, the firewall rule applies to all instances on the specified network. Maximum number of target label tags allowed is 256. + // Structure is documented below. TargetSecureTags []TargetSecureTagsInitParameters `json:"targetSecureTags,omitempty" tf:"target_secure_tags,omitempty"` // A list of service accounts indicating the sets of instances that are applied with this rule. 
@@ -83,25 +103,26 @@ type NetworkFirewallPolicyRuleInitParameters struct { type NetworkFirewallPolicyRuleMatchInitParameters struct { - // Address groups which should be matched against the traffic destination. Maximum number of destination address groups is 10. Destination address groups is only supported in Egress rules. + // Address groups which should be matched against the traffic destination. Maximum number of destination address groups is 10. DestAddressGroups []*string `json:"destAddressGroups,omitempty" tf:"dest_address_groups,omitempty"` - // Domain names that will be used to match against the resolved domain name of destination of traffic. Can only be specified if DIRECTION is egress. + // Fully Qualified Domain Name (FQDN) which should be matched against traffic destination. Maximum number of destination fqdn allowed is 100. DestFqdns []*string `json:"destFqdns,omitempty" tf:"dest_fqdns,omitempty"` // CIDR IP address range. Maximum number of destination CIDR IP ranges allowed is 5000. DestIPRanges []*string `json:"destIpRanges,omitempty" tf:"dest_ip_ranges,omitempty"` - // The Unicode country codes whose IP addresses will be used to match against the source of traffic. Can only be specified if DIRECTION is egress. + // Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. DestRegionCodes []*string `json:"destRegionCodes,omitempty" tf:"dest_region_codes,omitempty"` - // Name of the Google Cloud Threat Intelligence list. + // Names of Network Threat Intelligence lists. The IPs in these lists will be matched against traffic destination. DestThreatIntelligences []*string `json:"destThreatIntelligences,omitempty" tf:"dest_threat_intelligences,omitempty"` // Pairs of IP protocols and ports that the rule should match. + // Structure is documented below. 
Layer4Configs []MatchLayer4ConfigsInitParameters `json:"layer4Configs,omitempty" tf:"layer4_configs,omitempty"` - // Address groups which should be matched against the traffic source. Maximum number of source address groups is 10. Source address groups is only supported in Ingress rules. + // Address groups which should be matched against the traffic source. Maximum number of source address groups is 10. // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/networksecurity/v1beta1.AddressGroup // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() SrcAddressGroups []*string `json:"srcAddressGroups,omitempty" tf:"src_address_groups,omitempty"` @@ -114,68 +135,71 @@ type NetworkFirewallPolicyRuleMatchInitParameters struct { // +kubebuilder:validation:Optional SrcAddressGroupsSelector *v1.Selector `json:"srcAddressGroupsSelector,omitempty" tf:"-"` - // Domain names that will be used to match against the resolved domain name of source of traffic. Can only be specified if DIRECTION is ingress. + // Fully Qualified Domain Name (FQDN) which should be matched against traffic source. Maximum number of source fqdn allowed is 100. SrcFqdns []*string `json:"srcFqdns,omitempty" tf:"src_fqdns,omitempty"` // CIDR IP address range. Maximum number of source CIDR IP ranges allowed is 5000. SrcIPRanges []*string `json:"srcIpRanges,omitempty" tf:"src_ip_ranges,omitempty"` - // The Unicode country codes whose IP addresses will be used to match against the source of traffic. Can only be specified if DIRECTION is ingress. + // Region codes whose IP addresses will be used to match for source of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of source region codes allowed is 5000. 
SrcRegionCodes []*string `json:"srcRegionCodes,omitempty" tf:"src_region_codes,omitempty"` // List of secure tag values, which should be matched at the source of the traffic. For INGRESS rule, if all the srcSecureTag are INEFFECTIVE, and there is no srcIpRange, this rule will be ignored. Maximum number of source tag values allowed is 256. + // Structure is documented below. SrcSecureTags []SrcSecureTagsInitParameters `json:"srcSecureTags,omitempty" tf:"src_secure_tags,omitempty"` - // Name of the Google Cloud Threat Intelligence list. + // Names of Network Threat Intelligence lists. The IPs in these lists will be matched against traffic source. SrcThreatIntelligences []*string `json:"srcThreatIntelligences,omitempty" tf:"src_threat_intelligences,omitempty"` } type NetworkFirewallPolicyRuleMatchObservation struct { - // Address groups which should be matched against the traffic destination. Maximum number of destination address groups is 10. Destination address groups is only supported in Egress rules. + // Address groups which should be matched against the traffic destination. Maximum number of destination address groups is 10. DestAddressGroups []*string `json:"destAddressGroups,omitempty" tf:"dest_address_groups,omitempty"` - // Domain names that will be used to match against the resolved domain name of destination of traffic. Can only be specified if DIRECTION is egress. + // Fully Qualified Domain Name (FQDN) which should be matched against traffic destination. Maximum number of destination fqdn allowed is 100. DestFqdns []*string `json:"destFqdns,omitempty" tf:"dest_fqdns,omitempty"` // CIDR IP address range. Maximum number of destination CIDR IP ranges allowed is 5000. DestIPRanges []*string `json:"destIpRanges,omitempty" tf:"dest_ip_ranges,omitempty"` - // The Unicode country codes whose IP addresses will be used to match against the source of traffic. Can only be specified if DIRECTION is egress. 
+ // Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. DestRegionCodes []*string `json:"destRegionCodes,omitempty" tf:"dest_region_codes,omitempty"` - // Name of the Google Cloud Threat Intelligence list. + // Names of Network Threat Intelligence lists. The IPs in these lists will be matched against traffic destination. DestThreatIntelligences []*string `json:"destThreatIntelligences,omitempty" tf:"dest_threat_intelligences,omitempty"` // Pairs of IP protocols and ports that the rule should match. + // Structure is documented below. Layer4Configs []MatchLayer4ConfigsObservation `json:"layer4Configs,omitempty" tf:"layer4_configs,omitempty"` - // Address groups which should be matched against the traffic source. Maximum number of source address groups is 10. Source address groups is only supported in Ingress rules. + // Address groups which should be matched against the traffic source. Maximum number of source address groups is 10. SrcAddressGroups []*string `json:"srcAddressGroups,omitempty" tf:"src_address_groups,omitempty"` - // Domain names that will be used to match against the resolved domain name of source of traffic. Can only be specified if DIRECTION is ingress. + // Fully Qualified Domain Name (FQDN) which should be matched against traffic source. Maximum number of source fqdn allowed is 100. SrcFqdns []*string `json:"srcFqdns,omitempty" tf:"src_fqdns,omitempty"` // CIDR IP address range. Maximum number of source CIDR IP ranges allowed is 5000. SrcIPRanges []*string `json:"srcIpRanges,omitempty" tf:"src_ip_ranges,omitempty"` - // The Unicode country codes whose IP addresses will be used to match against the source of traffic. Can only be specified if DIRECTION is ingress. + // Region codes whose IP addresses will be used to match for source of traffic. 
Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of source region codes allowed is 5000. SrcRegionCodes []*string `json:"srcRegionCodes,omitempty" tf:"src_region_codes,omitempty"` // List of secure tag values, which should be matched at the source of the traffic. For INGRESS rule, if all the srcSecureTag are INEFFECTIVE, and there is no srcIpRange, this rule will be ignored. Maximum number of source tag values allowed is 256. + // Structure is documented below. SrcSecureTags []SrcSecureTagsObservation `json:"srcSecureTags,omitempty" tf:"src_secure_tags,omitempty"` - // Name of the Google Cloud Threat Intelligence list. + // Names of Network Threat Intelligence lists. The IPs in these lists will be matched against traffic source. SrcThreatIntelligences []*string `json:"srcThreatIntelligences,omitempty" tf:"src_threat_intelligences,omitempty"` } type NetworkFirewallPolicyRuleMatchParameters struct { - // Address groups which should be matched against the traffic destination. Maximum number of destination address groups is 10. Destination address groups is only supported in Egress rules. + // Address groups which should be matched against the traffic destination. Maximum number of destination address groups is 10. // +kubebuilder:validation:Optional DestAddressGroups []*string `json:"destAddressGroups,omitempty" tf:"dest_address_groups,omitempty"` - // Domain names that will be used to match against the resolved domain name of destination of traffic. Can only be specified if DIRECTION is egress. + // Fully Qualified Domain Name (FQDN) which should be matched against traffic destination. Maximum number of destination fqdn allowed is 100. 
// +kubebuilder:validation:Optional DestFqdns []*string `json:"destFqdns,omitempty" tf:"dest_fqdns,omitempty"` @@ -183,19 +207,20 @@ type NetworkFirewallPolicyRuleMatchParameters struct { // +kubebuilder:validation:Optional DestIPRanges []*string `json:"destIpRanges,omitempty" tf:"dest_ip_ranges,omitempty"` - // The Unicode country codes whose IP addresses will be used to match against the source of traffic. Can only be specified if DIRECTION is egress. + // Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. // +kubebuilder:validation:Optional DestRegionCodes []*string `json:"destRegionCodes,omitempty" tf:"dest_region_codes,omitempty"` - // Name of the Google Cloud Threat Intelligence list. + // Names of Network Threat Intelligence lists. The IPs in these lists will be matched against traffic destination. // +kubebuilder:validation:Optional DestThreatIntelligences []*string `json:"destThreatIntelligences,omitempty" tf:"dest_threat_intelligences,omitempty"` // Pairs of IP protocols and ports that the rule should match. + // Structure is documented below. // +kubebuilder:validation:Optional Layer4Configs []MatchLayer4ConfigsParameters `json:"layer4Configs" tf:"layer4_configs,omitempty"` - // Address groups which should be matched against the traffic source. Maximum number of source address groups is 10. Source address groups is only supported in Ingress rules. + // Address groups which should be matched against the traffic source. Maximum number of source address groups is 10. 
// +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/networksecurity/v1beta1.AddressGroup // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional @@ -209,7 +234,7 @@ type NetworkFirewallPolicyRuleMatchParameters struct { // +kubebuilder:validation:Optional SrcAddressGroupsSelector *v1.Selector `json:"srcAddressGroupsSelector,omitempty" tf:"-"` - // Domain names that will be used to match against the resolved domain name of source of traffic. Can only be specified if DIRECTION is ingress. + // Fully Qualified Domain Name (FQDN) which should be matched against traffic source. Maximum number of source fqdn allowed is 100. // +kubebuilder:validation:Optional SrcFqdns []*string `json:"srcFqdns,omitempty" tf:"src_fqdns,omitempty"` @@ -217,15 +242,16 @@ type NetworkFirewallPolicyRuleMatchParameters struct { // +kubebuilder:validation:Optional SrcIPRanges []*string `json:"srcIpRanges,omitempty" tf:"src_ip_ranges,omitempty"` - // The Unicode country codes whose IP addresses will be used to match against the source of traffic. Can only be specified if DIRECTION is ingress. + // Region codes whose IP addresses will be used to match for source of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of source region codes allowed is 5000. // +kubebuilder:validation:Optional SrcRegionCodes []*string `json:"srcRegionCodes,omitempty" tf:"src_region_codes,omitempty"` // List of secure tag values, which should be matched at the source of the traffic. For INGRESS rule, if all the srcSecureTag are INEFFECTIVE, and there is no srcIpRange, this rule will be ignored. Maximum number of source tag values allowed is 256. + // Structure is documented below. 
// +kubebuilder:validation:Optional SrcSecureTags []SrcSecureTagsParameters `json:"srcSecureTags,omitempty" tf:"src_secure_tags,omitempty"` - // Name of the Google Cloud Threat Intelligence list. + // Names of Network Threat Intelligence lists. The IPs in these lists will be matched against traffic source. // +kubebuilder:validation:Optional SrcThreatIntelligences []*string `json:"srcThreatIntelligences,omitempty" tf:"src_threat_intelligences,omitempty"` } @@ -235,16 +261,25 @@ type NetworkFirewallPolicyRuleObservation struct { // The Action to perform when the client connection triggers the rule. Valid actions are "allow", "deny", "goto_next" and "apply_security_profile_group". Action *string `json:"action,omitempty" tf:"action,omitempty"` + // Creation timestamp in RFC3339 text format. + CreationTimestamp *string `json:"creationTimestamp,omitempty" tf:"creation_timestamp,omitempty"` + // An optional description for this resource. Description *string `json:"description,omitempty" tf:"description,omitempty"` - // The direction in which this rule applies. Possible values: INGRESS, EGRESS + // The direction in which this rule applies. + // Possible values are: INGRESS, EGRESS. Direction *string `json:"direction,omitempty" tf:"direction,omitempty"` - // Denotes whether the firewall policy rule is disabled. When set to true, the firewall policy rule is not enforced and traffic behaves as if it did not exist. If this is unspecified, the firewall policy rule will be enabled. + // Denotes whether the firewall policy rule is disabled. + // When set to true, the firewall policy rule is not enforced and traffic behaves as if it did not exist. + // If this is unspecified, the firewall policy rule will be enabled. Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` - // Denotes whether to enable logging for a particular rule. If logging is enabled, logs will be exported to the configured export destination in Stackdriver. 
Logs may be exported to BigQuery or Pub/Sub. Note: you cannot enable logging on "goto_next" rules. + // Denotes whether to enable logging for a particular rule. + // If logging is enabled, logs will be exported to the configured export destination in Stackdriver. + // Logs may be exported to BigQuery or Pub/Sub. + // Note: you cannot enable logging on "goto_next" rules. EnableLogging *bool `json:"enableLogging,omitempty" tf:"enable_logging,omitempty"` // The firewall policy of the resource. @@ -257,12 +292,16 @@ type NetworkFirewallPolicyRuleObservation struct { Kind *string `json:"kind,omitempty" tf:"kind,omitempty"` // A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced. + // Structure is documented below. Match *NetworkFirewallPolicyRuleMatchObservation `json:"match,omitempty" tf:"match,omitempty"` - // An integer indicating the priority of a rule in the list. The priority must be a positive value between 0 and 2147483647. Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest prority. + // An integer indicating the priority of a rule in the list. + // The priority must be a positive value between 0 and 2147483647. + // Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest priority. Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` - // The project for the resource + // The ID of the project in which the resource belongs. + // If it is not provided, the provider project is used. Project *string `json:"project,omitempty" tf:"project,omitempty"` // An optional name for the rule. This field is not a unique identifier and can be updated. @@ -271,13 +310,19 @@ type NetworkFirewallPolicyRuleObservation struct { // Calculation of the complexity of a single firewall policy rule.
RuleTupleCount *float64 `json:"ruleTupleCount,omitempty" tf:"rule_tuple_count,omitempty"` - // A fully-qualified URL of a SecurityProfileGroup resource. Example: https://networksecurity.googleapis.com/v1/organizations/{organizationId}/locations/global/securityProfileGroups/my-security-profile-group. It must be specified if action = 'apply_security_profile_group' and cannot be specified for other actions. + // A fully-qualified URL of a SecurityProfile resource instance. + // Example: https://networksecurity.googleapis.com/v1/projects/{project}/locations/{location}/securityProfileGroups/my-security-profile-group + // Must be specified if action = 'apply_security_profile_group' and cannot be specified for other actions. SecurityProfileGroup *string `json:"securityProfileGroup,omitempty" tf:"security_profile_group,omitempty"` - // Boolean flag indicating if the traffic should be TLS decrypted. It can be set only if action = 'apply_security_profile_group' and cannot be set for other actions. + // Boolean flag indicating if the traffic should be TLS decrypted. + // Can be set only if action = 'apply_security_profile_group' and cannot be set for other actions. TLSInspect *bool `json:"tlsInspect,omitempty" tf:"tls_inspect,omitempty"` - // A list of secure tags that controls which instances the firewall rule applies to. If targetSecureTag are specified, then the firewall rule applies only to instances in the VPC network that have one of those EFFECTIVE secure tags, if all the target_secure_tag are in INEFFECTIVE state, then this rule will be ignored. targetSecureTag may not be set at the same time as targetServiceAccounts. If neither targetServiceAccounts nor targetSecureTag are specified, the firewall rule applies to all instances on the specified network. Maximum number of target label tags allowed is 256. + // A list of secure tags that controls which instances the firewall rule applies to. 
+ // If targetSecureTag are specified, then the firewall rule applies only to instances in the VPC network that have one of those EFFECTIVE secure tags, if all the targetSecureTag are in INEFFECTIVE state, then this rule will be ignored. + // targetSecureTag may not be set at the same time as targetServiceAccounts. If neither targetServiceAccounts nor targetSecureTag are specified, the firewall rule applies to all instances on the specified network. Maximum number of target label tags allowed is 256. + // Structure is documented below. TargetSecureTags []TargetSecureTagsObservation `json:"targetSecureTags,omitempty" tf:"target_secure_tags,omitempty"` // A list of service accounts indicating the sets of instances that are applied with this rule. @@ -294,15 +339,21 @@ type NetworkFirewallPolicyRuleParameters struct { // +kubebuilder:validation:Optional Description *string `json:"description,omitempty" tf:"description,omitempty"` - // The direction in which this rule applies. Possible values: INGRESS, EGRESS + // The direction in which this rule applies. + // Possible values are: INGRESS, EGRESS. // +kubebuilder:validation:Optional Direction *string `json:"direction,omitempty" tf:"direction,omitempty"` - // Denotes whether the firewall policy rule is disabled. When set to true, the firewall policy rule is not enforced and traffic behaves as if it did not exist. If this is unspecified, the firewall policy rule will be enabled. + // Denotes whether the firewall policy rule is disabled. + // When set to true, the firewall policy rule is not enforced and traffic behaves as if it did not exist. + // If this is unspecified, the firewall policy rule will be enabled. // +kubebuilder:validation:Optional Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` - // Denotes whether to enable logging for a particular rule. If logging is enabled, logs will be exported to the configured export destination in Stackdriver. Logs may be exported to BigQuery or Pub/Sub. 
Note: you cannot enable logging on "goto_next" rules. + // Denotes whether to enable logging for a particular rule. + // If logging is enabled, logs will be exported to the configured export destination in Stackdriver. + // Logs may be exported to BigQuery or Pub/Sub. + // Note: you cannot enable logging on "goto_next" rules. // +kubebuilder:validation:Optional EnableLogging *bool `json:"enableLogging,omitempty" tf:"enable_logging,omitempty"` @@ -320,14 +371,18 @@ type NetworkFirewallPolicyRuleParameters struct { FirewallPolicySelector *v1.Selector `json:"firewallPolicySelector,omitempty" tf:"-"` // A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced. + // Structure is documented below. // +kubebuilder:validation:Optional Match *NetworkFirewallPolicyRuleMatchParameters `json:"match,omitempty" tf:"match,omitempty"` - // An integer indicating the priority of a rule in the list. The priority must be a positive value between 0 and 2147483647. Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest prority. + // An integer indicating the priority of a rule in the list. + // The priority must be a positive value between 0 and 2147483647. + // Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest priority. // +kubebuilder:validation:Required Priority *float64 `json:"priority" tf:"priority,omitempty"` - // The project for the resource + // The ID of the project in which the resource belongs. + // If it is not provided, the provider project is used. // +kubebuilder:validation:Optional Project *string `json:"project,omitempty" tf:"project,omitempty"` @@ -335,15 +390,21 @@ type NetworkFirewallPolicyRuleParameters struct { // +kubebuilder:validation:Optional RuleName *string `json:"ruleName,omitempty" tf:"rule_name,omitempty"` - // A fully-qualified URL of a SecurityProfileGroup resource.
Example: https://networksecurity.googleapis.com/v1/organizations/{organizationId}/locations/global/securityProfileGroups/my-security-profile-group. It must be specified if action = 'apply_security_profile_group' and cannot be specified for other actions. + // A fully-qualified URL of a SecurityProfile resource instance. + // Example: https://networksecurity.googleapis.com/v1/projects/{project}/locations/{location}/securityProfileGroups/my-security-profile-group + // Must be specified if action = 'apply_security_profile_group' and cannot be specified for other actions. // +kubebuilder:validation:Optional SecurityProfileGroup *string `json:"securityProfileGroup,omitempty" tf:"security_profile_group,omitempty"` - // Boolean flag indicating if the traffic should be TLS decrypted. It can be set only if action = 'apply_security_profile_group' and cannot be set for other actions. + // Boolean flag indicating if the traffic should be TLS decrypted. + // Can be set only if action = 'apply_security_profile_group' and cannot be set for other actions. // +kubebuilder:validation:Optional TLSInspect *bool `json:"tlsInspect,omitempty" tf:"tls_inspect,omitempty"` - // A list of secure tags that controls which instances the firewall rule applies to. If targetSecureTag are specified, then the firewall rule applies only to instances in the VPC network that have one of those EFFECTIVE secure tags, if all the target_secure_tag are in INEFFECTIVE state, then this rule will be ignored. targetSecureTag may not be set at the same time as targetServiceAccounts. If neither targetServiceAccounts nor targetSecureTag are specified, the firewall rule applies to all instances on the specified network. Maximum number of target label tags allowed is 256. + // A list of secure tags that controls which instances the firewall rule applies to. 
+ // If targetSecureTag are specified, then the firewall rule applies only to instances in the VPC network that have one of those EFFECTIVE secure tags, if all the targetSecureTag are in INEFFECTIVE state, then this rule will be ignored. + // targetSecureTag may not be set at the same time as targetServiceAccounts. If neither targetServiceAccounts nor targetSecureTag are specified, the firewall rule applies to all instances on the specified network. Maximum number of target label tags allowed is 256. + // Structure is documented below. // +kubebuilder:validation:Optional TargetSecureTags []TargetSecureTagsParameters `json:"targetSecureTags,omitempty" tf:"target_secure_tags,omitempty"` @@ -354,7 +415,7 @@ type NetworkFirewallPolicyRuleParameters struct { type SrcSecureTagsInitParameters struct { - // Name of the secure tag, created with TagManager's TagValue API. @pattern tagValues/[0-9]+ + // Name of the secure tag, created with TagManager's TagValue API. // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/tags/v1beta1.TagValue Name *string `json:"name,omitempty" tf:"name,omitempty"` @@ -369,16 +430,17 @@ type SrcSecureTagsInitParameters struct { type SrcSecureTagsObservation struct { - // Name of the secure tag, created with TagManager's TagValue API. @pattern tagValues/[0-9]+ + // Name of the secure tag, created with TagManager's TagValue API. Name *string `json:"name,omitempty" tf:"name,omitempty"` - // [Output Only] State of the secure tag, either EFFECTIVE or INEFFECTIVE. A secure tag is INEFFECTIVE when it is deleted or its network is deleted. + // (Output) + // State of the secure tag, either EFFECTIVE or INEFFECTIVE. A secure tag is INEFFECTIVE when it is deleted or its network is deleted. State *string `json:"state,omitempty" tf:"state,omitempty"` } type SrcSecureTagsParameters struct { - // Name of the secure tag, created with TagManager's TagValue API. 
@pattern tagValues/[0-9]+ + // Name of the secure tag, created with TagManager's TagValue API. // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/tags/v1beta1.TagValue // +kubebuilder:validation:Optional Name *string `json:"name,omitempty" tf:"name,omitempty"` @@ -394,24 +456,25 @@ type SrcSecureTagsParameters struct { type TargetSecureTagsInitParameters struct { - // Name of the secure tag, created with TagManager's TagValue API. @pattern tagValues/[0-9]+ + // Name of the secure tag, created with TagManager's TagValue API. Name *string `json:"name,omitempty" tf:"name,omitempty"` } type TargetSecureTagsObservation struct { - // Name of the secure tag, created with TagManager's TagValue API. @pattern tagValues/[0-9]+ + // Name of the secure tag, created with TagManager's TagValue API. Name *string `json:"name,omitempty" tf:"name,omitempty"` - // [Output Only] State of the secure tag, either EFFECTIVE or INEFFECTIVE. A secure tag is INEFFECTIVE when it is deleted or its network is deleted. + // (Output) + // State of the secure tag, either EFFECTIVE or INEFFECTIVE. A secure tag is INEFFECTIVE when it is deleted or its network is deleted. State *string `json:"state,omitempty" tf:"state,omitempty"` } type TargetSecureTagsParameters struct { - // Name of the secure tag, created with TagManager's TagValue API. @pattern tagValues/[0-9]+ + // Name of the secure tag, created with TagManager's TagValue API. // +kubebuilder:validation:Optional - Name *string `json:"name" tf:"name,omitempty"` + Name *string `json:"name,omitempty" tf:"name,omitempty"` } // NetworkFirewallPolicyRuleSpec defines the desired state of NetworkFirewallPolicyRule @@ -441,7 +504,7 @@ type NetworkFirewallPolicyRuleStatus struct { // +kubebuilder:subresource:status // +kubebuilder:storageversion -// NetworkFirewallPolicyRule is the Schema for the NetworkFirewallPolicyRules API. 
The Compute NetworkFirewallPolicyRule resource +// NetworkFirewallPolicyRule is the Schema for the NetworkFirewallPolicyRules API. Represents a rule that describes one or more match conditions along with the action to be taken when traffic matches this condition (allow or deny). // +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" // +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" diff --git a/apis/compute/v1beta1/zz_regionnetworkendpoint_types.go b/apis/compute/v1beta1/zz_regionnetworkendpoint_types.go index 540622b7b..1858b75f1 100755 --- a/apis/compute/v1beta1/zz_regionnetworkendpoint_types.go +++ b/apis/compute/v1beta1/zz_regionnetworkendpoint_types.go @@ -59,6 +59,9 @@ type RegionNetworkEndpointObservation struct { // This can only be specified when network_endpoint_type of the NEG is INTERNET_IP_PORT. IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + // The unique identifier number for the resource. This identifier is defined by the server. + NetworkEndpointID *float64 `json:"networkEndpointId,omitempty" tf:"network_endpoint_id,omitempty"` + // Port number of network endpoint. 
Port *float64 `json:"port,omitempty" tf:"port,omitempty"` diff --git a/apis/compute/v1beta1/zz_regionnetworkfirewallpolicyassociation_types.go b/apis/compute/v1beta1/zz_regionnetworkfirewallpolicyassociation_types.go index afe1bcf58..ba2e79f40 100755 --- a/apis/compute/v1beta1/zz_regionnetworkfirewallpolicyassociation_types.go +++ b/apis/compute/v1beta1/zz_regionnetworkfirewallpolicyassociation_types.go @@ -28,7 +28,8 @@ type RegionNetworkFirewallPolicyAssociationInitParameters struct { // +kubebuilder:validation:Optional AttachmentTargetSelector *v1.Selector `json:"attachmentTargetSelector,omitempty" tf:"-"` - // The project for the resource + // The ID of the project in which the resource belongs. + // If it is not provided, the provider project is used. Project *string `json:"project,omitempty" tf:"project,omitempty"` } @@ -37,13 +38,14 @@ type RegionNetworkFirewallPolicyAssociationObservation struct { // The target that the firewall policy is attached to. AttachmentTarget *string `json:"attachmentTarget,omitempty" tf:"attachment_target,omitempty"` - // The firewall policy ID of the association. + // The firewall policy of the resource. FirewallPolicy *string `json:"firewallPolicy,omitempty" tf:"firewall_policy,omitempty"` // an identifier for the resource with format projects/{{project}}/regions/{{region}}/firewallPolicies/{{firewall_policy}}/associations/{{name}} ID *string `json:"id,omitempty" tf:"id,omitempty"` - // The project for the resource + // The ID of the project in which the resource belongs. + // If it is not provided, the provider project is used. Project *string `json:"project,omitempty" tf:"project,omitempty"` // The location of this resource. @@ -69,8 +71,9 @@ type RegionNetworkFirewallPolicyAssociationParameters struct { // +kubebuilder:validation:Optional AttachmentTargetSelector *v1.Selector `json:"attachmentTargetSelector,omitempty" tf:"-"` - // The firewall policy ID of the association. + // The firewall policy of the resource. 
// +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/compute/v1beta1.RegionNetworkFirewallPolicy + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional FirewallPolicy *string `json:"firewallPolicy,omitempty" tf:"firewall_policy,omitempty"` @@ -82,7 +85,8 @@ type RegionNetworkFirewallPolicyAssociationParameters struct { // +kubebuilder:validation:Optional FirewallPolicySelector *v1.Selector `json:"firewallPolicySelector,omitempty" tf:"-"` - // The project for the resource + // The ID of the project in which the resource belongs. + // If it is not provided, the provider project is used. // +kubebuilder:validation:Optional Project *string `json:"project,omitempty" tf:"project,omitempty"` diff --git a/apis/compute/v1beta1/zz_regiontargethttpproxy_types.go b/apis/compute/v1beta1/zz_regiontargethttpproxy_types.go index b740b7dd1..ce72c86dd 100755 --- a/apis/compute/v1beta1/zz_regiontargethttpproxy_types.go +++ b/apis/compute/v1beta1/zz_regiontargethttpproxy_types.go @@ -18,6 +18,13 @@ type RegionTargetHTTPProxyInitParameters struct { // An optional description of this resource. Description *string `json:"description,omitempty" tf:"description,omitempty"` + // Specifies how long to keep a connection open, after completing a response, + // while there is no matching traffic (in seconds). If an HTTP keepalive is + // not specified, a default value (600 seconds) will be used. For Regional + // HTTP(S) load balancer, the minimum allowed value is 5 seconds and the + // maximum allowed value is 600 seconds. + HTTPKeepAliveTimeoutSec *float64 `json:"httpKeepAliveTimeoutSec,omitempty" tf:"http_keep_alive_timeout_sec,omitempty"` + // The ID of the project in which the resource belongs. // If it is not provided, the provider project is used. 
Project *string `json:"project,omitempty" tf:"project,omitempty"` @@ -45,6 +52,13 @@ type RegionTargetHTTPProxyObservation struct { // An optional description of this resource. Description *string `json:"description,omitempty" tf:"description,omitempty"` + // Specifies how long to keep a connection open, after completing a response, + // while there is no matching traffic (in seconds). If an HTTP keepalive is + // not specified, a default value (600 seconds) will be used. For Regional + // HTTP(S) load balancer, the minimum allowed value is 5 seconds and the + // maximum allowed value is 600 seconds. + HTTPKeepAliveTimeoutSec *float64 `json:"httpKeepAliveTimeoutSec,omitempty" tf:"http_keep_alive_timeout_sec,omitempty"` + // an identifier for the resource with format projects/{{project}}/regions/{{region}}/targetHttpProxies/{{name}} ID *string `json:"id,omitempty" tf:"id,omitempty"` @@ -73,6 +87,14 @@ type RegionTargetHTTPProxyParameters struct { // +kubebuilder:validation:Optional Description *string `json:"description,omitempty" tf:"description,omitempty"` + // Specifies how long to keep a connection open, after completing a response, + // while there is no matching traffic (in seconds). If an HTTP keepalive is + // not specified, a default value (600 seconds) will be used. For Regional + // HTTP(S) load balancer, the minimum allowed value is 5 seconds and the + // maximum allowed value is 600 seconds. + // +kubebuilder:validation:Optional + HTTPKeepAliveTimeoutSec *float64 `json:"httpKeepAliveTimeoutSec,omitempty" tf:"http_keep_alive_timeout_sec,omitempty"` + // The ID of the project in which the resource belongs. // If it is not provided, the provider project is used. 
// +kubebuilder:validation:Optional diff --git a/apis/compute/v1beta1/zz_regiontargethttpsproxy_types.go b/apis/compute/v1beta1/zz_regiontargethttpsproxy_types.go index 952d34791..4e6a52dc4 100755 --- a/apis/compute/v1beta1/zz_regiontargethttpsproxy_types.go +++ b/apis/compute/v1beta1/zz_regiontargethttpsproxy_types.go @@ -23,6 +23,13 @@ type RegionTargetHTTPSProxyInitParameters struct { // An optional description of this resource. Description *string `json:"description,omitempty" tf:"description,omitempty"` + // Specifies how long to keep a connection open, after completing a response, + // while there is no matching traffic (in seconds). If an HTTP keepalive is + // not specified, a default value (600 seconds) will be used. For Regional + // HTTP(S) load balancer, the minimum allowed value is 5 seconds and the + // maximum allowed value is 600 seconds. + HTTPKeepAliveTimeoutSec *float64 `json:"httpKeepAliveTimeoutSec,omitempty" tf:"http_keep_alive_timeout_sec,omitempty"` + // The ID of the project in which the resource belongs. // If it is not provided, the provider project is used. Project *string `json:"project,omitempty" tf:"project,omitempty"` @@ -89,6 +96,13 @@ type RegionTargetHTTPSProxyObservation struct { // An optional description of this resource. Description *string `json:"description,omitempty" tf:"description,omitempty"` + // Specifies how long to keep a connection open, after completing a response, + // while there is no matching traffic (in seconds). If an HTTP keepalive is + // not specified, a default value (600 seconds) will be used. For Regional + // HTTP(S) load balancer, the minimum allowed value is 5 seconds and the + // maximum allowed value is 600 seconds.
+ HTTPKeepAliveTimeoutSec *float64 `json:"httpKeepAliveTimeoutSec,omitempty" tf:"http_keep_alive_timeout_sec,omitempty"` + // an identifier for the resource with format projects/{{project}}/regions/{{region}}/targetHttpsProxies/{{name}} ID *string `json:"id,omitempty" tf:"id,omitempty"` @@ -148,6 +162,14 @@ type RegionTargetHTTPSProxyParameters struct { // +kubebuilder:validation:Optional Description *string `json:"description,omitempty" tf:"description,omitempty"` + // Specifies how long to keep a connection open, after completing a response, + // while there is no matching traffic (in seconds). If an HTTP keepalive is + // not specified, a default value (600 seconds) will be used. For Regional + // HTTP(S) load balancer, the minimum allowed value is 5 seconds and the + // maximum allowed value is 600 seconds. + // +kubebuilder:validation:Optional + HTTPKeepAliveTimeoutSec *float64 `json:"httpKeepAliveTimeoutSec,omitempty" tf:"http_keep_alive_timeout_sec,omitempty"` + // The ID of the project in which the resource belongs. // If it is not provided, the provider project is used. // +kubebuilder:validation:Optional diff --git a/apis/compute/v1beta1/zz_serviceattachment_types.go b/apis/compute/v1beta1/zz_serviceattachment_types.go index f6d8207b8..136b199c9 100755 --- a/apis/compute/v1beta1/zz_serviceattachment_types.go +++ b/apis/compute/v1beta1/zz_serviceattachment_types.go @@ -18,10 +18,22 @@ type ConnectedEndpointsInitParameters struct { type ConnectedEndpointsObservation struct { + // (Output) + // The url of the consumer network. + ConsumerNetwork *string `json:"consumerNetwork,omitempty" tf:"consumer_network,omitempty"` + // (Output) // The URL of the consumer forwarding rule. Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"` + // (Output) + // The number of consumer Network Connectivity Center spokes that the connected Private Service Connect endpoint has propagated to.
+ PropagatedConnectionCount *float64 `json:"propagatedConnectionCount,omitempty" tf:"propagated_connection_count,omitempty"` + + // (Output) + // The PSC connection id of the connected endpoint. + PscConnectionID *string `json:"pscConnectionId,omitempty" tf:"psc_connection_id,omitempty"` + // (Output) // The status of the connection from the consumer forwarding rule to // this service attachment. @@ -144,6 +156,13 @@ type ServiceAttachmentInitParameters struct { // If it is not provided, the provider project is used. Project *string `json:"project,omitempty" tf:"project,omitempty"` + // The number of consumer spokes that connected Private Service Connect endpoints can be propagated to through Network Connectivity Center. + // This limit lets the service producer limit how many propagated Private Service Connect connections can be established to this service attachment from a single consumer. + // If the connection preference of the service attachment is ACCEPT_MANUAL, the limit applies to each project or network that is listed in the consumer accept list. + // If the connection preference of the service attachment is ACCEPT_AUTOMATIC, the limit applies to each project that contains a connected endpoint. + // If unspecified, the default propagated connection limit is 250. + PropagatedConnectionLimit *float64 `json:"propagatedConnectionLimit,omitempty" tf:"propagated_connection_limit,omitempty"` + // This flag determines whether a consumer accept/reject list change can reconcile the statuses of existing ACCEPTED or REJECTED PSC endpoints. // If false, connection policy update will only affect existing PENDING PSC endpoints. Existing ACCEPTED/REJECTED endpoints will remain untouched regardless how the connection policy is modified . // If true, update will affect both PENDING and ACCEPTED/REJECTED PSC endpoints. For example, an ACCEPTED PSC endpoint will be moved to REJECTED if its project is added to the reject list. 
@@ -211,6 +230,13 @@ type ServiceAttachmentObservation struct { // If it is not provided, the provider project is used. Project *string `json:"project,omitempty" tf:"project,omitempty"` + // The number of consumer spokes that connected Private Service Connect endpoints can be propagated to through Network Connectivity Center. + // This limit lets the service producer limit how many propagated Private Service Connect connections can be established to this service attachment from a single consumer. + // If the connection preference of the service attachment is ACCEPT_MANUAL, the limit applies to each project or network that is listed in the consumer accept list. + // If the connection preference of the service attachment is ACCEPT_AUTOMATIC, the limit applies to each project that contains a connected endpoint. + // If unspecified, the default propagated connection limit is 250. + PropagatedConnectionLimit *float64 `json:"propagatedConnectionLimit,omitempty" tf:"propagated_connection_limit,omitempty"` + // This flag determines whether a consumer accept/reject list change can reconcile the statuses of existing ACCEPTED or REJECTED PSC endpoints. // If false, connection policy update will only affect existing PENDING PSC endpoints. Existing ACCEPTED/REJECTED endpoints will remain untouched regardless how the connection policy is modified . // If true, update will affect both PENDING and ACCEPTED/REJECTED PSC endpoints. For example, an ACCEPTED PSC endpoint will be moved to REJECTED if its project is added to the reject list. @@ -279,6 +305,14 @@ type ServiceAttachmentParameters struct { // +kubebuilder:validation:Optional Project *string `json:"project,omitempty" tf:"project,omitempty"` + // The number of consumer spokes that connected Private Service Connect endpoints can be propagated to through Network Connectivity Center. 
+ // This limit lets the service producer limit how many propagated Private Service Connect connections can be established to this service attachment from a single consumer. + // If the connection preference of the service attachment is ACCEPT_MANUAL, the limit applies to each project or network that is listed in the consumer accept list. + // If the connection preference of the service attachment is ACCEPT_AUTOMATIC, the limit applies to each project that contains a connected endpoint. + // If unspecified, the default propagated connection limit is 250. + // +kubebuilder:validation:Optional + PropagatedConnectionLimit *float64 `json:"propagatedConnectionLimit,omitempty" tf:"propagated_connection_limit,omitempty"` + // This flag determines whether a consumer accept/reject list change can reconcile the statuses of existing ACCEPTED or REJECTED PSC endpoints. // If false, connection policy update will only affect existing PENDING PSC endpoints. Existing ACCEPTED/REJECTED endpoints will remain untouched regardless how the connection policy is modified . // If true, update will affect both PENDING and ACCEPTED/REJECTED PSC endpoints. For example, an ACCEPTED PSC endpoint will be moved to REJECTED if its project is added to the reject list. diff --git a/apis/compute/v1beta1/zz_targethttpproxy_types.go b/apis/compute/v1beta1/zz_targethttpproxy_types.go index 39dfe9689..fe8f3905f 100755 --- a/apis/compute/v1beta1/zz_targethttpproxy_types.go +++ b/apis/compute/v1beta1/zz_targethttpproxy_types.go @@ -20,10 +20,13 @@ type TargetHTTPProxyInitParameters struct { // Specifies how long to keep a connection open, after completing a response, // while there is no matching traffic (in seconds). If an HTTP keepalive is - // not specified, a default value (610 seconds) will be used. For Global - // external HTTP(S) load balancer, the minimum allowed value is 5 seconds and - // the maximum allowed value is 1200 seconds. 
For Global external HTTP(S) - // load balancer (classic), this option is not available publicly. + // not specified, a default value will be used. For Global + // external HTTP(S) load balancer, the default value is 610 seconds, the + // minimum allowed value is 5 seconds and the maximum allowed value is 1200 + // seconds. For cross-region internal HTTP(S) load balancer, the default + // value is 600 seconds, the minimum allowed value is 5 seconds, and the + // maximum allowed value is 600 seconds. For Global external HTTP(S) load + // balancer (classic), this option is not available publicly. HTTPKeepAliveTimeoutSec *float64 `json:"httpKeepAliveTimeoutSec,omitempty" tf:"http_keep_alive_timeout_sec,omitempty"` // The ID of the project in which the resource belongs. @@ -59,10 +62,13 @@ type TargetHTTPProxyObservation struct { // Specifies how long to keep a connection open, after completing a response, // while there is no matching traffic (in seconds). If an HTTP keepalive is - // not specified, a default value (610 seconds) will be used. For Global - // external HTTP(S) load balancer, the minimum allowed value is 5 seconds and - // the maximum allowed value is 1200 seconds. For Global external HTTP(S) - // load balancer (classic), this option is not available publicly. + // not specified, a default value will be used. For Global + // external HTTP(S) load balancer, the default value is 610 seconds, the + // minimum allowed value is 5 seconds and the maximum allowed value is 1200 + // seconds. For cross-region internal HTTP(S) load balancer, the default + // value is 600 seconds, the minimum allowed value is 5 seconds, and the + // maximum allowed value is 600 seconds. For Global external HTTP(S) load + // balancer (classic), this option is not available publicly. 
HTTPKeepAliveTimeoutSec *float64 `json:"httpKeepAliveTimeoutSec,omitempty" tf:"http_keep_alive_timeout_sec,omitempty"` // an identifier for the resource with format projects/{{project}}/global/targetHttpProxies/{{name}} @@ -95,10 +101,13 @@ type TargetHTTPProxyParameters struct { // Specifies how long to keep a connection open, after completing a response, // while there is no matching traffic (in seconds). If an HTTP keepalive is - // not specified, a default value (610 seconds) will be used. For Global - // external HTTP(S) load balancer, the minimum allowed value is 5 seconds and - // the maximum allowed value is 1200 seconds. For Global external HTTP(S) - // load balancer (classic), this option is not available publicly. + // not specified, a default value will be used. For Global + // external HTTP(S) load balancer, the default value is 610 seconds, the + // minimum allowed value is 5 seconds and the maximum allowed value is 1200 + // seconds. For cross-region internal HTTP(S) load balancer, the default + // value is 600 seconds, the minimum allowed value is 5 seconds, and the + // maximum allowed value is 600 seconds. For Global external HTTP(S) load + // balancer (classic), this option is not available publicly. // +kubebuilder:validation:Optional HTTPKeepAliveTimeoutSec *float64 `json:"httpKeepAliveTimeoutSec,omitempty" tf:"http_keep_alive_timeout_sec,omitempty"` diff --git a/apis/compute/v1beta1/zz_targethttpsproxy_types.go b/apis/compute/v1beta1/zz_targethttpsproxy_types.go index 713d3ad9f..7097d3e28 100755 --- a/apis/compute/v1beta1/zz_targethttpsproxy_types.go +++ b/apis/compute/v1beta1/zz_targethttpsproxy_types.go @@ -33,10 +33,13 @@ type TargetHTTPSProxyInitParameters struct { // Specifies how long to keep a connection open, after completing a response, // while there is no matching traffic (in seconds). If an HTTP keepalive is - // not specified, a default value (610 seconds) will be used. 
For Global - // external HTTP(S) load balancer, the minimum allowed value is 5 seconds and - // the maximum allowed value is 1200 seconds. For Global external HTTP(S) - // load balancer (classic), this option is not available publicly. + // not specified, a default value will be used. For Global + // external HTTP(S) load balancer, the default value is 610 seconds, the + // minimum allowed value is 5 seconds and the maximum allowed value is 1200 + // seconds. For cross-region internal HTTP(S) load balancer, the default + // value is 600 seconds, the minimum allowed value is 5 seconds, and the + // maximum allowed value is 600 seconds. For Global external HTTP(S) load + // balancer (classic), this option is not available publicly. HTTPKeepAliveTimeoutSec *float64 `json:"httpKeepAliveTimeoutSec,omitempty" tf:"http_keep_alive_timeout_sec,omitempty"` // The ID of the project in which the resource belongs. @@ -83,6 +86,10 @@ type TargetHTTPSProxyInitParameters struct { // INTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED // loadBalancingScheme consult ServerTlsPolicy documentation. // If left blank, communications are not encrypted. + // If you remove this field from your configuration at the same time as + // deleting or recreating a referenced ServerTlsPolicy resource, you will + // receive a resourceInUseByAnotherResource error. Use lifecycle.create_before_destroy + // within the ServerTlsPolicy resource to avoid this. ServerTLSPolicy *string `json:"serverTlsPolicy,omitempty" tf:"server_tls_policy,omitempty"` // Specifies whether TLS 1.3 0-RTT Data (“Early Data”) should be accepted for this service. @@ -130,10 +137,13 @@ type TargetHTTPSProxyObservation struct { // Specifies how long to keep a connection open, after completing a response, // while there is no matching traffic (in seconds). If an HTTP keepalive is - // not specified, a default value (610 seconds) will be used. 
For Global - // external HTTP(S) load balancer, the minimum allowed value is 5 seconds and - // the maximum allowed value is 1200 seconds. For Global external HTTP(S) - // load balancer (classic), this option is not available publicly. + // not specified, a default value will be used. For Global + // external HTTP(S) load balancer, the default value is 610 seconds, the + // minimum allowed value is 5 seconds and the maximum allowed value is 1200 + // seconds. For cross-region internal HTTP(S) load balancer, the default + // value is 600 seconds, the minimum allowed value is 5 seconds, and the + // maximum allowed value is 600 seconds. For Global external HTTP(S) load + // balancer (classic), this option is not available publicly. HTTPKeepAliveTimeoutSec *float64 `json:"httpKeepAliveTimeoutSec,omitempty" tf:"http_keep_alive_timeout_sec,omitempty"` // an identifier for the resource with format projects/{{project}}/global/targetHttpsProxies/{{name}} @@ -180,6 +190,10 @@ type TargetHTTPSProxyObservation struct { // INTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED // loadBalancingScheme consult ServerTlsPolicy documentation. // If left blank, communications are not encrypted. + // If you remove this field from your configuration at the same time as + // deleting or recreating a referenced ServerTlsPolicy resource, you will + // receive a resourceInUseByAnotherResource error. Use lifecycle.create_before_destroy + // within the ServerTlsPolicy resource to avoid this. ServerTLSPolicy *string `json:"serverTlsPolicy,omitempty" tf:"server_tls_policy,omitempty"` // Specifies whether TLS 1.3 0-RTT Data (“Early Data”) should be accepted for this service. @@ -217,10 +231,13 @@ type TargetHTTPSProxyParameters struct { // Specifies how long to keep a connection open, after completing a response, // while there is no matching traffic (in seconds). If an HTTP keepalive is - // not specified, a default value (610 seconds) will be used. 
For Global - // external HTTP(S) load balancer, the minimum allowed value is 5 seconds and - // the maximum allowed value is 1200 seconds. For Global external HTTP(S) - // load balancer (classic), this option is not available publicly. + // not specified, a default value will be used. For Global + // external HTTP(S) load balancer, the default value is 610 seconds, the + // minimum allowed value is 5 seconds and the maximum allowed value is 1200 + // seconds. For cross-region internal HTTP(S) load balancer, the default + // value is 600 seconds, the minimum allowed value is 5 seconds, and the + // maximum allowed value is 600 seconds. For Global external HTTP(S) load + // balancer (classic), this option is not available publicly. // +kubebuilder:validation:Optional HTTPKeepAliveTimeoutSec *float64 `json:"httpKeepAliveTimeoutSec,omitempty" tf:"http_keep_alive_timeout_sec,omitempty"` @@ -273,6 +290,10 @@ type TargetHTTPSProxyParameters struct { // INTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED // loadBalancingScheme consult ServerTlsPolicy documentation. // If left blank, communications are not encrypted. + // If you remove this field from your configuration at the same time as + // deleting or recreating a referenced ServerTlsPolicy resource, you will + // receive a resourceInUseByAnotherResource error. Use lifecycle.create_before_destroy + // within the ServerTlsPolicy resource to avoid this. // +kubebuilder:validation:Optional ServerTLSPolicy *string `json:"serverTlsPolicy,omitempty" tf:"server_tls_policy,omitempty"` diff --git a/apis/compute/v1beta2/zz_autoscaler_types.go b/apis/compute/v1beta2/zz_autoscaler_types.go index 1cecff90a..1c6c58fc4 100755 --- a/apis/compute/v1beta2/zz_autoscaler_types.go +++ b/apis/compute/v1beta2/zz_autoscaler_types.go @@ -411,7 +411,7 @@ type MetricParameters struct { type ScaleInControlInitParameters struct { - // A nested object resource + // A nested object resource. // Structure is documented below. 
MaxScaledInReplicas *MaxScaledInReplicasInitParameters `json:"maxScaledInReplicas,omitempty" tf:"max_scaled_in_replicas,omitempty"` @@ -422,7 +422,7 @@ type ScaleInControlInitParameters struct { type ScaleInControlObservation struct { - // A nested object resource + // A nested object resource. // Structure is documented below. MaxScaledInReplicas *MaxScaledInReplicasObservation `json:"maxScaledInReplicas,omitempty" tf:"max_scaled_in_replicas,omitempty"` @@ -433,7 +433,7 @@ type ScaleInControlObservation struct { type ScaleInControlParameters struct { - // A nested object resource + // A nested object resource. // Structure is documented below. // +kubebuilder:validation:Optional MaxScaledInReplicas *MaxScaledInReplicasParameters `json:"maxScaledInReplicas,omitempty" tf:"max_scaled_in_replicas,omitempty"` diff --git a/apis/compute/v1beta2/zz_backendservice_types.go b/apis/compute/v1beta2/zz_backendservice_types.go index a6381443c..46fc67545 100755 --- a/apis/compute/v1beta2/zz_backendservice_types.go +++ b/apis/compute/v1beta2/zz_backendservice_types.go @@ -74,7 +74,6 @@ type BackendInitParameters struct { // and CONNECTION (for TCP/SSL). // See the Backend Services Overview // for an explanation of load balancing modes. - // From version 6.0.0 default value will be UTILIZATION to match default GCP value. // Default value is UTILIZATION. // Possible values are: UTILIZATION, RATE, CONNECTION. BalancingMode *string `json:"balancingMode,omitempty" tf:"balancing_mode,omitempty"` @@ -172,7 +171,6 @@ type BackendObservation struct { // and CONNECTION (for TCP/SSL). // See the Backend Services Overview // for an explanation of load balancing modes. - // From version 6.0.0 default value will be UTILIZATION to match default GCP value. // Default value is UTILIZATION. // Possible values are: UTILIZATION, RATE, CONNECTION. 
BalancingMode *string `json:"balancingMode,omitempty" tf:"balancing_mode,omitempty"` @@ -260,7 +258,6 @@ type BackendParameters struct { // and CONNECTION (for TCP/SSL). // See the Backend Services Overview // for an explanation of load balancing modes. - // From version 6.0.0 default value will be UTILIZATION to match default GCP value. // Default value is UTILIZATION. // Possible values are: UTILIZATION, RATE, CONNECTION. // +kubebuilder:validation:Optional @@ -597,6 +594,10 @@ type BackendServiceInitParameters struct { // +kubebuilder:validation:Optional HealthChecksSelector *v1.Selector `json:"healthChecksSelector,omitempty" tf:"-"` + // Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). + // Possible values are: IPV4_ONLY, PREFER_IPV6, IPV6_ONLY. + IPAddressSelectionPolicy *string `json:"ipAddressSelectionPolicy,omitempty" tf:"ip_address_selection_policy,omitempty"` + // Settings for enabling Cloud Identity Aware Proxy // Structure is documented below. Iap *IapInitParameters `json:"iap,omitempty" tf:"iap,omitempty"` @@ -630,8 +631,6 @@ type BackendServiceInitParameters struct { // Settings controlling eviction of unhealthy hosts from the load balancing pool. // Applicable backend service types can be a global backend service with the // loadBalancingScheme set to INTERNAL_SELF_MANAGED or EXTERNAL_MANAGED. - // From version 6.0. - // Default values are enforce by GCP without providing them. // Structure is documented below. OutlierDetection *OutlierDetectionInitParameters `json:"outlierDetection,omitempty" tf:"outlier_detection,omitempty"` @@ -668,9 +667,13 @@ type BackendServiceInitParameters struct { // Type of session affinity to use. The default is NONE. Session affinity is // not applicable if the protocol is UDP. - // Possible values are: NONE, CLIENT_IP, CLIENT_IP_PORT_PROTO, CLIENT_IP_PROTO, GENERATED_COOKIE, HEADER_FIELD, HTTP_COOKIE. 
+ // Possible values are: NONE, CLIENT_IP, CLIENT_IP_PORT_PROTO, CLIENT_IP_PROTO, GENERATED_COOKIE, HEADER_FIELD, HTTP_COOKIE, STRONG_COOKIE_AFFINITY. SessionAffinity *string `json:"sessionAffinity,omitempty" tf:"session_affinity,omitempty"` + // Describes the HTTP cookie used for stateful session affinity. This field is applicable and required if the sessionAffinity is set to STRONG_COOKIE_AFFINITY. + // Structure is documented below. + StrongSessionAffinityCookie *StrongSessionAffinityCookieInitParameters `json:"strongSessionAffinityCookie,omitempty" tf:"strong_session_affinity_cookie,omitempty"` + // The backend service timeout has a different meaning depending on the type of load balancer. // For more information see, Backend service settings. // The default is 30 seconds. @@ -760,6 +763,10 @@ type BackendServiceObservation struct { // an identifier for the resource with format projects/{{project}}/global/backendServices/{{name}} ID *string `json:"id,omitempty" tf:"id,omitempty"` + // Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). + // Possible values are: IPV4_ONLY, PREFER_IPV6, IPV6_ONLY. + IPAddressSelectionPolicy *string `json:"ipAddressSelectionPolicy,omitempty" tf:"ip_address_selection_policy,omitempty"` + // Settings for enabling Cloud Identity Aware Proxy // Structure is documented below. Iap *IapObservation `json:"iap,omitempty" tf:"iap,omitempty"` @@ -793,8 +800,6 @@ type BackendServiceObservation struct { // Settings controlling eviction of unhealthy hosts from the load balancing pool. // Applicable backend service types can be a global backend service with the // loadBalancingScheme set to INTERNAL_SELF_MANAGED or EXTERNAL_MANAGED. - // From version 6.0. - // Default values are enforce by GCP without providing them. // Structure is documented below. 
OutlierDetection *OutlierDetectionObservation `json:"outlierDetection,omitempty" tf:"outlier_detection,omitempty"` @@ -834,9 +839,13 @@ type BackendServiceObservation struct { // Type of session affinity to use. The default is NONE. Session affinity is // not applicable if the protocol is UDP. - // Possible values are: NONE, CLIENT_IP, CLIENT_IP_PORT_PROTO, CLIENT_IP_PROTO, GENERATED_COOKIE, HEADER_FIELD, HTTP_COOKIE. + // Possible values are: NONE, CLIENT_IP, CLIENT_IP_PORT_PROTO, CLIENT_IP_PROTO, GENERATED_COOKIE, HEADER_FIELD, HTTP_COOKIE, STRONG_COOKIE_AFFINITY. SessionAffinity *string `json:"sessionAffinity,omitempty" tf:"session_affinity,omitempty"` + // Describes the HTTP cookie used for stateful session affinity. This field is applicable and required if the sessionAffinity is set to STRONG_COOKIE_AFFINITY. + // Structure is documented below. + StrongSessionAffinityCookie *StrongSessionAffinityCookieObservation `json:"strongSessionAffinityCookie,omitempty" tf:"strong_session_affinity_cookie,omitempty"` + // The backend service timeout has a different meaning depending on the type of load balancer. // For more information see, Backend service settings. // The default is 30 seconds. @@ -936,6 +945,11 @@ type BackendServiceParameters struct { // +kubebuilder:validation:Optional HealthChecksSelector *v1.Selector `json:"healthChecksSelector,omitempty" tf:"-"` + // Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). + // Possible values are: IPV4_ONLY, PREFER_IPV6, IPV6_ONLY. + // +kubebuilder:validation:Optional + IPAddressSelectionPolicy *string `json:"ipAddressSelectionPolicy,omitempty" tf:"ip_address_selection_policy,omitempty"` + // Settings for enabling Cloud Identity Aware Proxy // Structure is documented below. // +kubebuilder:validation:Optional @@ -974,8 +988,6 @@ type BackendServiceParameters struct { // Settings controlling eviction of unhealthy hosts from the load balancing pool. 
// Applicable backend service types can be a global backend service with the // loadBalancingScheme set to INTERNAL_SELF_MANAGED or EXTERNAL_MANAGED. - // From version 6.0. - // Default values are enforce by GCP without providing them. // Structure is documented below. // +kubebuilder:validation:Optional OutlierDetection *OutlierDetectionParameters `json:"outlierDetection,omitempty" tf:"outlier_detection,omitempty"` @@ -1019,10 +1031,15 @@ type BackendServiceParameters struct { // Type of session affinity to use. The default is NONE. Session affinity is // not applicable if the protocol is UDP. - // Possible values are: NONE, CLIENT_IP, CLIENT_IP_PORT_PROTO, CLIENT_IP_PROTO, GENERATED_COOKIE, HEADER_FIELD, HTTP_COOKIE. + // Possible values are: NONE, CLIENT_IP, CLIENT_IP_PORT_PROTO, CLIENT_IP_PROTO, GENERATED_COOKIE, HEADER_FIELD, HTTP_COOKIE, STRONG_COOKIE_AFFINITY. // +kubebuilder:validation:Optional SessionAffinity *string `json:"sessionAffinity,omitempty" tf:"session_affinity,omitempty"` + // Describes the HTTP cookie used for stateful session affinity. This field is applicable and required if the sessionAffinity is set to STRONG_COOKIE_AFFINITY. + // Structure is documented below. + // +kubebuilder:validation:Optional + StrongSessionAffinityCookie *StrongSessionAffinityCookieParameters `json:"strongSessionAffinityCookie,omitempty" tf:"strong_session_affinity_cookie,omitempty"` + // The backend service timeout has a different meaning depending on the type of load balancer. // For more information see, Backend service settings. // The default is 30 seconds. @@ -1224,8 +1241,8 @@ type CdnPolicyNegativeCachingPolicyInitParameters struct { // can be specified as values, and you cannot specify a status code more than once. Code *float64 `json:"code,omitempty" tf:"code,omitempty"` - // The TTL (in seconds) for which to cache responses with the corresponding status code. 
The maximum allowed value is 1800s - // (30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. + // Lifetime of the cookie. + // Structure is documented below. TTL *float64 `json:"ttl,omitempty" tf:"ttl,omitempty"` } @@ -1235,8 +1252,8 @@ type CdnPolicyNegativeCachingPolicyObservation struct { // can be specified as values, and you cannot specify a status code more than once. Code *float64 `json:"code,omitempty" tf:"code,omitempty"` - // The TTL (in seconds) for which to cache responses with the corresponding status code. The maximum allowed value is 1800s - // (30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. + // Lifetime of the cookie. + // Structure is documented below. TTL *float64 `json:"ttl,omitempty" tf:"ttl,omitempty"` } @@ -1247,8 +1264,8 @@ type CdnPolicyNegativeCachingPolicyParameters struct { // +kubebuilder:validation:Optional Code *float64 `json:"code,omitempty" tf:"code,omitempty"` - // The TTL (in seconds) for which to cache responses with the corresponding status code. The maximum allowed value is 1800s - // (30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. + // Lifetime of the cookie. + // Structure is documented below. // +kubebuilder:validation:Optional TTL *float64 `json:"ttl,omitempty" tf:"ttl,omitempty"` } @@ -1408,14 +1425,7 @@ type CustomPolicyInitParameters struct { // by a locally installed custom policy implementation. Data *string `json:"data,omitempty" tf:"data,omitempty"` - // The name of a locality load balancer policy to be used. The value - // should be one of the predefined ones as supported by localityLbPolicy, - // although at the moment only ROUND_ROBIN is supported. - // This field should only be populated when the customPolicy field is not - // used. 
- // Note that specifying the same policy more than once for a backend is - // not a valid configuration and will be rejected. - // The possible values are: + // Name of the cookie. Name *string `json:"name,omitempty" tf:"name,omitempty"` } @@ -1425,14 +1435,7 @@ type CustomPolicyObservation struct { // by a locally installed custom policy implementation. Data *string `json:"data,omitempty" tf:"data,omitempty"` - // The name of a locality load balancer policy to be used. The value - // should be one of the predefined ones as supported by localityLbPolicy, - // although at the moment only ROUND_ROBIN is supported. - // This field should only be populated when the customPolicy field is not - // used. - // Note that specifying the same policy more than once for a backend is - // not a valid configuration and will be rejected. - // The possible values are: + // Name of the cookie. Name *string `json:"name,omitempty" tf:"name,omitempty"` } @@ -1443,68 +1446,40 @@ type CustomPolicyParameters struct { // +kubebuilder:validation:Optional Data *string `json:"data,omitempty" tf:"data,omitempty"` - // The name of a locality load balancer policy to be used. The value - // should be one of the predefined ones as supported by localityLbPolicy, - // although at the moment only ROUND_ROBIN is supported. - // This field should only be populated when the customPolicy field is not - // used. - // Note that specifying the same policy more than once for a backend is - // not a valid configuration and will be rejected. - // The possible values are: + // Name of the cookie. // +kubebuilder:validation:Optional Name *string `json:"name" tf:"name,omitempty"` } type HTTPCookieInitParameters struct { - // The name of a locality load balancer policy to be used. The value - // should be one of the predefined ones as supported by localityLbPolicy, - // although at the moment only ROUND_ROBIN is supported. - // This field should only be populated when the customPolicy field is not - // used. 
- // Note that specifying the same policy more than once for a backend is - // not a valid configuration and will be rejected. - // The possible values are: + // Name of the cookie. Name *string `json:"name,omitempty" tf:"name,omitempty"` // Path to set for the cookie. Path *string `json:"path,omitempty" tf:"path,omitempty"` - // The TTL (in seconds) for which to cache responses with the corresponding status code. The maximum allowed value is 1800s - // (30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. + // Lifetime of the cookie. + // Structure is documented below. TTL *TTLInitParameters `json:"ttl,omitempty" tf:"ttl,omitempty"` } type HTTPCookieObservation struct { - // The name of a locality load balancer policy to be used. The value - // should be one of the predefined ones as supported by localityLbPolicy, - // although at the moment only ROUND_ROBIN is supported. - // This field should only be populated when the customPolicy field is not - // used. - // Note that specifying the same policy more than once for a backend is - // not a valid configuration and will be rejected. - // The possible values are: + // Name of the cookie. Name *string `json:"name,omitempty" tf:"name,omitempty"` // Path to set for the cookie. Path *string `json:"path,omitempty" tf:"path,omitempty"` - // The TTL (in seconds) for which to cache responses with the corresponding status code. The maximum allowed value is 1800s - // (30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. + // Lifetime of the cookie. + // Structure is documented below. TTL *TTLObservation `json:"ttl,omitempty" tf:"ttl,omitempty"` } type HTTPCookieParameters struct { - // The name of a locality load balancer policy to be used. The value - // should be one of the predefined ones as supported by localityLbPolicy, - // although at the moment only ROUND_ROBIN is supported. 
- // This field should only be populated when the customPolicy field is not - // used. - // Note that specifying the same policy more than once for a backend is - // not a valid configuration and will be rejected. - // The possible values are: + // Name of the cookie. // +kubebuilder:validation:Optional Name *string `json:"name,omitempty" tf:"name,omitempty"` @@ -1512,38 +1487,48 @@ type HTTPCookieParameters struct { // +kubebuilder:validation:Optional Path *string `json:"path,omitempty" tf:"path,omitempty"` - // The TTL (in seconds) for which to cache responses with the corresponding status code. The maximum allowed value is 1800s - // (30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. + // Lifetime of the cookie. + // Structure is documented below. // +kubebuilder:validation:Optional TTL *TTLParameters `json:"ttl,omitempty" tf:"ttl,omitempty"` } type IapInitParameters struct { + // Whether the serving infrastructure will authenticate and authorize all incoming requests. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + // OAuth2 Client ID for IAP Oauth2ClientID *string `json:"oauth2ClientId,omitempty" tf:"oauth2_client_id,omitempty"` // OAuth2 Client Secret for IAP // Note: This property is sensitive and will not be displayed in the plan. - Oauth2ClientSecretSecretRef v1.SecretKeySelector `json:"oauth2ClientSecretSecretRef" tf:"-"` + Oauth2ClientSecretSecretRef *v1.SecretKeySelector `json:"oauth2ClientSecretSecretRef,omitempty" tf:"-"` } type IapObservation struct { + // Whether the serving infrastructure will authenticate and authorize all incoming requests. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + // OAuth2 Client ID for IAP Oauth2ClientID *string `json:"oauth2ClientId,omitempty" tf:"oauth2_client_id,omitempty"` } type IapParameters struct { + // Whether the serving infrastructure will authenticate and authorize all incoming requests. 
+ // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` + // OAuth2 Client ID for IAP // +kubebuilder:validation:Optional - Oauth2ClientID *string `json:"oauth2ClientId" tf:"oauth2_client_id,omitempty"` + Oauth2ClientID *string `json:"oauth2ClientId,omitempty" tf:"oauth2_client_id,omitempty"` // OAuth2 Client Secret for IAP // Note: This property is sensitive and will not be displayed in the plan. // +kubebuilder:validation:Optional - Oauth2ClientSecretSecretRef v1.SecretKeySelector `json:"oauth2ClientSecretSecretRef" tf:"-"` + Oauth2ClientSecretSecretRef *v1.SecretKeySelector `json:"oauth2ClientSecretSecretRef,omitempty" tf:"-"` } type IntervalInitParameters struct { @@ -1868,40 +1853,19 @@ type OutlierDetectionParameters struct { type PolicyInitParameters struct { - // The name of a locality load balancer policy to be used. The value - // should be one of the predefined ones as supported by localityLbPolicy, - // although at the moment only ROUND_ROBIN is supported. - // This field should only be populated when the customPolicy field is not - // used. - // Note that specifying the same policy more than once for a backend is - // not a valid configuration and will be rejected. - // The possible values are: + // Name of the cookie. Name *string `json:"name,omitempty" tf:"name,omitempty"` } type PolicyObservation struct { - // The name of a locality load balancer policy to be used. The value - // should be one of the predefined ones as supported by localityLbPolicy, - // although at the moment only ROUND_ROBIN is supported. - // This field should only be populated when the customPolicy field is not - // used. - // Note that specifying the same policy more than once for a backend is - // not a valid configuration and will be rejected. - // The possible values are: + // Name of the cookie. 
Name *string `json:"name,omitempty" tf:"name,omitempty"` } type PolicyParameters struct { - // The name of a locality load balancer policy to be used. The value - // should be one of the predefined ones as supported by localityLbPolicy, - // although at the moment only ROUND_ROBIN is supported. - // This field should only be populated when the customPolicy field is not - // used. - // Note that specifying the same policy more than once for a backend is - // not a valid configuration and will be rejected. - // The possible values are: + // Name of the cookie. // +kubebuilder:validation:Optional Name *string `json:"name" tf:"name,omitempty"` } @@ -1963,6 +1927,89 @@ type SecuritySettingsParameters struct { SubjectAltNames []*string `json:"subjectAltNames,omitempty" tf:"subject_alt_names,omitempty"` } +type StrongSessionAffinityCookieInitParameters struct { + + // Name of the cookie. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Path to set for the cookie. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // Lifetime of the cookie. + // Structure is documented below. + TTL *StrongSessionAffinityCookieTTLInitParameters `json:"ttl,omitempty" tf:"ttl,omitempty"` +} + +type StrongSessionAffinityCookieObservation struct { + + // Name of the cookie. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Path to set for the cookie. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // Lifetime of the cookie. + // Structure is documented below. + TTL *StrongSessionAffinityCookieTTLObservation `json:"ttl,omitempty" tf:"ttl,omitempty"` +} + +type StrongSessionAffinityCookieParameters struct { + + // Name of the cookie. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Path to set for the cookie. + // +kubebuilder:validation:Optional + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // Lifetime of the cookie. + // Structure is documented below. 
+ // +kubebuilder:validation:Optional + TTL *StrongSessionAffinityCookieTTLParameters `json:"ttl,omitempty" tf:"ttl,omitempty"` +} + +type StrongSessionAffinityCookieTTLInitParameters struct { + + // Span of time that's a fraction of a second at nanosecond + // resolution. Durations less than one second are represented + // with a 0 seconds field and a positive nanos field. Must + // be from 0 to 999,999,999 inclusive. + Nanos *float64 `json:"nanos,omitempty" tf:"nanos,omitempty"` + + // Span of time at a resolution of a second. + // Must be from 0 to 315,576,000,000 inclusive. + Seconds *float64 `json:"seconds,omitempty" tf:"seconds,omitempty"` +} + +type StrongSessionAffinityCookieTTLObservation struct { + + // Span of time that's a fraction of a second at nanosecond + // resolution. Durations less than one second are represented + // with a 0 seconds field and a positive nanos field. Must + // be from 0 to 999,999,999 inclusive. + Nanos *float64 `json:"nanos,omitempty" tf:"nanos,omitempty"` + + // Span of time at a resolution of a second. + // Must be from 0 to 315,576,000,000 inclusive. + Seconds *float64 `json:"seconds,omitempty" tf:"seconds,omitempty"` +} + +type StrongSessionAffinityCookieTTLParameters struct { + + // Span of time that's a fraction of a second at nanosecond + // resolution. Durations less than one second are represented + // with a 0 seconds field and a positive nanos field. Must + // be from 0 to 999,999,999 inclusive. + // +kubebuilder:validation:Optional + Nanos *float64 `json:"nanos,omitempty" tf:"nanos,omitempty"` + + // Span of time at a resolution of a second. + // Must be from 0 to 315,576,000,000 inclusive. 
+ // +kubebuilder:validation:Optional + Seconds *float64 `json:"seconds" tf:"seconds,omitempty"` +} + type TTLInitParameters struct { // Span of time that's a fraction of a second at nanosecond diff --git a/apis/compute/v1beta2/zz_disk_types.go b/apis/compute/v1beta2/zz_disk_types.go index 83ae9d482..17a70634e 100755 --- a/apis/compute/v1beta2/zz_disk_types.go +++ b/apis/compute/v1beta2/zz_disk_types.go @@ -131,7 +131,7 @@ type DiskInitParameters struct { // For example: AccessMode *string `json:"accessMode,omitempty" tf:"access_mode,omitempty"` - // A nested object resource + // A nested object resource. // Structure is documented below. AsyncPrimaryDisk *AsyncPrimaryDiskInitParameters `json:"asyncPrimaryDisk,omitempty" tf:"async_primary_disk,omitempty"` @@ -231,7 +231,7 @@ type DiskInitParameters struct { // Structure is documented below. SourceSnapshotEncryptionKey *SourceSnapshotEncryptionKeyInitParameters `json:"sourceSnapshotEncryptionKey,omitempty" tf:"source_snapshot_encryption_key,omitempty"` - // The URL of the storage pool in which the new disk is created. + // The URL or the name of the storage pool in which the new disk is created. // For example: StoragePool *string `json:"storagePool,omitempty" tf:"storage_pool,omitempty"` @@ -246,7 +246,7 @@ type DiskObservation struct { // For example: AccessMode *string `json:"accessMode,omitempty" tf:"access_mode,omitempty"` - // A nested object resource + // A nested object resource. // Structure is documented below. AsyncPrimaryDisk *AsyncPrimaryDiskObservation `json:"asyncPrimaryDisk,omitempty" tf:"async_primary_disk,omitempty"` @@ -392,7 +392,7 @@ type DiskObservation struct { // used. SourceSnapshotID *string `json:"sourceSnapshotId,omitempty" tf:"source_snapshot_id,omitempty"` - // The URL of the storage pool in which the new disk is created. + // The URL or the name of the storage pool in which the new disk is created. 
// For example: StoragePool *string `json:"storagePool,omitempty" tf:"storage_pool,omitempty"` @@ -420,7 +420,7 @@ type DiskParameters struct { // +kubebuilder:validation:Optional AccessMode *string `json:"accessMode,omitempty" tf:"access_mode,omitempty"` - // A nested object resource + // A nested object resource. // Structure is documented below. // +kubebuilder:validation:Optional AsyncPrimaryDisk *AsyncPrimaryDiskParameters `json:"asyncPrimaryDisk,omitempty" tf:"async_primary_disk,omitempty"` @@ -537,7 +537,7 @@ type DiskParameters struct { // +kubebuilder:validation:Optional SourceSnapshotEncryptionKey *SourceSnapshotEncryptionKeyParameters `json:"sourceSnapshotEncryptionKey,omitempty" tf:"source_snapshot_encryption_key,omitempty"` - // The URL of the storage pool in which the new disk is created. + // The URL or the name of the storage pool in which the new disk is created. // For example: // +kubebuilder:validation:Optional StoragePool *string `json:"storagePool,omitempty" tf:"storage_pool,omitempty"` diff --git a/apis/compute/v1beta2/zz_firewall_types.go b/apis/compute/v1beta2/zz_firewall_types.go index 0cfed1878..2411d6584 100755 --- a/apis/compute/v1beta2/zz_firewall_types.go +++ b/apis/compute/v1beta2/zz_firewall_types.go @@ -19,7 +19,7 @@ type AllowInitParameters struct { // is only applicable for UDP or TCP protocol. Each entry must be // either an integer or a range. If not specified, this rule // applies to connections through any port. - // Example inputs include: ["22"], ["80","443"], and + // Example inputs include: [22], [80, 443], and // ["12345-12349"]. Ports []*string `json:"ports,omitempty" tf:"ports,omitempty"` @@ -36,7 +36,7 @@ type AllowObservation struct { // is only applicable for UDP or TCP protocol. Each entry must be // either an integer or a range. If not specified, this rule // applies to connections through any port. 
- // Example inputs include: ["22"], ["80","443"], and + // Example inputs include: [22], [80, 443], and // ["12345-12349"]. Ports []*string `json:"ports,omitempty" tf:"ports,omitempty"` @@ -53,7 +53,7 @@ type AllowParameters struct { // is only applicable for UDP or TCP protocol. Each entry must be // either an integer or a range. If not specified, this rule // applies to connections through any port. - // Example inputs include: ["22"], ["80","443"], and + // Example inputs include: [22], [80, 443], and // ["12345-12349"]. // +kubebuilder:validation:Optional Ports []*string `json:"ports,omitempty" tf:"ports,omitempty"` @@ -72,7 +72,7 @@ type DenyInitParameters struct { // is only applicable for UDP or TCP protocol. Each entry must be // either an integer or a range. If not specified, this rule // applies to connections through any port. - // Example inputs include: ["22"], ["80","443"], and + // Example inputs include: [22], [80, 443], and // ["12345-12349"]. Ports []*string `json:"ports,omitempty" tf:"ports,omitempty"` @@ -89,7 +89,7 @@ type DenyObservation struct { // is only applicable for UDP or TCP protocol. Each entry must be // either an integer or a range. If not specified, this rule // applies to connections through any port. - // Example inputs include: ["22"], ["80","443"], and + // Example inputs include: [22], [80, 443], and // ["12345-12349"]. Ports []*string `json:"ports,omitempty" tf:"ports,omitempty"` @@ -106,7 +106,7 @@ type DenyParameters struct { // is only applicable for UDP or TCP protocol. Each entry must be // either an integer or a range. If not specified, this rule // applies to connections through any port. - // Example inputs include: ["22"], ["80","443"], and + // Example inputs include: [22], [80, 443], and // ["12345-12349"]. 
// +kubebuilder:validation:Optional Ports []*string `json:"ports,omitempty" tf:"ports,omitempty"` diff --git a/apis/compute/v1beta2/zz_firewallpolicyrule_types.go b/apis/compute/v1beta2/zz_firewallpolicyrule_types.go index f5370cace..ec4161149 100755 --- a/apis/compute/v1beta2/zz_firewallpolicyrule_types.go +++ b/apis/compute/v1beta2/zz_firewallpolicyrule_types.go @@ -21,13 +21,19 @@ type FirewallPolicyRuleInitParameters struct { // An optional description for this resource. Description *string `json:"description,omitempty" tf:"description,omitempty"` - // The direction in which this rule applies. Possible values: INGRESS, EGRESS + // The direction in which this rule applies. + // Possible values are: INGRESS, EGRESS. Direction *string `json:"direction,omitempty" tf:"direction,omitempty"` - // Denotes whether the firewall policy rule is disabled. When set to true, the firewall policy rule is not enforced and traffic behaves as if it did not exist. If this is unspecified, the firewall policy rule will be enabled. + // Denotes whether the firewall policy rule is disabled. + // When set to true, the firewall policy rule is not enforced and traffic behaves as if it did not exist. + // If this is unspecified, the firewall policy rule will be enabled. Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` - // Denotes whether to enable logging for a particular rule. If logging is enabled, logs will be exported to the configured export destination in Stackdriver. Logs may be exported to BigQuery or Pub/Sub. Note: you cannot enable logging on "goto_next" rules. + // Denotes whether to enable logging for a particular rule. + // If logging is enabled, logs will be exported to the configured export destination in Stackdriver. + // Logs may be exported to BigQuery or Pub/Sub. + // Note: you cannot enable logging on "goto_next" rules. EnableLogging *bool `json:"enableLogging,omitempty" tf:"enable_logging,omitempty"` // The firewall policy of the resource. 
@@ -44,18 +50,26 @@ type FirewallPolicyRuleInitParameters struct { FirewallPolicySelector *v1.Selector `json:"firewallPolicySelector,omitempty" tf:"-"` // A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced. + // Structure is documented below. Match *MatchInitParameters `json:"match,omitempty" tf:"match,omitempty"` - // An integer indicating the priority of a rule in the list. The priority must be a positive value between 0 and 2147483647. Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest prority. + // An integer indicating the priority of a rule in the list. + // The priority must be a positive value between 0 and 2147483647. + // Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest priority. Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` - // A fully-qualified URL of a SecurityProfileGroup resource. Example: https://networksecurity.googleapis.com/v1/organizations/{organizationId}/locations/global/securityProfileGroups/my-security-profile-group. It must be specified if action = 'apply_security_profile_group' and cannot be specified for other actions. + // A fully-qualified URL of a SecurityProfile resource instance. + // Example: https://networksecurity.googleapis.com/v1/projects/{project}/locations/{location}/securityProfileGroups/my-security-profile-group + // Must be specified if action = 'apply_security_profile_group' and cannot be specified for other actions. SecurityProfileGroup *string `json:"securityProfileGroup,omitempty" tf:"security_profile_group,omitempty"` - // Boolean flag indicating if the traffic should be TLS decrypted. It can be set only if action = 'apply_security_profile_group' and cannot be set for other actions. + // Boolean flag indicating if the traffic should be TLS decrypted. 
+ // Can be set only if action = 'apply_security_profile_group' and cannot be set for other actions. TLSInspect *bool `json:"tlsInspect,omitempty" tf:"tls_inspect,omitempty"` - // A list of network resource URLs to which this rule applies. This field allows you to control which network's VMs get this rule. If this field is left blank, all VMs within the organization will receive the rule. + // A list of network resource URLs to which this rule applies. + // This field allows you to control which network's VMs get this rule. + // If this field is left blank, all VMs within the organization will receive the rule. TargetResources []*string `json:"targetResources,omitempty" tf:"target_resources,omitempty"` // A list of service accounts indicating the sets of instances that are applied with this rule. @@ -67,16 +81,25 @@ type FirewallPolicyRuleObservation struct { // The Action to perform when the client connection triggers the rule. Valid actions are "allow", "deny", "goto_next" and "apply_security_profile_group". Action *string `json:"action,omitempty" tf:"action,omitempty"` + // Creation timestamp in RFC3339 text format. + CreationTimestamp *string `json:"creationTimestamp,omitempty" tf:"creation_timestamp,omitempty"` + // An optional description for this resource. Description *string `json:"description,omitempty" tf:"description,omitempty"` - // The direction in which this rule applies. Possible values: INGRESS, EGRESS + // The direction in which this rule applies. + // Possible values are: INGRESS, EGRESS. Direction *string `json:"direction,omitempty" tf:"direction,omitempty"` - // Denotes whether the firewall policy rule is disabled. When set to true, the firewall policy rule is not enforced and traffic behaves as if it did not exist. If this is unspecified, the firewall policy rule will be enabled. + // Denotes whether the firewall policy rule is disabled. + // When set to true, the firewall policy rule is not enforced and traffic behaves as if it did not exist. 
+ // If this is unspecified, the firewall policy rule will be enabled. Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` - // Denotes whether to enable logging for a particular rule. If logging is enabled, logs will be exported to the configured export destination in Stackdriver. Logs may be exported to BigQuery or Pub/Sub. Note: you cannot enable logging on "goto_next" rules. + // Denotes whether to enable logging for a particular rule. + // If logging is enabled, logs will be exported to the configured export destination in Stackdriver. + // Logs may be exported to BigQuery or Pub/Sub. + // Note: you cannot enable logging on "goto_next" rules. EnableLogging *bool `json:"enableLogging,omitempty" tf:"enable_logging,omitempty"` // The firewall policy of the resource. @@ -89,21 +112,29 @@ type FirewallPolicyRuleObservation struct { Kind *string `json:"kind,omitempty" tf:"kind,omitempty"` // A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced. + // Structure is documented below. Match *MatchObservation `json:"match,omitempty" tf:"match,omitempty"` - // An integer indicating the priority of a rule in the list. The priority must be a positive value between 0 and 2147483647. Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest prority. + // An integer indicating the priority of a rule in the list. + // The priority must be a positive value between 0 and 2147483647. + // Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest priority. Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` // Calculation of the complexity of a single firewall policy rule. RuleTupleCount *float64 `json:"ruleTupleCount,omitempty" tf:"rule_tuple_count,omitempty"` - // A fully-qualified URL of a SecurityProfileGroup resource. 
Example: https://networksecurity.googleapis.com/v1/organizations/{organizationId}/locations/global/securityProfileGroups/my-security-profile-group. It must be specified if action = 'apply_security_profile_group' and cannot be specified for other actions. + // A fully-qualified URL of a SecurityProfile resource instance. + // Example: https://networksecurity.googleapis.com/v1/projects/{project}/locations/{location}/securityProfileGroups/my-security-profile-group + // Must be specified if action = 'apply_security_profile_group' and cannot be specified for other actions. SecurityProfileGroup *string `json:"securityProfileGroup,omitempty" tf:"security_profile_group,omitempty"` - // Boolean flag indicating if the traffic should be TLS decrypted. It can be set only if action = 'apply_security_profile_group' and cannot be set for other actions. + // Boolean flag indicating if the traffic should be TLS decrypted. + // Can be set only if action = 'apply_security_profile_group' and cannot be set for other actions. TLSInspect *bool `json:"tlsInspect,omitempty" tf:"tls_inspect,omitempty"` - // A list of network resource URLs to which this rule applies. This field allows you to control which network's VMs get this rule. If this field is left blank, all VMs within the organization will receive the rule. + // A list of network resource URLs to which this rule applies. + // This field allows you to control which network's VMs get this rule. + // If this field is left blank, all VMs within the organization will receive the rule. TargetResources []*string `json:"targetResources,omitempty" tf:"target_resources,omitempty"` // A list of service accounts indicating the sets of instances that are applied with this rule. @@ -120,15 +151,21 @@ type FirewallPolicyRuleParameters struct { // +kubebuilder:validation:Optional Description *string `json:"description,omitempty" tf:"description,omitempty"` - // The direction in which this rule applies. 
Possible values: INGRESS, EGRESS + // The direction in which this rule applies. + // Possible values are: INGRESS, EGRESS. // +kubebuilder:validation:Optional Direction *string `json:"direction,omitempty" tf:"direction,omitempty"` - // Denotes whether the firewall policy rule is disabled. When set to true, the firewall policy rule is not enforced and traffic behaves as if it did not exist. If this is unspecified, the firewall policy rule will be enabled. + // Denotes whether the firewall policy rule is disabled. + // When set to true, the firewall policy rule is not enforced and traffic behaves as if it did not exist. + // If this is unspecified, the firewall policy rule will be enabled. // +kubebuilder:validation:Optional Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` - // Denotes whether to enable logging for a particular rule. If logging is enabled, logs will be exported to the configured export destination in Stackdriver. Logs may be exported to BigQuery or Pub/Sub. Note: you cannot enable logging on "goto_next" rules. + // Denotes whether to enable logging for a particular rule. + // If logging is enabled, logs will be exported to the configured export destination in Stackdriver. + // Logs may be exported to BigQuery or Pub/Sub. + // Note: you cannot enable logging on "goto_next" rules. // +kubebuilder:validation:Optional EnableLogging *bool `json:"enableLogging,omitempty" tf:"enable_logging,omitempty"` @@ -147,22 +184,30 @@ type FirewallPolicyRuleParameters struct { FirewallPolicySelector *v1.Selector `json:"firewallPolicySelector,omitempty" tf:"-"` // A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced. + // Structure is documented below. // +kubebuilder:validation:Optional Match *MatchParameters `json:"match,omitempty" tf:"match,omitempty"` - // An integer indicating the priority of a rule in the list. The priority must be a positive value between 0 and 2147483647. 
Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest prority. + // An integer indicating the priority of a rule in the list. + // The priority must be a positive value between 0 and 2147483647. + // Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest priority. // +kubebuilder:validation:Optional Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` - // A fully-qualified URL of a SecurityProfileGroup resource. Example: https://networksecurity.googleapis.com/v1/organizations/{organizationId}/locations/global/securityProfileGroups/my-security-profile-group. It must be specified if action = 'apply_security_profile_group' and cannot be specified for other actions. + // A fully-qualified URL of a SecurityProfile resource instance. + // Example: https://networksecurity.googleapis.com/v1/projects/{project}/locations/{location}/securityProfileGroups/my-security-profile-group + // Must be specified if action = 'apply_security_profile_group' and cannot be specified for other actions. // +kubebuilder:validation:Optional SecurityProfileGroup *string `json:"securityProfileGroup,omitempty" tf:"security_profile_group,omitempty"` - // Boolean flag indicating if the traffic should be TLS decrypted. It can be set only if action = 'apply_security_profile_group' and cannot be set for other actions. + // Boolean flag indicating if the traffic should be TLS decrypted. + // Can be set only if action = 'apply_security_profile_group' and cannot be set for other actions. // +kubebuilder:validation:Optional TLSInspect *bool `json:"tlsInspect,omitempty" tf:"tls_inspect,omitempty"` - // A list of network resource URLs to which this rule applies. This field allows you to control which network's VMs get this rule. If this field is left blank, all VMs within the organization will receive the rule. + // A list of network resource URLs to which this rule applies. 
+ // This field allows you to control which network's VMs get this rule. + // If this field is left blank, all VMs within the organization will receive the rule. // +kubebuilder:validation:Optional TargetResources []*string `json:"targetResources,omitempty" tf:"target_resources,omitempty"` @@ -173,7 +218,8 @@ type FirewallPolicyRuleParameters struct { type Layer4ConfigsInitParameters struct { - // The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number. + // The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. + // This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number. IPProtocol *string `json:"ipProtocol,omitempty" tf:"ip_protocol,omitempty"` // An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. @@ -182,7 +228,8 @@ type Layer4ConfigsInitParameters struct { type Layer4ConfigsObservation struct { - // The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number. + // The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. + // This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number. IPProtocol *string `json:"ipProtocol,omitempty" tf:"ip_protocol,omitempty"` // An optional list of ports to which this rule applies. 
This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. @@ -191,7 +238,8 @@ type Layer4ConfigsObservation struct { type Layer4ConfigsParameters struct { - // The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number. + // The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. + // This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number. // +kubebuilder:validation:Optional IPProtocol *string `json:"ipProtocol" tf:"ip_protocol,omitempty"` @@ -202,7 +250,7 @@ type Layer4ConfigsParameters struct { type MatchInitParameters struct { - // Address groups which should be matched against the traffic destination. Maximum number of destination address groups is 10. Destination address groups is only supported in Egress rules. + // Address groups which should be matched against the traffic destination. Maximum number of destination address groups is 10. // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/networksecurity/v1beta1.AddressGroup // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() DestAddressGroups []*string `json:"destAddressGroups,omitempty" tf:"dest_address_groups,omitempty"` @@ -215,76 +263,78 @@ type MatchInitParameters struct { // +kubebuilder:validation:Optional DestAddressGroupsSelector *v1.Selector `json:"destAddressGroupsSelector,omitempty" tf:"-"` - // Domain names that will be used to match against the resolved domain name of destination of traffic. Can only be specified if DIRECTION is egress. 
+ // Fully Qualified Domain Name (FQDN) which should be matched against traffic destination. Maximum number of destination fqdn allowed is 100. DestFqdns []*string `json:"destFqdns,omitempty" tf:"dest_fqdns,omitempty"` - // CIDR IP address range. Maximum number of destination CIDR IP ranges allowed is 256. + // CIDR IP address range. Maximum number of destination CIDR IP ranges allowed is 5000. DestIPRanges []*string `json:"destIpRanges,omitempty" tf:"dest_ip_ranges,omitempty"` - // The Unicode country codes whose IP addresses will be used to match against the source of traffic. Can only be specified if DIRECTION is egress. + // Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. DestRegionCodes []*string `json:"destRegionCodes,omitempty" tf:"dest_region_codes,omitempty"` - // Name of the Google Cloud Threat Intelligence list. + // Names of Network Threat Intelligence lists. The IPs in these lists will be matched against traffic destination. DestThreatIntelligences []*string `json:"destThreatIntelligences,omitempty" tf:"dest_threat_intelligences,omitempty"` // Pairs of IP protocols and ports that the rule should match. + // Structure is documented below. Layer4Configs []Layer4ConfigsInitParameters `json:"layer4Configs,omitempty" tf:"layer4_configs,omitempty"` - // Address groups which should be matched against the traffic source. Maximum number of source address groups is 10. Source address groups is only supported in Ingress rules. + // Address groups which should be matched against the traffic source. Maximum number of source address groups is 10. SrcAddressGroups []*string `json:"srcAddressGroups,omitempty" tf:"src_address_groups,omitempty"` - // Domain names that will be used to match against the resolved domain name of source of traffic. 
Can only be specified if DIRECTION is ingress. + // Fully Qualified Domain Name (FQDN) which should be matched against traffic source. Maximum number of source fqdn allowed is 100. SrcFqdns []*string `json:"srcFqdns,omitempty" tf:"src_fqdns,omitempty"` - // CIDR IP address range. Maximum number of source CIDR IP ranges allowed is 256. + // CIDR IP address range. Maximum number of source CIDR IP ranges allowed is 5000. SrcIPRanges []*string `json:"srcIpRanges,omitempty" tf:"src_ip_ranges,omitempty"` - // The Unicode country codes whose IP addresses will be used to match against the source of traffic. Can only be specified if DIRECTION is ingress. + // Region codes whose IP addresses will be used to match for source of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of source region codes allowed is 5000. SrcRegionCodes []*string `json:"srcRegionCodes,omitempty" tf:"src_region_codes,omitempty"` - // Name of the Google Cloud Threat Intelligence list. + // Names of Network Threat Intelligence lists. The IPs in these lists will be matched against traffic source. SrcThreatIntelligences []*string `json:"srcThreatIntelligences,omitempty" tf:"src_threat_intelligences,omitempty"` } type MatchObservation struct { - // Address groups which should be matched against the traffic destination. Maximum number of destination address groups is 10. Destination address groups is only supported in Egress rules. + // Address groups which should be matched against the traffic destination. Maximum number of destination address groups is 10. DestAddressGroups []*string `json:"destAddressGroups,omitempty" tf:"dest_address_groups,omitempty"` - // Domain names that will be used to match against the resolved domain name of destination of traffic. Can only be specified if DIRECTION is egress. + // Fully Qualified Domain Name (FQDN) which should be matched against traffic destination. 
Maximum number of destination fqdn allowed is 100. DestFqdns []*string `json:"destFqdns,omitempty" tf:"dest_fqdns,omitempty"` - // CIDR IP address range. Maximum number of destination CIDR IP ranges allowed is 256. + // CIDR IP address range. Maximum number of destination CIDR IP ranges allowed is 5000. DestIPRanges []*string `json:"destIpRanges,omitempty" tf:"dest_ip_ranges,omitempty"` - // The Unicode country codes whose IP addresses will be used to match against the source of traffic. Can only be specified if DIRECTION is egress. + // Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. DestRegionCodes []*string `json:"destRegionCodes,omitempty" tf:"dest_region_codes,omitempty"` - // Name of the Google Cloud Threat Intelligence list. + // Names of Network Threat Intelligence lists. The IPs in these lists will be matched against traffic destination. DestThreatIntelligences []*string `json:"destThreatIntelligences,omitempty" tf:"dest_threat_intelligences,omitempty"` // Pairs of IP protocols and ports that the rule should match. + // Structure is documented below. Layer4Configs []Layer4ConfigsObservation `json:"layer4Configs,omitempty" tf:"layer4_configs,omitempty"` - // Address groups which should be matched against the traffic source. Maximum number of source address groups is 10. Source address groups is only supported in Ingress rules. + // Address groups which should be matched against the traffic source. Maximum number of source address groups is 10. SrcAddressGroups []*string `json:"srcAddressGroups,omitempty" tf:"src_address_groups,omitempty"` - // Domain names that will be used to match against the resolved domain name of source of traffic. Can only be specified if DIRECTION is ingress. + // Fully Qualified Domain Name (FQDN) which should be matched against traffic source. 
Maximum number of source fqdn allowed is 100. SrcFqdns []*string `json:"srcFqdns,omitempty" tf:"src_fqdns,omitempty"` - // CIDR IP address range. Maximum number of source CIDR IP ranges allowed is 256. + // CIDR IP address range. Maximum number of source CIDR IP ranges allowed is 5000. SrcIPRanges []*string `json:"srcIpRanges,omitempty" tf:"src_ip_ranges,omitempty"` - // The Unicode country codes whose IP addresses will be used to match against the source of traffic. Can only be specified if DIRECTION is ingress. + // Region codes whose IP addresses will be used to match for source of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of source region codes allowed is 5000. SrcRegionCodes []*string `json:"srcRegionCodes,omitempty" tf:"src_region_codes,omitempty"` - // Name of the Google Cloud Threat Intelligence list. + // Names of Network Threat Intelligence lists. The IPs in these lists will be matched against traffic source. SrcThreatIntelligences []*string `json:"srcThreatIntelligences,omitempty" tf:"src_threat_intelligences,omitempty"` } type MatchParameters struct { - // Address groups which should be matched against the traffic destination. Maximum number of destination address groups is 10. Destination address groups is only supported in Egress rules. + // Address groups which should be matched against the traffic destination. Maximum number of destination address groups is 10. 
// +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/networksecurity/v1beta1.AddressGroup // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional @@ -298,43 +348,44 @@ type MatchParameters struct { // +kubebuilder:validation:Optional DestAddressGroupsSelector *v1.Selector `json:"destAddressGroupsSelector,omitempty" tf:"-"` - // Domain names that will be used to match against the resolved domain name of destination of traffic. Can only be specified if DIRECTION is egress. + // Fully Qualified Domain Name (FQDN) which should be matched against traffic destination. Maximum number of destination fqdn allowed is 100. // +kubebuilder:validation:Optional DestFqdns []*string `json:"destFqdns,omitempty" tf:"dest_fqdns,omitempty"` - // CIDR IP address range. Maximum number of destination CIDR IP ranges allowed is 256. + // CIDR IP address range. Maximum number of destination CIDR IP ranges allowed is 5000. // +kubebuilder:validation:Optional DestIPRanges []*string `json:"destIpRanges,omitempty" tf:"dest_ip_ranges,omitempty"` - // The Unicode country codes whose IP addresses will be used to match against the source of traffic. Can only be specified if DIRECTION is egress. + // Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. // +kubebuilder:validation:Optional DestRegionCodes []*string `json:"destRegionCodes,omitempty" tf:"dest_region_codes,omitempty"` - // Name of the Google Cloud Threat Intelligence list. + // Names of Network Threat Intelligence lists. The IPs in these lists will be matched against traffic destination. 
// +kubebuilder:validation:Optional DestThreatIntelligences []*string `json:"destThreatIntelligences,omitempty" tf:"dest_threat_intelligences,omitempty"` // Pairs of IP protocols and ports that the rule should match. + // Structure is documented below. // +kubebuilder:validation:Optional Layer4Configs []Layer4ConfigsParameters `json:"layer4Configs" tf:"layer4_configs,omitempty"` - // Address groups which should be matched against the traffic source. Maximum number of source address groups is 10. Source address groups is only supported in Ingress rules. + // Address groups which should be matched against the traffic source. Maximum number of source address groups is 10. // +kubebuilder:validation:Optional SrcAddressGroups []*string `json:"srcAddressGroups,omitempty" tf:"src_address_groups,omitempty"` - // Domain names that will be used to match against the resolved domain name of source of traffic. Can only be specified if DIRECTION is ingress. + // Fully Qualified Domain Name (FQDN) which should be matched against traffic source. Maximum number of source fqdn allowed is 100. // +kubebuilder:validation:Optional SrcFqdns []*string `json:"srcFqdns,omitempty" tf:"src_fqdns,omitempty"` - // CIDR IP address range. Maximum number of source CIDR IP ranges allowed is 256. + // CIDR IP address range. Maximum number of source CIDR IP ranges allowed is 5000. // +kubebuilder:validation:Optional SrcIPRanges []*string `json:"srcIpRanges,omitempty" tf:"src_ip_ranges,omitempty"` - // The Unicode country codes whose IP addresses will be used to match against the source of traffic. Can only be specified if DIRECTION is ingress. + // Region codes whose IP addresses will be used to match for source of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of source region codes allowed is 5000. 
// +kubebuilder:validation:Optional SrcRegionCodes []*string `json:"srcRegionCodes,omitempty" tf:"src_region_codes,omitempty"` - // Name of the Google Cloud Threat Intelligence list. + // Names of Network Threat Intelligence lists. The IPs in these lists will be matched against traffic source. // +kubebuilder:validation:Optional SrcThreatIntelligences []*string `json:"srcThreatIntelligences,omitempty" tf:"src_threat_intelligences,omitempty"` } @@ -365,7 +416,7 @@ type FirewallPolicyRuleStatus struct { // +kubebuilder:object:root=true // +kubebuilder:subresource:status -// FirewallPolicyRule is the Schema for the FirewallPolicyRules API. The Compute FirewallPolicyRule resource +// FirewallPolicyRule is the Schema for the FirewallPolicyRules API. Represents a rule that describes one or more match conditions along with the action to be taken when traffic matches this condition (allow or deny). // +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" // +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" diff --git a/apis/compute/v1beta2/zz_generated.deepcopy.go b/apis/compute/v1beta2/zz_generated.deepcopy.go index 7823cc5e8..741022efd 100644 --- a/apis/compute/v1beta2/zz_generated.deepcopy.go +++ b/apis/compute/v1beta2/zz_generated.deepcopy.go @@ -88,6 +88,81 @@ func (in *AbortParameters) DeepCopy() *AbortParameters { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AcceleratorsInitParameters) DeepCopyInto(out *AcceleratorsInitParameters) { + *out = *in + if in.AcceleratorCount != nil { + in, out := &in.AcceleratorCount, &out.AcceleratorCount + *out = new(float64) + **out = **in + } + if in.AcceleratorType != nil { + in, out := &in.AcceleratorType, &out.AcceleratorType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AcceleratorsInitParameters. +func (in *AcceleratorsInitParameters) DeepCopy() *AcceleratorsInitParameters { + if in == nil { + return nil + } + out := new(AcceleratorsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AcceleratorsObservation) DeepCopyInto(out *AcceleratorsObservation) { + *out = *in + if in.AcceleratorCount != nil { + in, out := &in.AcceleratorCount, &out.AcceleratorCount + *out = new(float64) + **out = **in + } + if in.AcceleratorType != nil { + in, out := &in.AcceleratorType, &out.AcceleratorType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AcceleratorsObservation. +func (in *AcceleratorsObservation) DeepCopy() *AcceleratorsObservation { + if in == nil { + return nil + } + out := new(AcceleratorsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AcceleratorsParameters) DeepCopyInto(out *AcceleratorsParameters) { + *out = *in + if in.AcceleratorCount != nil { + in, out := &in.AcceleratorCount, &out.AcceleratorCount + *out = new(float64) + **out = **in + } + if in.AcceleratorType != nil { + in, out := &in.AcceleratorType, &out.AcceleratorType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AcceleratorsParameters. +func (in *AcceleratorsParameters) DeepCopy() *AcceleratorsParameters { + if in == nil { + return nil + } + out := new(AcceleratorsParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AccessConfigInitParameters) DeepCopyInto(out *AccessConfigInitParameters) { *out = *in @@ -381,11 +456,26 @@ func (in *AdvancedMachineFeaturesInitParameters) DeepCopyInto(out *AdvancedMachi *out = new(bool) **out = **in } + if in.EnableUefiNetworking != nil { + in, out := &in.EnableUefiNetworking, &out.EnableUefiNetworking + *out = new(bool) + **out = **in + } + if in.PerformanceMonitoringUnit != nil { + in, out := &in.PerformanceMonitoringUnit, &out.PerformanceMonitoringUnit + *out = new(string) + **out = **in + } if in.ThreadsPerCore != nil { in, out := &in.ThreadsPerCore, &out.ThreadsPerCore *out = new(float64) **out = **in } + if in.TurboMode != nil { + in, out := &in.TurboMode, &out.TurboMode + *out = new(string) + **out = **in + } if in.VisibleCoreCount != nil { in, out := &in.VisibleCoreCount, &out.VisibleCoreCount *out = new(float64) @@ -411,11 +501,26 @@ func (in *AdvancedMachineFeaturesObservation) DeepCopyInto(out *AdvancedMachineF *out = new(bool) **out = **in } + if in.EnableUefiNetworking != nil { + in, out := &in.EnableUefiNetworking, &out.EnableUefiNetworking + *out = new(bool) + **out = **in + } + if in.PerformanceMonitoringUnit != nil { + in, out := &in.PerformanceMonitoringUnit, 
&out.PerformanceMonitoringUnit + *out = new(string) + **out = **in + } if in.ThreadsPerCore != nil { in, out := &in.ThreadsPerCore, &out.ThreadsPerCore *out = new(float64) **out = **in } + if in.TurboMode != nil { + in, out := &in.TurboMode, &out.TurboMode + *out = new(string) + **out = **in + } if in.VisibleCoreCount != nil { in, out := &in.VisibleCoreCount, &out.VisibleCoreCount *out = new(float64) @@ -441,11 +546,26 @@ func (in *AdvancedMachineFeaturesParameters) DeepCopyInto(out *AdvancedMachineFe *out = new(bool) **out = **in } + if in.EnableUefiNetworking != nil { + in, out := &in.EnableUefiNetworking, &out.EnableUefiNetworking + *out = new(bool) + **out = **in + } + if in.PerformanceMonitoringUnit != nil { + in, out := &in.PerformanceMonitoringUnit, &out.PerformanceMonitoringUnit + *out = new(string) + **out = **in + } if in.ThreadsPerCore != nil { in, out := &in.ThreadsPerCore, &out.ThreadsPerCore *out = new(float64) **out = **in } + if in.TurboMode != nil { + in, out := &in.TurboMode, &out.TurboMode + *out = new(string) + **out = **in + } if in.VisibleCoreCount != nil { in, out := &in.VisibleCoreCount, &out.VisibleCoreCount *out = new(float64) @@ -3426,6 +3546,11 @@ func (in *BackendServiceInitParameters) DeepCopyInto(out *BackendServiceInitPara *out = new(v1.Selector) (*in).DeepCopyInto(*out) } + if in.IPAddressSelectionPolicy != nil { + in, out := &in.IPAddressSelectionPolicy, &out.IPAddressSelectionPolicy + *out = new(string) + **out = **in + } if in.Iap != nil { in, out := &in.Iap, &out.Iap *out = new(IapInitParameters) @@ -3493,6 +3618,11 @@ func (in *BackendServiceInitParameters) DeepCopyInto(out *BackendServiceInitPara *out = new(string) **out = **in } + if in.StrongSessionAffinityCookie != nil { + in, out := &in.StrongSessionAffinityCookie, &out.StrongSessionAffinityCookie + *out = new(StrongSessionAffinityCookieInitParameters) + (*in).DeepCopyInto(*out) + } if in.TimeoutSec != nil { in, out := &in.TimeoutSec, &out.TimeoutSec *out = new(float64) @@ 
-3650,6 +3780,11 @@ func (in *BackendServiceObservation) DeepCopyInto(out *BackendServiceObservation *out = new(string) **out = **in } + if in.IPAddressSelectionPolicy != nil { + in, out := &in.IPAddressSelectionPolicy, &out.IPAddressSelectionPolicy + *out = new(string) + **out = **in + } if in.Iap != nil { in, out := &in.Iap, &out.Iap *out = new(IapObservation) @@ -3722,6 +3857,11 @@ func (in *BackendServiceObservation) DeepCopyInto(out *BackendServiceObservation *out = new(string) **out = **in } + if in.StrongSessionAffinityCookie != nil { + in, out := &in.StrongSessionAffinityCookie, &out.StrongSessionAffinityCookie + *out = new(StrongSessionAffinityCookieObservation) + (*in).DeepCopyInto(*out) + } if in.TimeoutSec != nil { in, out := &in.TimeoutSec, &out.TimeoutSec *out = new(float64) @@ -3839,6 +3979,11 @@ func (in *BackendServiceParameters) DeepCopyInto(out *BackendServiceParameters) *out = new(v1.Selector) (*in).DeepCopyInto(*out) } + if in.IPAddressSelectionPolicy != nil { + in, out := &in.IPAddressSelectionPolicy, &out.IPAddressSelectionPolicy + *out = new(string) + **out = **in + } if in.Iap != nil { in, out := &in.Iap, &out.Iap *out = new(IapParameters) @@ -3906,6 +4051,11 @@ func (in *BackendServiceParameters) DeepCopyInto(out *BackendServiceParameters) *out = new(string) **out = **in } + if in.StrongSessionAffinityCookie != nil { + in, out := &in.StrongSessionAffinityCookie, &out.StrongSessionAffinityCookie + *out = new(StrongSessionAffinityCookieParameters) + (*in).DeepCopyInto(*out) + } if in.TimeoutSec != nil { in, out := &in.TimeoutSec, &out.TimeoutSec *out = new(float64) @@ -4236,6 +4386,11 @@ func (in *BootDiskInitParameters) DeepCopyInto(out *BootDiskInitParameters) { *out = new(InitializeParamsInitParameters) (*in).DeepCopyInto(*out) } + if in.Interface != nil { + in, out := &in.Interface, &out.Interface + *out = new(string) + **out = **in + } if in.KMSKeySelfLink != nil { in, out := &in.KMSKeySelfLink, &out.KMSKeySelfLink *out = new(string) @@ 
-4309,6 +4464,17 @@ func (in *BootDiskInitializeParamsInitParameters) DeepCopyInto(out *BootDiskInit (*out)[key] = outVal } } + if in.ResourcePolicies != nil { + in, out := &in.ResourcePolicies, &out.ResourcePolicies + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.Size != nil { in, out := &in.Size, &out.Size *out = new(float64) @@ -4382,6 +4548,17 @@ func (in *BootDiskInitializeParamsObservation) DeepCopyInto(out *BootDiskInitial (*out)[key] = outVal } } + if in.ResourcePolicies != nil { + in, out := &in.ResourcePolicies, &out.ResourcePolicies + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.Size != nil { in, out := &in.Size, &out.Size *out = new(float64) @@ -4455,6 +4632,17 @@ func (in *BootDiskInitializeParamsParameters) DeepCopyInto(out *BootDiskInitiali (*out)[key] = outVal } } + if in.ResourcePolicies != nil { + in, out := &in.ResourcePolicies, &out.ResourcePolicies + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.Size != nil { in, out := &in.Size, &out.Size *out = new(float64) @@ -4505,6 +4693,11 @@ func (in *BootDiskObservation) DeepCopyInto(out *BootDiskObservation) { *out = new(InitializeParamsObservation) (*in).DeepCopyInto(*out) } + if in.Interface != nil { + in, out := &in.Interface, &out.Interface + *out = new(string) + **out = **in + } if in.KMSKeySelfLink != nil { in, out := &in.KMSKeySelfLink, &out.KMSKeySelfLink *out = new(string) @@ -4555,6 +4748,11 @@ func (in *BootDiskParameters) DeepCopyInto(out *BootDiskParameters) { *out = new(InitializeParamsParameters) (*in).DeepCopyInto(*out) } + if in.Interface != nil { + in, out := &in.Interface, &out.Interface + *out = new(string) + **out = **in 
+ } if in.KMSKeySelfLink != nil { in, out := &in.KMSKeySelfLink, &out.KMSKeySelfLink *out = new(string) @@ -9908,6 +10106,171 @@ func (in *DiskStatus) DeepCopy() *DiskStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DisksInitParameters) DeepCopyInto(out *DisksInitParameters) { + *out = *in + if in.DiskCount != nil { + in, out := &in.DiskCount, &out.DiskCount + *out = new(float64) + **out = **in + } + if in.DiskSizeGb != nil { + in, out := &in.DiskSizeGb, &out.DiskSizeGb + *out = new(float64) + **out = **in + } + if in.DiskType != nil { + in, out := &in.DiskType, &out.DiskType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DisksInitParameters. +func (in *DisksInitParameters) DeepCopy() *DisksInitParameters { + if in == nil { + return nil + } + out := new(DisksInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DisksObservation) DeepCopyInto(out *DisksObservation) { + *out = *in + if in.DiskCount != nil { + in, out := &in.DiskCount, &out.DiskCount + *out = new(float64) + **out = **in + } + if in.DiskSizeGb != nil { + in, out := &in.DiskSizeGb, &out.DiskSizeGb + *out = new(float64) + **out = **in + } + if in.DiskType != nil { + in, out := &in.DiskType, &out.DiskType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DisksObservation. +func (in *DisksObservation) DeepCopy() *DisksObservation { + if in == nil { + return nil + } + out := new(DisksObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DisksParameters) DeepCopyInto(out *DisksParameters) { + *out = *in + if in.DiskCount != nil { + in, out := &in.DiskCount, &out.DiskCount + *out = new(float64) + **out = **in + } + if in.DiskSizeGb != nil { + in, out := &in.DiskSizeGb, &out.DiskSizeGb + *out = new(float64) + **out = **in + } + if in.DiskType != nil { + in, out := &in.DiskType, &out.DiskType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DisksParameters. +func (in *DisksParameters) DeepCopy() *DisksParameters { + if in == nil { + return nil + } + out := new(DisksParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnforceOnKeyConfigsInitParameters) DeepCopyInto(out *EnforceOnKeyConfigsInitParameters) { + *out = *in + if in.EnforceOnKeyName != nil { + in, out := &in.EnforceOnKeyName, &out.EnforceOnKeyName + *out = new(string) + **out = **in + } + if in.EnforceOnKeyType != nil { + in, out := &in.EnforceOnKeyType, &out.EnforceOnKeyType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnforceOnKeyConfigsInitParameters. +func (in *EnforceOnKeyConfigsInitParameters) DeepCopy() *EnforceOnKeyConfigsInitParameters { + if in == nil { + return nil + } + out := new(EnforceOnKeyConfigsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EnforceOnKeyConfigsObservation) DeepCopyInto(out *EnforceOnKeyConfigsObservation) { + *out = *in + if in.EnforceOnKeyName != nil { + in, out := &in.EnforceOnKeyName, &out.EnforceOnKeyName + *out = new(string) + **out = **in + } + if in.EnforceOnKeyType != nil { + in, out := &in.EnforceOnKeyType, &out.EnforceOnKeyType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnforceOnKeyConfigsObservation. +func (in *EnforceOnKeyConfigsObservation) DeepCopy() *EnforceOnKeyConfigsObservation { + if in == nil { + return nil + } + out := new(EnforceOnKeyConfigsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnforceOnKeyConfigsParameters) DeepCopyInto(out *EnforceOnKeyConfigsParameters) { + *out = *in + if in.EnforceOnKeyName != nil { + in, out := &in.EnforceOnKeyName, &out.EnforceOnKeyName + *out = new(string) + **out = **in + } + if in.EnforceOnKeyType != nil { + in, out := &in.EnforceOnKeyType, &out.EnforceOnKeyType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnforceOnKeyConfigsParameters. +func (in *EnforceOnKeyConfigsParameters) DeepCopy() *EnforceOnKeyConfigsParameters { + if in == nil { + return nil + } + out := new(EnforceOnKeyConfigsParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ExceedRedirectOptionsInitParameters) DeepCopyInto(out *ExceedRedirectOptionsInitParameters) { *out = *in @@ -9983,6 +10346,183 @@ func (in *ExceedRedirectOptionsParameters) DeepCopy() *ExceedRedirectOptionsPara return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExclusionInitParameters) DeepCopyInto(out *ExclusionInitParameters) { + *out = *in + if in.RequestCookie != nil { + in, out := &in.RequestCookie, &out.RequestCookie + *out = make([]RequestCookieInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RequestHeader != nil { + in, out := &in.RequestHeader, &out.RequestHeader + *out = make([]RequestHeaderInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RequestQueryParam != nil { + in, out := &in.RequestQueryParam, &out.RequestQueryParam + *out = make([]RequestQueryParamInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RequestURI != nil { + in, out := &in.RequestURI, &out.RequestURI + *out = make([]RequestURIInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TargetRuleIds != nil { + in, out := &in.TargetRuleIds, &out.TargetRuleIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TargetRuleSet != nil { + in, out := &in.TargetRuleSet, &out.TargetRuleSet + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExclusionInitParameters. +func (in *ExclusionInitParameters) DeepCopy() *ExclusionInitParameters { + if in == nil { + return nil + } + out := new(ExclusionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExclusionObservation) DeepCopyInto(out *ExclusionObservation) { + *out = *in + if in.RequestCookie != nil { + in, out := &in.RequestCookie, &out.RequestCookie + *out = make([]RequestCookieObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RequestHeader != nil { + in, out := &in.RequestHeader, &out.RequestHeader + *out = make([]RequestHeaderObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RequestQueryParam != nil { + in, out := &in.RequestQueryParam, &out.RequestQueryParam + *out = make([]RequestQueryParamObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RequestURI != nil { + in, out := &in.RequestURI, &out.RequestURI + *out = make([]RequestURIObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TargetRuleIds != nil { + in, out := &in.TargetRuleIds, &out.TargetRuleIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TargetRuleSet != nil { + in, out := &in.TargetRuleSet, &out.TargetRuleSet + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExclusionObservation. +func (in *ExclusionObservation) DeepCopy() *ExclusionObservation { + if in == nil { + return nil + } + out := new(ExclusionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExclusionParameters) DeepCopyInto(out *ExclusionParameters) { + *out = *in + if in.RequestCookie != nil { + in, out := &in.RequestCookie, &out.RequestCookie + *out = make([]RequestCookieParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RequestHeader != nil { + in, out := &in.RequestHeader, &out.RequestHeader + *out = make([]RequestHeaderParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RequestQueryParam != nil { + in, out := &in.RequestQueryParam, &out.RequestQueryParam + *out = make([]RequestQueryParamParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RequestURI != nil { + in, out := &in.RequestURI, &out.RequestURI + *out = make([]RequestURIParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TargetRuleIds != nil { + in, out := &in.TargetRuleIds, &out.TargetRuleIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TargetRuleSet != nil { + in, out := &in.TargetRuleSet, &out.TargetRuleSet + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExclusionParameters. +func (in *ExclusionParameters) DeepCopy() *ExclusionParameters { + if in == nil { + return nil + } + out := new(ExclusionParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ExprInitParameters) DeepCopyInto(out *ExprInitParameters) { *out = *in @@ -11567,6 +12107,11 @@ func (in *FirewallPolicyRuleObservation) DeepCopyInto(out *FirewallPolicyRuleObs *out = new(string) **out = **in } + if in.CreationTimestamp != nil { + in, out := &in.CreationTimestamp, &out.CreationTimestamp + *out = new(string) + **out = **in + } if in.Description != nil { in, out := &in.Description, &out.Description *out = new(string) @@ -12851,6 +13396,11 @@ func (in *GlobalForwardingRuleInitParameters) DeepCopyInto(out *GlobalForwarding *out = new(v1.Selector) (*in).DeepCopyInto(*out) } + if in.NetworkTier != nil { + in, out := &in.NetworkTier, &out.NetworkTier + *out = new(string) + **out = **in + } if in.NoAutomateDNSZone != nil { in, out := &in.NoAutomateDNSZone, &out.NoAutomateDNSZone *out = new(bool) @@ -12995,6 +13545,11 @@ func (in *GlobalForwardingRuleObservation) DeepCopyInto(out *GlobalForwardingRul (*out)[key] = outVal } } + if in.ForwardingRuleID != nil { + in, out := &in.ForwardingRuleID, &out.ForwardingRuleID + *out = new(float64) + **out = **in + } if in.ID != nil { in, out := &in.ID, &out.ID *out = new(string) @@ -13053,6 +13608,11 @@ func (in *GlobalForwardingRuleObservation) DeepCopyInto(out *GlobalForwardingRul *out = new(string) **out = **in } + if in.NetworkTier != nil { + in, out := &in.NetworkTier, &out.NetworkTier + *out = new(string) + **out = **in + } if in.NoAutomateDNSZone != nil { in, out := &in.NoAutomateDNSZone, &out.NoAutomateDNSZone *out = new(bool) @@ -13213,6 +13773,11 @@ func (in *GlobalForwardingRuleParameters) DeepCopyInto(out *GlobalForwardingRule *out = new(v1.Selector) (*in).DeepCopyInto(*out) } + if in.NetworkTier != nil { + in, out := &in.NetworkTier, &out.NetworkTier + *out = new(string) + **out = **in + } if in.NoAutomateDNSZone != nil { in, out := &in.NoAutomateDNSZone, &out.NoAutomateDNSZone *out = new(bool) @@ -15707,12 +16272,21 @@ func (in *IPv6AccessConfigParameters) DeepCopy() *IPv6AccessConfigParameters 
{ // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IapInitParameters) DeepCopyInto(out *IapInitParameters) { *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } if in.Oauth2ClientID != nil { in, out := &in.Oauth2ClientID, &out.Oauth2ClientID *out = new(string) **out = **in } - out.Oauth2ClientSecretSecretRef = in.Oauth2ClientSecretSecretRef + if in.Oauth2ClientSecretSecretRef != nil { + in, out := &in.Oauth2ClientSecretSecretRef, &out.Oauth2ClientSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IapInitParameters. @@ -15728,6 +16302,11 @@ func (in *IapInitParameters) DeepCopy() *IapInitParameters { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IapObservation) DeepCopyInto(out *IapObservation) { *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } if in.Oauth2ClientID != nil { in, out := &in.Oauth2ClientID, &out.Oauth2ClientID *out = new(string) @@ -15748,12 +16327,21 @@ func (in *IapObservation) DeepCopy() *IapObservation { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *IapParameters) DeepCopyInto(out *IapParameters) { *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } if in.Oauth2ClientID != nil { in, out := &in.Oauth2ClientID, &out.Oauth2ClientID *out = new(string) **out = **in } - out.Oauth2ClientSecretSecretRef = in.Oauth2ClientSecretSecretRef + if in.Oauth2ClientSecretSecretRef != nil { + in, out := &in.Oauth2ClientSecretSecretRef, &out.Oauth2ClientSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IapParameters. @@ -16334,6 +16922,16 @@ func (in *ImageInitParameters) DeepCopyInto(out *ImageInitParameters) { *out = new(string) **out = **in } + if in.SourceDiskRef != nil { + in, out := &in.SourceDiskRef, &out.SourceDiskRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SourceDiskSelector != nil { + in, out := &in.SourceDiskSelector, &out.SourceDiskSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } if in.SourceImage != nil { in, out := &in.SourceImage, &out.SourceImage *out = new(string) @@ -16633,6 +17231,16 @@ func (in *ImageParameters) DeepCopyInto(out *ImageParameters) { *out = new(string) **out = **in } + if in.SourceDiskRef != nil { + in, out := &in.SourceDiskRef, &out.SourceDiskRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SourceDiskSelector != nil { + in, out := &in.SourceDiskSelector, &out.SourceDiskSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } if in.SourceImage != nil { in, out := &in.SourceImage, &out.SourceImage *out = new(string) @@ -16757,6 +17365,17 @@ func (in *InitializeParamsInitParameters) DeepCopyInto(out *InitializeParamsInit (*out)[key] = outVal } } + if in.ResourcePolicies != nil { + in, out := &in.ResourcePolicies, &out.ResourcePolicies + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] 
+ *out = new(string) + **out = **in + } + } + } if in.Size != nil { in, out := &in.Size, &out.Size *out = new(float64) @@ -16830,6 +17449,17 @@ func (in *InitializeParamsObservation) DeepCopyInto(out *InitializeParamsObserva (*out)[key] = outVal } } + if in.ResourcePolicies != nil { + in, out := &in.ResourcePolicies, &out.ResourcePolicies + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.Size != nil { in, out := &in.Size, &out.Size *out = new(float64) @@ -16913,6 +17543,17 @@ func (in *InitializeParamsParameters) DeepCopyInto(out *InitializeParamsParamete (*out)[key] = outVal } } + if in.ResourcePolicies != nil { + in, out := &in.ResourcePolicies, &out.ResourcePolicies + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.Size != nil { in, out := &in.Size, &out.Size *out = new(float64) @@ -16967,6 +17608,72 @@ func (in *Instance) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceFlexibilityPolicyInitParameters) DeepCopyInto(out *InstanceFlexibilityPolicyInitParameters) { + *out = *in + if in.InstanceSelections != nil { + in, out := &in.InstanceSelections, &out.InstanceSelections + *out = make([]InstanceSelectionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceFlexibilityPolicyInitParameters. 
+func (in *InstanceFlexibilityPolicyInitParameters) DeepCopy() *InstanceFlexibilityPolicyInitParameters { + if in == nil { + return nil + } + out := new(InstanceFlexibilityPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceFlexibilityPolicyObservation) DeepCopyInto(out *InstanceFlexibilityPolicyObservation) { + *out = *in + if in.InstanceSelections != nil { + in, out := &in.InstanceSelections, &out.InstanceSelections + *out = make([]InstanceSelectionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceFlexibilityPolicyObservation. +func (in *InstanceFlexibilityPolicyObservation) DeepCopy() *InstanceFlexibilityPolicyObservation { + if in == nil { + return nil + } + out := new(InstanceFlexibilityPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceFlexibilityPolicyParameters) DeepCopyInto(out *InstanceFlexibilityPolicyParameters) { + *out = *in + if in.InstanceSelections != nil { + in, out := &in.InstanceSelections, &out.InstanceSelections + *out = make([]InstanceSelectionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceFlexibilityPolicyParameters. +func (in *InstanceFlexibilityPolicyParameters) DeepCopy() *InstanceFlexibilityPolicyParameters { + if in == nil { + return nil + } + out := new(InstanceFlexibilityPolicyParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *InstanceFromTemplate) DeepCopyInto(out *InstanceFromTemplate) { *out = *in @@ -17002,11 +17709,26 @@ func (in *InstanceFromTemplateAdvancedMachineFeaturesInitParameters) DeepCopyInt *out = new(bool) **out = **in } + if in.EnableUefiNetworking != nil { + in, out := &in.EnableUefiNetworking, &out.EnableUefiNetworking + *out = new(bool) + **out = **in + } + if in.PerformanceMonitoringUnit != nil { + in, out := &in.PerformanceMonitoringUnit, &out.PerformanceMonitoringUnit + *out = new(string) + **out = **in + } if in.ThreadsPerCore != nil { in, out := &in.ThreadsPerCore, &out.ThreadsPerCore *out = new(float64) **out = **in } + if in.TurboMode != nil { + in, out := &in.TurboMode, &out.TurboMode + *out = new(string) + **out = **in + } if in.VisibleCoreCount != nil { in, out := &in.VisibleCoreCount, &out.VisibleCoreCount *out = new(float64) @@ -17032,11 +17754,26 @@ func (in *InstanceFromTemplateAdvancedMachineFeaturesObservation) DeepCopyInto(o *out = new(bool) **out = **in } + if in.EnableUefiNetworking != nil { + in, out := &in.EnableUefiNetworking, &out.EnableUefiNetworking + *out = new(bool) + **out = **in + } + if in.PerformanceMonitoringUnit != nil { + in, out := &in.PerformanceMonitoringUnit, &out.PerformanceMonitoringUnit + *out = new(string) + **out = **in + } if in.ThreadsPerCore != nil { in, out := &in.ThreadsPerCore, &out.ThreadsPerCore *out = new(float64) **out = **in } + if in.TurboMode != nil { + in, out := &in.TurboMode, &out.TurboMode + *out = new(string) + **out = **in + } if in.VisibleCoreCount != nil { in, out := &in.VisibleCoreCount, &out.VisibleCoreCount *out = new(float64) @@ -17062,11 +17799,26 @@ func (in *InstanceFromTemplateAdvancedMachineFeaturesParameters) DeepCopyInto(ou *out = new(bool) **out = **in } + if in.EnableUefiNetworking != nil { + in, out := &in.EnableUefiNetworking, &out.EnableUefiNetworking + *out = new(bool) + **out = **in + } + if in.PerformanceMonitoringUnit != nil { + in, out := &in.PerformanceMonitoringUnit, 
&out.PerformanceMonitoringUnit + *out = new(string) + **out = **in + } if in.ThreadsPerCore != nil { in, out := &in.ThreadsPerCore, &out.ThreadsPerCore *out = new(float64) **out = **in } + if in.TurboMode != nil { + in, out := &in.TurboMode, &out.TurboMode + *out = new(string) + **out = **in + } if in.VisibleCoreCount != nil { in, out := &in.VisibleCoreCount, &out.VisibleCoreCount *out = new(float64) @@ -17092,14 +17844,9 @@ func (in *InstanceFromTemplateAttachedDiskInitParameters) DeepCopyInto(out *Inst *out = new(string) **out = **in } - if in.DiskEncryptionKeyRaw != nil { - in, out := &in.DiskEncryptionKeyRaw, &out.DiskEncryptionKeyRaw - *out = new(string) - **out = **in - } - if in.DiskEncryptionKeySha256 != nil { - in, out := &in.DiskEncryptionKeySha256, &out.DiskEncryptionKeySha256 - *out = new(string) + if in.DiskEncryptionKeyRawSecretRef != nil { + in, out := &in.DiskEncryptionKeyRawSecretRef, &out.DiskEncryptionKeyRawSecretRef + *out = new(v1.SecretKeySelector) **out = **in } if in.KMSKeySelfLink != nil { @@ -17137,11 +17884,6 @@ func (in *InstanceFromTemplateAttachedDiskObservation) DeepCopyInto(out *Instanc *out = new(string) **out = **in } - if in.DiskEncryptionKeyRaw != nil { - in, out := &in.DiskEncryptionKeyRaw, &out.DiskEncryptionKeyRaw - *out = new(string) - **out = **in - } if in.DiskEncryptionKeySha256 != nil { in, out := &in.DiskEncryptionKeySha256, &out.DiskEncryptionKeySha256 *out = new(string) @@ -17182,14 +17924,9 @@ func (in *InstanceFromTemplateAttachedDiskParameters) DeepCopyInto(out *Instance *out = new(string) **out = **in } - if in.DiskEncryptionKeyRaw != nil { - in, out := &in.DiskEncryptionKeyRaw, &out.DiskEncryptionKeyRaw - *out = new(string) - **out = **in - } - if in.DiskEncryptionKeySha256 != nil { - in, out := &in.DiskEncryptionKeySha256, &out.DiskEncryptionKeySha256 - *out = new(string) + if in.DiskEncryptionKeyRawSecretRef != nil { + in, out := &in.DiskEncryptionKeyRawSecretRef, &out.DiskEncryptionKeyRawSecretRef + *out = 
new(v1.SecretKeySelector) **out = **in } if in.KMSKeySelfLink != nil { @@ -17242,6 +17979,11 @@ func (in *InstanceFromTemplateBootDiskInitParameters) DeepCopyInto(out *Instance *out = new(BootDiskInitializeParamsInitParameters) (*in).DeepCopyInto(*out) } + if in.Interface != nil { + in, out := &in.Interface, &out.Interface + *out = new(string) + **out = **in + } if in.KMSKeySelfLink != nil { in, out := &in.KMSKeySelfLink, &out.KMSKeySelfLink *out = new(string) @@ -17292,6 +18034,11 @@ func (in *InstanceFromTemplateBootDiskObservation) DeepCopyInto(out *InstanceFro *out = new(BootDiskInitializeParamsObservation) (*in).DeepCopyInto(*out) } + if in.Interface != nil { + in, out := &in.Interface, &out.Interface + *out = new(string) + **out = **in + } if in.KMSKeySelfLink != nil { in, out := &in.KMSKeySelfLink, &out.KMSKeySelfLink *out = new(string) @@ -17342,6 +18089,11 @@ func (in *InstanceFromTemplateBootDiskParameters) DeepCopyInto(out *InstanceFrom *out = new(BootDiskInitializeParamsParameters) (*in).DeepCopyInto(*out) } + if in.Interface != nil { + in, out := &in.Interface, &out.Interface + *out = new(string) + **out = **in + } if in.KMSKeySelfLink != nil { in, out := &in.KMSKeySelfLink, &out.KMSKeySelfLink *out = new(string) @@ -17554,11 +18306,6 @@ func (in *InstanceFromTemplateInitParameters) DeepCopyInto(out *InstanceFromTemp *out = new(InstanceFromTemplateConfidentialInstanceConfigInitParameters) (*in).DeepCopyInto(*out) } - if in.DeletionProtection != nil { - in, out := &in.DeletionProtection, &out.DeletionProtection - *out = new(bool) - **out = **in - } if in.Description != nil { in, out := &in.Description, &out.Description *out = new(string) @@ -17586,6 +18333,11 @@ func (in *InstanceFromTemplateInitParameters) DeepCopyInto(out *InstanceFromTemp *out = new(string) **out = **in } + if in.KeyRevocationActionType != nil { + in, out := &in.KeyRevocationActionType, &out.KeyRevocationActionType + *out = new(string) + **out = **in + } if in.Labels != nil { in, out 
:= &in.Labels, &out.Labels *out = make(map[string]*string, len(*in)) @@ -18157,6 +18909,11 @@ func (in *InstanceFromTemplateObservation) DeepCopyInto(out *InstanceFromTemplat *out = new(InstanceFromTemplateConfidentialInstanceConfigObservation) (*in).DeepCopyInto(*out) } + if in.CreationTimestamp != nil { + in, out := &in.CreationTimestamp, &out.CreationTimestamp + *out = new(string) + **out = **in + } if in.CurrentStatus != nil { in, out := &in.CurrentStatus, &out.CurrentStatus *out = new(string) @@ -18220,6 +18977,11 @@ func (in *InstanceFromTemplateObservation) DeepCopyInto(out *InstanceFromTemplat *out = new(string) **out = **in } + if in.KeyRevocationActionType != nil { + in, out := &in.KeyRevocationActionType, &out.KeyRevocationActionType + *out = new(string) + **out = **in + } if in.LabelFingerprint != nil { in, out := &in.LabelFingerprint, &out.LabelFingerprint *out = new(string) @@ -18427,11 +19189,6 @@ func (in *InstanceFromTemplateParameters) DeepCopyInto(out *InstanceFromTemplate *out = new(InstanceFromTemplateConfidentialInstanceConfigParameters) (*in).DeepCopyInto(*out) } - if in.DeletionProtection != nil { - in, out := &in.DeletionProtection, &out.DeletionProtection - *out = new(bool) - **out = **in - } if in.Description != nil { in, out := &in.Description, &out.Description *out = new(string) @@ -18459,6 +19216,11 @@ func (in *InstanceFromTemplateParameters) DeepCopyInto(out *InstanceFromTemplate *out = new(string) **out = **in } + if in.KeyRevocationActionType != nil { + in, out := &in.KeyRevocationActionType, &out.KeyRevocationActionType + *out = new(string) + **out = **in + } if in.Labels != nil { in, out := &in.Labels, &out.Labels *out = make(map[string]*string, len(*in)) @@ -18781,6 +19543,11 @@ func (in *InstanceFromTemplateSchedulingInitParameters) DeepCopyInto(out *Instan *out = new(bool) **out = **in } + if in.AvailabilityDomain != nil { + in, out := &in.AvailabilityDomain, &out.AvailabilityDomain + *out = new(float64) + **out = **in + } if 
in.InstanceTerminationAction != nil { in, out := &in.InstanceTerminationAction, &out.InstanceTerminationAction *out = new(string) @@ -18848,6 +19615,11 @@ func (in *InstanceFromTemplateSchedulingObservation) DeepCopyInto(out *InstanceF *out = new(bool) **out = **in } + if in.AvailabilityDomain != nil { + in, out := &in.AvailabilityDomain, &out.AvailabilityDomain + *out = new(float64) + **out = **in + } if in.InstanceTerminationAction != nil { in, out := &in.InstanceTerminationAction, &out.InstanceTerminationAction *out = new(string) @@ -18915,6 +19687,11 @@ func (in *InstanceFromTemplateSchedulingParameters) DeepCopyInto(out *InstanceFr *out = new(bool) **out = **in } + if in.AvailabilityDomain != nil { + in, out := &in.AvailabilityDomain, &out.AvailabilityDomain + *out = new(float64) + **out = **in + } if in.InstanceTerminationAction != nil { in, out := &in.InstanceTerminationAction, &out.InstanceTerminationAction *out = new(string) @@ -19354,6 +20131,11 @@ func (in *InstanceGroupManagerInitParameters) DeepCopyInto(out *InstanceGroupMan *out = new(string) **out = **in } + if in.StandbyPolicy != nil { + in, out := &in.StandbyPolicy, &out.StandbyPolicy + *out = new(StandbyPolicyInitParameters) + (*in).DeepCopyInto(*out) + } if in.StatefulDisk != nil { in, out := &in.StatefulDisk, &out.StatefulDisk *out = make([]StatefulDiskInitParameters, len(*in)) @@ -19403,6 +20185,16 @@ func (in *InstanceGroupManagerInitParameters) DeepCopyInto(out *InstanceGroupMan *out = new(float64) **out = **in } + if in.TargetStoppedSize != nil { + in, out := &in.TargetStoppedSize, &out.TargetStoppedSize + *out = new(float64) + **out = **in + } + if in.TargetSuspendedSize != nil { + in, out := &in.TargetSuspendedSize, &out.TargetSuspendedSize + *out = new(float64) + **out = **in + } if in.UpdatePolicy != nil { in, out := &in.UpdatePolicy, &out.UpdatePolicy *out = new(UpdatePolicyInitParameters) @@ -19512,6 +20304,11 @@ func (in *InstanceGroupManagerObservation) DeepCopyInto(out 
*InstanceGroupManage *out = new(string) **out = **in } + if in.InstanceGroupManagerID != nil { + in, out := &in.InstanceGroupManagerID, &out.InstanceGroupManagerID + *out = new(float64) + **out = **in + } if in.InstanceLifecyclePolicy != nil { in, out := &in.InstanceLifecyclePolicy, &out.InstanceLifecyclePolicy *out = new(InstanceLifecyclePolicyObservation) @@ -19544,6 +20341,11 @@ func (in *InstanceGroupManagerObservation) DeepCopyInto(out *InstanceGroupManage *out = new(string) **out = **in } + if in.StandbyPolicy != nil { + in, out := &in.StandbyPolicy, &out.StandbyPolicy + *out = new(StandbyPolicyObservation) + (*in).DeepCopyInto(*out) + } if in.StatefulDisk != nil { in, out := &in.StatefulDisk, &out.StatefulDisk *out = make([]StatefulDiskObservation, len(*in)) @@ -19588,6 +20390,16 @@ func (in *InstanceGroupManagerObservation) DeepCopyInto(out *InstanceGroupManage *out = new(float64) **out = **in } + if in.TargetStoppedSize != nil { + in, out := &in.TargetStoppedSize, &out.TargetStoppedSize + *out = new(float64) + **out = **in + } + if in.TargetSuspendedSize != nil { + in, out := &in.TargetSuspendedSize, &out.TargetSuspendedSize + *out = new(float64) + **out = **in + } if in.UpdatePolicy != nil { in, out := &in.UpdatePolicy, &out.UpdatePolicy *out = new(UpdatePolicyObservation) @@ -19672,6 +20484,11 @@ func (in *InstanceGroupManagerParameters) DeepCopyInto(out *InstanceGroupManager *out = new(string) **out = **in } + if in.StandbyPolicy != nil { + in, out := &in.StandbyPolicy, &out.StandbyPolicy + *out = new(StandbyPolicyParameters) + (*in).DeepCopyInto(*out) + } if in.StatefulDisk != nil { in, out := &in.StatefulDisk, &out.StatefulDisk *out = make([]StatefulDiskParameters, len(*in)) @@ -19721,6 +20538,16 @@ func (in *InstanceGroupManagerParameters) DeepCopyInto(out *InstanceGroupManager *out = new(float64) **out = **in } + if in.TargetStoppedSize != nil { + in, out := &in.TargetStoppedSize, &out.TargetStoppedSize + *out = new(float64) + **out = **in + } + if 
in.TargetSuspendedSize != nil { + in, out := &in.TargetSuspendedSize, &out.TargetSuspendedSize + *out = new(float64) + **out = **in + } if in.UpdatePolicy != nil { in, out := &in.UpdatePolicy, &out.UpdatePolicy *out = new(UpdatePolicyParameters) @@ -20179,11 +21006,6 @@ func (in *InstanceInitParameters) DeepCopyInto(out *InstanceInitParameters) { *out = new(ConfidentialInstanceConfigInitParameters) (*in).DeepCopyInto(*out) } - if in.DeletionProtection != nil { - in, out := &in.DeletionProtection, &out.DeletionProtection - *out = new(bool) - **out = **in - } if in.Description != nil { in, out := &in.Description, &out.Description *out = new(string) @@ -20211,6 +21033,11 @@ func (in *InstanceInitParameters) DeepCopyInto(out *InstanceInitParameters) { *out = new(string) **out = **in } + if in.KeyRevocationActionType != nil { + in, out := &in.KeyRevocationActionType, &out.KeyRevocationActionType + *out = new(string) + **out = **in + } if in.Labels != nil { in, out := &in.Labels, &out.Labels *out = make(map[string]*string, len(*in)) @@ -20488,6 +21315,11 @@ func (in *InstanceObservation) DeepCopyInto(out *InstanceObservation) { *out = new(ConfidentialInstanceConfigObservation) (*in).DeepCopyInto(*out) } + if in.CreationTimestamp != nil { + in, out := &in.CreationTimestamp, &out.CreationTimestamp + *out = new(string) + **out = **in + } if in.CurrentStatus != nil { in, out := &in.CurrentStatus, &out.CurrentStatus *out = new(string) @@ -20551,6 +21383,11 @@ func (in *InstanceObservation) DeepCopyInto(out *InstanceObservation) { *out = new(string) **out = **in } + if in.KeyRevocationActionType != nil { + in, out := &in.KeyRevocationActionType, &out.KeyRevocationActionType + *out = new(string) + **out = **in + } if in.LabelFingerprint != nil { in, out := &in.LabelFingerprint, &out.LabelFingerprint *out = new(string) @@ -20757,11 +21594,6 @@ func (in *InstanceParameters) DeepCopyInto(out *InstanceParameters) { *out = new(ConfidentialInstanceConfigParameters) 
(*in).DeepCopyInto(*out) } - if in.DeletionProtection != nil { - in, out := &in.DeletionProtection, &out.DeletionProtection - *out = new(bool) - **out = **in - } if in.Description != nil { in, out := &in.Description, &out.Description *out = new(string) @@ -20789,6 +21621,11 @@ func (in *InstanceParameters) DeepCopyInto(out *InstanceParameters) { *out = new(string) **out = **in } + if in.KeyRevocationActionType != nil { + in, out := &in.KeyRevocationActionType, &out.KeyRevocationActionType + *out = new(string) + **out = **in + } if in.Labels != nil { in, out := &in.Labels, &out.Labels *out = make(map[string]*string, len(*in)) @@ -21161,6 +21998,114 @@ func (in *InstanceSchedulePolicyParameters) DeepCopy() *InstanceSchedulePolicyPa return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceSelectionsInitParameters) DeepCopyInto(out *InstanceSelectionsInitParameters) { + *out = *in + if in.MachineTypes != nil { + in, out := &in.MachineTypes, &out.MachineTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Rank != nil { + in, out := &in.Rank, &out.Rank + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceSelectionsInitParameters. +func (in *InstanceSelectionsInitParameters) DeepCopy() *InstanceSelectionsInitParameters { + if in == nil { + return nil + } + out := new(InstanceSelectionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InstanceSelectionsObservation) DeepCopyInto(out *InstanceSelectionsObservation) { + *out = *in + if in.MachineTypes != nil { + in, out := &in.MachineTypes, &out.MachineTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Rank != nil { + in, out := &in.Rank, &out.Rank + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceSelectionsObservation. +func (in *InstanceSelectionsObservation) DeepCopy() *InstanceSelectionsObservation { + if in == nil { + return nil + } + out := new(InstanceSelectionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceSelectionsParameters) DeepCopyInto(out *InstanceSelectionsParameters) { + *out = *in + if in.MachineTypes != nil { + in, out := &in.MachineTypes, &out.MachineTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Rank != nil { + in, out := &in.Rank, &out.Rank + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceSelectionsParameters. +func (in *InstanceSelectionsParameters) DeepCopy() *InstanceSelectionsParameters { + if in == nil { + return nil + } + out := new(InstanceSelectionsParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *InstanceSpec) DeepCopyInto(out *InstanceSpec) { *out = *in @@ -21231,11 +22176,26 @@ func (in *InstanceTemplateAdvancedMachineFeaturesInitParameters) DeepCopyInto(ou *out = new(bool) **out = **in } + if in.EnableUefiNetworking != nil { + in, out := &in.EnableUefiNetworking, &out.EnableUefiNetworking + *out = new(bool) + **out = **in + } + if in.PerformanceMonitoringUnit != nil { + in, out := &in.PerformanceMonitoringUnit, &out.PerformanceMonitoringUnit + *out = new(string) + **out = **in + } if in.ThreadsPerCore != nil { in, out := &in.ThreadsPerCore, &out.ThreadsPerCore *out = new(float64) **out = **in } + if in.TurboMode != nil { + in, out := &in.TurboMode, &out.TurboMode + *out = new(string) + **out = **in + } if in.VisibleCoreCount != nil { in, out := &in.VisibleCoreCount, &out.VisibleCoreCount *out = new(float64) @@ -21261,11 +22221,26 @@ func (in *InstanceTemplateAdvancedMachineFeaturesObservation) DeepCopyInto(out * *out = new(bool) **out = **in } + if in.EnableUefiNetworking != nil { + in, out := &in.EnableUefiNetworking, &out.EnableUefiNetworking + *out = new(bool) + **out = **in + } + if in.PerformanceMonitoringUnit != nil { + in, out := &in.PerformanceMonitoringUnit, &out.PerformanceMonitoringUnit + *out = new(string) + **out = **in + } if in.ThreadsPerCore != nil { in, out := &in.ThreadsPerCore, &out.ThreadsPerCore *out = new(float64) **out = **in } + if in.TurboMode != nil { + in, out := &in.TurboMode, &out.TurboMode + *out = new(string) + **out = **in + } if in.VisibleCoreCount != nil { in, out := &in.VisibleCoreCount, &out.VisibleCoreCount *out = new(float64) @@ -21291,11 +22266,26 @@ func (in *InstanceTemplateAdvancedMachineFeaturesParameters) DeepCopyInto(out *I *out = new(bool) **out = **in } + if in.EnableUefiNetworking != nil { + in, out := &in.EnableUefiNetworking, &out.EnableUefiNetworking + *out = new(bool) + **out = **in + } + if in.PerformanceMonitoringUnit != nil { + in, out := &in.PerformanceMonitoringUnit, 
&out.PerformanceMonitoringUnit + *out = new(string) + **out = **in + } if in.ThreadsPerCore != nil { in, out := &in.ThreadsPerCore, &out.ThreadsPerCore *out = new(float64) **out = **in } + if in.TurboMode != nil { + in, out := &in.TurboMode, &out.TurboMode + *out = new(string) + **out = **in + } if in.VisibleCoreCount != nil { in, out := &in.VisibleCoreCount, &out.VisibleCoreCount *out = new(float64) @@ -21457,6 +22447,11 @@ func (in *InstanceTemplateDiskInitParameters) DeepCopyInto(out *InstanceTemplate *out = new(float64) **out = **in } + if in.ProvisionedThroughput != nil { + in, out := &in.ProvisionedThroughput, &out.ProvisionedThroughput + *out = new(float64) + **out = **in + } if in.ResourceManagerTags != nil { in, out := &in.ResourceManagerTags, &out.ResourceManagerTags *out = make(map[string]*string, len(*in)) @@ -21617,6 +22612,11 @@ func (in *InstanceTemplateDiskObservation) DeepCopyInto(out *InstanceTemplateDis *out = new(float64) **out = **in } + if in.ProvisionedThroughput != nil { + in, out := &in.ProvisionedThroughput, &out.ProvisionedThroughput + *out = new(float64) + **out = **in + } if in.ResourceManagerTags != nil { in, out := &in.ResourceManagerTags, &out.ResourceManagerTags *out = make(map[string]*string, len(*in)) @@ -21755,6 +22755,11 @@ func (in *InstanceTemplateDiskParameters) DeepCopyInto(out *InstanceTemplateDisk *out = new(float64) **out = **in } + if in.ProvisionedThroughput != nil { + in, out := &in.ProvisionedThroughput, &out.ProvisionedThroughput + *out = new(float64) + **out = **in + } if in.ResourceManagerTags != nil { in, out := &in.ResourceManagerTags, &out.ResourceManagerTags *out = make(map[string]*string, len(*in)) @@ -21963,6 +22968,11 @@ func (in *InstanceTemplateInitParameters) DeepCopyInto(out *InstanceTemplateInit *out = new(string) **out = **in } + if in.KeyRevocationActionType != nil { + in, out := &in.KeyRevocationActionType, &out.KeyRevocationActionType + *out = new(string) + **out = **in + } if in.Labels != nil { in, 
out := &in.Labels, &out.Labels *out = make(map[string]*string, len(*in)) @@ -22741,6 +23751,11 @@ func (in *InstanceTemplateObservation) DeepCopyInto(out *InstanceTemplateObserva *out = new(InstanceTemplateConfidentialInstanceConfigObservation) (*in).DeepCopyInto(*out) } + if in.CreationTimestamp != nil { + in, out := &in.CreationTimestamp, &out.CreationTimestamp + *out = new(string) + **out = **in + } if in.Description != nil { in, out := &in.Description, &out.Description *out = new(string) @@ -22786,6 +23801,11 @@ func (in *InstanceTemplateObservation) DeepCopyInto(out *InstanceTemplateObserva *out = new(string) **out = **in } + if in.KeyRevocationActionType != nil { + in, out := &in.KeyRevocationActionType, &out.KeyRevocationActionType + *out = new(string) + **out = **in + } if in.Labels != nil { in, out := &in.Labels, &out.Labels *out = make(map[string]*string, len(*in)) @@ -23004,6 +24024,11 @@ func (in *InstanceTemplateParameters) DeepCopyInto(out *InstanceTemplateParamete *out = new(string) **out = **in } + if in.KeyRevocationActionType != nil { + in, out := &in.KeyRevocationActionType, &out.KeyRevocationActionType + *out = new(string) + **out = **in + } if in.Labels != nil { in, out := &in.Labels, &out.Labels *out = make(map[string]*string, len(*in)) @@ -23320,6 +24345,11 @@ func (in *InstanceTemplateSchedulingInitParameters) DeepCopyInto(out *InstanceTe *out = new(bool) **out = **in } + if in.AvailabilityDomain != nil { + in, out := &in.AvailabilityDomain, &out.AvailabilityDomain + *out = new(float64) + **out = **in + } if in.InstanceTerminationAction != nil { in, out := &in.InstanceTerminationAction, &out.InstanceTerminationAction *out = new(string) @@ -23647,6 +24677,11 @@ func (in *InstanceTemplateSchedulingObservation) DeepCopyInto(out *InstanceTempl *out = new(bool) **out = **in } + if in.AvailabilityDomain != nil { + in, out := &in.AvailabilityDomain, &out.AvailabilityDomain + *out = new(float64) + **out = **in + } if in.InstanceTerminationAction != 
nil { in, out := &in.InstanceTerminationAction, &out.InstanceTerminationAction *out = new(string) @@ -23776,6 +24811,11 @@ func (in *InstanceTemplateSchedulingParameters) DeepCopyInto(out *InstanceTempla *out = new(bool) **out = **in } + if in.AvailabilityDomain != nil { + in, out := &in.AvailabilityDomain, &out.AvailabilityDomain + *out = new(float64) + **out = **in + } if in.InstanceTerminationAction != nil { in, out := &in.InstanceTerminationAction, &out.InstanceTerminationAction *out = new(string) @@ -24564,6 +25604,13 @@ func (in *Layer7DdosDefenseConfigInitParameters) DeepCopyInto(out *Layer7DdosDef *out = new(string) **out = **in } + if in.ThresholdConfigs != nil { + in, out := &in.ThresholdConfigs, &out.ThresholdConfigs + *out = make([]ThresholdConfigsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Layer7DdosDefenseConfigInitParameters. @@ -24589,6 +25636,13 @@ func (in *Layer7DdosDefenseConfigObservation) DeepCopyInto(out *Layer7DdosDefens *out = new(string) **out = **in } + if in.ThresholdConfigs != nil { + in, out := &in.ThresholdConfigs, &out.ThresholdConfigs + *out = make([]ThresholdConfigsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Layer7DdosDefenseConfigObservation. @@ -24614,6 +25668,13 @@ func (in *Layer7DdosDefenseConfigParameters) DeepCopyInto(out *Layer7DdosDefense *out = new(string) **out = **in } + if in.ThresholdConfigs != nil { + in, out := &in.ThresholdConfigs, &out.ThresholdConfigs + *out = make([]ThresholdConfigsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Layer7DdosDefenseConfigParameters. 
@@ -25154,11 +26215,6 @@ func (in *ManagedSSLCertificate) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ManagedSSLCertificateInitParameters) DeepCopyInto(out *ManagedSSLCertificateInitParameters) { *out = *in - if in.CertificateID != nil { - in, out := &in.CertificateID, &out.CertificateID - *out = new(float64) - **out = **in - } if in.Description != nil { in, out := &in.Description, &out.Description *out = new(string) @@ -25297,11 +26353,6 @@ func (in *ManagedSSLCertificateObservation) DeepCopy() *ManagedSSLCertificateObs // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ManagedSSLCertificateParameters) DeepCopyInto(out *ManagedSSLCertificateParameters) { *out = *in - if in.CertificateID != nil { - in, out := &in.CertificateID, &out.CertificateID - *out = new(float64) - **out = **in - } if in.Description != nil { in, out := &in.Description, &out.Description *out = new(string) @@ -28381,6 +29432,13 @@ func (in *NodeTemplate) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *NodeTemplateInitParameters) DeepCopyInto(out *NodeTemplateInitParameters) { *out = *in + if in.Accelerators != nil { + in, out := &in.Accelerators, &out.Accelerators + *out = make([]AcceleratorsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.CPUOvercommitType != nil { in, out := &in.CPUOvercommitType, &out.CPUOvercommitType *out = new(string) @@ -28391,6 +29449,13 @@ func (in *NodeTemplateInitParameters) DeepCopyInto(out *NodeTemplateInitParamete *out = new(string) **out = **in } + if in.Disks != nil { + in, out := &in.Disks, &out.Disks + *out = make([]DisksInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.NodeAffinityLabels != nil { in, out := &in.NodeAffinityLabels, &out.NodeAffinityLabels *out = make(map[string]*string, len(*in)) @@ -28474,6 +29539,13 @@ func (in *NodeTemplateList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *NodeTemplateObservation) DeepCopyInto(out *NodeTemplateObservation) { *out = *in + if in.Accelerators != nil { + in, out := &in.Accelerators, &out.Accelerators + *out = make([]AcceleratorsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.CPUOvercommitType != nil { in, out := &in.CPUOvercommitType, &out.CPUOvercommitType *out = new(string) @@ -28489,6 +29561,13 @@ func (in *NodeTemplateObservation) DeepCopyInto(out *NodeTemplateObservation) { *out = new(string) **out = **in } + if in.Disks != nil { + in, out := &in.Disks, &out.Disks + *out = make([]DisksObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.ID != nil { in, out := &in.ID, &out.ID *out = new(string) @@ -28555,6 +29634,13 @@ func (in *NodeTemplateObservation) DeepCopy() *NodeTemplateObservation { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NodeTemplateParameters) DeepCopyInto(out *NodeTemplateParameters) { *out = *in + if in.Accelerators != nil { + in, out := &in.Accelerators, &out.Accelerators + *out = make([]AcceleratorsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.CPUOvercommitType != nil { in, out := &in.CPUOvercommitType, &out.CPUOvercommitType *out = new(string) @@ -28565,6 +29651,13 @@ func (in *NodeTemplateParameters) DeepCopyInto(out *NodeTemplateParameters) { *out = new(string) **out = **in } + if in.Disks != nil { + in, out := &in.Disks, &out.Disks + *out = make([]DisksParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.NodeAffinityLabels != nil { in, out := &in.NodeAffinityLabels, &out.NodeAffinityLabels *out = make(map[string]*string, len(*in)) @@ -35442,6 +36535,72 @@ func (in *PolicyParameters) DeepCopy() *PolicyParameters { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing 
into out. in must be non-nil. +func (in *PreconfiguredWafConfigInitParameters) DeepCopyInto(out *PreconfiguredWafConfigInitParameters) { + *out = *in + if in.Exclusion != nil { + in, out := &in.Exclusion, &out.Exclusion + *out = make([]ExclusionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreconfiguredWafConfigInitParameters. +func (in *PreconfiguredWafConfigInitParameters) DeepCopy() *PreconfiguredWafConfigInitParameters { + if in == nil { + return nil + } + out := new(PreconfiguredWafConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PreconfiguredWafConfigObservation) DeepCopyInto(out *PreconfiguredWafConfigObservation) { + *out = *in + if in.Exclusion != nil { + in, out := &in.Exclusion, &out.Exclusion + *out = make([]ExclusionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreconfiguredWafConfigObservation. +func (in *PreconfiguredWafConfigObservation) DeepCopy() *PreconfiguredWafConfigObservation { + if in == nil { + return nil + } + out := new(PreconfiguredWafConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PreconfiguredWafConfigParameters) DeepCopyInto(out *PreconfiguredWafConfigParameters) { + *out = *in + if in.Exclusion != nil { + in, out := &in.Exclusion, &out.Exclusion + *out = make([]ExclusionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreconfiguredWafConfigParameters. 
+func (in *PreconfiguredWafConfigParameters) DeepCopy() *PreconfiguredWafConfigParameters { + if in == nil { + return nil + } + out := new(PreconfiguredWafConfigParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PreservedStateDiskInitParameters) DeepCopyInto(out *PreservedStateDiskInitParameters) { *out = *in @@ -36078,6 +37237,66 @@ func (in *ProjectMapParameters) DeepCopy() *ProjectMapParameters { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PscDataInitParameters) DeepCopyInto(out *PscDataInitParameters) { + *out = *in + if in.ProducerPort != nil { + in, out := &in.ProducerPort, &out.ProducerPort + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PscDataInitParameters. +func (in *PscDataInitParameters) DeepCopy() *PscDataInitParameters { + if in == nil { + return nil + } + out := new(PscDataInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PscDataObservation) DeepCopyInto(out *PscDataObservation) { + *out = *in + if in.ProducerPort != nil { + in, out := &in.ProducerPort, &out.ProducerPort + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PscDataObservation. +func (in *PscDataObservation) DeepCopy() *PscDataObservation { + if in == nil { + return nil + } + out := new(PscDataObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PscDataParameters) DeepCopyInto(out *PscDataParameters) { + *out = *in + if in.ProducerPort != nil { + in, out := &in.ProducerPort, &out.ProducerPort + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PscDataParameters. +func (in *PscDataParameters) DeepCopy() *PscDataParameters { + if in == nil { + return nil + } + out := new(PscDataParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *QueryParameterMatchesInitParameters) DeepCopyInto(out *QueryParameterMatchesInitParameters) { *out = *in @@ -36281,6 +37500,13 @@ func (in *RateLimitOptionsInitParameters) DeepCopyInto(out *RateLimitOptionsInit *out = new(string) **out = **in } + if in.EnforceOnKeyConfigs != nil { + in, out := &in.EnforceOnKeyConfigs, &out.EnforceOnKeyConfigs + *out = make([]EnforceOnKeyConfigsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.EnforceOnKeyName != nil { in, out := &in.EnforceOnKeyName, &out.EnforceOnKeyName *out = new(string) @@ -36336,6 +37562,13 @@ func (in *RateLimitOptionsObservation) DeepCopyInto(out *RateLimitOptionsObserva *out = new(string) **out = **in } + if in.EnforceOnKeyConfigs != nil { + in, out := &in.EnforceOnKeyConfigs, &out.EnforceOnKeyConfigs + *out = make([]EnforceOnKeyConfigsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.EnforceOnKeyName != nil { in, out := &in.EnforceOnKeyName, &out.EnforceOnKeyName *out = new(string) @@ -36391,6 +37624,13 @@ func (in *RateLimitOptionsParameters) DeepCopyInto(out *RateLimitOptionsParamete *out = new(string) **out = **in } + if in.EnforceOnKeyConfigs != nil { + in, out := &in.EnforceOnKeyConfigs, &out.EnforceOnKeyConfigs + *out = make([]EnforceOnKeyConfigsParameters, len(*in)) + for i := range *in { + 
(*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.EnforceOnKeyName != nil { in, out := &in.EnforceOnKeyName, &out.EnforceOnKeyName *out = new(string) @@ -38190,12 +39430,21 @@ func (in *RegionBackendServiceConsistentHashParameters) DeepCopy() *RegionBacken // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RegionBackendServiceIapInitParameters) DeepCopyInto(out *RegionBackendServiceIapInitParameters) { *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } if in.Oauth2ClientID != nil { in, out := &in.Oauth2ClientID, &out.Oauth2ClientID *out = new(string) **out = **in } - out.Oauth2ClientSecretSecretRef = in.Oauth2ClientSecretSecretRef + if in.Oauth2ClientSecretSecretRef != nil { + in, out := &in.Oauth2ClientSecretSecretRef, &out.Oauth2ClientSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegionBackendServiceIapInitParameters. @@ -38211,6 +39460,11 @@ func (in *RegionBackendServiceIapInitParameters) DeepCopy() *RegionBackendServic // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RegionBackendServiceIapObservation) DeepCopyInto(out *RegionBackendServiceIapObservation) { *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } if in.Oauth2ClientID != nil { in, out := &in.Oauth2ClientID, &out.Oauth2ClientID *out = new(string) @@ -38231,12 +39485,21 @@ func (in *RegionBackendServiceIapObservation) DeepCopy() *RegionBackendServiceIa // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *RegionBackendServiceIapParameters) DeepCopyInto(out *RegionBackendServiceIapParameters) { *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } if in.Oauth2ClientID != nil { in, out := &in.Oauth2ClientID, &out.Oauth2ClientID *out = new(string) **out = **in } - out.Oauth2ClientSecretSecretRef = in.Oauth2ClientSecretSecretRef + if in.Oauth2ClientSecretSecretRef != nil { + in, out := &in.Oauth2ClientSecretSecretRef, &out.Oauth2ClientSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegionBackendServiceIapParameters. @@ -38322,6 +39585,11 @@ func (in *RegionBackendServiceInitParameters) DeepCopyInto(out *RegionBackendSer *out = new(v1.Selector) (*in).DeepCopyInto(*out) } + if in.IPAddressSelectionPolicy != nil { + in, out := &in.IPAddressSelectionPolicy, &out.IPAddressSelectionPolicy + *out = new(string) + **out = **in + } if in.Iap != nil { in, out := &in.Iap, &out.Iap *out = new(RegionBackendServiceIapInitParameters) @@ -38372,6 +39640,11 @@ func (in *RegionBackendServiceInitParameters) DeepCopyInto(out *RegionBackendSer *out = new(string) **out = **in } + if in.StrongSessionAffinityCookie != nil { + in, out := &in.StrongSessionAffinityCookie, &out.StrongSessionAffinityCookie + *out = new(RegionBackendServiceStrongSessionAffinityCookieInitParameters) + (*in).DeepCopyInto(*out) + } if in.TimeoutSec != nil { in, out := &in.TimeoutSec, &out.TimeoutSec *out = new(float64) @@ -38577,6 +39850,11 @@ func (in *RegionBackendServiceObservation) DeepCopyInto(out *RegionBackendServic *out = new(string) **out = **in } + if in.IPAddressSelectionPolicy != nil { + in, out := &in.IPAddressSelectionPolicy, &out.IPAddressSelectionPolicy + *out = new(string) + **out = **in + } if in.Iap != nil { in, out := &in.Iap, &out.Iap *out = new(RegionBackendServiceIapObservation) @@ -38637,6 +39915,11 @@ func (in 
*RegionBackendServiceObservation) DeepCopyInto(out *RegionBackendServic *out = new(string) **out = **in } + if in.StrongSessionAffinityCookie != nil { + in, out := &in.StrongSessionAffinityCookie, &out.StrongSessionAffinityCookie + *out = new(RegionBackendServiceStrongSessionAffinityCookieObservation) + (*in).DeepCopyInto(*out) + } if in.TimeoutSec != nil { in, out := &in.TimeoutSec, &out.TimeoutSec *out = new(float64) @@ -38937,6 +40220,11 @@ func (in *RegionBackendServiceParameters) DeepCopyInto(out *RegionBackendService *out = new(v1.Selector) (*in).DeepCopyInto(*out) } + if in.IPAddressSelectionPolicy != nil { + in, out := &in.IPAddressSelectionPolicy, &out.IPAddressSelectionPolicy + *out = new(string) + **out = **in + } if in.Iap != nil { in, out := &in.Iap, &out.Iap *out = new(RegionBackendServiceIapParameters) @@ -38992,6 +40280,11 @@ func (in *RegionBackendServiceParameters) DeepCopyInto(out *RegionBackendService *out = new(string) **out = **in } + if in.StrongSessionAffinityCookie != nil { + in, out := &in.StrongSessionAffinityCookie, &out.StrongSessionAffinityCookie + *out = new(RegionBackendServiceStrongSessionAffinityCookieParameters) + (*in).DeepCopyInto(*out) + } if in.TimeoutSec != nil { in, out := &in.TimeoutSec, &out.TimeoutSec *out = new(float64) @@ -39044,6 +40337,171 @@ func (in *RegionBackendServiceStatus) DeepCopy() *RegionBackendServiceStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RegionBackendServiceStrongSessionAffinityCookieInitParameters) DeepCopyInto(out *RegionBackendServiceStrongSessionAffinityCookieInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(RegionBackendServiceStrongSessionAffinityCookieTTLInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegionBackendServiceStrongSessionAffinityCookieInitParameters. +func (in *RegionBackendServiceStrongSessionAffinityCookieInitParameters) DeepCopy() *RegionBackendServiceStrongSessionAffinityCookieInitParameters { + if in == nil { + return nil + } + out := new(RegionBackendServiceStrongSessionAffinityCookieInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegionBackendServiceStrongSessionAffinityCookieObservation) DeepCopyInto(out *RegionBackendServiceStrongSessionAffinityCookieObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(RegionBackendServiceStrongSessionAffinityCookieTTLObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegionBackendServiceStrongSessionAffinityCookieObservation. 
+func (in *RegionBackendServiceStrongSessionAffinityCookieObservation) DeepCopy() *RegionBackendServiceStrongSessionAffinityCookieObservation { + if in == nil { + return nil + } + out := new(RegionBackendServiceStrongSessionAffinityCookieObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegionBackendServiceStrongSessionAffinityCookieParameters) DeepCopyInto(out *RegionBackendServiceStrongSessionAffinityCookieParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(RegionBackendServiceStrongSessionAffinityCookieTTLParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegionBackendServiceStrongSessionAffinityCookieParameters. +func (in *RegionBackendServiceStrongSessionAffinityCookieParameters) DeepCopy() *RegionBackendServiceStrongSessionAffinityCookieParameters { + if in == nil { + return nil + } + out := new(RegionBackendServiceStrongSessionAffinityCookieParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RegionBackendServiceStrongSessionAffinityCookieTTLInitParameters) DeepCopyInto(out *RegionBackendServiceStrongSessionAffinityCookieTTLInitParameters) { + *out = *in + if in.Nanos != nil { + in, out := &in.Nanos, &out.Nanos + *out = new(float64) + **out = **in + } + if in.Seconds != nil { + in, out := &in.Seconds, &out.Seconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegionBackendServiceStrongSessionAffinityCookieTTLInitParameters. +func (in *RegionBackendServiceStrongSessionAffinityCookieTTLInitParameters) DeepCopy() *RegionBackendServiceStrongSessionAffinityCookieTTLInitParameters { + if in == nil { + return nil + } + out := new(RegionBackendServiceStrongSessionAffinityCookieTTLInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegionBackendServiceStrongSessionAffinityCookieTTLObservation) DeepCopyInto(out *RegionBackendServiceStrongSessionAffinityCookieTTLObservation) { + *out = *in + if in.Nanos != nil { + in, out := &in.Nanos, &out.Nanos + *out = new(float64) + **out = **in + } + if in.Seconds != nil { + in, out := &in.Seconds, &out.Seconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegionBackendServiceStrongSessionAffinityCookieTTLObservation. +func (in *RegionBackendServiceStrongSessionAffinityCookieTTLObservation) DeepCopy() *RegionBackendServiceStrongSessionAffinityCookieTTLObservation { + if in == nil { + return nil + } + out := new(RegionBackendServiceStrongSessionAffinityCookieTTLObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RegionBackendServiceStrongSessionAffinityCookieTTLParameters) DeepCopyInto(out *RegionBackendServiceStrongSessionAffinityCookieTTLParameters) { + *out = *in + if in.Nanos != nil { + in, out := &in.Nanos, &out.Nanos + *out = new(float64) + **out = **in + } + if in.Seconds != nil { + in, out := &in.Seconds, &out.Seconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegionBackendServiceStrongSessionAffinityCookieTTLParameters. +func (in *RegionBackendServiceStrongSessionAffinityCookieTTLParameters) DeepCopy() *RegionBackendServiceStrongSessionAffinityCookieTTLParameters { + if in == nil { + return nil + } + out := new(RegionBackendServiceStrongSessionAffinityCookieTTLParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RegionDisk) DeepCopyInto(out *RegionDisk) { *out = *in @@ -40997,6 +42455,11 @@ func (in *RegionHealthCheckObservation) DeepCopyInto(out *RegionHealthCheckObser *out = new(RegionHealthCheckHTTPSHealthCheckObservation) (*in).DeepCopyInto(*out) } + if in.HealthCheckID != nil { + in, out := &in.HealthCheckID, &out.HealthCheckID + *out = new(float64) + **out = **in + } if in.HealthyThreshold != nil { in, out := &in.HealthyThreshold, &out.HealthyThreshold *out = new(float64) @@ -41761,6 +43224,11 @@ func (in *RegionInstanceGroupManagerInitParameters) DeepCopyInto(out *RegionInst } } } + if in.InstanceFlexibilityPolicy != nil { + in, out := &in.InstanceFlexibilityPolicy, &out.InstanceFlexibilityPolicy + *out = new(InstanceFlexibilityPolicyInitParameters) + (*in).DeepCopyInto(*out) + } if in.InstanceLifecyclePolicy != nil { in, out := &in.InstanceLifecyclePolicy, &out.InstanceLifecyclePolicy *out = new(RegionInstanceGroupManagerInstanceLifecyclePolicyInitParameters) @@ -41793,6 +43261,11 @@ func (in 
*RegionInstanceGroupManagerInitParameters) DeepCopyInto(out *RegionInst *out = new(string) **out = **in } + if in.StandbyPolicy != nil { + in, out := &in.StandbyPolicy, &out.StandbyPolicy + *out = new(RegionInstanceGroupManagerStandbyPolicyInitParameters) + (*in).DeepCopyInto(*out) + } if in.StatefulDisk != nil { in, out := &in.StatefulDisk, &out.StatefulDisk *out = make([]RegionInstanceGroupManagerStatefulDiskInitParameters, len(*in)) @@ -41842,6 +43315,16 @@ func (in *RegionInstanceGroupManagerInitParameters) DeepCopyInto(out *RegionInst *out = new(float64) **out = **in } + if in.TargetStoppedSize != nil { + in, out := &in.TargetStoppedSize, &out.TargetStoppedSize + *out = new(float64) + **out = **in + } + if in.TargetSuspendedSize != nil { + in, out := &in.TargetSuspendedSize, &out.TargetSuspendedSize + *out = new(float64) + **out = **in + } if in.UpdatePolicy != nil { in, out := &in.UpdatePolicy, &out.UpdatePolicy *out = new(RegionInstanceGroupManagerUpdatePolicyInitParameters) @@ -42112,11 +43595,21 @@ func (in *RegionInstanceGroupManagerObservation) DeepCopyInto(out *RegionInstanc *out = new(string) **out = **in } + if in.InstanceFlexibilityPolicy != nil { + in, out := &in.InstanceFlexibilityPolicy, &out.InstanceFlexibilityPolicy + *out = new(InstanceFlexibilityPolicyObservation) + (*in).DeepCopyInto(*out) + } if in.InstanceGroup != nil { in, out := &in.InstanceGroup, &out.InstanceGroup *out = new(string) **out = **in } + if in.InstanceGroupManagerID != nil { + in, out := &in.InstanceGroupManagerID, &out.InstanceGroupManagerID + *out = new(float64) + **out = **in + } if in.InstanceLifecyclePolicy != nil { in, out := &in.InstanceLifecyclePolicy, &out.InstanceLifecyclePolicy *out = new(RegionInstanceGroupManagerInstanceLifecyclePolicyObservation) @@ -42154,6 +43647,11 @@ func (in *RegionInstanceGroupManagerObservation) DeepCopyInto(out *RegionInstanc *out = new(string) **out = **in } + if in.StandbyPolicy != nil { + in, out := &in.StandbyPolicy, 
&out.StandbyPolicy + *out = new(RegionInstanceGroupManagerStandbyPolicyObservation) + (*in).DeepCopyInto(*out) + } if in.StatefulDisk != nil { in, out := &in.StatefulDisk, &out.StatefulDisk *out = make([]RegionInstanceGroupManagerStatefulDiskObservation, len(*in)) @@ -42198,6 +43696,16 @@ func (in *RegionInstanceGroupManagerObservation) DeepCopyInto(out *RegionInstanc *out = new(float64) **out = **in } + if in.TargetStoppedSize != nil { + in, out := &in.TargetStoppedSize, &out.TargetStoppedSize + *out = new(float64) + **out = **in + } + if in.TargetSuspendedSize != nil { + in, out := &in.TargetSuspendedSize, &out.TargetSuspendedSize + *out = new(float64) + **out = **in + } if in.UpdatePolicy != nil { in, out := &in.UpdatePolicy, &out.UpdatePolicy *out = new(RegionInstanceGroupManagerUpdatePolicyObservation) @@ -42271,6 +43779,11 @@ func (in *RegionInstanceGroupManagerParameters) DeepCopyInto(out *RegionInstance } } } + if in.InstanceFlexibilityPolicy != nil { + in, out := &in.InstanceFlexibilityPolicy, &out.InstanceFlexibilityPolicy + *out = new(InstanceFlexibilityPolicyParameters) + (*in).DeepCopyInto(*out) + } if in.InstanceLifecyclePolicy != nil { in, out := &in.InstanceLifecyclePolicy, &out.InstanceLifecyclePolicy *out = new(RegionInstanceGroupManagerInstanceLifecyclePolicyParameters) @@ -42303,6 +43816,11 @@ func (in *RegionInstanceGroupManagerParameters) DeepCopyInto(out *RegionInstance *out = new(string) **out = **in } + if in.StandbyPolicy != nil { + in, out := &in.StandbyPolicy, &out.StandbyPolicy + *out = new(RegionInstanceGroupManagerStandbyPolicyParameters) + (*in).DeepCopyInto(*out) + } if in.StatefulDisk != nil { in, out := &in.StatefulDisk, &out.StatefulDisk *out = make([]RegionInstanceGroupManagerStatefulDiskParameters, len(*in)) @@ -42352,6 +43870,16 @@ func (in *RegionInstanceGroupManagerParameters) DeepCopyInto(out *RegionInstance *out = new(float64) **out = **in } + if in.TargetStoppedSize != nil { + in, out := &in.TargetStoppedSize, 
&out.TargetStoppedSize + *out = new(float64) + **out = **in + } + if in.TargetSuspendedSize != nil { + in, out := &in.TargetSuspendedSize, &out.TargetSuspendedSize + *out = new(float64) + **out = **in + } if in.UpdatePolicy != nil { in, out := &in.UpdatePolicy, &out.UpdatePolicy *out = new(RegionInstanceGroupManagerUpdatePolicyParameters) @@ -42404,6 +43932,81 @@ func (in *RegionInstanceGroupManagerSpec) DeepCopy() *RegionInstanceGroupManager return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegionInstanceGroupManagerStandbyPolicyInitParameters) DeepCopyInto(out *RegionInstanceGroupManagerStandbyPolicyInitParameters) { + *out = *in + if in.InitialDelaySec != nil { + in, out := &in.InitialDelaySec, &out.InitialDelaySec + *out = new(float64) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegionInstanceGroupManagerStandbyPolicyInitParameters. +func (in *RegionInstanceGroupManagerStandbyPolicyInitParameters) DeepCopy() *RegionInstanceGroupManagerStandbyPolicyInitParameters { + if in == nil { + return nil + } + out := new(RegionInstanceGroupManagerStandbyPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RegionInstanceGroupManagerStandbyPolicyObservation) DeepCopyInto(out *RegionInstanceGroupManagerStandbyPolicyObservation) { + *out = *in + if in.InitialDelaySec != nil { + in, out := &in.InitialDelaySec, &out.InitialDelaySec + *out = new(float64) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegionInstanceGroupManagerStandbyPolicyObservation. +func (in *RegionInstanceGroupManagerStandbyPolicyObservation) DeepCopy() *RegionInstanceGroupManagerStandbyPolicyObservation { + if in == nil { + return nil + } + out := new(RegionInstanceGroupManagerStandbyPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegionInstanceGroupManagerStandbyPolicyParameters) DeepCopyInto(out *RegionInstanceGroupManagerStandbyPolicyParameters) { + *out = *in + if in.InitialDelaySec != nil { + in, out := &in.InitialDelaySec, &out.InitialDelaySec + *out = new(float64) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegionInstanceGroupManagerStandbyPolicyParameters. +func (in *RegionInstanceGroupManagerStandbyPolicyParameters) DeepCopy() *RegionInstanceGroupManagerStandbyPolicyParameters { + if in == nil { + return nil + } + out := new(RegionInstanceGroupManagerStandbyPolicyParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *RegionInstanceGroupManagerStatefulDiskInitParameters) DeepCopyInto(out *RegionInstanceGroupManagerStatefulDiskInitParameters) { *out = *in @@ -43137,6 +44740,11 @@ func (in *RegionNetworkEndpointGroupInitParameters) DeepCopyInto(out *RegionNetw *out = new(string) **out = **in } + if in.PscData != nil { + in, out := &in.PscData, &out.PscData + *out = new(PscDataInitParameters) + (*in).DeepCopyInto(*out) + } if in.PscTargetService != nil { in, out := &in.PscTargetService, &out.PscTargetService *out = new(string) @@ -43254,6 +44862,11 @@ func (in *RegionNetworkEndpointGroupObservation) DeepCopyInto(out *RegionNetwork *out = new(string) **out = **in } + if in.PscData != nil { + in, out := &in.PscData, &out.PscData + *out = new(PscDataObservation) + (*in).DeepCopyInto(*out) + } if in.PscTargetService != nil { in, out := &in.PscTargetService, &out.PscTargetService *out = new(string) @@ -43334,6 +44947,11 @@ func (in *RegionNetworkEndpointGroupParameters) DeepCopyInto(out *RegionNetworkE *out = new(string) **out = **in } + if in.PscData != nil { + in, out := &in.PscData, &out.PscData + *out = new(PscDataParameters) + (*in).DeepCopyInto(*out) + } if in.PscTargetService != nil { in, out := &in.PscTargetService, &out.PscTargetService *out = new(string) @@ -44348,6 +45966,156 @@ func (in *RegionURLMapStatus) DeepCopy() *RegionURLMapStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequestCookieInitParameters) DeepCopyInto(out *RequestCookieInitParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestCookieInitParameters. 
+func (in *RequestCookieInitParameters) DeepCopy() *RequestCookieInitParameters { + if in == nil { + return nil + } + out := new(RequestCookieInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequestCookieObservation) DeepCopyInto(out *RequestCookieObservation) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestCookieObservation. +func (in *RequestCookieObservation) DeepCopy() *RequestCookieObservation { + if in == nil { + return nil + } + out := new(RequestCookieObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequestCookieParameters) DeepCopyInto(out *RequestCookieParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestCookieParameters. +func (in *RequestCookieParameters) DeepCopy() *RequestCookieParameters { + if in == nil { + return nil + } + out := new(RequestCookieParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RequestHeaderInitParameters) DeepCopyInto(out *RequestHeaderInitParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestHeaderInitParameters. +func (in *RequestHeaderInitParameters) DeepCopy() *RequestHeaderInitParameters { + if in == nil { + return nil + } + out := new(RequestHeaderInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequestHeaderObservation) DeepCopyInto(out *RequestHeaderObservation) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestHeaderObservation. +func (in *RequestHeaderObservation) DeepCopy() *RequestHeaderObservation { + if in == nil { + return nil + } + out := new(RequestHeaderObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequestHeaderParameters) DeepCopyInto(out *RequestHeaderParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestHeaderParameters. 
+func (in *RequestHeaderParameters) DeepCopy() *RequestHeaderParameters { + if in == nil { + return nil + } + out := new(RequestHeaderParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RequestHeadersToAddInitParameters) DeepCopyInto(out *RequestHeadersToAddInitParameters) { *out = *in @@ -44593,6 +46361,156 @@ func (in *RequestMirrorPolicyParameters) DeepCopy() *RequestMirrorPolicyParamete return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequestQueryParamInitParameters) DeepCopyInto(out *RequestQueryParamInitParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestQueryParamInitParameters. +func (in *RequestQueryParamInitParameters) DeepCopy() *RequestQueryParamInitParameters { + if in == nil { + return nil + } + out := new(RequestQueryParamInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequestQueryParamObservation) DeepCopyInto(out *RequestQueryParamObservation) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestQueryParamObservation. 
+func (in *RequestQueryParamObservation) DeepCopy() *RequestQueryParamObservation { + if in == nil { + return nil + } + out := new(RequestQueryParamObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequestQueryParamParameters) DeepCopyInto(out *RequestQueryParamParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestQueryParamParameters. +func (in *RequestQueryParamParameters) DeepCopy() *RequestQueryParamParameters { + if in == nil { + return nil + } + out := new(RequestQueryParamParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequestURIInitParameters) DeepCopyInto(out *RequestURIInitParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestURIInitParameters. +func (in *RequestURIInitParameters) DeepCopy() *RequestURIInitParameters { + if in == nil { + return nil + } + out := new(RequestURIInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RequestURIObservation) DeepCopyInto(out *RequestURIObservation) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestURIObservation. +func (in *RequestURIObservation) DeepCopy() *RequestURIObservation { + if in == nil { + return nil + } + out := new(RequestURIObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequestURIParameters) DeepCopyInto(out *RequestURIParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestURIParameters. +func (in *RequestURIParameters) DeepCopy() *RequestURIParameters { + if in == nil { + return nil + } + out := new(RequestURIParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Reservation) DeepCopyInto(out *Reservation) { *out = *in @@ -49951,6 +51869,17 @@ func (in *RouterNATInitParameters) DeepCopyInto(out *RouterNATInitParameters) { *out = new(float64) **out = **in } + if in.InitialNATIps != nil { + in, out := &in.InitialNATIps, &out.InitialNATIps + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.LogConfig != nil { in, out := &in.LogConfig, &out.LogConfig *out = new(RouterNATLogConfigInitParameters) @@ -50040,125 +51969,283 @@ func (in *RouterNATInitParameters) DeepCopyInto(out *RouterNATInitParameters) { } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouterNATInitParameters. -func (in *RouterNATInitParameters) DeepCopy() *RouterNATInitParameters { - if in == nil { - return nil - } - out := new(RouterNATInitParameters) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RouterNATList) DeepCopyInto(out *RouterNATList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]RouterNAT, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouterNATList. -func (in *RouterNATList) DeepCopy() *RouterNATList { - if in == nil { - return nil - } - out := new(RouterNATList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *RouterNATList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RouterNATLogConfigInitParameters) DeepCopyInto(out *RouterNATLogConfigInitParameters) { - *out = *in - if in.Enable != nil { - in, out := &in.Enable, &out.Enable - *out = new(bool) - **out = **in - } - if in.Filter != nil { - in, out := &in.Filter, &out.Filter - *out = new(string) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouterNATLogConfigInitParameters. -func (in *RouterNATLogConfigInitParameters) DeepCopy() *RouterNATLogConfigInitParameters { - if in == nil { - return nil - } - out := new(RouterNATLogConfigInitParameters) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RouterNATLogConfigObservation) DeepCopyInto(out *RouterNATLogConfigObservation) { - *out = *in - if in.Enable != nil { - in, out := &in.Enable, &out.Enable - *out = new(bool) - **out = **in - } - if in.Filter != nil { - in, out := &in.Filter, &out.Filter - *out = new(string) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouterNATLogConfigObservation. -func (in *RouterNATLogConfigObservation) DeepCopy() *RouterNATLogConfigObservation { - if in == nil { - return nil - } - out := new(RouterNATLogConfigObservation) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *RouterNATLogConfigParameters) DeepCopyInto(out *RouterNATLogConfigParameters) { - *out = *in - if in.Enable != nil { - in, out := &in.Enable, &out.Enable - *out = new(bool) - **out = **in - } - if in.Filter != nil { - in, out := &in.Filter, &out.Filter - *out = new(string) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouterNATLogConfigParameters. -func (in *RouterNATLogConfigParameters) DeepCopy() *RouterNATLogConfigParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouterNATInitParameters. +func (in *RouterNATInitParameters) DeepCopy() *RouterNATInitParameters { + if in == nil { + return nil + } + out := new(RouterNATInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouterNATList) DeepCopyInto(out *RouterNATList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RouterNAT, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouterNATList. +func (in *RouterNATList) DeepCopy() *RouterNATList { + if in == nil { + return nil + } + out := new(RouterNATList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RouterNATList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RouterNATLogConfigInitParameters) DeepCopyInto(out *RouterNATLogConfigInitParameters) { + *out = *in + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouterNATLogConfigInitParameters. +func (in *RouterNATLogConfigInitParameters) DeepCopy() *RouterNATLogConfigInitParameters { + if in == nil { + return nil + } + out := new(RouterNATLogConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouterNATLogConfigObservation) DeepCopyInto(out *RouterNATLogConfigObservation) { + *out = *in + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouterNATLogConfigObservation. +func (in *RouterNATLogConfigObservation) DeepCopy() *RouterNATLogConfigObservation { + if in == nil { + return nil + } + out := new(RouterNATLogConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouterNATLogConfigParameters) DeepCopyInto(out *RouterNATLogConfigParameters) { + *out = *in + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouterNATLogConfigParameters. 
+func (in *RouterNATLogConfigParameters) DeepCopy() *RouterNATLogConfigParameters { + if in == nil { + return nil + } + out := new(RouterNATLogConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouterNATObservation) DeepCopyInto(out *RouterNATObservation) { + *out = *in + if in.AutoNetworkTier != nil { + in, out := &in.AutoNetworkTier, &out.AutoNetworkTier + *out = new(string) + **out = **in + } + if in.DrainNATIps != nil { + in, out := &in.DrainNATIps, &out.DrainNATIps + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.EnableDynamicPortAllocation != nil { + in, out := &in.EnableDynamicPortAllocation, &out.EnableDynamicPortAllocation + *out = new(bool) + **out = **in + } + if in.EnableEndpointIndependentMapping != nil { + in, out := &in.EnableEndpointIndependentMapping, &out.EnableEndpointIndependentMapping + *out = new(bool) + **out = **in + } + if in.EndpointTypes != nil { + in, out := &in.EndpointTypes, &out.EndpointTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IcmpIdleTimeoutSec != nil { + in, out := &in.IcmpIdleTimeoutSec, &out.IcmpIdleTimeoutSec + *out = new(float64) + **out = **in + } + if in.InitialNATIps != nil { + in, out := &in.InitialNATIps, &out.InitialNATIps + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LogConfig != nil { + in, out := &in.LogConfig, &out.LogConfig + *out = new(RouterNATLogConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.MaxPortsPerVM 
!= nil { + in, out := &in.MaxPortsPerVM, &out.MaxPortsPerVM + *out = new(float64) + **out = **in + } + if in.MinPortsPerVM != nil { + in, out := &in.MinPortsPerVM, &out.MinPortsPerVM + *out = new(float64) + **out = **in + } + if in.NATIPAllocateOption != nil { + in, out := &in.NATIPAllocateOption, &out.NATIPAllocateOption + *out = new(string) + **out = **in + } + if in.NATIps != nil { + in, out := &in.NATIps, &out.NATIps + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Project != nil { + in, out := &in.Project, &out.Project + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Router != nil { + in, out := &in.Router, &out.Router + *out = new(string) + **out = **in + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]RulesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SourceSubnetworkIPRangesToNAT != nil { + in, out := &in.SourceSubnetworkIPRangesToNAT, &out.SourceSubnetworkIPRangesToNAT + *out = new(string) + **out = **in + } + if in.Subnetwork != nil { + in, out := &in.Subnetwork, &out.Subnetwork + *out = make([]SubnetworkObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TCPEstablishedIdleTimeoutSec != nil { + in, out := &in.TCPEstablishedIdleTimeoutSec, &out.TCPEstablishedIdleTimeoutSec + *out = new(float64) + **out = **in + } + if in.TCPTimeWaitTimeoutSec != nil { + in, out := &in.TCPTimeWaitTimeoutSec, &out.TCPTimeWaitTimeoutSec + *out = new(float64) + **out = **in + } + if in.TCPTransitoryIdleTimeoutSec != nil { + in, out := &in.TCPTransitoryIdleTimeoutSec, &out.TCPTransitoryIdleTimeoutSec + *out = new(float64) + **out = **in + } + if in.UDPIdleTimeoutSec != nil { + in, out := &in.UDPIdleTimeoutSec, &out.UDPIdleTimeoutSec + 
*out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouterNATObservation. +func (in *RouterNATObservation) DeepCopy() *RouterNATObservation { if in == nil { return nil } - out := new(RouterNATLogConfigParameters) + out := new(RouterNATObservation) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RouterNATObservation) DeepCopyInto(out *RouterNATObservation) { +func (in *RouterNATParameters) DeepCopyInto(out *RouterNATParameters) { *out = *in if in.AutoNetworkTier != nil { in, out := &in.AutoNetworkTier, &out.AutoNetworkTier @@ -50197,144 +52284,13 @@ func (in *RouterNATObservation) DeepCopyInto(out *RouterNATObservation) { } } } - if in.ID != nil { - in, out := &in.ID, &out.ID - *out = new(string) - **out = **in - } if in.IcmpIdleTimeoutSec != nil { in, out := &in.IcmpIdleTimeoutSec, &out.IcmpIdleTimeoutSec *out = new(float64) **out = **in } - if in.LogConfig != nil { - in, out := &in.LogConfig, &out.LogConfig - *out = new(RouterNATLogConfigObservation) - (*in).DeepCopyInto(*out) - } - if in.MaxPortsPerVM != nil { - in, out := &in.MaxPortsPerVM, &out.MaxPortsPerVM - *out = new(float64) - **out = **in - } - if in.MinPortsPerVM != nil { - in, out := &in.MinPortsPerVM, &out.MinPortsPerVM - *out = new(float64) - **out = **in - } - if in.NATIPAllocateOption != nil { - in, out := &in.NATIPAllocateOption, &out.NATIPAllocateOption - *out = new(string) - **out = **in - } - if in.NATIps != nil { - in, out := &in.NATIps, &out.NATIps - *out = make([]*string, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(string) - **out = **in - } - } - } - if in.Project != nil { - in, out := &in.Project, &out.Project - *out = new(string) - **out = **in - } - if in.Region != nil { - in, out := &in.Region, &out.Region - *out = new(string) - **out = 
**in - } - if in.Router != nil { - in, out := &in.Router, &out.Router - *out = new(string) - **out = **in - } - if in.Rules != nil { - in, out := &in.Rules, &out.Rules - *out = make([]RulesObservation, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.SourceSubnetworkIPRangesToNAT != nil { - in, out := &in.SourceSubnetworkIPRangesToNAT, &out.SourceSubnetworkIPRangesToNAT - *out = new(string) - **out = **in - } - if in.Subnetwork != nil { - in, out := &in.Subnetwork, &out.Subnetwork - *out = make([]SubnetworkObservation, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.TCPEstablishedIdleTimeoutSec != nil { - in, out := &in.TCPEstablishedIdleTimeoutSec, &out.TCPEstablishedIdleTimeoutSec - *out = new(float64) - **out = **in - } - if in.TCPTimeWaitTimeoutSec != nil { - in, out := &in.TCPTimeWaitTimeoutSec, &out.TCPTimeWaitTimeoutSec - *out = new(float64) - **out = **in - } - if in.TCPTransitoryIdleTimeoutSec != nil { - in, out := &in.TCPTransitoryIdleTimeoutSec, &out.TCPTransitoryIdleTimeoutSec - *out = new(float64) - **out = **in - } - if in.UDPIdleTimeoutSec != nil { - in, out := &in.UDPIdleTimeoutSec, &out.UDPIdleTimeoutSec - *out = new(float64) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouterNATObservation. -func (in *RouterNATObservation) DeepCopy() *RouterNATObservation { - if in == nil { - return nil - } - out := new(RouterNATObservation) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *RouterNATParameters) DeepCopyInto(out *RouterNATParameters) { - *out = *in - if in.AutoNetworkTier != nil { - in, out := &in.AutoNetworkTier, &out.AutoNetworkTier - *out = new(string) - **out = **in - } - if in.DrainNATIps != nil { - in, out := &in.DrainNATIps, &out.DrainNATIps - *out = make([]*string, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(string) - **out = **in - } - } - } - if in.EnableDynamicPortAllocation != nil { - in, out := &in.EnableDynamicPortAllocation, &out.EnableDynamicPortAllocation - *out = new(bool) - **out = **in - } - if in.EnableEndpointIndependentMapping != nil { - in, out := &in.EnableEndpointIndependentMapping, &out.EnableEndpointIndependentMapping - *out = new(bool) - **out = **in - } - if in.EndpointTypes != nil { - in, out := &in.EndpointTypes, &out.EndpointTypes + if in.InitialNATIps != nil { + in, out := &in.InitialNATIps, &out.InitialNATIps *out = make([]*string, len(*in)) for i := range *in { if (*in)[i] != nil { @@ -50344,11 +52300,6 @@ func (in *RouterNATParameters) DeepCopyInto(out *RouterNATParameters) { } } } - if in.IcmpIdleTimeoutSec != nil { - in, out := &in.IcmpIdleTimeoutSec, &out.IcmpIdleTimeoutSec - *out = new(float64) - **out = **in - } if in.LogConfig != nil { in, out := &in.LogConfig, &out.LogConfig *out = new(RouterNATLogConfigParameters) @@ -51426,6 +53377,11 @@ func (in *RuleInitParameters) DeepCopyInto(out *RuleInitParameters) { *out = new(RuleMatchInitParameters) (*in).DeepCopyInto(*out) } + if in.PreconfiguredWafConfig != nil { + in, out := &in.PreconfiguredWafConfig, &out.PreconfiguredWafConfig + *out = new(PreconfiguredWafConfigInitParameters) + (*in).DeepCopyInto(*out) + } if in.Preview != nil { in, out := &in.Preview, &out.Preview *out = new(bool) @@ -51586,6 +53542,11 @@ func (in *RuleObservation) DeepCopyInto(out *RuleObservation) { *out = new(RuleMatchObservation) (*in).DeepCopyInto(*out) } + if in.PreconfiguredWafConfig != nil { + 
in, out := &in.PreconfiguredWafConfig, &out.PreconfiguredWafConfig + *out = new(PreconfiguredWafConfigObservation) + (*in).DeepCopyInto(*out) + } if in.Preview != nil { in, out := &in.Preview, &out.Preview *out = new(bool) @@ -51641,6 +53602,11 @@ func (in *RuleParameters) DeepCopyInto(out *RuleParameters) { *out = new(RuleMatchParameters) (*in).DeepCopyInto(*out) } + if in.PreconfiguredWafConfig != nil { + in, out := &in.PreconfiguredWafConfig, &out.PreconfiguredWafConfig + *out = new(PreconfiguredWafConfigParameters) + (*in).DeepCopyInto(*out) + } if in.Preview != nil { in, out := &in.Preview, &out.Preview *out = new(bool) @@ -52311,6 +54277,11 @@ func (in *SchedulingInitParameters) DeepCopyInto(out *SchedulingInitParameters) *out = new(bool) **out = **in } + if in.AvailabilityDomain != nil { + in, out := &in.AvailabilityDomain, &out.AvailabilityDomain + *out = new(float64) + **out = **in + } if in.InstanceTerminationAction != nil { in, out := &in.InstanceTerminationAction, &out.InstanceTerminationAction *out = new(string) @@ -52636,6 +54607,11 @@ func (in *SchedulingObservation) DeepCopyInto(out *SchedulingObservation) { *out = new(bool) **out = **in } + if in.AvailabilityDomain != nil { + in, out := &in.AvailabilityDomain, &out.AvailabilityDomain + *out = new(float64) + **out = **in + } if in.InstanceTerminationAction != nil { in, out := &in.InstanceTerminationAction, &out.InstanceTerminationAction *out = new(string) @@ -52763,6 +54739,11 @@ func (in *SchedulingParameters) DeepCopyInto(out *SchedulingParameters) { *out = new(bool) **out = **in } + if in.AvailabilityDomain != nil { + in, out := &in.AvailabilityDomain, &out.AvailabilityDomain + *out = new(float64) + **out = **in + } if in.InstanceTerminationAction != nil { in, out := &in.InstanceTerminationAction, &out.InstanceTerminationAction *out = new(string) @@ -52925,6 +54906,11 @@ func (in *SecondaryIPRangeInitParameters) DeepCopyInto(out *SecondaryIPRangeInit *out = new(string) **out = **in } + if 
in.ReservedInternalRange != nil { + in, out := &in.ReservedInternalRange, &out.ReservedInternalRange + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecondaryIPRangeInitParameters. @@ -52950,6 +54936,11 @@ func (in *SecondaryIPRangeObservation) DeepCopyInto(out *SecondaryIPRangeObserva *out = new(string) **out = **in } + if in.ReservedInternalRange != nil { + in, out := &in.ReservedInternalRange, &out.ReservedInternalRange + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecondaryIPRangeObservation. @@ -52975,6 +54966,11 @@ func (in *SecondaryIPRangeParameters) DeepCopyInto(out *SecondaryIPRangeParamete *out = new(string) **out = **in } + if in.ReservedInternalRange != nil { + in, out := &in.ReservedInternalRange, &out.ReservedInternalRange + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecondaryIPRangeParameters. @@ -55280,6 +57276,81 @@ func (in *SpecificReservationParameters) DeepCopy() *SpecificReservationParamete return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StandbyPolicyInitParameters) DeepCopyInto(out *StandbyPolicyInitParameters) { + *out = *in + if in.InitialDelaySec != nil { + in, out := &in.InitialDelaySec, &out.InitialDelaySec + *out = new(float64) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StandbyPolicyInitParameters. 
+func (in *StandbyPolicyInitParameters) DeepCopy() *StandbyPolicyInitParameters { + if in == nil { + return nil + } + out := new(StandbyPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StandbyPolicyObservation) DeepCopyInto(out *StandbyPolicyObservation) { + *out = *in + if in.InitialDelaySec != nil { + in, out := &in.InitialDelaySec, &out.InitialDelaySec + *out = new(float64) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StandbyPolicyObservation. +func (in *StandbyPolicyObservation) DeepCopy() *StandbyPolicyObservation { + if in == nil { + return nil + } + out := new(StandbyPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StandbyPolicyParameters) DeepCopyInto(out *StandbyPolicyParameters) { + *out = *in + if in.InitialDelaySec != nil { + in, out := &in.InitialDelaySec, &out.InitialDelaySec + *out = new(float64) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StandbyPolicyParameters. +func (in *StandbyPolicyParameters) DeepCopy() *StandbyPolicyParameters { + if in == nil { + return nil + } + out := new(StandbyPolicyParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *StatefulDiskInitParameters) DeepCopyInto(out *StatefulDiskInitParameters) { *out = *in @@ -55845,6 +57916,171 @@ func (in *StatusVersionTargetParameters) DeepCopy() *StatusVersionTargetParamete return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StrongSessionAffinityCookieInitParameters) DeepCopyInto(out *StrongSessionAffinityCookieInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(StrongSessionAffinityCookieTTLInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrongSessionAffinityCookieInitParameters. +func (in *StrongSessionAffinityCookieInitParameters) DeepCopy() *StrongSessionAffinityCookieInitParameters { + if in == nil { + return nil + } + out := new(StrongSessionAffinityCookieInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StrongSessionAffinityCookieObservation) DeepCopyInto(out *StrongSessionAffinityCookieObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(StrongSessionAffinityCookieTTLObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrongSessionAffinityCookieObservation. 
+func (in *StrongSessionAffinityCookieObservation) DeepCopy() *StrongSessionAffinityCookieObservation { + if in == nil { + return nil + } + out := new(StrongSessionAffinityCookieObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StrongSessionAffinityCookieParameters) DeepCopyInto(out *StrongSessionAffinityCookieParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(StrongSessionAffinityCookieTTLParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrongSessionAffinityCookieParameters. +func (in *StrongSessionAffinityCookieParameters) DeepCopy() *StrongSessionAffinityCookieParameters { + if in == nil { + return nil + } + out := new(StrongSessionAffinityCookieParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StrongSessionAffinityCookieTTLInitParameters) DeepCopyInto(out *StrongSessionAffinityCookieTTLInitParameters) { + *out = *in + if in.Nanos != nil { + in, out := &in.Nanos, &out.Nanos + *out = new(float64) + **out = **in + } + if in.Seconds != nil { + in, out := &in.Seconds, &out.Seconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrongSessionAffinityCookieTTLInitParameters. 
+func (in *StrongSessionAffinityCookieTTLInitParameters) DeepCopy() *StrongSessionAffinityCookieTTLInitParameters { + if in == nil { + return nil + } + out := new(StrongSessionAffinityCookieTTLInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StrongSessionAffinityCookieTTLObservation) DeepCopyInto(out *StrongSessionAffinityCookieTTLObservation) { + *out = *in + if in.Nanos != nil { + in, out := &in.Nanos, &out.Nanos + *out = new(float64) + **out = **in + } + if in.Seconds != nil { + in, out := &in.Seconds, &out.Seconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrongSessionAffinityCookieTTLObservation. +func (in *StrongSessionAffinityCookieTTLObservation) DeepCopy() *StrongSessionAffinityCookieTTLObservation { + if in == nil { + return nil + } + out := new(StrongSessionAffinityCookieTTLObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StrongSessionAffinityCookieTTLParameters) DeepCopyInto(out *StrongSessionAffinityCookieTTLParameters) { + *out = *in + if in.Nanos != nil { + in, out := &in.Nanos, &out.Nanos + *out = new(float64) + **out = **in + } + if in.Seconds != nil { + in, out := &in.Seconds, &out.Seconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrongSessionAffinityCookieTTLParameters. +func (in *StrongSessionAffinityCookieTTLParameters) DeepCopy() *StrongSessionAffinityCookieTTLParameters { + if in == nil { + return nil + } + out := new(StrongSessionAffinityCookieTTLParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. func (in *Subnetwork) DeepCopyInto(out *Subnetwork) { *out = *in @@ -56336,6 +58572,11 @@ func (in *SubnetworkInitParameters_2) DeepCopyInto(out *SubnetworkInitParameters *out = new(string) **out = **in } + if in.ReservedInternalRange != nil { + in, out := &in.ReservedInternalRange, &out.ReservedInternalRange + *out = new(string) + **out = **in + } if in.Role != nil { in, out := &in.Role, &out.Role *out = new(string) @@ -56670,6 +58911,11 @@ func (in *SubnetworkObservation_2) DeepCopyInto(out *SubnetworkObservation_2) { *out = new(string) **out = **in } + if in.ReservedInternalRange != nil { + in, out := &in.ReservedInternalRange, &out.ReservedInternalRange + *out = new(string) + **out = **in + } if in.Role != nil { in, out := &in.Role, &out.Role *out = new(string) @@ -56697,6 +58943,11 @@ func (in *SubnetworkObservation_2) DeepCopyInto(out *SubnetworkObservation_2) { *out = new(string) **out = **in } + if in.SubnetworkID != nil { + in, out := &in.SubnetworkID, &out.SubnetworkID + *out = new(float64) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubnetworkObservation_2. @@ -56829,6 +59080,11 @@ func (in *SubnetworkParameters_2) DeepCopyInto(out *SubnetworkParameters_2) { *out = new(string) **out = **in } + if in.ReservedInternalRange != nil { + in, out := &in.ReservedInternalRange, &out.ReservedInternalRange + *out = new(string) + **out = **in + } if in.Role != nil { in, out := &in.Role, &out.Role *out = new(string) @@ -57368,6 +59624,192 @@ func (in *TestParameters) DeepCopy() *TestParameters { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ThresholdConfigsInitParameters) DeepCopyInto(out *ThresholdConfigsInitParameters) { + *out = *in + if in.AutoDeployConfidenceThreshold != nil { + in, out := &in.AutoDeployConfidenceThreshold, &out.AutoDeployConfidenceThreshold + *out = new(float64) + **out = **in + } + if in.AutoDeployExpirationSec != nil { + in, out := &in.AutoDeployExpirationSec, &out.AutoDeployExpirationSec + *out = new(float64) + **out = **in + } + if in.AutoDeployImpactedBaselineThreshold != nil { + in, out := &in.AutoDeployImpactedBaselineThreshold, &out.AutoDeployImpactedBaselineThreshold + *out = new(float64) + **out = **in + } + if in.AutoDeployLoadThreshold != nil { + in, out := &in.AutoDeployLoadThreshold, &out.AutoDeployLoadThreshold + *out = new(float64) + **out = **in + } + if in.DetectionAbsoluteQPS != nil { + in, out := &in.DetectionAbsoluteQPS, &out.DetectionAbsoluteQPS + *out = new(float64) + **out = **in + } + if in.DetectionLoadThreshold != nil { + in, out := &in.DetectionLoadThreshold, &out.DetectionLoadThreshold + *out = new(float64) + **out = **in + } + if in.DetectionRelativeToBaselineQPS != nil { + in, out := &in.DetectionRelativeToBaselineQPS, &out.DetectionRelativeToBaselineQPS + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.TrafficGranularityConfigs != nil { + in, out := &in.TrafficGranularityConfigs, &out.TrafficGranularityConfigs + *out = make([]TrafficGranularityConfigsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThresholdConfigsInitParameters. 
+func (in *ThresholdConfigsInitParameters) DeepCopy() *ThresholdConfigsInitParameters { + if in == nil { + return nil + } + out := new(ThresholdConfigsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThresholdConfigsObservation) DeepCopyInto(out *ThresholdConfigsObservation) { + *out = *in + if in.AutoDeployConfidenceThreshold != nil { + in, out := &in.AutoDeployConfidenceThreshold, &out.AutoDeployConfidenceThreshold + *out = new(float64) + **out = **in + } + if in.AutoDeployExpirationSec != nil { + in, out := &in.AutoDeployExpirationSec, &out.AutoDeployExpirationSec + *out = new(float64) + **out = **in + } + if in.AutoDeployImpactedBaselineThreshold != nil { + in, out := &in.AutoDeployImpactedBaselineThreshold, &out.AutoDeployImpactedBaselineThreshold + *out = new(float64) + **out = **in + } + if in.AutoDeployLoadThreshold != nil { + in, out := &in.AutoDeployLoadThreshold, &out.AutoDeployLoadThreshold + *out = new(float64) + **out = **in + } + if in.DetectionAbsoluteQPS != nil { + in, out := &in.DetectionAbsoluteQPS, &out.DetectionAbsoluteQPS + *out = new(float64) + **out = **in + } + if in.DetectionLoadThreshold != nil { + in, out := &in.DetectionLoadThreshold, &out.DetectionLoadThreshold + *out = new(float64) + **out = **in + } + if in.DetectionRelativeToBaselineQPS != nil { + in, out := &in.DetectionRelativeToBaselineQPS, &out.DetectionRelativeToBaselineQPS + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.TrafficGranularityConfigs != nil { + in, out := &in.TrafficGranularityConfigs, &out.TrafficGranularityConfigs + *out = make([]TrafficGranularityConfigsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
ThresholdConfigsObservation. +func (in *ThresholdConfigsObservation) DeepCopy() *ThresholdConfigsObservation { + if in == nil { + return nil + } + out := new(ThresholdConfigsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThresholdConfigsParameters) DeepCopyInto(out *ThresholdConfigsParameters) { + *out = *in + if in.AutoDeployConfidenceThreshold != nil { + in, out := &in.AutoDeployConfidenceThreshold, &out.AutoDeployConfidenceThreshold + *out = new(float64) + **out = **in + } + if in.AutoDeployExpirationSec != nil { + in, out := &in.AutoDeployExpirationSec, &out.AutoDeployExpirationSec + *out = new(float64) + **out = **in + } + if in.AutoDeployImpactedBaselineThreshold != nil { + in, out := &in.AutoDeployImpactedBaselineThreshold, &out.AutoDeployImpactedBaselineThreshold + *out = new(float64) + **out = **in + } + if in.AutoDeployLoadThreshold != nil { + in, out := &in.AutoDeployLoadThreshold, &out.AutoDeployLoadThreshold + *out = new(float64) + **out = **in + } + if in.DetectionAbsoluteQPS != nil { + in, out := &in.DetectionAbsoluteQPS, &out.DetectionAbsoluteQPS + *out = new(float64) + **out = **in + } + if in.DetectionLoadThreshold != nil { + in, out := &in.DetectionLoadThreshold, &out.DetectionLoadThreshold + *out = new(float64) + **out = **in + } + if in.DetectionRelativeToBaselineQPS != nil { + in, out := &in.DetectionRelativeToBaselineQPS, &out.DetectionRelativeToBaselineQPS + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.TrafficGranularityConfigs != nil { + in, out := &in.TrafficGranularityConfigs, &out.TrafficGranularityConfigs + *out = make([]TrafficGranularityConfigsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, 
creating a new ThresholdConfigsParameters. +func (in *ThresholdConfigsParameters) DeepCopy() *ThresholdConfigsParameters { + if in == nil { + return nil + } + out := new(ThresholdConfigsParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TimeoutInitParameters) DeepCopyInto(out *TimeoutInitParameters) { *out = *in @@ -57443,6 +59885,96 @@ func (in *TimeoutParameters) DeepCopy() *TimeoutParameters { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrafficGranularityConfigsInitParameters) DeepCopyInto(out *TrafficGranularityConfigsInitParameters) { + *out = *in + if in.EnableEachUniqueValue != nil { + in, out := &in.EnableEachUniqueValue, &out.EnableEachUniqueValue + *out = new(bool) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficGranularityConfigsInitParameters. +func (in *TrafficGranularityConfigsInitParameters) DeepCopy() *TrafficGranularityConfigsInitParameters { + if in == nil { + return nil + } + out := new(TrafficGranularityConfigsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TrafficGranularityConfigsObservation) DeepCopyInto(out *TrafficGranularityConfigsObservation) { + *out = *in + if in.EnableEachUniqueValue != nil { + in, out := &in.EnableEachUniqueValue, &out.EnableEachUniqueValue + *out = new(bool) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficGranularityConfigsObservation. +func (in *TrafficGranularityConfigsObservation) DeepCopy() *TrafficGranularityConfigsObservation { + if in == nil { + return nil + } + out := new(TrafficGranularityConfigsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrafficGranularityConfigsParameters) DeepCopyInto(out *TrafficGranularityConfigsParameters) { + *out = *in + if in.EnableEachUniqueValue != nil { + in, out := &in.EnableEachUniqueValue, &out.EnableEachUniqueValue + *out = new(bool) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficGranularityConfigsParameters. +func (in *TrafficGranularityConfigsParameters) DeepCopy() *TrafficGranularityConfigsParameters { + if in == nil { + return nil + } + out := new(TrafficGranularityConfigsParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *URLMap) DeepCopyInto(out *URLMap) { *out = *in diff --git a/apis/compute/v1beta2/zz_generated.resolvers.go b/apis/compute/v1beta2/zz_generated.resolvers.go index bfc5747c8..5b69a3714 100644 --- a/apis/compute/v1beta2/zz_generated.resolvers.go +++ b/apis/compute/v1beta2/zz_generated.resolvers.go @@ -903,6 +903,56 @@ func (mg *GlobalForwardingRule) ResolveReferences(ctx context.Context, c client. return nil } +// ResolveReferences of this Image. +func (mg *Image) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("compute.gcp.upbound.io", "v1beta2", "Disk", "DiskList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SourceDisk), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.SourceDiskRef, + Selector: mg.Spec.ForProvider.SourceDiskSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SourceDisk") + } + mg.Spec.ForProvider.SourceDisk = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SourceDiskRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("compute.gcp.upbound.io", "v1beta2", "Disk", "DiskList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SourceDisk), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.SourceDiskRef, + Selector: 
mg.Spec.InitProvider.SourceDiskSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SourceDisk") + } + mg.Spec.InitProvider.SourceDisk = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SourceDiskRef = rsp.ResolvedReference + + return nil +} + // ResolveReferences of this ImageIAMMember. func (mg *ImageIAMMember) ResolveReferences(ctx context.Context, c client.Reader) error { var m xpresource.Managed diff --git a/apis/compute/v1beta2/zz_globalforwardingrule_types.go b/apis/compute/v1beta2/zz_globalforwardingrule_types.go index 397184552..faabc4875 100755 --- a/apis/compute/v1beta2/zz_globalforwardingrule_types.go +++ b/apis/compute/v1beta2/zz_globalforwardingrule_types.go @@ -161,6 +161,18 @@ type GlobalForwardingRuleInitParameters struct { // +kubebuilder:validation:Optional NetworkSelector *v1.Selector `json:"networkSelector,omitempty" tf:"-"` + // This signifies the networking tier used for configuring + // this load balancer and can only take the following values: + // PREMIUM, STANDARD. + // For regional ForwardingRule, the valid values are PREMIUM and + // STANDARD. For GlobalForwardingRule, the valid value is + // PREMIUM. + // If this field is not specified, it is assumed to be PREMIUM. + // If IPAddress is specified, this value must be equal to the + // networkTier of the Address. + // Possible values are: PREMIUM, STANDARD. + NetworkTier *string `json:"networkTier,omitempty" tf:"network_tier,omitempty"` + // This is used in PSC consumer ForwardingRule to control whether it should try to auto-generate a DNS zone or not. Non-PSC forwarding rules do not use this field. 
NoAutomateDNSZone *bool `json:"noAutomateDnsZone,omitempty" tf:"no_automate_dns_zone,omitempty"` @@ -238,6 +250,9 @@ type GlobalForwardingRuleObservation struct { // +mapType=granular EffectiveLabels map[string]*string `json:"effectiveLabels,omitempty" tf:"effective_labels,omitempty"` + // The unique identifier number for the resource. This identifier is defined by the server. + ForwardingRuleID *float64 `json:"forwardingRuleId,omitempty" tf:"forwarding_rule_id,omitempty"` + // an identifier for the resource with format projects/{{project}}/global/forwardingRules/{{name}} ID *string `json:"id,omitempty" tf:"id,omitempty"` @@ -305,6 +320,18 @@ type GlobalForwardingRuleObservation struct { // APIs, a network must be provided. Network *string `json:"network,omitempty" tf:"network,omitempty"` + // This signifies the networking tier used for configuring + // this load balancer and can only take the following values: + // PREMIUM, STANDARD. + // For regional ForwardingRule, the valid values are PREMIUM and + // STANDARD. For GlobalForwardingRule, the valid value is + // PREMIUM. + // If this field is not specified, it is assumed to be PREMIUM. + // If IPAddress is specified, this value must be equal to the + // networkTier of the Address. + // Possible values are: PREMIUM, STANDARD. + NetworkTier *string `json:"networkTier,omitempty" tf:"network_tier,omitempty"` + // This is used in PSC consumer ForwardingRule to control whether it should try to auto-generate a DNS zone or not. Non-PSC forwarding rules do not use this field. NoAutomateDNSZone *bool `json:"noAutomateDnsZone,omitempty" tf:"no_automate_dns_zone,omitempty"` @@ -447,6 +474,19 @@ type GlobalForwardingRuleParameters struct { // +kubebuilder:validation:Optional NetworkSelector *v1.Selector `json:"networkSelector,omitempty" tf:"-"` + // This signifies the networking tier used for configuring + // this load balancer and can only take the following values: + // PREMIUM, STANDARD. 
+ // For regional ForwardingRule, the valid values are PREMIUM and + // STANDARD. For GlobalForwardingRule, the valid value is + // PREMIUM. + // If this field is not specified, it is assumed to be PREMIUM. + // If IPAddress is specified, this value must be equal to the + // networkTier of the Address. + // Possible values are: PREMIUM, STANDARD. + // +kubebuilder:validation:Optional + NetworkTier *string `json:"networkTier,omitempty" tf:"network_tier,omitempty"` + // This is used in PSC consumer ForwardingRule to control whether it should try to auto-generate a DNS zone or not. Non-PSC forwarding rules do not use this field. // +kubebuilder:validation:Optional NoAutomateDNSZone *bool `json:"noAutomateDnsZone,omitempty" tf:"no_automate_dns_zone,omitempty"` diff --git a/apis/compute/v1beta2/zz_healthcheck_types.go b/apis/compute/v1beta2/zz_healthcheck_types.go index 699765611..9164ce0f8 100755 --- a/apis/compute/v1beta2/zz_healthcheck_types.go +++ b/apis/compute/v1beta2/zz_healthcheck_types.go @@ -311,15 +311,15 @@ type HealthCheckInitParameters struct { // you create the resource. Description *string `json:"description,omitempty" tf:"description,omitempty"` - // A nested object resource + // A nested object resource. // Structure is documented below. GRPCHealthCheck *GRPCHealthCheckInitParameters `json:"grpcHealthCheck,omitempty" tf:"grpc_health_check,omitempty"` - // A nested object resource + // A nested object resource. // Structure is documented below. HTTPHealthCheck *HTTPHealthCheckInitParameters `json:"httpHealthCheck,omitempty" tf:"http_health_check,omitempty"` - // A nested object resource + // A nested object resource. // Structure is documented below. HTTPSHealthCheck *HTTPSHealthCheckInitParameters `json:"httpsHealthCheck,omitempty" tf:"https_health_check,omitempty"` @@ -327,7 +327,7 @@ type HealthCheckInitParameters struct { // consecutive successes. The default value is 2. 
HealthyThreshold *float64 `json:"healthyThreshold,omitempty" tf:"healthy_threshold,omitempty"` - // A nested object resource + // A nested object resource. // Structure is documented below. Http2HealthCheck *Http2HealthCheckInitParameters `json:"http2HealthCheck,omitempty" tf:"http2_health_check,omitempty"` @@ -339,7 +339,7 @@ type HealthCheckInitParameters struct { // If it is not provided, the provider project is used. Project *string `json:"project,omitempty" tf:"project,omitempty"` - // A nested object resource + // A nested object resource. // Structure is documented below. SSLHealthCheck *SSLHealthCheckInitParameters `json:"sslHealthCheck,omitempty" tf:"ssl_health_check,omitempty"` @@ -351,7 +351,7 @@ type HealthCheckInitParameters struct { // and what other resources can use this health check: SourceRegions []*string `json:"sourceRegions,omitempty" tf:"source_regions,omitempty"` - // A nested object resource + // A nested object resource. // Structure is documented below. TCPHealthCheck *TCPHealthCheckInitParameters `json:"tcpHealthCheck,omitempty" tf:"tcp_health_check,omitempty"` @@ -400,15 +400,15 @@ type HealthCheckObservation struct { // you create the resource. Description *string `json:"description,omitempty" tf:"description,omitempty"` - // A nested object resource + // A nested object resource. // Structure is documented below. GRPCHealthCheck *GRPCHealthCheckObservation `json:"grpcHealthCheck,omitempty" tf:"grpc_health_check,omitempty"` - // A nested object resource + // A nested object resource. // Structure is documented below. HTTPHealthCheck *HTTPHealthCheckObservation `json:"httpHealthCheck,omitempty" tf:"http_health_check,omitempty"` - // A nested object resource + // A nested object resource. // Structure is documented below. HTTPSHealthCheck *HTTPSHealthCheckObservation `json:"httpsHealthCheck,omitempty" tf:"https_health_check,omitempty"` @@ -416,7 +416,7 @@ type HealthCheckObservation struct { // consecutive successes. 
The default value is 2. HealthyThreshold *float64 `json:"healthyThreshold,omitempty" tf:"healthy_threshold,omitempty"` - // A nested object resource + // A nested object resource. // Structure is documented below. Http2HealthCheck *Http2HealthCheckObservation `json:"http2HealthCheck,omitempty" tf:"http2_health_check,omitempty"` @@ -431,7 +431,7 @@ type HealthCheckObservation struct { // If it is not provided, the provider project is used. Project *string `json:"project,omitempty" tf:"project,omitempty"` - // A nested object resource + // A nested object resource. // Structure is documented below. SSLHealthCheck *SSLHealthCheckObservation `json:"sslHealthCheck,omitempty" tf:"ssl_health_check,omitempty"` @@ -446,7 +446,7 @@ type HealthCheckObservation struct { // and what other resources can use this health check: SourceRegions []*string `json:"sourceRegions,omitempty" tf:"source_regions,omitempty"` - // A nested object resource + // A nested object resource. // Structure is documented below. TCPHealthCheck *TCPHealthCheckObservation `json:"tcpHealthCheck,omitempty" tf:"tcp_health_check,omitempty"` @@ -475,17 +475,17 @@ type HealthCheckParameters struct { // +kubebuilder:validation:Optional Description *string `json:"description,omitempty" tf:"description,omitempty"` - // A nested object resource + // A nested object resource. // Structure is documented below. // +kubebuilder:validation:Optional GRPCHealthCheck *GRPCHealthCheckParameters `json:"grpcHealthCheck,omitempty" tf:"grpc_health_check,omitempty"` - // A nested object resource + // A nested object resource. // Structure is documented below. // +kubebuilder:validation:Optional HTTPHealthCheck *HTTPHealthCheckParameters `json:"httpHealthCheck,omitempty" tf:"http_health_check,omitempty"` - // A nested object resource + // A nested object resource. // Structure is documented below. 
// +kubebuilder:validation:Optional HTTPSHealthCheck *HTTPSHealthCheckParameters `json:"httpsHealthCheck,omitempty" tf:"https_health_check,omitempty"` @@ -495,7 +495,7 @@ type HealthCheckParameters struct { // +kubebuilder:validation:Optional HealthyThreshold *float64 `json:"healthyThreshold,omitempty" tf:"healthy_threshold,omitempty"` - // A nested object resource + // A nested object resource. // Structure is documented below. // +kubebuilder:validation:Optional Http2HealthCheck *Http2HealthCheckParameters `json:"http2HealthCheck,omitempty" tf:"http2_health_check,omitempty"` @@ -510,7 +510,7 @@ type HealthCheckParameters struct { // +kubebuilder:validation:Optional Project *string `json:"project,omitempty" tf:"project,omitempty"` - // A nested object resource + // A nested object resource. // Structure is documented below. // +kubebuilder:validation:Optional SSLHealthCheck *SSLHealthCheckParameters `json:"sslHealthCheck,omitempty" tf:"ssl_health_check,omitempty"` @@ -524,7 +524,7 @@ type HealthCheckParameters struct { // +kubebuilder:validation:Optional SourceRegions []*string `json:"sourceRegions,omitempty" tf:"source_regions,omitempty"` - // A nested object resource + // A nested object resource. // Structure is documented below. // +kubebuilder:validation:Optional TCPHealthCheck *TCPHealthCheckParameters `json:"tcpHealthCheck,omitempty" tf:"tcp_health_check,omitempty"` diff --git a/apis/compute/v1beta2/zz_image_types.go b/apis/compute/v1beta2/zz_image_types.go index 064396c0f..68ef94248 100755 --- a/apis/compute/v1beta2/zz_image_types.go +++ b/apis/compute/v1beta2/zz_image_types.go @@ -54,21 +54,21 @@ type ImageEncryptionKeyParameters struct { type ImageGuestOsFeaturesInitParameters struct { // The type of supported feature. Read Enabling guest operating system features to see a list of available options. 
- // Possible values are: MULTI_IP_SUBNET, SECURE_BOOT, SEV_CAPABLE, UEFI_COMPATIBLE, VIRTIO_SCSI_MULTIQUEUE, WINDOWS, GVNIC, SEV_LIVE_MIGRATABLE, SEV_SNP_CAPABLE, SUSPEND_RESUME_COMPATIBLE, TDX_CAPABLE, SEV_LIVE_MIGRATABLE_V2. + // Possible values are: MULTI_IP_SUBNET, SECURE_BOOT, SEV_CAPABLE, UEFI_COMPATIBLE, VIRTIO_SCSI_MULTIQUEUE, WINDOWS, GVNIC, IDPF, SEV_LIVE_MIGRATABLE, SEV_SNP_CAPABLE, SUSPEND_RESUME_COMPATIBLE, TDX_CAPABLE, SEV_LIVE_MIGRATABLE_V2. Type *string `json:"type,omitempty" tf:"type,omitempty"` } type ImageGuestOsFeaturesObservation struct { // The type of supported feature. Read Enabling guest operating system features to see a list of available options. - // Possible values are: MULTI_IP_SUBNET, SECURE_BOOT, SEV_CAPABLE, UEFI_COMPATIBLE, VIRTIO_SCSI_MULTIQUEUE, WINDOWS, GVNIC, SEV_LIVE_MIGRATABLE, SEV_SNP_CAPABLE, SUSPEND_RESUME_COMPATIBLE, TDX_CAPABLE, SEV_LIVE_MIGRATABLE_V2. + // Possible values are: MULTI_IP_SUBNET, SECURE_BOOT, SEV_CAPABLE, UEFI_COMPATIBLE, VIRTIO_SCSI_MULTIQUEUE, WINDOWS, GVNIC, IDPF, SEV_LIVE_MIGRATABLE, SEV_SNP_CAPABLE, SUSPEND_RESUME_COMPATIBLE, TDX_CAPABLE, SEV_LIVE_MIGRATABLE_V2. Type *string `json:"type,omitempty" tf:"type,omitempty"` } type ImageGuestOsFeaturesParameters struct { // The type of supported feature. Read Enabling guest operating system features to see a list of available options. - // Possible values are: MULTI_IP_SUBNET, SECURE_BOOT, SEV_CAPABLE, UEFI_COMPATIBLE, VIRTIO_SCSI_MULTIQUEUE, WINDOWS, GVNIC, SEV_LIVE_MIGRATABLE, SEV_SNP_CAPABLE, SUSPEND_RESUME_COMPATIBLE, TDX_CAPABLE, SEV_LIVE_MIGRATABLE_V2. + // Possible values are: MULTI_IP_SUBNET, SECURE_BOOT, SEV_CAPABLE, UEFI_COMPATIBLE, VIRTIO_SCSI_MULTIQUEUE, WINDOWS, GVNIC, IDPF, SEV_LIVE_MIGRATABLE, SEV_SNP_CAPABLE, SUSPEND_RESUME_COMPATIBLE, TDX_CAPABLE, SEV_LIVE_MIGRATABLE_V2. 
// +kubebuilder:validation:Optional Type *string `json:"type" tf:"type,omitempty"` } @@ -121,8 +121,18 @@ type ImageInitParameters struct { // The source disk to create this image based on. // You must provide either this property or the // rawDisk.source property but not both to create an image. + // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/compute/v1beta2.Disk + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() SourceDisk *string `json:"sourceDisk,omitempty" tf:"source_disk,omitempty"` + // Reference to a Disk in compute to populate sourceDisk. + // +kubebuilder:validation:Optional + SourceDiskRef *v1.Reference `json:"sourceDiskRef,omitempty" tf:"-"` + + // Selector for a Disk in compute to populate sourceDisk. + // +kubebuilder:validation:Optional + SourceDiskSelector *v1.Selector `json:"sourceDiskSelector,omitempty" tf:"-"` + // URL of the source image used to create this image. In order to create an image, you must provide the full or partial // URL of one of the following: SourceImage *string `json:"sourceImage,omitempty" tf:"source_image,omitempty"` @@ -283,9 +293,19 @@ type ImageParameters struct { // The source disk to create this image based on. // You must provide either this property or the // rawDisk.source property but not both to create an image. + // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/compute/v1beta2.Disk + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional SourceDisk *string `json:"sourceDisk,omitempty" tf:"source_disk,omitempty"` + // Reference to a Disk in compute to populate sourceDisk. + // +kubebuilder:validation:Optional + SourceDiskRef *v1.Reference `json:"sourceDiskRef,omitempty" tf:"-"` + + // Selector for a Disk in compute to populate sourceDisk. 
+ // +kubebuilder:validation:Optional + SourceDiskSelector *v1.Selector `json:"sourceDiskSelector,omitempty" tf:"-"` + // URL of the source image used to create this image. In order to create an image, you must provide the full or partial // URL of one of the following: // +kubebuilder:validation:Optional diff --git a/apis/compute/v1beta2/zz_instance_types.go b/apis/compute/v1beta2/zz_instance_types.go index 7109cf40f..21e9d1187 100755 --- a/apis/compute/v1beta2/zz_instance_types.go +++ b/apis/compute/v1beta2/zz_instance_types.go @@ -63,9 +63,18 @@ type AdvancedMachineFeaturesInitParameters struct { // Defines whether the instance should have nested virtualization enabled. Defaults to false. EnableNestedVirtualization *bool `json:"enableNestedVirtualization,omitempty" tf:"enable_nested_virtualization,omitempty"` - // he number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. + // Whether to enable UEFI networking for instance creation. + EnableUefiNetworking *bool `json:"enableUefiNetworking,omitempty" tf:"enable_uefi_networking,omitempty"` + + // The PMU is a hardware component within the CPU core that monitors how the processor runs code. Valid values for the level of PMU are STANDARD, ENHANCED, and ARCHITECTURAL. + PerformanceMonitoringUnit *string `json:"performanceMonitoringUnit,omitempty" tf:"performance_monitoring_unit,omitempty"` + + // The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. ThreadsPerCore *float64 `json:"threadsPerCore,omitempty" tf:"threads_per_core,omitempty"` + // Turbo frequency mode to use for the instance. Supported modes are currently either ALL_CORE_MAX or unset (default). + TurboMode *string `json:"turboMode,omitempty" tf:"turbo_mode,omitempty"` + // The number of physical cores to expose to an instance. visible cores info (VC). 
VisibleCoreCount *float64 `json:"visibleCoreCount,omitempty" tf:"visible_core_count,omitempty"` } @@ -75,9 +84,18 @@ type AdvancedMachineFeaturesObservation struct { // Defines whether the instance should have nested virtualization enabled. Defaults to false. EnableNestedVirtualization *bool `json:"enableNestedVirtualization,omitempty" tf:"enable_nested_virtualization,omitempty"` - // he number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. + // Whether to enable UEFI networking for instance creation. + EnableUefiNetworking *bool `json:"enableUefiNetworking,omitempty" tf:"enable_uefi_networking,omitempty"` + + // The PMU is a hardware component within the CPU core that monitors how the processor runs code. Valid values for the level of PMU are STANDARD, ENHANCED, and ARCHITECTURAL. + PerformanceMonitoringUnit *string `json:"performanceMonitoringUnit,omitempty" tf:"performance_monitoring_unit,omitempty"` + + // The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. ThreadsPerCore *float64 `json:"threadsPerCore,omitempty" tf:"threads_per_core,omitempty"` + // Turbo frequency mode to use for the instance. Supported modes are currently either ALL_CORE_MAX or unset (default). + TurboMode *string `json:"turboMode,omitempty" tf:"turbo_mode,omitempty"` + // The number of physical cores to expose to an instance. visible cores info (VC). VisibleCoreCount *float64 `json:"visibleCoreCount,omitempty" tf:"visible_core_count,omitempty"` } @@ -88,10 +106,22 @@ type AdvancedMachineFeaturesParameters struct { // +kubebuilder:validation:Optional EnableNestedVirtualization *bool `json:"enableNestedVirtualization,omitempty" tf:"enable_nested_virtualization,omitempty"` - // he number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. + // Whether to enable UEFI networking for instance creation. 
+ // +kubebuilder:validation:Optional + EnableUefiNetworking *bool `json:"enableUefiNetworking,omitempty" tf:"enable_uefi_networking,omitempty"` + + // The PMU is a hardware component within the CPU core that monitors how the processor runs code. Valid values for the level of PMU are STANDARD, ENHANCED, and ARCHITECTURAL. + // +kubebuilder:validation:Optional + PerformanceMonitoringUnit *string `json:"performanceMonitoringUnit,omitempty" tf:"performance_monitoring_unit,omitempty"` + + // The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. // +kubebuilder:validation:Optional ThreadsPerCore *float64 `json:"threadsPerCore,omitempty" tf:"threads_per_core,omitempty"` + // Turbo frequency mode to use for the instance. Supported modes are currently either ALL_CORE_MAX or unset (default). + // +kubebuilder:validation:Optional + TurboMode *string `json:"turboMode,omitempty" tf:"turbo_mode,omitempty"` + // The number of physical cores to expose to an instance. visible cores info (VC). // +kubebuilder:validation:Optional VisibleCoreCount *float64 `json:"visibleCoreCount,omitempty" tf:"visible_core_count,omitempty"` @@ -248,6 +278,9 @@ type BootDiskInitParameters struct { // Structure is documented below. InitializeParams *InitializeParamsInitParameters `json:"initializeParams,omitempty" tf:"initialize_params,omitempty"` + // The disk interface to use for attaching this disk; either SCSI or NVME. + Interface *string `json:"interface,omitempty" tf:"interface,omitempty"` + // The self_link of the encryption key that is // stored in Google Cloud KMS to encrypt this disk. Only one of kms_key_self_link // and disk_encryption_key_raw may be set. @@ -283,6 +316,9 @@ type BootDiskObservation struct { // Structure is documented below. InitializeParams *InitializeParamsObservation `json:"initializeParams,omitempty" tf:"initialize_params,omitempty"` + // The disk interface to use for attaching this disk; either SCSI or NVME. 
+ Interface *string `json:"interface,omitempty" tf:"interface,omitempty"` + // The self_link of the encryption key that is // stored in Google Cloud KMS to encrypt this disk. Only one of kms_key_self_link // and disk_encryption_key_raw may be set. @@ -324,6 +360,10 @@ type BootDiskParameters struct { // +kubebuilder:validation:Optional InitializeParams *InitializeParamsParameters `json:"initializeParams,omitempty" tf:"initialize_params,omitempty"` + // The disk interface to use for attaching this disk; either SCSI or NVME. + // +kubebuilder:validation:Optional + Interface *string `json:"interface,omitempty" tf:"interface,omitempty"` + // The self_link of the encryption key that is // stored in Google Cloud KMS to encrypt this disk. Only one of kms_key_self_link // and disk_encryption_key_raw may be set. @@ -344,7 +384,7 @@ type BootDiskParameters struct { type ConfidentialInstanceConfigInitParameters struct { - // Defines the confidential computing technology the instance uses. SEV is an AMD feature. TDX is an Intel feature. One of the following values is required: SEV, SEV_SNP, TDX. on_host_maintenance can be set to MIGRATE if confidential_instance_type is set to SEV and min_cpu_platform is set to "AMD Milan". Otherwise, on_host_maintenance has to be set to TERMINATE or this will fail to create the VM. If SEV_SNP, currently min_cpu_platform has to be set to "AMD Milan" or this will fail to create the VM. TDX is only available in beta. + // Defines the confidential computing technology the instance uses. SEV is an AMD feature. TDX is an Intel feature. One of the following values is required: SEV, SEV_SNP, TDX. on_host_maintenance can be set to MIGRATE if confidential_instance_type is set to SEV and min_cpu_platform is set to "AMD Milan". Otherwise, on_host_maintenance has to be set to TERMINATE or this will fail to create the VM. If SEV_SNP, currently min_cpu_platform has to be set to "AMD Milan" or this will fail to create the VM. 
ConfidentialInstanceType *string `json:"confidentialInstanceType,omitempty" tf:"confidential_instance_type,omitempty"` // Defines whether the instance should have confidential compute enabled with AMD SEV. If enabled, on_host_maintenance can be set to MIGRATE if min_cpu_platform is set to "AMD Milan". Otherwise, on_host_maintenance has to be set to TERMINATE or this will fail to create the VM. @@ -353,7 +393,7 @@ type ConfidentialInstanceConfigInitParameters struct { type ConfidentialInstanceConfigObservation struct { - // Defines the confidential computing technology the instance uses. SEV is an AMD feature. TDX is an Intel feature. One of the following values is required: SEV, SEV_SNP, TDX. on_host_maintenance can be set to MIGRATE if confidential_instance_type is set to SEV and min_cpu_platform is set to "AMD Milan". Otherwise, on_host_maintenance has to be set to TERMINATE or this will fail to create the VM. If SEV_SNP, currently min_cpu_platform has to be set to "AMD Milan" or this will fail to create the VM. TDX is only available in beta. + // Defines the confidential computing technology the instance uses. SEV is an AMD feature. TDX is an Intel feature. One of the following values is required: SEV, SEV_SNP, TDX. on_host_maintenance can be set to MIGRATE if confidential_instance_type is set to SEV and min_cpu_platform is set to "AMD Milan". Otherwise, on_host_maintenance has to be set to TERMINATE or this will fail to create the VM. If SEV_SNP, currently min_cpu_platform has to be set to "AMD Milan" or this will fail to create the VM. ConfidentialInstanceType *string `json:"confidentialInstanceType,omitempty" tf:"confidential_instance_type,omitempty"` // Defines whether the instance should have confidential compute enabled with AMD SEV. If enabled, on_host_maintenance can be set to MIGRATE if min_cpu_platform is set to "AMD Milan". Otherwise, on_host_maintenance has to be set to TERMINATE or this will fail to create the VM. 
@@ -362,7 +402,7 @@ type ConfidentialInstanceConfigObservation struct { type ConfidentialInstanceConfigParameters struct { - // Defines the confidential computing technology the instance uses. SEV is an AMD feature. TDX is an Intel feature. One of the following values is required: SEV, SEV_SNP, TDX. on_host_maintenance can be set to MIGRATE if confidential_instance_type is set to SEV and min_cpu_platform is set to "AMD Milan". Otherwise, on_host_maintenance has to be set to TERMINATE or this will fail to create the VM. If SEV_SNP, currently min_cpu_platform has to be set to "AMD Milan" or this will fail to create the VM. TDX is only available in beta. + // Defines the confidential computing technology the instance uses. SEV is an AMD feature. TDX is an Intel feature. One of the following values is required: SEV, SEV_SNP, TDX. on_host_maintenance can be set to MIGRATE if confidential_instance_type is set to SEV and min_cpu_platform is set to "AMD Milan". Otherwise, on_host_maintenance has to be set to TERMINATE or this will fail to create the VM. If SEV_SNP, currently min_cpu_platform has to be set to "AMD Milan" or this will fail to create the VM. // +kubebuilder:validation:Optional ConfidentialInstanceType *string `json:"confidentialInstanceType,omitempty" tf:"confidential_instance_type,omitempty"` @@ -374,10 +414,10 @@ type ConfidentialInstanceConfigParameters struct { type GuestAcceleratorInitParameters struct { // The number of the guest accelerator cards exposed to this instance. - Count *float64 `json:"count,omitempty" tf:"count"` + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` // The accelerator type resource to expose to this instance. E.g. nvidia-tesla-k80. - Type *string `json:"type,omitempty" tf:"type"` + Type *string `json:"type,omitempty" tf:"type,omitempty"` } type GuestAcceleratorObservation struct { @@ -393,11 +433,11 @@ type GuestAcceleratorParameters struct { // The number of the guest accelerator cards exposed to this instance. 
// +kubebuilder:validation:Optional - Count *float64 `json:"count,omitempty" tf:"count"` + Count *float64 `json:"count" tf:"count,omitempty"` // The accelerator type resource to expose to this instance. E.g. nvidia-tesla-k80. // +kubebuilder:validation:Optional - Type *string `json:"type,omitempty" tf:"type"` + Type *string `json:"type" tf:"type,omitempty"` } type IPv6AccessConfigInitParameters struct { @@ -530,11 +570,14 @@ type InitializeParamsInitParameters struct { // +mapType=granular ResourceManagerTags map[string]*string `json:"resourceManagerTags,omitempty" tf:"resource_manager_tags,omitempty"` + // - A list of self_links of resource policies to attach to the instance. Modifying this list will cause the instance to recreate. Currently a max of 1 resource policy is supported. + ResourcePolicies []*string `json:"resourcePolicies,omitempty" tf:"resource_policies,omitempty"` + // The size of the image in gigabytes. If not specified, it // will inherit the size of its base image. Size *float64 `json:"size,omitempty" tf:"size,omitempty"` - // The URL of the storage pool in which the new disk is created. + // The URL or the name of the storage pool in which the new disk is created. // For example: StoragePool *string `json:"storagePool,omitempty" tf:"storage_pool,omitempty"` @@ -586,11 +629,14 @@ type InitializeParamsObservation struct { // +mapType=granular ResourceManagerTags map[string]*string `json:"resourceManagerTags,omitempty" tf:"resource_manager_tags,omitempty"` + // - A list of self_links of resource policies to attach to the instance. Modifying this list will cause the instance to recreate. Currently a max of 1 resource policy is supported. + ResourcePolicies []*string `json:"resourcePolicies,omitempty" tf:"resource_policies,omitempty"` + // The size of the image in gigabytes. If not specified, it // will inherit the size of its base image. 
Size *float64 `json:"size,omitempty" tf:"size,omitempty"` - // The URL of the storage pool in which the new disk is created. + // The URL or the name of the storage pool in which the new disk is created. // For example: StoragePool *string `json:"storagePool,omitempty" tf:"storage_pool,omitempty"` @@ -657,12 +703,16 @@ type InitializeParamsParameters struct { // +mapType=granular ResourceManagerTags map[string]*string `json:"resourceManagerTags,omitempty" tf:"resource_manager_tags,omitempty"` + // - A list of self_links of resource policies to attach to the instance. Modifying this list will cause the instance to recreate. Currently a max of 1 resource policy is supported. + // +kubebuilder:validation:Optional + ResourcePolicies []*string `json:"resourcePolicies,omitempty" tf:"resource_policies,omitempty"` + // The size of the image in gigabytes. If not specified, it // will inherit the size of its base image. // +kubebuilder:validation:Optional Size *float64 `json:"size,omitempty" tf:"size,omitempty"` - // The URL of the storage pool in which the new disk is created. + // The URL or the name of the storage pool in which the new disk is created. // For example: // +kubebuilder:validation:Optional StoragePool *string `json:"storagePool,omitempty" tf:"storage_pool,omitempty"` @@ -695,15 +745,11 @@ type InstanceInitParameters struct { // Enable Confidential Mode on this VM. Structure is documented below ConfidentialInstanceConfig *ConfidentialInstanceConfigInitParameters `json:"confidentialInstanceConfig,omitempty" tf:"confidential_instance_config,omitempty"` - // Enable deletion protection on this instance. Defaults to false. - // Note: you must disable deletion protection before removing the resource (e.g. - DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` - // A brief description of this resource. Description *string `json:"description,omitempty" tf:"description,omitempty"` // Desired status of the instance. 
Either - // "RUNNING" or "TERMINATED". + // "RUNNING", "SUSPENDED" or "TERMINATED". DesiredStatus *string `json:"desiredStatus,omitempty" tf:"desired_status,omitempty"` // Enable Virtual Displays on this instance. @@ -712,11 +758,10 @@ type InstanceInitParameters struct { // List of the type and count of accelerator cards attached to the instance. Structure documented below. // Note: GPU accelerators can only be used with on_host_maintenance option set to TERMINATE. - // Note: This field uses attr-as-block mode to avoid - // breaking users during the 0.12 upgrade. To explicitly send a list - // of zero objects you must use the following syntax: - // example=[] - // For more details about this behavior, see this section. + // Note: As of 6.0.0, argument syntax + // is no longer supported for this field in favor of block syntax. + // To dynamically set a list of guest accelerators, use dynamic blocks. + // To set an empty list, use a single guest_accelerator block with count = 0. GuestAccelerator []GuestAcceleratorInitParameters `json:"guestAccelerator,omitempty" tf:"guest_accelerator,omitempty"` // A custom hostname for the instance. Must be a fully qualified DNS name and RFC-1035-valid. @@ -724,6 +769,9 @@ type InstanceInitParameters struct { // The entire hostname must not exceed 253 characters. Changing this forces a new resource to be created. Hostname *string `json:"hostname,omitempty" tf:"hostname,omitempty"` + // Action to be taken when a customer's encryption key is revoked. Supports STOP and NONE, with NONE being the default. + KeyRevocationActionType *string `json:"keyRevocationActionType,omitempty" tf:"key_revocation_action_type,omitempty"` + // A map of key/value label pairs to assign to the instance. // Note: This field is non-authoritative, and will only manage the labels present in your configuration. // Please refer to the field 'effective_labels' for all of the labels present on the resource. 
@@ -828,7 +876,10 @@ type InstanceObservation struct { // Enable Confidential Mode on this VM. Structure is documented below ConfidentialInstanceConfig *ConfidentialInstanceConfigObservation `json:"confidentialInstanceConfig,omitempty" tf:"confidential_instance_config,omitempty"` - // The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see Instance life cycle.`, + // Creation timestamp in RFC3339 text format. + CreationTimestamp *string `json:"creationTimestamp,omitempty" tf:"creation_timestamp,omitempty"` + + // The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see Instance life cycle. CurrentStatus *string `json:"currentStatus,omitempty" tf:"current_status,omitempty"` // Enable deletion protection on this instance. Defaults to false. @@ -839,7 +890,7 @@ type InstanceObservation struct { Description *string `json:"description,omitempty" tf:"description,omitempty"` // Desired status of the instance. Either - // "RUNNING" or "TERMINATED". + // "RUNNING", "SUSPENDED" or "TERMINATED". DesiredStatus *string `json:"desiredStatus,omitempty" tf:"desired_status,omitempty"` // +mapType=granular @@ -851,11 +902,10 @@ type InstanceObservation struct { // List of the type and count of accelerator cards attached to the instance. Structure documented below. // Note: GPU accelerators can only be used with on_host_maintenance option set to TERMINATE. - // Note: This field uses attr-as-block mode to avoid - // breaking users during the 0.12 upgrade. To explicitly send a list - // of zero objects you must use the following syntax: - // example=[] - // For more details about this behavior, see this section. 
+ // Note: As of 6.0.0, argument syntax + // is no longer supported for this field in favor of block syntax. + // To dynamically set a list of guest accelerators, use dynamic blocks. + // To set an empty list, use a single guest_accelerator block with count = 0. GuestAccelerator []GuestAcceleratorObservation `json:"guestAccelerator,omitempty" tf:"guest_accelerator,omitempty"` // A custom hostname for the instance. Must be a fully qualified DNS name and RFC-1035-valid. @@ -869,6 +919,9 @@ type InstanceObservation struct { // The server-assigned unique identifier of this instance. InstanceID *string `json:"instanceId,omitempty" tf:"instance_id,omitempty"` + // Action to be taken when a customer's encryption key is revoked. Supports STOP and NONE, with NONE being the default. + KeyRevocationActionType *string `json:"keyRevocationActionType,omitempty" tf:"key_revocation_action_type,omitempty"` + // The unique fingerprint of the labels. LabelFingerprint *string `json:"labelFingerprint,omitempty" tf:"label_fingerprint,omitempty"` @@ -995,17 +1048,12 @@ type InstanceParameters struct { // +kubebuilder:validation:Optional ConfidentialInstanceConfig *ConfidentialInstanceConfigParameters `json:"confidentialInstanceConfig,omitempty" tf:"confidential_instance_config,omitempty"` - // Enable deletion protection on this instance. Defaults to false. - // Note: you must disable deletion protection before removing the resource (e.g. - // +kubebuilder:validation:Optional - DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` - // A brief description of this resource. // +kubebuilder:validation:Optional Description *string `json:"description,omitempty" tf:"description,omitempty"` // Desired status of the instance. Either - // "RUNNING" or "TERMINATED". + // "RUNNING", "SUSPENDED" or "TERMINATED". 
// +kubebuilder:validation:Optional DesiredStatus *string `json:"desiredStatus,omitempty" tf:"desired_status,omitempty"` @@ -1016,11 +1064,10 @@ type InstanceParameters struct { // List of the type and count of accelerator cards attached to the instance. Structure documented below. // Note: GPU accelerators can only be used with on_host_maintenance option set to TERMINATE. - // Note: This field uses attr-as-block mode to avoid - // breaking users during the 0.12 upgrade. To explicitly send a list - // of zero objects you must use the following syntax: - // example=[] - // For more details about this behavior, see this section. + // Note: As of 6.0.0, argument syntax + // is no longer supported for this field in favor of block syntax. + // To dynamically set a list of guest accelerators, use dynamic blocks. + // To set an empty list, use a single guest_accelerator block with count = 0. // +kubebuilder:validation:Optional GuestAccelerator []GuestAcceleratorParameters `json:"guestAccelerator,omitempty" tf:"guest_accelerator,omitempty"` @@ -1030,6 +1077,10 @@ type InstanceParameters struct { // +kubebuilder:validation:Optional Hostname *string `json:"hostname,omitempty" tf:"hostname,omitempty"` + // Action to be taken when a customer's encryption key is revoked. Supports STOP and NONE, with NONE being the default. + // +kubebuilder:validation:Optional + KeyRevocationActionType *string `json:"keyRevocationActionType,omitempty" tf:"key_revocation_action_type,omitempty"` + // A map of key/value label pairs to assign to the instance. // Note: This field is non-authoritative, and will only manage the labels present in your configuration. // Please refer to the field 'effective_labels' for all of the labels present on the resource. @@ -1222,7 +1273,7 @@ type NetworkInterfaceInitParameters struct { // instance can be accessed via the Internet. Omit to ensure that the instance // is not accessible from the Internet.g. 
via // tunnel or because it is running on another cloud instance on that network). - // This block can be repeated multiple times. Structure documented below. + // This block can be specified once per network_interface. Structure documented below. AccessConfig []AccessConfigInitParameters `json:"accessConfig,omitempty" tf:"access_config,omitempty"` // An @@ -1258,7 +1309,7 @@ type NetworkInterfaceInitParameters struct { // +kubebuilder:validation:Optional NetworkSelector *v1.Selector `json:"networkSelector,omitempty" tf:"-"` - // The type of vNIC to be used on this interface. Possible values: GVNIC, VIRTIO_NET. + // The type of vNIC to be used on this interface. Possible values: GVNIC, VIRTIO_NET, IDPF. In the beta provider the additional values of MRDMA and IRDMA are supported. NicType *string `json:"nicType,omitempty" tf:"nic_type,omitempty"` // The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It will be empty if not specified. @@ -1279,7 +1330,7 @@ type NetworkInterfaceInitParameters struct { Subnetwork *string `json:"subnetwork,omitempty" tf:"subnetwork,omitempty"` // The project in which the subnetwork belongs. - // If the subnetwork is a self_link, this field is ignored in favor of the project + // If the subnetwork is a self_link, this field is set to the project // defined in the subnetwork self_link. If the subnetwork is a name and this // field is not provided, the provider project is used. SubnetworkProject *string `json:"subnetworkProject,omitempty" tf:"subnetwork_project,omitempty"` @@ -1299,7 +1350,7 @@ type NetworkInterfaceObservation struct { // instance can be accessed via the Internet. Omit to ensure that the instance // is not accessible from the Internet.g. via // tunnel or because it is running on another cloud instance on that network). - // This block can be repeated multiple times. Structure documented below. 
+ // This block can be specified once per network_interface. Structure documented below. AccessConfig []AccessConfigObservation `json:"accessConfig,omitempty" tf:"access_config,omitempty"` // An @@ -1333,7 +1384,7 @@ type NetworkInterfaceObservation struct { // empty, the address will be automatically assigned. NetworkIP *string `json:"networkIp,omitempty" tf:"network_ip,omitempty"` - // The type of vNIC to be used on this interface. Possible values: GVNIC, VIRTIO_NET. + // The type of vNIC to be used on this interface. Possible values: GVNIC, VIRTIO_NET, IDPF. In the beta provider the additional values of MRDMA and IRDMA are supported. NicType *string `json:"nicType,omitempty" tf:"nic_type,omitempty"` // The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It will be empty if not specified. @@ -1352,7 +1403,7 @@ type NetworkInterfaceObservation struct { Subnetwork *string `json:"subnetwork,omitempty" tf:"subnetwork,omitempty"` // The project in which the subnetwork belongs. - // If the subnetwork is a self_link, this field is ignored in favor of the project + // If the subnetwork is a self_link, this field is set to the project // defined in the subnetwork self_link. If the subnetwork is a name and this // field is not provided, the provider project is used. SubnetworkProject *string `json:"subnetworkProject,omitempty" tf:"subnetwork_project,omitempty"` @@ -1364,7 +1415,7 @@ type NetworkInterfaceParameters struct { // instance can be accessed via the Internet. Omit to ensure that the instance // is not accessible from the Internet.g. via // tunnel or because it is running on another cloud instance on that network). - // This block can be repeated multiple times. Structure documented below. + // This block can be specified once per network_interface. Structure documented below. 
// +kubebuilder:validation:Optional AccessConfig []AccessConfigParameters `json:"accessConfig,omitempty" tf:"access_config,omitempty"` @@ -1407,7 +1458,7 @@ type NetworkInterfaceParameters struct { // +kubebuilder:validation:Optional NetworkSelector *v1.Selector `json:"networkSelector,omitempty" tf:"-"` - // The type of vNIC to be used on this interface. Possible values: GVNIC, VIRTIO_NET. + // The type of vNIC to be used on this interface. Possible values: GVNIC, VIRTIO_NET, IDPF. In the beta provider the additional values of MRDMA and IRDMA are supported. // +kubebuilder:validation:Optional NicType *string `json:"nicType,omitempty" tf:"nic_type,omitempty"` @@ -1432,7 +1483,7 @@ type NetworkInterfaceParameters struct { Subnetwork *string `json:"subnetwork,omitempty" tf:"subnetwork,omitempty"` // The project in which the subnetwork belongs. - // If the subnetwork is a self_link, this field is ignored in favor of the project + // If the subnetwork is a self_link, this field is set to the project // defined in the subnetwork self_link. If the subnetwork is a name and this // field is not provided, the provider project is used. // +kubebuilder:validation:Optional @@ -1594,6 +1645,9 @@ type SchedulingInitParameters struct { // Defaults to true. AutomaticRestart *bool `json:"automaticRestart,omitempty" tf:"automatic_restart,omitempty"` + // Specifies the availability domain to place the instance in. The value must be a number between 1 and the number of availability domains specified in the spread placement policy attached to the instance. + AvailabilityDomain *float64 `json:"availabilityDomain,omitempty" tf:"availability_domain,omitempty"` + // Describe the type of termination action for VM. Can be STOP or DELETE. Read more on here InstanceTerminationAction *string `json:"instanceTerminationAction,omitempty" tf:"instance_termination_action,omitempty"` @@ -1641,6 +1695,9 @@ type SchedulingObservation struct { // Defaults to true. 
AutomaticRestart *bool `json:"automaticRestart,omitempty" tf:"automatic_restart,omitempty"` + // Specifies the availability domain to place the instance in. The value must be a number between 1 and the number of availability domains specified in the spread placement policy attached to the instance. + AvailabilityDomain *float64 `json:"availabilityDomain,omitempty" tf:"availability_domain,omitempty"` + // Describe the type of termination action for VM. Can be STOP or DELETE. Read more on here InstanceTerminationAction *string `json:"instanceTerminationAction,omitempty" tf:"instance_termination_action,omitempty"` @@ -1689,6 +1746,10 @@ type SchedulingParameters struct { // +kubebuilder:validation:Optional AutomaticRestart *bool `json:"automaticRestart,omitempty" tf:"automatic_restart,omitempty"` + // Specifies the availability domain to place the instance in. The value must be a number between 1 and the number of availability domains specified in the spread placement policy attached to the instance. + // +kubebuilder:validation:Optional + AvailabilityDomain *float64 `json:"availabilityDomain,omitempty" tf:"availability_domain,omitempty"` + // Describe the type of termination action for VM. Can be STOP or DELETE. 
Read more on here // +kubebuilder:validation:Optional InstanceTerminationAction *string `json:"instanceTerminationAction,omitempty" tf:"instance_termination_action,omitempty"` diff --git a/apis/compute/v1beta2/zz_instancefromtemplate_terraformed.go b/apis/compute/v1beta2/zz_instancefromtemplate_terraformed.go index bbe065740..11f5da524 100755 --- a/apis/compute/v1beta2/zz_instancefromtemplate_terraformed.go +++ b/apis/compute/v1beta2/zz_instancefromtemplate_terraformed.go @@ -21,7 +21,7 @@ func (mg *InstanceFromTemplate) GetTerraformResourceType() string { // GetConnectionDetailsMapping for this InstanceFromTemplate func (tr *InstanceFromTemplate) GetConnectionDetailsMapping() map[string]string { - return map[string]string{"boot_disk[*].disk_encryption_key_raw": "bootDisk[*].diskEncryptionKeyRawSecretRef"} + return map[string]string{"attached_disk[*].disk_encryption_key_raw": "attachedDisk[*].diskEncryptionKeyRawSecretRef", "boot_disk[*].disk_encryption_key_raw": "bootDisk[*].diskEncryptionKeyRawSecretRef"} } // GetObservation of this InstanceFromTemplate diff --git a/apis/compute/v1beta2/zz_instancefromtemplate_types.go b/apis/compute/v1beta2/zz_instancefromtemplate_types.go index 5dbbe716c..429728891 100755 --- a/apis/compute/v1beta2/zz_instancefromtemplate_types.go +++ b/apis/compute/v1beta2/zz_instancefromtemplate_types.go @@ -27,6 +27,8 @@ type BootDiskInitializeParamsInitParameters struct { // +mapType=granular ResourceManagerTags map[string]*string `json:"resourceManagerTags,omitempty" tf:"resource_manager_tags,omitempty"` + ResourcePolicies []*string `json:"resourcePolicies,omitempty" tf:"resource_policies,omitempty"` + Size *float64 `json:"size,omitempty" tf:"size,omitempty"` StoragePool *string `json:"storagePool,omitempty" tf:"storage_pool,omitempty"` @@ -48,6 +50,8 @@ type BootDiskInitializeParamsObservation struct { // +mapType=granular ResourceManagerTags map[string]*string `json:"resourceManagerTags,omitempty" tf:"resource_manager_tags,omitempty"` + 
ResourcePolicies []*string `json:"resourcePolicies,omitempty" tf:"resource_policies,omitempty"` + Size *float64 `json:"size,omitempty" tf:"size,omitempty"` StoragePool *string `json:"storagePool,omitempty" tf:"storage_pool,omitempty"` @@ -76,6 +80,9 @@ type BootDiskInitializeParamsParameters struct { // +mapType=granular ResourceManagerTags map[string]*string `json:"resourceManagerTags,omitempty" tf:"resource_manager_tags,omitempty"` + // +kubebuilder:validation:Optional + ResourcePolicies []*string `json:"resourcePolicies,omitempty" tf:"resource_policies,omitempty"` + // +kubebuilder:validation:Optional Size *float64 `json:"size,omitempty" tf:"size,omitempty"` @@ -89,16 +96,28 @@ type BootDiskInitializeParamsParameters struct { type InstanceFromTemplateAdvancedMachineFeaturesInitParameters struct { EnableNestedVirtualization *bool `json:"enableNestedVirtualization,omitempty" tf:"enable_nested_virtualization,omitempty"` + EnableUefiNetworking *bool `json:"enableUefiNetworking,omitempty" tf:"enable_uefi_networking,omitempty"` + + PerformanceMonitoringUnit *string `json:"performanceMonitoringUnit,omitempty" tf:"performance_monitoring_unit,omitempty"` + ThreadsPerCore *float64 `json:"threadsPerCore,omitempty" tf:"threads_per_core,omitempty"` + TurboMode *string `json:"turboMode,omitempty" tf:"turbo_mode,omitempty"` + VisibleCoreCount *float64 `json:"visibleCoreCount,omitempty" tf:"visible_core_count,omitempty"` } type InstanceFromTemplateAdvancedMachineFeaturesObservation struct { EnableNestedVirtualization *bool `json:"enableNestedVirtualization,omitempty" tf:"enable_nested_virtualization,omitempty"` + EnableUefiNetworking *bool `json:"enableUefiNetworking,omitempty" tf:"enable_uefi_networking,omitempty"` + + PerformanceMonitoringUnit *string `json:"performanceMonitoringUnit,omitempty" tf:"performance_monitoring_unit,omitempty"` + ThreadsPerCore *float64 `json:"threadsPerCore,omitempty" tf:"threads_per_core,omitempty"` + TurboMode *string `json:"turboMode,omitempty" 
tf:"turbo_mode,omitempty"` + VisibleCoreCount *float64 `json:"visibleCoreCount,omitempty" tf:"visible_core_count,omitempty"` } @@ -107,9 +126,18 @@ type InstanceFromTemplateAdvancedMachineFeaturesParameters struct { // +kubebuilder:validation:Optional EnableNestedVirtualization *bool `json:"enableNestedVirtualization,omitempty" tf:"enable_nested_virtualization,omitempty"` + // +kubebuilder:validation:Optional + EnableUefiNetworking *bool `json:"enableUefiNetworking,omitempty" tf:"enable_uefi_networking,omitempty"` + + // +kubebuilder:validation:Optional + PerformanceMonitoringUnit *string `json:"performanceMonitoringUnit,omitempty" tf:"performance_monitoring_unit,omitempty"` + // +kubebuilder:validation:Optional ThreadsPerCore *float64 `json:"threadsPerCore,omitempty" tf:"threads_per_core,omitempty"` + // +kubebuilder:validation:Optional + TurboMode *string `json:"turboMode,omitempty" tf:"turbo_mode,omitempty"` + // +kubebuilder:validation:Optional VisibleCoreCount *float64 `json:"visibleCoreCount,omitempty" tf:"visible_core_count,omitempty"` } @@ -118,17 +146,15 @@ type InstanceFromTemplateAttachedDiskInitParameters struct { // A unique name for the resource, required by GCE. // Changing this forces a new resource to be created. 
- DeviceName *string `json:"deviceName,omitempty" tf:"device_name"` - - DiskEncryptionKeyRaw *string `json:"diskEncryptionKeyRaw,omitempty" tf:"disk_encryption_key_raw"` + DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"` - DiskEncryptionKeySha256 *string `json:"diskEncryptionKeySha256,omitempty" tf:"disk_encryption_key_sha256"` + DiskEncryptionKeyRawSecretRef *v1.SecretKeySelector `json:"diskEncryptionKeyRawSecretRef,omitempty" tf:"-"` - KMSKeySelfLink *string `json:"kmsKeySelfLink,omitempty" tf:"kms_key_self_link"` + KMSKeySelfLink *string `json:"kmsKeySelfLink,omitempty" tf:"kms_key_self_link,omitempty"` - Mode *string `json:"mode,omitempty" tf:"mode"` + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` - Source *string `json:"source,omitempty" tf:"source"` + Source *string `json:"source,omitempty" tf:"source,omitempty"` } type InstanceFromTemplateAttachedDiskObservation struct { @@ -137,8 +163,6 @@ type InstanceFromTemplateAttachedDiskObservation struct { // Changing this forces a new resource to be created. DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"` - DiskEncryptionKeyRaw *string `json:"diskEncryptionKeyRaw,omitempty" tf:"disk_encryption_key_raw,omitempty"` - DiskEncryptionKeySha256 *string `json:"diskEncryptionKeySha256,omitempty" tf:"disk_encryption_key_sha256,omitempty"` KMSKeySelfLink *string `json:"kmsKeySelfLink,omitempty" tf:"kms_key_self_link,omitempty"` @@ -153,22 +177,19 @@ type InstanceFromTemplateAttachedDiskParameters struct { // A unique name for the resource, required by GCE. // Changing this forces a new resource to be created. 
// +kubebuilder:validation:Optional - DeviceName *string `json:"deviceName,omitempty" tf:"device_name"` - - // +kubebuilder:validation:Optional - DiskEncryptionKeyRaw *string `json:"diskEncryptionKeyRaw,omitempty" tf:"disk_encryption_key_raw"` + DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"` // +kubebuilder:validation:Optional - DiskEncryptionKeySha256 *string `json:"diskEncryptionKeySha256,omitempty" tf:"disk_encryption_key_sha256"` + DiskEncryptionKeyRawSecretRef *v1.SecretKeySelector `json:"diskEncryptionKeyRawSecretRef,omitempty" tf:"-"` // +kubebuilder:validation:Optional - KMSKeySelfLink *string `json:"kmsKeySelfLink,omitempty" tf:"kms_key_self_link"` + KMSKeySelfLink *string `json:"kmsKeySelfLink,omitempty" tf:"kms_key_self_link,omitempty"` // +kubebuilder:validation:Optional - Mode *string `json:"mode,omitempty" tf:"mode"` + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` // +kubebuilder:validation:Optional - Source *string `json:"source,omitempty" tf:"source"` + Source *string `json:"source" tf:"source,omitempty"` } type InstanceFromTemplateBootDiskInitParameters struct { @@ -184,6 +205,8 @@ type InstanceFromTemplateBootDiskInitParameters struct { InitializeParams *BootDiskInitializeParamsInitParameters `json:"initializeParams,omitempty" tf:"initialize_params,omitempty"` + Interface *string `json:"interface,omitempty" tf:"interface,omitempty"` + KMSKeySelfLink *string `json:"kmsKeySelfLink,omitempty" tf:"kms_key_self_link,omitempty"` Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` @@ -204,6 +227,8 @@ type InstanceFromTemplateBootDiskObservation struct { InitializeParams *BootDiskInitializeParamsObservation `json:"initializeParams,omitempty" tf:"initialize_params,omitempty"` + Interface *string `json:"interface,omitempty" tf:"interface,omitempty"` + KMSKeySelfLink *string `json:"kmsKeySelfLink,omitempty" tf:"kms_key_self_link,omitempty"` Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` @@ -228,6 +253,9 
@@ type InstanceFromTemplateBootDiskParameters struct { // +kubebuilder:validation:Optional InitializeParams *BootDiskInitializeParamsParameters `json:"initializeParams,omitempty" tf:"initialize_params,omitempty"` + // +kubebuilder:validation:Optional + Interface *string `json:"interface,omitempty" tf:"interface,omitempty"` + // +kubebuilder:validation:Optional KMSKeySelfLink *string `json:"kmsKeySelfLink,omitempty" tf:"kms_key_self_link,omitempty"` @@ -260,9 +288,9 @@ type InstanceFromTemplateConfidentialInstanceConfigParameters struct { } type InstanceFromTemplateGuestAcceleratorInitParameters struct { - Count *float64 `json:"count,omitempty" tf:"count"` + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` - Type *string `json:"type,omitempty" tf:"type"` + Type *string `json:"type,omitempty" tf:"type,omitempty"` } type InstanceFromTemplateGuestAcceleratorObservation struct { @@ -274,10 +302,10 @@ type InstanceFromTemplateGuestAcceleratorObservation struct { type InstanceFromTemplateGuestAcceleratorParameters struct { // +kubebuilder:validation:Optional - Count *float64 `json:"count,omitempty" tf:"count"` + Count *float64 `json:"count" tf:"count,omitempty"` // +kubebuilder:validation:Optional - Type *string `json:"type,omitempty" tf:"type"` + Type *string `json:"type" tf:"type,omitempty"` } type InstanceFromTemplateInitParameters struct { @@ -294,8 +322,6 @@ type InstanceFromTemplateInitParameters struct { ConfidentialInstanceConfig *InstanceFromTemplateConfidentialInstanceConfigInitParameters `json:"confidentialInstanceConfig,omitempty" tf:"confidential_instance_config,omitempty"` - DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` - Description *string `json:"description,omitempty" tf:"description,omitempty"` DesiredStatus *string `json:"desiredStatus,omitempty" tf:"desired_status,omitempty"` @@ -308,6 +334,8 @@ type InstanceFromTemplateInitParameters struct { // Changing this forces a new resource to be 
created. Hostname *string `json:"hostname,omitempty" tf:"hostname,omitempty"` + KeyRevocationActionType *string `json:"keyRevocationActionType,omitempty" tf:"key_revocation_action_type,omitempty"` + // +mapType=granular Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` @@ -529,6 +557,8 @@ type InstanceFromTemplateObservation struct { ConfidentialInstanceConfig *InstanceFromTemplateConfidentialInstanceConfigObservation `json:"confidentialInstanceConfig,omitempty" tf:"confidential_instance_config,omitempty"` + CreationTimestamp *string `json:"creationTimestamp,omitempty" tf:"creation_timestamp,omitempty"` + CurrentStatus *string `json:"currentStatus,omitempty" tf:"current_status,omitempty"` DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` @@ -552,6 +582,8 @@ type InstanceFromTemplateObservation struct { InstanceID *string `json:"instanceId,omitempty" tf:"instance_id,omitempty"` + KeyRevocationActionType *string `json:"keyRevocationActionType,omitempty" tf:"key_revocation_action_type,omitempty"` + LabelFingerprint *string `json:"labelFingerprint,omitempty" tf:"label_fingerprint,omitempty"` // +mapType=granular @@ -632,9 +664,6 @@ type InstanceFromTemplateParameters struct { // +kubebuilder:validation:Optional ConfidentialInstanceConfig *InstanceFromTemplateConfidentialInstanceConfigParameters `json:"confidentialInstanceConfig,omitempty" tf:"confidential_instance_config,omitempty"` - // +kubebuilder:validation:Optional - DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` - // +kubebuilder:validation:Optional Description *string `json:"description,omitempty" tf:"description,omitempty"` @@ -652,6 +681,9 @@ type InstanceFromTemplateParameters struct { // +kubebuilder:validation:Optional Hostname *string `json:"hostname,omitempty" tf:"hostname,omitempty"` + // +kubebuilder:validation:Optional + KeyRevocationActionType *string 
`json:"keyRevocationActionType,omitempty" tf:"key_revocation_action_type,omitempty"` + // +kubebuilder:validation:Optional // +mapType=granular Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` @@ -772,6 +804,8 @@ type InstanceFromTemplateReservationAffinityParameters struct { type InstanceFromTemplateSchedulingInitParameters struct { AutomaticRestart *bool `json:"automaticRestart,omitempty" tf:"automatic_restart,omitempty"` + AvailabilityDomain *float64 `json:"availabilityDomain,omitempty" tf:"availability_domain,omitempty"` + InstanceTerminationAction *string `json:"instanceTerminationAction,omitempty" tf:"instance_termination_action,omitempty"` LocalSsdRecoveryTimeout *SchedulingLocalSsdRecoveryTimeoutInitParameters `json:"localSsdRecoveryTimeout,omitempty" tf:"local_ssd_recovery_timeout,omitempty"` @@ -794,6 +828,8 @@ type InstanceFromTemplateSchedulingInitParameters struct { type InstanceFromTemplateSchedulingObservation struct { AutomaticRestart *bool `json:"automaticRestart,omitempty" tf:"automatic_restart,omitempty"` + AvailabilityDomain *float64 `json:"availabilityDomain,omitempty" tf:"availability_domain,omitempty"` + InstanceTerminationAction *string `json:"instanceTerminationAction,omitempty" tf:"instance_termination_action,omitempty"` LocalSsdRecoveryTimeout *SchedulingLocalSsdRecoveryTimeoutObservation `json:"localSsdRecoveryTimeout,omitempty" tf:"local_ssd_recovery_timeout,omitempty"` @@ -818,6 +854,9 @@ type InstanceFromTemplateSchedulingParameters struct { // +kubebuilder:validation:Optional AutomaticRestart *bool `json:"automaticRestart,omitempty" tf:"automatic_restart,omitempty"` + // +kubebuilder:validation:Optional + AvailabilityDomain *float64 `json:"availabilityDomain,omitempty" tf:"availability_domain,omitempty"` + // +kubebuilder:validation:Optional InstanceTerminationAction *string `json:"instanceTerminationAction,omitempty" tf:"instance_termination_action,omitempty"` @@ -850,11 +889,11 @@ type 
InstanceFromTemplateScratchDiskInitParameters struct { // A unique name for the resource, required by GCE. // Changing this forces a new resource to be created. - DeviceName *string `json:"deviceName,omitempty" tf:"device_name"` + DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"` - Interface *string `json:"interface,omitempty" tf:"interface"` + Interface *string `json:"interface,omitempty" tf:"interface,omitempty"` - Size *float64 `json:"size,omitempty" tf:"size"` + Size *float64 `json:"size,omitempty" tf:"size,omitempty"` } type InstanceFromTemplateScratchDiskObservation struct { @@ -873,20 +912,20 @@ type InstanceFromTemplateScratchDiskParameters struct { // A unique name for the resource, required by GCE. // Changing this forces a new resource to be created. // +kubebuilder:validation:Optional - DeviceName *string `json:"deviceName,omitempty" tf:"device_name"` + DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"` // +kubebuilder:validation:Optional - Interface *string `json:"interface,omitempty" tf:"interface"` + Interface *string `json:"interface" tf:"interface,omitempty"` // +kubebuilder:validation:Optional - Size *float64 `json:"size,omitempty" tf:"size"` + Size *float64 `json:"size,omitempty" tf:"size,omitempty"` } type InstanceFromTemplateServiceAccountInitParameters struct { - Email *string `json:"email,omitempty" tf:"email"` + Email *string `json:"email,omitempty" tf:"email,omitempty"` // +listType=set - Scopes []*string `json:"scopes,omitempty" tf:"scopes"` + Scopes []*string `json:"scopes,omitempty" tf:"scopes,omitempty"` } type InstanceFromTemplateServiceAccountObservation struct { @@ -899,11 +938,11 @@ type InstanceFromTemplateServiceAccountObservation struct { type InstanceFromTemplateServiceAccountParameters struct { // +kubebuilder:validation:Optional - Email *string `json:"email,omitempty" tf:"email"` + Email *string `json:"email,omitempty" tf:"email,omitempty"` // +kubebuilder:validation:Optional 
// +listType=set - Scopes []*string `json:"scopes,omitempty" tf:"scopes"` + Scopes []*string `json:"scopes" tf:"scopes,omitempty"` } type InstanceFromTemplateShieldedInstanceConfigInitParameters struct { @@ -935,13 +974,13 @@ type InstanceFromTemplateShieldedInstanceConfigParameters struct { } type NetworkInterfaceAccessConfigInitParameters struct { - NATIP *string `json:"natIp,omitempty" tf:"nat_ip"` + NATIP *string `json:"natIp,omitempty" tf:"nat_ip,omitempty"` - NetworkTier *string `json:"networkTier,omitempty" tf:"network_tier"` + NetworkTier *string `json:"networkTier,omitempty" tf:"network_tier,omitempty"` // A unique name for the resource, required by GCE. // Changing this forces a new resource to be created. - PublicPtrDomainName *string `json:"publicPtrDomainName,omitempty" tf:"public_ptr_domain_name"` + PublicPtrDomainName *string `json:"publicPtrDomainName,omitempty" tf:"public_ptr_domain_name,omitempty"` } type NetworkInterfaceAccessConfigObservation struct { @@ -957,23 +996,23 @@ type NetworkInterfaceAccessConfigObservation struct { type NetworkInterfaceAccessConfigParameters struct { // +kubebuilder:validation:Optional - NATIP *string `json:"natIp,omitempty" tf:"nat_ip"` + NATIP *string `json:"natIp,omitempty" tf:"nat_ip,omitempty"` // +kubebuilder:validation:Optional - NetworkTier *string `json:"networkTier,omitempty" tf:"network_tier"` + NetworkTier *string `json:"networkTier,omitempty" tf:"network_tier,omitempty"` // A unique name for the resource, required by GCE. // Changing this forces a new resource to be created. 
// +kubebuilder:validation:Optional - PublicPtrDomainName *string `json:"publicPtrDomainName,omitempty" tf:"public_ptr_domain_name"` + PublicPtrDomainName *string `json:"publicPtrDomainName,omitempty" tf:"public_ptr_domain_name,omitempty"` } type NetworkInterfaceAliasIPRangeInitParameters struct { - IPCidrRange *string `json:"ipCidrRange,omitempty" tf:"ip_cidr_range"` + IPCidrRange *string `json:"ipCidrRange,omitempty" tf:"ip_cidr_range,omitempty"` // A unique name for the resource, required by GCE. // Changing this forces a new resource to be created. - SubnetworkRangeName *string `json:"subnetworkRangeName,omitempty" tf:"subnetwork_range_name"` + SubnetworkRangeName *string `json:"subnetworkRangeName,omitempty" tf:"subnetwork_range_name,omitempty"` } type NetworkInterfaceAliasIPRangeObservation struct { @@ -987,12 +1026,12 @@ type NetworkInterfaceAliasIPRangeObservation struct { type NetworkInterfaceAliasIPRangeParameters struct { // +kubebuilder:validation:Optional - IPCidrRange *string `json:"ipCidrRange,omitempty" tf:"ip_cidr_range"` + IPCidrRange *string `json:"ipCidrRange" tf:"ip_cidr_range,omitempty"` // A unique name for the resource, required by GCE. // Changing this forces a new resource to be created. // +kubebuilder:validation:Optional - SubnetworkRangeName *string `json:"subnetworkRangeName,omitempty" tf:"subnetwork_range_name"` + SubnetworkRangeName *string `json:"subnetworkRangeName,omitempty" tf:"subnetwork_range_name,omitempty"` } type NetworkInterfaceIPv6AccessConfigInitParameters struct { diff --git a/apis/compute/v1beta2/zz_instancegroupmanager_types.go b/apis/compute/v1beta2/zz_instancegroupmanager_types.go index 88617b8c3..aa22ddf96 100755 --- a/apis/compute/v1beta2/zz_instancegroupmanager_types.go +++ b/apis/compute/v1beta2/zz_instancegroupmanager_types.go @@ -141,6 +141,9 @@ type InstanceGroupManagerInitParameters struct { // is not provided, the provider project is used. 
Project *string `json:"project,omitempty" tf:"project,omitempty"` + // The standby policy for stopped and suspended instances. Structure is documented below. For more information, see the official documentation. + StandbyPolicy *StandbyPolicyInitParameters `json:"standbyPolicy,omitempty" tf:"standby_policy,omitempty"` + // Disks created on the instances that will be preserved on instance delete, update, etc. Structure is documented below. For more information see the official documentation. StatefulDisk []StatefulDiskInitParameters `json:"statefulDisk,omitempty" tf:"stateful_disk,omitempty"` @@ -172,6 +175,12 @@ type InstanceGroupManagerInitParameters struct { // lifecycle. Defaults to 0. TargetSize *float64 `json:"targetSize,omitempty" tf:"target_size,omitempty"` + // The target number of stopped instances for this managed instance group. + TargetStoppedSize *float64 `json:"targetStoppedSize,omitempty" tf:"target_stopped_size,omitempty"` + + // The target number of suspended instances for this managed instance group. + TargetSuspendedSize *float64 `json:"targetSuspendedSize,omitempty" tf:"target_suspended_size,omitempty"` + // The update policy for this managed instance group. Structure is documented below. For more information, see the official documentation and API. UpdatePolicy *UpdatePolicyInitParameters `json:"updatePolicy,omitempty" tf:"update_policy,omitempty"` @@ -226,6 +235,9 @@ type InstanceGroupManagerObservation struct { // The full URL of the instance group created by the manager. 
InstanceGroup *string `json:"instanceGroup,omitempty" tf:"instance_group,omitempty"` + // an identifier for the resource with format projects/{{project}}/zones/{{zone}}/instanceGroupManagers/{{name}} + InstanceGroupManagerID *float64 `json:"instanceGroupManagerId,omitempty" tf:"instance_group_manager_id,omitempty"` + InstanceLifecyclePolicy *InstanceLifecyclePolicyObservation `json:"instanceLifecyclePolicy,omitempty" tf:"instance_lifecycle_policy,omitempty"` // Pagination behavior of the listManagedInstances API @@ -249,6 +261,9 @@ type InstanceGroupManagerObservation struct { // The URL of the created resource. SelfLink *string `json:"selfLink,omitempty" tf:"self_link,omitempty"` + // The standby policy for stopped and suspended instances. Structure is documented below. For more information, see the official documentation. + StandbyPolicy *StandbyPolicyObservation `json:"standbyPolicy,omitempty" tf:"standby_policy,omitempty"` + // Disks created on the instances that will be preserved on instance delete, update, etc. Structure is documented below. For more information see the official documentation. StatefulDisk []StatefulDiskObservation `json:"statefulDisk,omitempty" tf:"stateful_disk,omitempty"` @@ -273,6 +288,12 @@ type InstanceGroupManagerObservation struct { // lifecycle. Defaults to 0. TargetSize *float64 `json:"targetSize,omitempty" tf:"target_size,omitempty"` + // The target number of stopped instances for this managed instance group. + TargetStoppedSize *float64 `json:"targetStoppedSize,omitempty" tf:"target_stopped_size,omitempty"` + + // The target number of suspended instances for this managed instance group. + TargetSuspendedSize *float64 `json:"targetSuspendedSize,omitempty" tf:"target_suspended_size,omitempty"` + // The update policy for this managed instance group. Structure is documented below. For more information, see the official documentation and API. 
UpdatePolicy *UpdatePolicyObservation `json:"updatePolicy,omitempty" tf:"update_policy,omitempty"` @@ -345,6 +366,10 @@ type InstanceGroupManagerParameters struct { // +kubebuilder:validation:Optional Project *string `json:"project,omitempty" tf:"project,omitempty"` + // The standby policy for stopped and suspended instances. Structure is documented below. For more information, see the official documentation. + // +kubebuilder:validation:Optional + StandbyPolicy *StandbyPolicyParameters `json:"standbyPolicy,omitempty" tf:"standby_policy,omitempty"` + // Disks created on the instances that will be preserved on instance delete, update, etc. Structure is documented below. For more information see the official documentation. // +kubebuilder:validation:Optional StatefulDisk []StatefulDiskParameters `json:"statefulDisk,omitempty" tf:"stateful_disk,omitempty"` @@ -381,6 +406,14 @@ type InstanceGroupManagerParameters struct { // +kubebuilder:validation:Optional TargetSize *float64 `json:"targetSize,omitempty" tf:"target_size,omitempty"` + // The target number of stopped instances for this managed instance group. + // +kubebuilder:validation:Optional + TargetStoppedSize *float64 `json:"targetStoppedSize,omitempty" tf:"target_stopped_size,omitempty"` + + // The target number of suspended instances for this managed instance group. + // +kubebuilder:validation:Optional + TargetSuspendedSize *float64 `json:"targetSuspendedSize,omitempty" tf:"target_suspended_size,omitempty"` + // The update policy for this managed instance group. Structure is documented below. For more information, see the official documentation and API. 
// +kubebuilder:validation:Optional UpdatePolicy *UpdatePolicyParameters `json:"updatePolicy,omitempty" tf:"update_policy,omitempty"` @@ -479,6 +512,35 @@ type PerInstanceConfigsObservation struct { type PerInstanceConfigsParameters struct { } +type StandbyPolicyInitParameters struct { + + // - Specifies the number of seconds that the MIG should wait to suspend or stop a VM after that VM was created. The initial delay gives the initialization script the time to prepare your VM for a quick scale out. The value of initial delay must be between 0 and 3600 seconds. The default value is 0. + InitialDelaySec *float64 `json:"initialDelaySec,omitempty" tf:"initial_delay_sec,omitempty"` + + // - Defines how a MIG resumes or starts VMs from a standby pool when the group scales out. Valid options are: MANUAL, SCALE_OUT_POOL. If MANUAL(default), you have full control over which VMs are stopped and suspended in the MIG. If SCALE_OUT_POOL, the MIG uses the VMs from the standby pools to accelerate the scale out by resuming or starting them and then automatically replenishes the standby pool with new VMs to maintain the target sizes. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` +} + +type StandbyPolicyObservation struct { + + // - Specifies the number of seconds that the MIG should wait to suspend or stop a VM after that VM was created. The initial delay gives the initialization script the time to prepare your VM for a quick scale out. The value of initial delay must be between 0 and 3600 seconds. The default value is 0. + InitialDelaySec *float64 `json:"initialDelaySec,omitempty" tf:"initial_delay_sec,omitempty"` + + // - Defines how a MIG resumes or starts VMs from a standby pool when the group scales out. Valid options are: MANUAL, SCALE_OUT_POOL. If MANUAL(default), you have full control over which VMs are stopped and suspended in the MIG. 
If SCALE_OUT_POOL, the MIG uses the VMs from the standby pools to accelerate the scale out by resuming or starting them and then automatically replenishes the standby pool with new VMs to maintain the target sizes. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` +} + +type StandbyPolicyParameters struct { + + // - Specifies the number of seconds that the MIG should wait to suspend or stop a VM after that VM was created. The initial delay gives the initialization script the time to prepare your VM for a quick scale out. The value of initial delay must be between 0 and 3600 seconds. The default value is 0. + // +kubebuilder:validation:Optional + InitialDelaySec *float64 `json:"initialDelaySec,omitempty" tf:"initial_delay_sec,omitempty"` + + // - Defines how a MIG resumes or starts VMs from a standby pool when the group scales out. Valid options are: MANUAL, SCALE_OUT_POOL. If MANUAL(default), you have full control over which VMs are stopped and suspended in the MIG. If SCALE_OUT_POOL, the MIG uses the VMs from the standby pools to accelerate the scale out by resuming or starting them and then automatically replenishes the standby pool with new VMs to maintain the target sizes. + // +kubebuilder:validation:Optional + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` +} + type StatefulDiskInitParameters struct { // , A value that prescribes what should happen to the stateful disk when the VM instance is deleted. The available options are NEVER and ON_PERMANENT_INSTANCE_DELETION. NEVER - detach the disk when the VM is deleted, but do not delete the disk. ON_PERMANENT_INSTANCE_DELETION will delete the stateful disk when the VM is permanently deleted from the instance group. The default is NEVER. 
diff --git a/apis/compute/v1beta2/zz_instancetemplate_types.go b/apis/compute/v1beta2/zz_instancetemplate_types.go index 8b89a25f4..63879da9c 100755 --- a/apis/compute/v1beta2/zz_instancetemplate_types.go +++ b/apis/compute/v1beta2/zz_instancetemplate_types.go @@ -116,9 +116,18 @@ type InstanceTemplateAdvancedMachineFeaturesInitParameters struct { // Defines whether the instance should have nested virtualization enabled. Defaults to false. EnableNestedVirtualization *bool `json:"enableNestedVirtualization,omitempty" tf:"enable_nested_virtualization,omitempty"` + // Whether to enable UEFI networking for instance creation. + EnableUefiNetworking *bool `json:"enableUefiNetworking,omitempty" tf:"enable_uefi_networking,omitempty"` + + // The PMU is a hardware component within the CPU core that monitors how the processor runs code. Valid values for the level of PMU are STANDARD, ENHANCED, and ARCHITECTURAL. + PerformanceMonitoringUnit *string `json:"performanceMonitoringUnit,omitempty" tf:"performance_monitoring_unit,omitempty"` + // The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. ThreadsPerCore *float64 `json:"threadsPerCore,omitempty" tf:"threads_per_core,omitempty"` + // Turbo frequency mode to use for the instance. Supported modes are currently either ALL_CORE_MAX or unset (default). + TurboMode *string `json:"turboMode,omitempty" tf:"turbo_mode,omitempty"` + // The number of physical cores to expose to an instance. visible cores info (VC). VisibleCoreCount *float64 `json:"visibleCoreCount,omitempty" tf:"visible_core_count,omitempty"` } @@ -128,9 +137,18 @@ type InstanceTemplateAdvancedMachineFeaturesObservation struct { // Defines whether the instance should have nested virtualization enabled. Defaults to false. EnableNestedVirtualization *bool `json:"enableNestedVirtualization,omitempty" tf:"enable_nested_virtualization,omitempty"` + // Whether to enable UEFI networking for instance creation. 
+ EnableUefiNetworking *bool `json:"enableUefiNetworking,omitempty" tf:"enable_uefi_networking,omitempty"` + + // The PMU is a hardware component within the CPU core that monitors how the processor runs code. Valid values for the level of PMU are STANDARD, ENHANCED, and ARCHITECTURAL. + PerformanceMonitoringUnit *string `json:"performanceMonitoringUnit,omitempty" tf:"performance_monitoring_unit,omitempty"` + // The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. ThreadsPerCore *float64 `json:"threadsPerCore,omitempty" tf:"threads_per_core,omitempty"` + // Turbo frequency mode to use for the instance. Supported modes are currently either ALL_CORE_MAX or unset (default). + TurboMode *string `json:"turboMode,omitempty" tf:"turbo_mode,omitempty"` + // The number of physical cores to expose to an instance. visible cores info (VC). VisibleCoreCount *float64 `json:"visibleCoreCount,omitempty" tf:"visible_core_count,omitempty"` } @@ -141,10 +159,22 @@ type InstanceTemplateAdvancedMachineFeaturesParameters struct { // +kubebuilder:validation:Optional EnableNestedVirtualization *bool `json:"enableNestedVirtualization,omitempty" tf:"enable_nested_virtualization,omitempty"` + // Whether to enable UEFI networking for instance creation. + // +kubebuilder:validation:Optional + EnableUefiNetworking *bool `json:"enableUefiNetworking,omitempty" tf:"enable_uefi_networking,omitempty"` + + // The PMU is a hardware component within the CPU core that monitors how the processor runs code. Valid values for the level of PMU are STANDARD, ENHANCED, and ARCHITECTURAL. + // +kubebuilder:validation:Optional + PerformanceMonitoringUnit *string `json:"performanceMonitoringUnit,omitempty" tf:"performance_monitoring_unit,omitempty"` + // The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. 
// +kubebuilder:validation:Optional ThreadsPerCore *float64 `json:"threadsPerCore,omitempty" tf:"threads_per_core,omitempty"` + // Turbo frequency mode to use for the instance. Supported modes are currently either ALL_CORE_MAX or unset (default). + // +kubebuilder:validation:Optional + TurboMode *string `json:"turboMode,omitempty" tf:"turbo_mode,omitempty"` + // The number of physical cores to expose to an instance. visible cores info (VC). // +kubebuilder:validation:Optional VisibleCoreCount *float64 `json:"visibleCoreCount,omitempty" tf:"visible_core_count,omitempty"` @@ -152,7 +182,7 @@ type InstanceTemplateAdvancedMachineFeaturesParameters struct { type InstanceTemplateConfidentialInstanceConfigInitParameters struct { - // Defines the confidential computing technology the instance uses. SEV is an AMD feature. TDX is an Intel feature. One of the following values is required: SEV, SEV_SNP, TDX. on_host_maintenance can be set to MIGRATE if confidential_instance_type is set to SEV and min_cpu_platform is set to "AMD Milan". Otherwise, on_host_maintenance has to be set to TERMINATE or this will fail to create the VM. If SEV_SNP, currently min_cpu_platform has to be set to "AMD Milan" or this will fail to create the VM. TDX is only available in beta. + // Defines the confidential computing technology the instance uses. SEV is an AMD feature. TDX is an Intel feature. One of the following values is required: SEV, SEV_SNP, TDX. on_host_maintenance can be set to MIGRATE if confidential_instance_type is set to SEV and min_cpu_platform is set to "AMD Milan". Otherwise, on_host_maintenance has to be set to TERMINATE or this will fail to create the VM. If SEV_SNP, currently min_cpu_platform has to be set to "AMD Milan" or this will fail to create the VM. ConfidentialInstanceType *string `json:"confidentialInstanceType,omitempty" tf:"confidential_instance_type,omitempty"` // Defines whether the instance should have confidential compute enabled with AMD SEV. 
If enabled, on_host_maintenance can be set to MIGRATE if min_cpu_platform is set to "AMD Milan". Otherwise, on_host_maintenance has to be set to TERMINATE or this will fail to create the VM. @@ -161,7 +191,7 @@ type InstanceTemplateConfidentialInstanceConfigInitParameters struct { type InstanceTemplateConfidentialInstanceConfigObservation struct { - // Defines the confidential computing technology the instance uses. SEV is an AMD feature. TDX is an Intel feature. One of the following values is required: SEV, SEV_SNP, TDX. on_host_maintenance can be set to MIGRATE if confidential_instance_type is set to SEV and min_cpu_platform is set to "AMD Milan". Otherwise, on_host_maintenance has to be set to TERMINATE or this will fail to create the VM. If SEV_SNP, currently min_cpu_platform has to be set to "AMD Milan" or this will fail to create the VM. TDX is only available in beta. + // Defines the confidential computing technology the instance uses. SEV is an AMD feature. TDX is an Intel feature. One of the following values is required: SEV, SEV_SNP, TDX. on_host_maintenance can be set to MIGRATE if confidential_instance_type is set to SEV and min_cpu_platform is set to "AMD Milan". Otherwise, on_host_maintenance has to be set to TERMINATE or this will fail to create the VM. If SEV_SNP, currently min_cpu_platform has to be set to "AMD Milan" or this will fail to create the VM. ConfidentialInstanceType *string `json:"confidentialInstanceType,omitempty" tf:"confidential_instance_type,omitempty"` // Defines whether the instance should have confidential compute enabled with AMD SEV. If enabled, on_host_maintenance can be set to MIGRATE if min_cpu_platform is set to "AMD Milan". Otherwise, on_host_maintenance has to be set to TERMINATE or this will fail to create the VM. 
@@ -170,7 +200,7 @@ type InstanceTemplateConfidentialInstanceConfigObservation struct { type InstanceTemplateConfidentialInstanceConfigParameters struct { - // Defines the confidential computing technology the instance uses. SEV is an AMD feature. TDX is an Intel feature. One of the following values is required: SEV, SEV_SNP, TDX. on_host_maintenance can be set to MIGRATE if confidential_instance_type is set to SEV and min_cpu_platform is set to "AMD Milan". Otherwise, on_host_maintenance has to be set to TERMINATE or this will fail to create the VM. If SEV_SNP, currently min_cpu_platform has to be set to "AMD Milan" or this will fail to create the VM. TDX is only available in beta. + // Defines the confidential computing technology the instance uses. SEV is an AMD feature. TDX is an Intel feature. One of the following values is required: SEV, SEV_SNP, TDX. on_host_maintenance can be set to MIGRATE if confidential_instance_type is set to SEV and min_cpu_platform is set to "AMD Milan". Otherwise, on_host_maintenance has to be set to TERMINATE or this will fail to create the VM. If SEV_SNP, currently min_cpu_platform has to be set to "AMD Milan" or this will fail to create the VM. // +kubebuilder:validation:Optional ConfidentialInstanceType *string `json:"confidentialInstanceType,omitempty" tf:"confidential_instance_type,omitempty"` @@ -231,6 +261,8 @@ type InstanceTemplateDiskInitParameters struct { // Extreme persistent disk documentation. ProvisionedIops *float64 `json:"provisionedIops,omitempty" tf:"provisioned_iops,omitempty"` + ProvisionedThroughput *float64 `json:"provisionedThroughput,omitempty" tf:"provisioned_throughput,omitempty"` + // A set of key/value resource manager tag pairs to bind to this disk. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. 
// +mapType=granular ResourceManagerTags map[string]*string `json:"resourceManagerTags,omitempty" tf:"resource_manager_tags,omitempty"` @@ -342,6 +374,8 @@ type InstanceTemplateDiskObservation struct { // Extreme persistent disk documentation. ProvisionedIops *float64 `json:"provisionedIops,omitempty" tf:"provisioned_iops,omitempty"` + ProvisionedThroughput *float64 `json:"provisionedThroughput,omitempty" tf:"provisioned_throughput,omitempty"` + // A set of key/value resource manager tag pairs to bind to this disk. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. // +mapType=granular ResourceManagerTags map[string]*string `json:"resourceManagerTags,omitempty" tf:"resource_manager_tags,omitempty"` @@ -445,6 +479,9 @@ type InstanceTemplateDiskParameters struct { // +kubebuilder:validation:Optional ProvisionedIops *float64 `json:"provisionedIops,omitempty" tf:"provisioned_iops,omitempty"` + // +kubebuilder:validation:Optional + ProvisionedThroughput *float64 `json:"provisionedThroughput,omitempty" tf:"provisioned_throughput,omitempty"` + // A set of key/value resource manager tag pairs to bind to this disk. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. // +kubebuilder:validation:Optional // +mapType=granular @@ -568,6 +605,9 @@ type InstanceTemplateInitParameters struct { // created from this template. InstanceDescription *string `json:"instanceDescription,omitempty" tf:"instance_description,omitempty"` + // Action to be taken when a customer's encryption key is revoked. Supports STOP and NONE, with NONE being the default. + KeyRevocationActionType *string `json:"keyRevocationActionType,omitempty" tf:"key_revocation_action_type,omitempty"` + // A set of key/value label pairs to assign to instances // created from this template. 
// +mapType=granular @@ -594,7 +634,9 @@ type InstanceTemplateInitParameters struct { Name *string `json:"name,omitempty" tf:"name,omitempty"` // Creates a unique name beginning with the specified - // prefix. Conflicts with name. + // prefix. Conflicts with name. Max length is 54 characters. + // Prefixes with lengths longer than 37 characters will use a shortened + // UUID that will be more prone to collisions. NamePrefix *string `json:"namePrefix,omitempty" tf:"name_prefix,omitempty"` // Networks to attach to instances created from @@ -765,7 +807,7 @@ type InstanceTemplateNetworkInterfaceInitParameters struct { // Access configurations, i.e. IPs via which this // instance can be accessed via the Internet.g. via tunnel or because it is running on another cloud instance - // on that network). This block can be repeated multiple times. Structure documented below. + // on that network). This block can be specified once per network_interface. Structure documented below. AccessConfig []InstanceTemplateNetworkInterfaceAccessConfigInitParameters `json:"accessConfig,omitempty" tf:"access_config,omitempty"` // An @@ -800,7 +842,7 @@ type InstanceTemplateNetworkInterfaceInitParameters struct { // +kubebuilder:validation:Optional NetworkSelector *v1.Selector `json:"networkSelector,omitempty" tf:"-"` - // The type of vNIC to be used on this interface. Possible values: GVNIC, VIRTIO_NET. + // The type of vNIC to be used on this interface. Possible values: GVNIC, VIRTIO_NET. In the beta provider the additional values of MRDMA and IRDMA are supported. NicType *string `json:"nicType,omitempty" tf:"nic_type,omitempty"` // The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It will be empty if not specified. @@ -832,7 +874,7 @@ type InstanceTemplateNetworkInterfaceObservation struct { // Access configurations, i.e. IPs via which this // instance can be accessed via the Internet.g. 
via tunnel or because it is running on another cloud instance - // on that network). This block can be repeated multiple times. Structure documented below. + // on that network). This block can be specified once per network_interface. Structure documented below. AccessConfig []InstanceTemplateNetworkInterfaceAccessConfigObservation `json:"accessConfig,omitempty" tf:"access_config,omitempty"` // An @@ -863,7 +905,7 @@ type InstanceTemplateNetworkInterfaceObservation struct { // empty, the address will be automatically assigned. NetworkIP *string `json:"networkIp,omitempty" tf:"network_ip,omitempty"` - // The type of vNIC to be used on this interface. Possible values: GVNIC, VIRTIO_NET. + // The type of vNIC to be used on this interface. Possible values: GVNIC, VIRTIO_NET. In the beta provider the additional values of MRDMA and IRDMA are supported. NicType *string `json:"nicType,omitempty" tf:"nic_type,omitempty"` // The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It will be empty if not specified. @@ -886,7 +928,7 @@ type InstanceTemplateNetworkInterfaceParameters struct { // Access configurations, i.e. IPs via which this // instance can be accessed via the Internet.g. via tunnel or because it is running on another cloud instance - // on that network). This block can be repeated multiple times. Structure documented below. + // on that network). This block can be specified once per network_interface. Structure documented below. // +kubebuilder:validation:Optional AccessConfig []InstanceTemplateNetworkInterfaceAccessConfigParameters `json:"accessConfig,omitempty" tf:"access_config,omitempty"` @@ -928,7 +970,7 @@ type InstanceTemplateNetworkInterfaceParameters struct { // +kubebuilder:validation:Optional NetworkSelector *v1.Selector `json:"networkSelector,omitempty" tf:"-"` - // The type of vNIC to be used on this interface. Possible values: GVNIC, VIRTIO_NET. 
+ // The type of vNIC to be used on this interface. Possible values: GVNIC, VIRTIO_NET. In the beta provider the additional values of MRDMA and IRDMA are supported. // +kubebuilder:validation:Optional NicType *string `json:"nicType,omitempty" tf:"nic_type,omitempty"` @@ -992,6 +1034,9 @@ type InstanceTemplateObservation struct { // Enable Confidential Mode on this VM. Structure is documented below ConfidentialInstanceConfig *InstanceTemplateConfidentialInstanceConfigObservation `json:"confidentialInstanceConfig,omitempty" tf:"confidential_instance_config,omitempty"` + // Creation timestamp in RFC3339 text format. + CreationTimestamp *string `json:"creationTimestamp,omitempty" tf:"creation_timestamp,omitempty"` + // A brief description of this resource. Description *string `json:"description,omitempty" tf:"description,omitempty"` @@ -1013,6 +1058,9 @@ type InstanceTemplateObservation struct { // created from this template. InstanceDescription *string `json:"instanceDescription,omitempty" tf:"instance_description,omitempty"` + // Action to be taken when a customer's encryption key is revoked. Supports STOP and NONE, with NONE being the default. + KeyRevocationActionType *string `json:"keyRevocationActionType,omitempty" tf:"key_revocation_action_type,omitempty"` + // A set of key/value label pairs to assign to instances // created from this template. // +mapType=granular @@ -1042,7 +1090,9 @@ type InstanceTemplateObservation struct { Name *string `json:"name,omitempty" tf:"name,omitempty"` // Creates a unique name beginning with the specified - // prefix. Conflicts with name. + // prefix. Conflicts with name. Max length is 54 characters. + // Prefixes with lengths longer than 37 characters will use a shortened + // UUID that will be more prone to collisions. 
NamePrefix *string `json:"namePrefix,omitempty" tf:"name_prefix,omitempty"` // Networks to attach to instances created from @@ -1141,6 +1191,10 @@ type InstanceTemplateParameters struct { // +kubebuilder:validation:Optional InstanceDescription *string `json:"instanceDescription,omitempty" tf:"instance_description,omitempty"` + // Action to be taken when a customer's encryption key is revoked. Supports STOP and NONE, with NONE being the default. + // +kubebuilder:validation:Optional + KeyRevocationActionType *string `json:"keyRevocationActionType,omitempty" tf:"key_revocation_action_type,omitempty"` + // A set of key/value label pairs to assign to instances // created from this template. // +kubebuilder:validation:Optional @@ -1173,7 +1227,9 @@ type InstanceTemplateParameters struct { Name *string `json:"name,omitempty" tf:"name,omitempty"` // Creates a unique name beginning with the specified - // prefix. Conflicts with name. + // prefix. Conflicts with name. Max length is 54 characters. + // Prefixes with lengths longer than 37 characters will use a shortened + // UUID that will be more prone to collisions. // +kubebuilder:validation:Optional NamePrefix *string `json:"namePrefix,omitempty" tf:"name_prefix,omitempty"` @@ -1304,6 +1360,9 @@ type InstanceTemplateSchedulingInitParameters struct { // terminated by a user). This defaults to true. AutomaticRestart *bool `json:"automaticRestart,omitempty" tf:"automatic_restart,omitempty"` + // Specifies the availability domain to place the instance in. The value must be a number between 1 and the number of availability domains specified in the spread placement policy attached to the instance. + AvailabilityDomain *float64 `json:"availabilityDomain,omitempty" tf:"availability_domain,omitempty"` + // Describe the type of termination action for SPOT VM. Can be STOP or DELETE. 
Read more on here InstanceTerminationAction *string `json:"instanceTerminationAction,omitempty" tf:"instance_termination_action,omitempty"` @@ -1482,6 +1541,9 @@ type InstanceTemplateSchedulingObservation struct { // terminated by a user). This defaults to true. AutomaticRestart *bool `json:"automaticRestart,omitempty" tf:"automatic_restart,omitempty"` + // Specifies the availability domain to place the instance in. The value must be a number between 1 and the number of availability domains specified in the spread placement policy attached to the instance. + AvailabilityDomain *float64 `json:"availabilityDomain,omitempty" tf:"availability_domain,omitempty"` + // Describe the type of termination action for SPOT VM. Can be STOP or DELETE. Read more on here InstanceTerminationAction *string `json:"instanceTerminationAction,omitempty" tf:"instance_termination_action,omitempty"` @@ -1547,6 +1609,10 @@ type InstanceTemplateSchedulingParameters struct { // +kubebuilder:validation:Optional AutomaticRestart *bool `json:"automaticRestart,omitempty" tf:"automatic_restart,omitempty"` + // Specifies the availability domain to place the instance in. The value must be a number between 1 and the number of availability domains specified in the spread placement policy attached to the instance. + // +kubebuilder:validation:Optional + AvailabilityDomain *float64 `json:"availabilityDomain,omitempty" tf:"availability_domain,omitempty"` + // Describe the type of termination action for SPOT VM. Can be STOP or DELETE. 
Read more on here // +kubebuilder:validation:Optional InstanceTerminationAction *string `json:"instanceTerminationAction,omitempty" tf:"instance_termination_action,omitempty"` diff --git a/apis/compute/v1beta2/zz_managedsslcertificate_types.go b/apis/compute/v1beta2/zz_managedsslcertificate_types.go index a5abd6bc6..293e33c10 100755 --- a/apis/compute/v1beta2/zz_managedsslcertificate_types.go +++ b/apis/compute/v1beta2/zz_managedsslcertificate_types.go @@ -37,9 +37,6 @@ type ManagedParameters struct { type ManagedSSLCertificateInitParameters struct { - // The unique identifier for the resource. - CertificateID *float64 `json:"certificateId,omitempty" tf:"certificate_id,omitempty"` - // An optional description of this resource. Description *string `json:"description,omitempty" tf:"description,omitempty"` @@ -100,10 +97,6 @@ type ManagedSSLCertificateObservation struct { type ManagedSSLCertificateParameters struct { - // The unique identifier for the resource. - // +kubebuilder:validation:Optional - CertificateID *float64 `json:"certificateId,omitempty" tf:"certificate_id,omitempty"` - // An optional description of this resource. // +kubebuilder:validation:Optional Description *string `json:"description,omitempty" tf:"description,omitempty"` diff --git a/apis/compute/v1beta2/zz_nodetemplate_types.go b/apis/compute/v1beta2/zz_nodetemplate_types.go index bfec8be8f..6187d3427 100755 --- a/apis/compute/v1beta2/zz_nodetemplate_types.go +++ b/apis/compute/v1beta2/zz_nodetemplate_types.go @@ -13,8 +13,87 @@ import ( v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" ) +type AcceleratorsInitParameters struct { + + // The number of the guest accelerator cards exposed to this + // node template. + AcceleratorCount *float64 `json:"acceleratorCount,omitempty" tf:"accelerator_count,omitempty"` + + // Full or partial URL of the accelerator type resource to expose + // to this node template. 
+ AcceleratorType *string `json:"acceleratorType,omitempty" tf:"accelerator_type,omitempty"` +} + +type AcceleratorsObservation struct { + + // The number of the guest accelerator cards exposed to this + // node template. + AcceleratorCount *float64 `json:"acceleratorCount,omitempty" tf:"accelerator_count,omitempty"` + + // Full or partial URL of the accelerator type resource to expose + // to this node template. + AcceleratorType *string `json:"acceleratorType,omitempty" tf:"accelerator_type,omitempty"` +} + +type AcceleratorsParameters struct { + + // The number of the guest accelerator cards exposed to this + // node template. + // +kubebuilder:validation:Optional + AcceleratorCount *float64 `json:"acceleratorCount,omitempty" tf:"accelerator_count,omitempty"` + + // Full or partial URL of the accelerator type resource to expose + // to this node template. + // +kubebuilder:validation:Optional + AcceleratorType *string `json:"acceleratorType,omitempty" tf:"accelerator_type,omitempty"` +} + +type DisksInitParameters struct { + + // Specifies the number of such disks. + DiskCount *float64 `json:"diskCount,omitempty" tf:"disk_count,omitempty"` + + // Specifies the size of the disk in base-2 GB. + DiskSizeGb *float64 `json:"diskSizeGb,omitempty" tf:"disk_size_gb,omitempty"` + + // Specifies the desired disk type on the node. This disk type must be a local storage type (e.g.: local-ssd). Note that for nodeTemplates, this should be the name of the disk type and not its URL. + DiskType *string `json:"diskType,omitempty" tf:"disk_type,omitempty"` +} + +type DisksObservation struct { + + // Specifies the number of such disks. + DiskCount *float64 `json:"diskCount,omitempty" tf:"disk_count,omitempty"` + + // Specifies the size of the disk in base-2 GB. + DiskSizeGb *float64 `json:"diskSizeGb,omitempty" tf:"disk_size_gb,omitempty"` + + // Specifies the desired disk type on the node. This disk type must be a local storage type (e.g.: local-ssd). 
Note that for nodeTemplates, this should be the name of the disk type and not its URL. + DiskType *string `json:"diskType,omitempty" tf:"disk_type,omitempty"` +} + +type DisksParameters struct { + + // Specifies the number of such disks. + // +kubebuilder:validation:Optional + DiskCount *float64 `json:"diskCount,omitempty" tf:"disk_count,omitempty"` + + // Specifies the size of the disk in base-2 GB. + // +kubebuilder:validation:Optional + DiskSizeGb *float64 `json:"diskSizeGb,omitempty" tf:"disk_size_gb,omitempty"` + + // Specifies the desired disk type on the node. This disk type must be a local storage type (e.g.: local-ssd). Note that for nodeTemplates, this should be the name of the disk type and not its URL. + // +kubebuilder:validation:Optional + DiskType *string `json:"diskType,omitempty" tf:"disk_type,omitempty"` +} + type NodeTemplateInitParameters struct { + // List of the type and count of accelerator cards attached to the + // node template + // Structure is documented below. + Accelerators []AcceleratorsInitParameters `json:"accelerators,omitempty" tf:"accelerators,omitempty"` + // CPU overcommit. // Default value is NONE. // Possible values are: ENABLED, NONE. @@ -23,6 +102,11 @@ type NodeTemplateInitParameters struct { // An optional textual description of the resource. Description *string `json:"description,omitempty" tf:"description,omitempty"` + // List of the type, size and count of disks attached to the + // node template + // Structure is documented below. + Disks []DisksInitParameters `json:"disks,omitempty" tf:"disks,omitempty"` + // Labels to use for node affinity, which will be used in // instance scheduling. // +mapType=granular @@ -51,6 +135,11 @@ type NodeTemplateInitParameters struct { type NodeTemplateObservation struct { + // List of the type and count of accelerator cards attached to the + // node template + // Structure is documented below. 
+ Accelerators []AcceleratorsObservation `json:"accelerators,omitempty" tf:"accelerators,omitempty"` + // CPU overcommit. // Default value is NONE. // Possible values are: ENABLED, NONE. @@ -62,6 +151,11 @@ type NodeTemplateObservation struct { // An optional textual description of the resource. Description *string `json:"description,omitempty" tf:"description,omitempty"` + // List of the type, size and count of disks attached to the + // node template + // Structure is documented below. + Disks []DisksObservation `json:"disks,omitempty" tf:"disks,omitempty"` + // an identifier for the resource with format projects/{{project}}/regions/{{region}}/nodeTemplates/{{name}} ID *string `json:"id,omitempty" tf:"id,omitempty"` @@ -100,6 +194,12 @@ type NodeTemplateObservation struct { type NodeTemplateParameters struct { + // List of the type and count of accelerator cards attached to the + // node template + // Structure is documented below. + // +kubebuilder:validation:Optional + Accelerators []AcceleratorsParameters `json:"accelerators,omitempty" tf:"accelerators,omitempty"` + // CPU overcommit. // Default value is NONE. // Possible values are: ENABLED, NONE. @@ -110,6 +210,12 @@ type NodeTemplateParameters struct { // +kubebuilder:validation:Optional Description *string `json:"description,omitempty" tf:"description,omitempty"` + // List of the type, size and count of disks attached to the + // node template + // Structure is documented below. + // +kubebuilder:validation:Optional + Disks []DisksParameters `json:"disks,omitempty" tf:"disks,omitempty"` + // Labels to use for node affinity, which will be used in // instance scheduling. 
// +kubebuilder:validation:Optional diff --git a/apis/compute/v1beta2/zz_regionautoscaler_types.go b/apis/compute/v1beta2/zz_regionautoscaler_types.go index d33c8084e..db73b91bd 100755 --- a/apis/compute/v1beta2/zz_regionautoscaler_types.go +++ b/apis/compute/v1beta2/zz_regionautoscaler_types.go @@ -245,7 +245,7 @@ type AutoscalingPolicyMetricParameters struct { type AutoscalingPolicyScaleInControlInitParameters struct { - // A nested object resource + // A nested object resource. // Structure is documented below. MaxScaledInReplicas *ScaleInControlMaxScaledInReplicasInitParameters `json:"maxScaledInReplicas,omitempty" tf:"max_scaled_in_replicas,omitempty"` @@ -256,7 +256,7 @@ type AutoscalingPolicyScaleInControlInitParameters struct { type AutoscalingPolicyScaleInControlObservation struct { - // A nested object resource + // A nested object resource. // Structure is documented below. MaxScaledInReplicas *ScaleInControlMaxScaledInReplicasObservation `json:"maxScaledInReplicas,omitempty" tf:"max_scaled_in_replicas,omitempty"` @@ -267,7 +267,7 @@ type AutoscalingPolicyScaleInControlObservation struct { type AutoscalingPolicyScaleInControlParameters struct { - // A nested object resource + // A nested object resource. // Structure is documented below. // +kubebuilder:validation:Optional MaxScaledInReplicas *ScaleInControlMaxScaledInReplicasParameters `json:"maxScaledInReplicas,omitempty" tf:"max_scaled_in_replicas,omitempty"` diff --git a/apis/compute/v1beta2/zz_regionbackendservice_types.go b/apis/compute/v1beta2/zz_regionbackendservice_types.go index a1862f02c..5cae20111 100755 --- a/apis/compute/v1beta2/zz_regionbackendservice_types.go +++ b/apis/compute/v1beta2/zz_regionbackendservice_types.go @@ -15,51 +15,33 @@ import ( type ConsistentHashHTTPCookieInitParameters struct { - // Name of the resource. Provided by the client when the resource is - // created. The name must be 1-63 characters long, and comply with - // RFC1035. 
Specifically, the name must be 1-63 characters long and match - // the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the - // first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the last - // character, which cannot be a dash. + // Name of the cookie. Name *string `json:"name,omitempty" tf:"name,omitempty"` // Path to set for the cookie. Path *string `json:"path,omitempty" tf:"path,omitempty"` - // The TTL (in seconds) for which to cache responses with the corresponding status code. The maximum allowed value is 1800s - // (30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. + // Lifetime of the cookie. + // Structure is documented below. TTL *HTTPCookieTTLInitParameters `json:"ttl,omitempty" tf:"ttl,omitempty"` } type ConsistentHashHTTPCookieObservation struct { - // Name of the resource. Provided by the client when the resource is - // created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and match - // the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the - // first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the last - // character, which cannot be a dash. + // Name of the cookie. Name *string `json:"name,omitempty" tf:"name,omitempty"` // Path to set for the cookie. Path *string `json:"path,omitempty" tf:"path,omitempty"` - // The TTL (in seconds) for which to cache responses with the corresponding status code. The maximum allowed value is 1800s - // (30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. + // Lifetime of the cookie. + // Structure is documented below. 
TTL *HTTPCookieTTLObservation `json:"ttl,omitempty" tf:"ttl,omitempty"` } type ConsistentHashHTTPCookieParameters struct { - // Name of the resource. Provided by the client when the resource is - // created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and match - // the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the - // first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the last - // character, which cannot be a dash. + // Name of the cookie. // +kubebuilder:validation:Optional Name *string `json:"name,omitempty" tf:"name,omitempty"` @@ -67,8 +49,8 @@ type ConsistentHashHTTPCookieParameters struct { // +kubebuilder:validation:Optional Path *string `json:"path,omitempty" tf:"path,omitempty"` - // The TTL (in seconds) for which to cache responses with the corresponding status code. The maximum allowed value is 1800s - // (30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. + // Lifetime of the cookie. + // Structure is documented below. // +kubebuilder:validation:Optional TTL *HTTPCookieTTLParameters `json:"ttl,omitempty" tf:"ttl,omitempty"` } @@ -291,8 +273,7 @@ type RegionBackendServiceBackendInitParameters struct { // Specifies the balancing mode for this backend. // See the Backend Services Overview // for an explanation of load balancing modes. - // From version 6.0.0 default value will be UTILIZATION to match default GCP value. - // Default value is CONNECTION. + // Default value is UTILIZATION. // Possible values are: UTILIZATION, RATE, CONNECTION. BalancingMode *string `json:"balancingMode,omitempty" tf:"balancing_mode,omitempty"` @@ -401,8 +382,7 @@ type RegionBackendServiceBackendObservation struct { // Specifies the balancing mode for this backend. // See the Backend Services Overview // for an explanation of load balancing modes. 
- // From version 6.0.0 default value will be UTILIZATION to match default GCP value. - // Default value is CONNECTION. + // Default value is UTILIZATION. // Possible values are: UTILIZATION, RATE, CONNECTION. BalancingMode *string `json:"balancingMode,omitempty" tf:"balancing_mode,omitempty"` @@ -501,8 +481,7 @@ type RegionBackendServiceBackendParameters struct { // Specifies the balancing mode for this backend. // See the Backend Services Overview // for an explanation of load balancing modes. - // From version 6.0.0 default value will be UTILIZATION to match default GCP value. - // Default value is CONNECTION. + // Default value is UTILIZATION. // Possible values are: UTILIZATION, RATE, CONNECTION. // +kubebuilder:validation:Optional BalancingMode *string `json:"balancingMode,omitempty" tf:"balancing_mode,omitempty"` @@ -1046,30 +1025,40 @@ type RegionBackendServiceConsistentHashParameters struct { type RegionBackendServiceIapInitParameters struct { + // Whether the serving infrastructure will authenticate and authorize all incoming requests. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + // OAuth2 Client ID for IAP Oauth2ClientID *string `json:"oauth2ClientId,omitempty" tf:"oauth2_client_id,omitempty"` // OAuth2 Client Secret for IAP // Note: This property is sensitive and will not be displayed in the plan. - Oauth2ClientSecretSecretRef v1.SecretKeySelector `json:"oauth2ClientSecretSecretRef" tf:"-"` + Oauth2ClientSecretSecretRef *v1.SecretKeySelector `json:"oauth2ClientSecretSecretRef,omitempty" tf:"-"` } type RegionBackendServiceIapObservation struct { + // Whether the serving infrastructure will authenticate and authorize all incoming requests. 
+ Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + // OAuth2 Client ID for IAP Oauth2ClientID *string `json:"oauth2ClientId,omitempty" tf:"oauth2_client_id,omitempty"` } type RegionBackendServiceIapParameters struct { + // Whether the serving infrastructure will authenticate and authorize all incoming requests. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` + // OAuth2 Client ID for IAP // +kubebuilder:validation:Optional - Oauth2ClientID *string `json:"oauth2ClientId" tf:"oauth2_client_id,omitempty"` + Oauth2ClientID *string `json:"oauth2ClientId,omitempty" tf:"oauth2_client_id,omitempty"` // OAuth2 Client Secret for IAP // Note: This property is sensitive and will not be displayed in the plan. // +kubebuilder:validation:Optional - Oauth2ClientSecretSecretRef v1.SecretKeySelector `json:"oauth2ClientSecretSecretRef" tf:"-"` + Oauth2ClientSecretSecretRef *v1.SecretKeySelector `json:"oauth2ClientSecretSecretRef,omitempty" tf:"-"` } type RegionBackendServiceInitParameters struct { @@ -1097,7 +1086,6 @@ type RegionBackendServiceInitParameters struct { // Time for which instance will be drained (not accept new // connections, but still work to finish started). - // From version 6.0.0 ConnectionDrainingTimeoutSec default value will be 300 to match default GCP value. ConnectionDrainingTimeoutSec *float64 `json:"connectionDrainingTimeoutSec,omitempty" tf:"connection_draining_timeout_sec,omitempty"` // Consistent Hash-based load balancing can be used to provide soft session @@ -1137,6 +1125,10 @@ type RegionBackendServiceInitParameters struct { // +kubebuilder:validation:Optional HealthChecksSelector *v1.Selector `json:"healthChecksSelector,omitempty" tf:"-"` + // Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). + // Possible values are: IPV4_ONLY, PREFER_IPV6, IPV6_ONLY. 
+ IPAddressSelectionPolicy *string `json:"ipAddressSelectionPolicy,omitempty" tf:"ip_address_selection_policy,omitempty"` + // Settings for enabling Cloud Identity Aware Proxy // Structure is documented below. Iap *RegionBackendServiceIapInitParameters `json:"iap,omitempty" tf:"iap,omitempty"` @@ -1160,8 +1152,6 @@ type RegionBackendServiceInitParameters struct { // Settings controlling eviction of unhealthy hosts from the load balancing pool. // This field is applicable only when the load_balancing_scheme is set // to INTERNAL_MANAGED and the protocol is set to HTTP, HTTPS, or HTTP2. - // From version 6.0. - // Default values are enforce by GCP without providing them. // Structure is documented below. OutlierDetection *RegionBackendServiceOutlierDetectionInitParameters `json:"outlierDetection,omitempty" tf:"outlier_detection,omitempty"` @@ -1183,9 +1173,13 @@ type RegionBackendServiceInitParameters struct { // Type of session affinity to use. The default is NONE. Session affinity is // not applicable if the protocol is UDP. - // Possible values are: NONE, CLIENT_IP, CLIENT_IP_PORT_PROTO, CLIENT_IP_PROTO, GENERATED_COOKIE, HEADER_FIELD, HTTP_COOKIE, CLIENT_IP_NO_DESTINATION. + // Possible values are: NONE, CLIENT_IP, CLIENT_IP_PORT_PROTO, CLIENT_IP_PROTO, GENERATED_COOKIE, HEADER_FIELD, HTTP_COOKIE, CLIENT_IP_NO_DESTINATION, STRONG_COOKIE_AFFINITY. SessionAffinity *string `json:"sessionAffinity,omitempty" tf:"session_affinity,omitempty"` + // Describes the HTTP cookie used for stateful session affinity. This field is applicable and required if the sessionAffinity is set to STRONG_COOKIE_AFFINITY. + // Structure is documented below. + StrongSessionAffinityCookie *RegionBackendServiceStrongSessionAffinityCookieInitParameters `json:"strongSessionAffinityCookie,omitempty" tf:"strong_session_affinity_cookie,omitempty"` + // The backend service timeout has a different meaning depending on the type of load balancer. // For more information see, Backend service settings. 
// The default is 30 seconds. @@ -1256,7 +1250,6 @@ type RegionBackendServiceObservation struct { // Time for which instance will be drained (not accept new // connections, but still work to finish started). - // From version 6.0.0 ConnectionDrainingTimeoutSec default value will be 300 to match default GCP value. ConnectionDrainingTimeoutSec *float64 `json:"connectionDrainingTimeoutSec,omitempty" tf:"connection_draining_timeout_sec,omitempty"` // Consistent Hash-based load balancing can be used to provide soft session @@ -1299,6 +1292,10 @@ type RegionBackendServiceObservation struct { // an identifier for the resource with format projects/{{project}}/regions/{{region}}/backendServices/{{name}} ID *string `json:"id,omitempty" tf:"id,omitempty"` + // Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). + // Possible values are: IPV4_ONLY, PREFER_IPV6, IPV6_ONLY. + IPAddressSelectionPolicy *string `json:"ipAddressSelectionPolicy,omitempty" tf:"ip_address_selection_policy,omitempty"` + // Settings for enabling Cloud Identity Aware Proxy // Structure is documented below. Iap *RegionBackendServiceIapObservation `json:"iap,omitempty" tf:"iap,omitempty"` @@ -1322,8 +1319,6 @@ type RegionBackendServiceObservation struct { // Settings controlling eviction of unhealthy hosts from the load balancing pool. // This field is applicable only when the load_balancing_scheme is set // to INTERNAL_MANAGED and the protocol is set to HTTP, HTTPS, or HTTP2. - // From version 6.0. - // Default values are enforce by GCP without providing them. // Structure is documented below. OutlierDetection *RegionBackendServiceOutlierDetectionObservation `json:"outlierDetection,omitempty" tf:"outlier_detection,omitempty"` @@ -1352,9 +1347,13 @@ type RegionBackendServiceObservation struct { // Type of session affinity to use. The default is NONE. Session affinity is // not applicable if the protocol is UDP. 
- // Possible values are: NONE, CLIENT_IP, CLIENT_IP_PORT_PROTO, CLIENT_IP_PROTO, GENERATED_COOKIE, HEADER_FIELD, HTTP_COOKIE, CLIENT_IP_NO_DESTINATION. + // Possible values are: NONE, CLIENT_IP, CLIENT_IP_PORT_PROTO, CLIENT_IP_PROTO, GENERATED_COOKIE, HEADER_FIELD, HTTP_COOKIE, CLIENT_IP_NO_DESTINATION, STRONG_COOKIE_AFFINITY. SessionAffinity *string `json:"sessionAffinity,omitempty" tf:"session_affinity,omitempty"` + // Describes the HTTP cookie used for stateful session affinity. This field is applicable and required if the sessionAffinity is set to STRONG_COOKIE_AFFINITY. + // Structure is documented below. + StrongSessionAffinityCookie *RegionBackendServiceStrongSessionAffinityCookieObservation `json:"strongSessionAffinityCookie,omitempty" tf:"strong_session_affinity_cookie,omitempty"` + // The backend service timeout has a different meaning depending on the type of load balancer. // For more information see, Backend service settings. // The default is 30 seconds. @@ -1594,7 +1593,6 @@ type RegionBackendServiceParameters struct { // Time for which instance will be drained (not accept new // connections, but still work to finish started). - // From version 6.0.0 ConnectionDrainingTimeoutSec default value will be 300 to match default GCP value. // +kubebuilder:validation:Optional ConnectionDrainingTimeoutSec *float64 `json:"connectionDrainingTimeoutSec,omitempty" tf:"connection_draining_timeout_sec,omitempty"` @@ -1640,6 +1638,11 @@ type RegionBackendServiceParameters struct { // +kubebuilder:validation:Optional HealthChecksSelector *v1.Selector `json:"healthChecksSelector,omitempty" tf:"-"` + // Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). + // Possible values are: IPV4_ONLY, PREFER_IPV6, IPV6_ONLY. 
+ // +kubebuilder:validation:Optional + IPAddressSelectionPolicy *string `json:"ipAddressSelectionPolicy,omitempty" tf:"ip_address_selection_policy,omitempty"` + // Settings for enabling Cloud Identity Aware Proxy // Structure is documented below. // +kubebuilder:validation:Optional @@ -1668,8 +1671,6 @@ type RegionBackendServiceParameters struct { // Settings controlling eviction of unhealthy hosts from the load balancing pool. // This field is applicable only when the load_balancing_scheme is set // to INTERNAL_MANAGED and the protocol is set to HTTP, HTTPS, or HTTP2. - // From version 6.0. - // Default values are enforce by GCP without providing them. // Structure is documented below. // +kubebuilder:validation:Optional OutlierDetection *RegionBackendServiceOutlierDetectionParameters `json:"outlierDetection,omitempty" tf:"outlier_detection,omitempty"` @@ -1700,10 +1701,15 @@ type RegionBackendServiceParameters struct { // Type of session affinity to use. The default is NONE. Session affinity is // not applicable if the protocol is UDP. - // Possible values are: NONE, CLIENT_IP, CLIENT_IP_PORT_PROTO, CLIENT_IP_PROTO, GENERATED_COOKIE, HEADER_FIELD, HTTP_COOKIE, CLIENT_IP_NO_DESTINATION. + // Possible values are: NONE, CLIENT_IP, CLIENT_IP_PORT_PROTO, CLIENT_IP_PROTO, GENERATED_COOKIE, HEADER_FIELD, HTTP_COOKIE, CLIENT_IP_NO_DESTINATION, STRONG_COOKIE_AFFINITY. // +kubebuilder:validation:Optional SessionAffinity *string `json:"sessionAffinity,omitempty" tf:"session_affinity,omitempty"` + // Describes the HTTP cookie used for stateful session affinity. This field is applicable and required if the sessionAffinity is set to STRONG_COOKIE_AFFINITY. + // Structure is documented below. 
+ // +kubebuilder:validation:Optional + StrongSessionAffinityCookie *RegionBackendServiceStrongSessionAffinityCookieParameters `json:"strongSessionAffinityCookie,omitempty" tf:"strong_session_affinity_cookie,omitempty"` + // The backend service timeout has a different meaning depending on the type of load balancer. // For more information see, Backend service settings. // The default is 30 seconds. @@ -1712,6 +1718,89 @@ type RegionBackendServiceParameters struct { TimeoutSec *float64 `json:"timeoutSec,omitempty" tf:"timeout_sec,omitempty"` } +type RegionBackendServiceStrongSessionAffinityCookieInitParameters struct { + + // Name of the cookie. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Path to set for the cookie. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // Lifetime of the cookie. + // Structure is documented below. + TTL *RegionBackendServiceStrongSessionAffinityCookieTTLInitParameters `json:"ttl,omitempty" tf:"ttl,omitempty"` +} + +type RegionBackendServiceStrongSessionAffinityCookieObservation struct { + + // Name of the cookie. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Path to set for the cookie. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // Lifetime of the cookie. + // Structure is documented below. + TTL *RegionBackendServiceStrongSessionAffinityCookieTTLObservation `json:"ttl,omitempty" tf:"ttl,omitempty"` +} + +type RegionBackendServiceStrongSessionAffinityCookieParameters struct { + + // Name of the cookie. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Path to set for the cookie. + // +kubebuilder:validation:Optional + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // Lifetime of the cookie. + // Structure is documented below. 
+ // +kubebuilder:validation:Optional + TTL *RegionBackendServiceStrongSessionAffinityCookieTTLParameters `json:"ttl,omitempty" tf:"ttl,omitempty"` +} + +type RegionBackendServiceStrongSessionAffinityCookieTTLInitParameters struct { + + // Span of time that's a fraction of a second at nanosecond + // resolution. Durations less than one second are represented + // with a 0 seconds field and a positive nanos field. Must + // be from 0 to 999,999,999 inclusive. + Nanos *float64 `json:"nanos,omitempty" tf:"nanos,omitempty"` + + // Span of time at a resolution of a second. + // Must be from 0 to 315,576,000,000 inclusive. + Seconds *float64 `json:"seconds,omitempty" tf:"seconds,omitempty"` +} + +type RegionBackendServiceStrongSessionAffinityCookieTTLObservation struct { + + // Span of time that's a fraction of a second at nanosecond + // resolution. Durations less than one second are represented + // with a 0 seconds field and a positive nanos field. Must + // be from 0 to 999,999,999 inclusive. + Nanos *float64 `json:"nanos,omitempty" tf:"nanos,omitempty"` + + // Span of time at a resolution of a second. + // Must be from 0 to 315,576,000,000 inclusive. + Seconds *float64 `json:"seconds,omitempty" tf:"seconds,omitempty"` +} + +type RegionBackendServiceStrongSessionAffinityCookieTTLParameters struct { + + // Span of time that's a fraction of a second at nanosecond + // resolution. Durations less than one second are represented + // with a 0 seconds field and a positive nanos field. Must + // be from 0 to 999,999,999 inclusive. + // +kubebuilder:validation:Optional + Nanos *float64 `json:"nanos,omitempty" tf:"nanos,omitempty"` + + // Span of time at a resolution of a second. + // Must be from 0 to 315,576,000,000 inclusive. 
+ // +kubebuilder:validation:Optional + Seconds *float64 `json:"seconds" tf:"seconds,omitempty"` +} + // RegionBackendServiceSpec defines the desired state of RegionBackendService type RegionBackendServiceSpec struct { v1.ResourceSpec `json:",inline"` diff --git a/apis/compute/v1beta2/zz_regiondisk_types.go b/apis/compute/v1beta2/zz_regiondisk_types.go index c5b9fef44..19f3748e3 100755 --- a/apis/compute/v1beta2/zz_regiondisk_types.go +++ b/apis/compute/v1beta2/zz_regiondisk_types.go @@ -111,7 +111,7 @@ type RegionDiskGuestOsFeaturesParameters struct { type RegionDiskInitParameters struct { - // A nested object resource + // A nested object resource. // Structure is documented below. AsyncPrimaryDisk *RegionDiskAsyncPrimaryDiskInitParameters `json:"asyncPrimaryDisk,omitempty" tf:"async_primary_disk,omitempty"` @@ -198,7 +198,7 @@ type RegionDiskInitParameters struct { type RegionDiskObservation struct { - // A nested object resource + // A nested object resource. // Structure is documented below. AsyncPrimaryDisk *RegionDiskAsyncPrimaryDiskObservation `json:"asyncPrimaryDisk,omitempty" tf:"async_primary_disk,omitempty"` @@ -323,7 +323,7 @@ type RegionDiskObservation struct { type RegionDiskParameters struct { - // A nested object resource + // A nested object resource. // Structure is documented below. // +kubebuilder:validation:Optional AsyncPrimaryDisk *RegionDiskAsyncPrimaryDiskParameters `json:"asyncPrimaryDisk,omitempty" tf:"async_primary_disk,omitempty"` diff --git a/apis/compute/v1beta2/zz_regionhealthcheck_types.go b/apis/compute/v1beta2/zz_regionhealthcheck_types.go index def7ca2c1..05c21e7b3 100755 --- a/apis/compute/v1beta2/zz_regionhealthcheck_types.go +++ b/apis/compute/v1beta2/zz_regionhealthcheck_types.go @@ -423,15 +423,15 @@ type RegionHealthCheckInitParameters struct { // you create the resource. Description *string `json:"description,omitempty" tf:"description,omitempty"` - // A nested object resource + // A nested object resource. 
// Structure is documented below. GRPCHealthCheck *RegionHealthCheckGRPCHealthCheckInitParameters `json:"grpcHealthCheck,omitempty" tf:"grpc_health_check,omitempty"` - // A nested object resource + // A nested object resource. // Structure is documented below. HTTPHealthCheck *RegionHealthCheckHTTPHealthCheckInitParameters `json:"httpHealthCheck,omitempty" tf:"http_health_check,omitempty"` - // A nested object resource + // A nested object resource. // Structure is documented below. HTTPSHealthCheck *RegionHealthCheckHTTPSHealthCheckInitParameters `json:"httpsHealthCheck,omitempty" tf:"https_health_check,omitempty"` @@ -439,7 +439,7 @@ type RegionHealthCheckInitParameters struct { // consecutive successes. The default value is 2. HealthyThreshold *float64 `json:"healthyThreshold,omitempty" tf:"healthy_threshold,omitempty"` - // A nested object resource + // A nested object resource. // Structure is documented below. Http2HealthCheck *RegionHealthCheckHttp2HealthCheckInitParameters `json:"http2HealthCheck,omitempty" tf:"http2_health_check,omitempty"` @@ -451,11 +451,11 @@ type RegionHealthCheckInitParameters struct { // If it is not provided, the provider project is used. Project *string `json:"project,omitempty" tf:"project,omitempty"` - // A nested object resource + // A nested object resource. // Structure is documented below. SSLHealthCheck *RegionHealthCheckSSLHealthCheckInitParameters `json:"sslHealthCheck,omitempty" tf:"ssl_health_check,omitempty"` - // A nested object resource + // A nested object resource. // Structure is documented below. TCPHealthCheck *RegionHealthCheckTCPHealthCheckInitParameters `json:"tcpHealthCheck,omitempty" tf:"tcp_health_check,omitempty"` @@ -504,23 +504,26 @@ type RegionHealthCheckObservation struct { // you create the resource. Description *string `json:"description,omitempty" tf:"description,omitempty"` - // A nested object resource + // A nested object resource. // Structure is documented below. 
GRPCHealthCheck *RegionHealthCheckGRPCHealthCheckObservation `json:"grpcHealthCheck,omitempty" tf:"grpc_health_check,omitempty"` - // A nested object resource + // A nested object resource. // Structure is documented below. HTTPHealthCheck *RegionHealthCheckHTTPHealthCheckObservation `json:"httpHealthCheck,omitempty" tf:"http_health_check,omitempty"` - // A nested object resource + // A nested object resource. // Structure is documented below. HTTPSHealthCheck *RegionHealthCheckHTTPSHealthCheckObservation `json:"httpsHealthCheck,omitempty" tf:"https_health_check,omitempty"` + // The unique identifier number for the resource. This identifier is defined by the server. + HealthCheckID *float64 `json:"healthCheckId,omitempty" tf:"health_check_id,omitempty"` + // A so-far unhealthy instance will be marked healthy after this many // consecutive successes. The default value is 2. HealthyThreshold *float64 `json:"healthyThreshold,omitempty" tf:"healthy_threshold,omitempty"` - // A nested object resource + // A nested object resource. // Structure is documented below. Http2HealthCheck *RegionHealthCheckHttp2HealthCheckObservation `json:"http2HealthCheck,omitempty" tf:"http2_health_check,omitempty"` @@ -539,14 +542,14 @@ type RegionHealthCheckObservation struct { // If it is not provided, the provider region is used. Region *string `json:"region,omitempty" tf:"region,omitempty"` - // A nested object resource + // A nested object resource. // Structure is documented below. SSLHealthCheck *RegionHealthCheckSSLHealthCheckObservation `json:"sslHealthCheck,omitempty" tf:"ssl_health_check,omitempty"` // The URI of the created resource. SelfLink *string `json:"selfLink,omitempty" tf:"self_link,omitempty"` - // A nested object resource + // A nested object resource. // Structure is documented below. 
TCPHealthCheck *RegionHealthCheckTCPHealthCheckObservation `json:"tcpHealthCheck,omitempty" tf:"tcp_health_check,omitempty"` @@ -575,17 +578,17 @@ type RegionHealthCheckParameters struct { // +kubebuilder:validation:Optional Description *string `json:"description,omitempty" tf:"description,omitempty"` - // A nested object resource + // A nested object resource. // Structure is documented below. // +kubebuilder:validation:Optional GRPCHealthCheck *RegionHealthCheckGRPCHealthCheckParameters `json:"grpcHealthCheck,omitempty" tf:"grpc_health_check,omitempty"` - // A nested object resource + // A nested object resource. // Structure is documented below. // +kubebuilder:validation:Optional HTTPHealthCheck *RegionHealthCheckHTTPHealthCheckParameters `json:"httpHealthCheck,omitempty" tf:"http_health_check,omitempty"` - // A nested object resource + // A nested object resource. // Structure is documented below. // +kubebuilder:validation:Optional HTTPSHealthCheck *RegionHealthCheckHTTPSHealthCheckParameters `json:"httpsHealthCheck,omitempty" tf:"https_health_check,omitempty"` @@ -595,7 +598,7 @@ type RegionHealthCheckParameters struct { // +kubebuilder:validation:Optional HealthyThreshold *float64 `json:"healthyThreshold,omitempty" tf:"healthy_threshold,omitempty"` - // A nested object resource + // A nested object resource. // Structure is documented below. // +kubebuilder:validation:Optional Http2HealthCheck *RegionHealthCheckHttp2HealthCheckParameters `json:"http2HealthCheck,omitempty" tf:"http2_health_check,omitempty"` @@ -615,12 +618,12 @@ type RegionHealthCheckParameters struct { // +kubebuilder:validation:Required Region *string `json:"region" tf:"region,omitempty"` - // A nested object resource + // A nested object resource. // Structure is documented below. 
// +kubebuilder:validation:Optional SSLHealthCheck *RegionHealthCheckSSLHealthCheckParameters `json:"sslHealthCheck,omitempty" tf:"ssl_health_check,omitempty"` - // A nested object resource + // A nested object resource. // Structure is documented below. // +kubebuilder:validation:Optional TCPHealthCheck *RegionHealthCheckTCPHealthCheckParameters `json:"tcpHealthCheck,omitempty" tf:"tcp_health_check,omitempty"` diff --git a/apis/compute/v1beta2/zz_regioninstancegroupmanager_types.go b/apis/compute/v1beta2/zz_regioninstancegroupmanager_types.go index 530be83d7..34a9e8977 100755 --- a/apis/compute/v1beta2/zz_regioninstancegroupmanager_types.go +++ b/apis/compute/v1beta2/zz_regioninstancegroupmanager_types.go @@ -13,6 +13,67 @@ import ( v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" ) +type InstanceFlexibilityPolicyInitParameters struct { + + // , Named instance selections configuring properties that the group will use when creating new VMs. One can specify multiple instance selection to allow managed instance group to create VMs from multiple types of machines, based on preference and availability. Structure is documented below. + InstanceSelections []InstanceSelectionsInitParameters `json:"instanceSelections,omitempty" tf:"instance_selections,omitempty"` +} + +type InstanceFlexibilityPolicyObservation struct { + + // , Named instance selections configuring properties that the group will use when creating new VMs. One can specify multiple instance selection to allow managed instance group to create VMs from multiple types of machines, based on preference and availability. Structure is documented below. + InstanceSelections []InstanceSelectionsObservation `json:"instanceSelections,omitempty" tf:"instance_selections,omitempty"` +} + +type InstanceFlexibilityPolicyParameters struct { + + // , Named instance selections configuring properties that the group will use when creating new VMs. 
One can specify multiple instance selection to allow managed instance group to create VMs from multiple types of machines, based on preference and availability. Structure is documented below. + // +kubebuilder:validation:Optional + InstanceSelections []InstanceSelectionsParameters `json:"instanceSelections,omitempty" tf:"instance_selections,omitempty"` +} + +type InstanceSelectionsInitParameters struct { + + // , A list of full machine-type names, e.g. "n1-standard-16". + // +listType=set + MachineTypes []*string `json:"machineTypes,omitempty" tf:"machine_types,omitempty"` + + // , Name of the instance selection, e.g. instance_selection_with_n1_machines_types. Instance selection names must be unique within the flexibility policy. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // , Preference of this instance selection. Lower number means higher preference. Managed instance group will first try to create a VM based on the machine-type with lowest rank and fallback to next rank based on availability. Machine types and instance selections with the same rank have the same preference. + Rank *float64 `json:"rank,omitempty" tf:"rank,omitempty"` +} + +type InstanceSelectionsObservation struct { + + // , A list of full machine-type names, e.g. "n1-standard-16". + // +listType=set + MachineTypes []*string `json:"machineTypes,omitempty" tf:"machine_types,omitempty"` + + // , Name of the instance selection, e.g. instance_selection_with_n1_machines_types. Instance selection names must be unique within the flexibility policy. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // , Preference of this instance selection. Lower number means higher preference. Managed instance group will first try to create a VM based on the machine-type with lowest rank and fallback to next rank based on availability. Machine types and instance selections with the same rank have the same preference. 
+ Rank *float64 `json:"rank,omitempty" tf:"rank,omitempty"` +} + +type InstanceSelectionsParameters struct { + + // , A list of full machine-type names, e.g. "n1-standard-16". + // +kubebuilder:validation:Optional + // +listType=set + MachineTypes []*string `json:"machineTypes" tf:"machine_types,omitempty"` + + // , Name of the instance selection, e.g. instance_selection_with_n1_machines_types. Instance selection names must be unique within the flexibility policy. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // , Preference of this instance selection. Lower number means higher preference. Managed instance group will first try to create a VM based on the machine-type with lowest rank and fallback to next rank based on availability. Machine types and instance selections with the same rank have the same preference. + // +kubebuilder:validation:Optional + Rank *float64 `json:"rank,omitempty" tf:"rank,omitempty"` +} + type RegionInstanceGroupManagerAllInstancesConfigInitParameters struct { // , The label key-value pairs that you want to patch onto the instance. @@ -131,6 +192,9 @@ type RegionInstanceGroupManagerInitParameters struct { // +listType=set DistributionPolicyZones []*string `json:"distributionPolicyZones,omitempty" tf:"distribution_policy_zones,omitempty"` + // The flexibility policy for managed instance group. Instance flexibility allows managed instance group to create VMs from multiple types of machines. Instance flexibility configuration on managed instance group overrides instance template configuration. Structure is documented below. 
+ InstanceFlexibilityPolicy *InstanceFlexibilityPolicyInitParameters `json:"instanceFlexibilityPolicy,omitempty" tf:"instance_flexibility_policy,omitempty"` + InstanceLifecyclePolicy *RegionInstanceGroupManagerInstanceLifecyclePolicyInitParameters `json:"instanceLifecyclePolicy,omitempty" tf:"instance_lifecycle_policy,omitempty"` // Pagination behavior of the listManagedInstances API @@ -158,6 +222,9 @@ type RegionInstanceGroupManagerInitParameters struct { // The region where the managed instance group resides. If not provided, the provider region is used. Region *string `json:"region,omitempty" tf:"region,omitempty"` + // The standby policy for stopped and suspended instances. Structure is documented below. For more information, see the official documentation. + StandbyPolicy *RegionInstanceGroupManagerStandbyPolicyInitParameters `json:"standbyPolicy,omitempty" tf:"standby_policy,omitempty"` + // Disks created on the instances that will be preserved on instance delete, update, etc. Structure is documented below. For more information see the official documentation. Proactive cross zone instance redistribution must be disabled before you can update stateful disks on existing instance group managers. This can be controlled via the update_policy. StatefulDisk []RegionInstanceGroupManagerStatefulDiskInitParameters `json:"statefulDisk,omitempty" tf:"stateful_disk,omitempty"` @@ -189,6 +256,12 @@ type RegionInstanceGroupManagerInitParameters struct { // lifecycle. Defaults to 0. TargetSize *float64 `json:"targetSize,omitempty" tf:"target_size,omitempty"` + // The target number of stopped instances for this managed instance group. + TargetStoppedSize *float64 `json:"targetStoppedSize,omitempty" tf:"target_stopped_size,omitempty"` + + // The target number of suspended instances for this managed instance group. + TargetSuspendedSize *float64 `json:"targetSuspendedSize,omitempty" tf:"target_suspended_size,omitempty"` + // The update policy for this managed instance group. 
Structure is documented below. For more information, see the official documentation and API UpdatePolicy *RegionInstanceGroupManagerUpdatePolicyInitParameters `json:"updatePolicy,omitempty" tf:"update_policy,omitempty"` @@ -306,9 +379,15 @@ type RegionInstanceGroupManagerObservation struct { // an identifier for the resource with format projects/{{project}}/regions/{{region}}/instanceGroupManagers/{{name}} ID *string `json:"id,omitempty" tf:"id,omitempty"` + // The flexibility policy for managed instance group. Instance flexibility allows managed instance group to create VMs from multiple types of machines. Instance flexibility configuration on managed instance group overrides instance template configuration. Structure is documented below. + InstanceFlexibilityPolicy *InstanceFlexibilityPolicyObservation `json:"instanceFlexibilityPolicy,omitempty" tf:"instance_flexibility_policy,omitempty"` + // The full URL of the instance group created by the manager. InstanceGroup *string `json:"instanceGroup,omitempty" tf:"instance_group,omitempty"` + // an identifier for the resource with format projects/{{project}}/regions/{{region}}/instanceGroupManagers/{{name}} + InstanceGroupManagerID *float64 `json:"instanceGroupManagerId,omitempty" tf:"instance_group_manager_id,omitempty"` + InstanceLifecyclePolicy *RegionInstanceGroupManagerInstanceLifecyclePolicyObservation `json:"instanceLifecyclePolicy,omitempty" tf:"instance_lifecycle_policy,omitempty"` // Pagination behavior of the listManagedInstances API @@ -339,6 +418,9 @@ type RegionInstanceGroupManagerObservation struct { // The URL of the created resource. SelfLink *string `json:"selfLink,omitempty" tf:"self_link,omitempty"` + // The standby policy for stopped and suspended instances. Structure is documented below. For more information, see the official documentation. 
+ StandbyPolicy *RegionInstanceGroupManagerStandbyPolicyObservation `json:"standbyPolicy,omitempty" tf:"standby_policy,omitempty"` + // Disks created on the instances that will be preserved on instance delete, update, etc. Structure is documented below. For more information see the official documentation. Proactive cross zone instance redistribution must be disabled before you can update stateful disks on existing instance group managers. This can be controlled via the update_policy. StatefulDisk []RegionInstanceGroupManagerStatefulDiskObservation `json:"statefulDisk,omitempty" tf:"stateful_disk,omitempty"` @@ -362,6 +444,12 @@ type RegionInstanceGroupManagerObservation struct { // lifecycle. Defaults to 0. TargetSize *float64 `json:"targetSize,omitempty" tf:"target_size,omitempty"` + // The target number of stopped instances for this managed instance group. + TargetStoppedSize *float64 `json:"targetStoppedSize,omitempty" tf:"target_stopped_size,omitempty"` + + // The target number of suspended instances for this managed instance group. + TargetSuspendedSize *float64 `json:"targetSuspendedSize,omitempty" tf:"target_suspended_size,omitempty"` + // The update policy for this managed instance group. Structure is documented below. For more information, see the official documentation and API UpdatePolicy *RegionInstanceGroupManagerUpdatePolicyObservation `json:"updatePolicy,omitempty" tf:"update_policy,omitempty"` @@ -418,6 +506,10 @@ type RegionInstanceGroupManagerParameters struct { // +listType=set DistributionPolicyZones []*string `json:"distributionPolicyZones,omitempty" tf:"distribution_policy_zones,omitempty"` + // The flexibility policy for managed instance group. Instance flexibility allows managed instance group to create VMs from multiple types of machines. Instance flexibility configuration on managed instance group overrides instance template configuration. Structure is documented below. 
+ // +kubebuilder:validation:Optional + InstanceFlexibilityPolicy *InstanceFlexibilityPolicyParameters `json:"instanceFlexibilityPolicy,omitempty" tf:"instance_flexibility_policy,omitempty"` + // +kubebuilder:validation:Optional InstanceLifecyclePolicy *RegionInstanceGroupManagerInstanceLifecyclePolicyParameters `json:"instanceLifecyclePolicy,omitempty" tf:"instance_lifecycle_policy,omitempty"` @@ -451,6 +543,10 @@ type RegionInstanceGroupManagerParameters struct { // +kubebuilder:validation:Optional Region *string `json:"region,omitempty" tf:"region,omitempty"` + // The standby policy for stopped and suspended instances. Structure is documented below. For more information, see the official documentation. + // +kubebuilder:validation:Optional + StandbyPolicy *RegionInstanceGroupManagerStandbyPolicyParameters `json:"standbyPolicy,omitempty" tf:"standby_policy,omitempty"` + // Disks created on the instances that will be preserved on instance delete, update, etc. Structure is documented below. For more information see the official documentation. Proactive cross zone instance redistribution must be disabled before you can update stateful disks on existing instance group managers. This can be controlled via the update_policy. // +kubebuilder:validation:Optional StatefulDisk []RegionInstanceGroupManagerStatefulDiskParameters `json:"statefulDisk,omitempty" tf:"stateful_disk,omitempty"` @@ -487,6 +583,14 @@ type RegionInstanceGroupManagerParameters struct { // +kubebuilder:validation:Optional TargetSize *float64 `json:"targetSize,omitempty" tf:"target_size,omitempty"` + // The target number of stopped instances for this managed instance group. + // +kubebuilder:validation:Optional + TargetStoppedSize *float64 `json:"targetStoppedSize,omitempty" tf:"target_stopped_size,omitempty"` + + // The target number of suspended instances for this managed instance group. 
+ // +kubebuilder:validation:Optional + TargetSuspendedSize *float64 `json:"targetSuspendedSize,omitempty" tf:"target_suspended_size,omitempty"` + // The update policy for this managed instance group. Structure is documented below. For more information, see the official documentation and API // +kubebuilder:validation:Optional UpdatePolicy *RegionInstanceGroupManagerUpdatePolicyParameters `json:"updatePolicy,omitempty" tf:"update_policy,omitempty"` @@ -510,6 +614,35 @@ type RegionInstanceGroupManagerParameters struct { WaitForInstancesStatus *string `json:"waitForInstancesStatus,omitempty" tf:"wait_for_instances_status,omitempty"` } +type RegionInstanceGroupManagerStandbyPolicyInitParameters struct { + + // - Specifies the number of seconds that the MIG should wait to suspend or stop a VM after that VM was created. The initial delay gives the initialization script the time to prepare your VM for a quick scale out. The value of initial delay must be between 0 and 3600 seconds. The default value is 0. + InitialDelaySec *float64 `json:"initialDelaySec,omitempty" tf:"initial_delay_sec,omitempty"` + + // - Defines how a MIG resumes or starts VMs from a standby pool when the group scales out. Valid options are: MANUAL, SCALE_OUT_POOL. If MANUAL(default), you have full control over which VMs are stopped and suspended in the MIG. If SCALE_OUT_POOL, the MIG uses the VMs from the standby pools to accelerate the scale out by resuming or starting them and then automatically replenishes the standby pool with new VMs to maintain the target sizes. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` +} + +type RegionInstanceGroupManagerStandbyPolicyObservation struct { + + // - Specifies the number of seconds that the MIG should wait to suspend or stop a VM after that VM was created. The initial delay gives the initialization script the time to prepare your VM for a quick scale out. The value of initial delay must be between 0 and 3600 seconds. The default value is 0. 
+ InitialDelaySec *float64 `json:"initialDelaySec,omitempty" tf:"initial_delay_sec,omitempty"` + + // - Defines how a MIG resumes or starts VMs from a standby pool when the group scales out. Valid options are: MANUAL, SCALE_OUT_POOL. If MANUAL(default), you have full control over which VMs are stopped and suspended in the MIG. If SCALE_OUT_POOL, the MIG uses the VMs from the standby pools to accelerate the scale out by resuming or starting them and then automatically replenishes the standby pool with new VMs to maintain the target sizes. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` +} + +type RegionInstanceGroupManagerStandbyPolicyParameters struct { + + // - Specifies the number of seconds that the MIG should wait to suspend or stop a VM after that VM was created. The initial delay gives the initialization script the time to prepare your VM for a quick scale out. The value of initial delay must be between 0 and 3600 seconds. The default value is 0. + // +kubebuilder:validation:Optional + InitialDelaySec *float64 `json:"initialDelaySec,omitempty" tf:"initial_delay_sec,omitempty"` + + // - Defines how a MIG resumes or starts VMs from a standby pool when the group scales out. Valid options are: MANUAL, SCALE_OUT_POOL. If MANUAL(default), you have full control over which VMs are stopped and suspended in the MIG. If SCALE_OUT_POOL, the MIG uses the VMs from the standby pools to accelerate the scale out by resuming or starting them and then automatically replenishes the standby pool with new VMs to maintain the target sizes. + // +kubebuilder:validation:Optional + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` +} + type RegionInstanceGroupManagerStatefulDiskInitParameters struct { // , A value that prescribes what should happen to the stateful disk when the VM instance is deleted. The available options are NEVER and ON_PERMANENT_INSTANCE_DELETION. NEVER - detach the disk when the VM is deleted, but do not delete the disk. 
ON_PERMANENT_INSTANCE_DELETION will delete the stateful disk when the VM is permanently deleted from the instance group. The default is NEVER. diff --git a/apis/compute/v1beta2/zz_regionnetworkendpointgroup_types.go b/apis/compute/v1beta2/zz_regionnetworkendpointgroup_types.go index 725f29bd9..01797e401 100755 --- a/apis/compute/v1beta2/zz_regionnetworkendpointgroup_types.go +++ b/apis/compute/v1beta2/zz_regionnetworkendpointgroup_types.go @@ -240,6 +240,34 @@ type CloudRunParameters struct { URLMask *string `json:"urlMask,omitempty" tf:"url_mask,omitempty"` } +type PscDataInitParameters struct { + + // The PSC producer port to use when consumer PSC NEG connects to a producer. If + // this flag isn't specified for a PSC NEG with endpoint type + // private-service-connect, then PSC NEG will be connected to a first port in the + // available PSC producer port range. + ProducerPort *string `json:"producerPort,omitempty" tf:"producer_port,omitempty"` +} + +type PscDataObservation struct { + + // The PSC producer port to use when consumer PSC NEG connects to a producer. If + // this flag isn't specified for a PSC NEG with endpoint type + // private-service-connect, then PSC NEG will be connected to a first port in the + // available PSC producer port range. + ProducerPort *string `json:"producerPort,omitempty" tf:"producer_port,omitempty"` +} + +type PscDataParameters struct { + + // The PSC producer port to use when consumer PSC NEG connects to a producer. If + // this flag isn't specified for a PSC NEG with endpoint type + // private-service-connect, then PSC NEG will be connected to a first port in the + // available PSC producer port range. + // +kubebuilder:validation:Optional + ProducerPort *string `json:"producerPort,omitempty" tf:"producer_port,omitempty"` +} + type RegionNetworkEndpointGroupInitParameters struct { // This field is only used for SERVERLESS NEGs. 
@@ -285,6 +313,10 @@ type RegionNetworkEndpointGroupInitParameters struct { // If it is not provided, the provider project is used. Project *string `json:"project,omitempty" tf:"project,omitempty"` + // This field is only used for PSC NEGs. + // Structure is documented below. + PscData *PscDataInitParameters `json:"pscData,omitempty" tf:"psc_data,omitempty"` + // This field is only used for PSC and INTERNET NEGs. // The target service url used to set up private service connection to // a Google API or a PSC Producer Service Attachment. @@ -353,6 +385,10 @@ type RegionNetworkEndpointGroupObservation struct { // If it is not provided, the provider project is used. Project *string `json:"project,omitempty" tf:"project,omitempty"` + // This field is only used for PSC NEGs. + // Structure is documented below. + PscData *PscDataObservation `json:"pscData,omitempty" tf:"psc_data,omitempty"` + // This field is only used for PSC and INTERNET NEGs. // The target service url used to set up private service connection to // a Google API or a PSC Producer Service Attachment. @@ -421,6 +457,11 @@ type RegionNetworkEndpointGroupParameters struct { // +kubebuilder:validation:Optional Project *string `json:"project,omitempty" tf:"project,omitempty"` + // This field is only used for PSC NEGs. + // Structure is documented below. + // +kubebuilder:validation:Optional + PscData *PscDataParameters `json:"pscData,omitempty" tf:"psc_data,omitempty"` + // This field is only used for PSC and INTERNET NEGs. // The target service url used to set up private service connection to // a Google API or a PSC Producer Service Attachment. diff --git a/apis/compute/v1beta2/zz_routernat_types.go b/apis/compute/v1beta2/zz_routernat_types.go index ffed45276..f20247971 100755 --- a/apis/compute/v1beta2/zz_routernat_types.go +++ b/apis/compute/v1beta2/zz_routernat_types.go @@ -117,6 +117,11 @@ type RouterNATInitParameters struct { // Timeout (in seconds) for ICMP connections. Defaults to 30s if not set. 
IcmpIdleTimeoutSec *float64 `json:"icmpIdleTimeoutSec,omitempty" tf:"icmp_idle_timeout_sec,omitempty"` + // Self-links of NAT IPs to be used as initial value for creation alongside a RouterNatAddress resource. + // Conflicts with natIps and drainNatIps. Only valid if natIpAllocateOption is set to MANUAL_ONLY. + // +listType=set + InitialNATIps []*string `json:"initialNatIps,omitempty" tf:"initial_nat_ips,omitempty"` + // Configuration for logging on NAT // Structure is documented below. LogConfig *RouterNATLogConfigInitParameters `json:"logConfig,omitempty" tf:"log_config,omitempty"` @@ -136,6 +141,9 @@ type RouterNATInitParameters struct { // Self-links of NAT IPs. Only valid if natIpAllocateOption // is set to MANUAL_ONLY. + // If this field is used alongside with a count created list of address resources google_compute_address.foobar.*.self_link, + // the access level resource for the address resource must have a lifecycle block with create_before_destroy = true so + // the number of resources can be increased/decreased without triggering the resourceInUseByAnotherResource error. // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/compute/v1beta1.Address // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("self_link",true) // +listType=set @@ -260,6 +268,11 @@ type RouterNATObservation struct { // Timeout (in seconds) for ICMP connections. Defaults to 30s if not set. IcmpIdleTimeoutSec *float64 `json:"icmpIdleTimeoutSec,omitempty" tf:"icmp_idle_timeout_sec,omitempty"` + // Self-links of NAT IPs to be used as initial value for creation alongside a RouterNatAddress resource. + // Conflicts with natIps and drainNatIps. Only valid if natIpAllocateOption is set to MANUAL_ONLY. + // +listType=set + InitialNATIps []*string `json:"initialNatIps,omitempty" tf:"initial_nat_ips,omitempty"` + // Configuration for logging on NAT // Structure is documented below. 
LogConfig *RouterNATLogConfigObservation `json:"logConfig,omitempty" tf:"log_config,omitempty"` @@ -279,6 +292,9 @@ type RouterNATObservation struct { // Self-links of NAT IPs. Only valid if natIpAllocateOption // is set to MANUAL_ONLY. + // If this field is used alongside with a count created list of address resources google_compute_address.foobar.*.self_link, + // the access level resource for the address resource must have a lifecycle block with create_before_destroy = true so + // the number of resources can be increased/decreased without triggering the resourceInUseByAnotherResource error. // +listType=set NATIps []*string `json:"natIps,omitempty" tf:"nat_ips,omitempty"` @@ -370,6 +386,12 @@ type RouterNATParameters struct { // +kubebuilder:validation:Optional IcmpIdleTimeoutSec *float64 `json:"icmpIdleTimeoutSec,omitempty" tf:"icmp_idle_timeout_sec,omitempty"` + // Self-links of NAT IPs to be used as initial value for creation alongside a RouterNatAddress resource. + // Conflicts with natIps and drainNatIps. Only valid if natIpAllocateOption is set to MANUAL_ONLY. + // +kubebuilder:validation:Optional + // +listType=set + InitialNATIps []*string `json:"initialNatIps,omitempty" tf:"initial_nat_ips,omitempty"` + // Configuration for logging on NAT // Structure is documented below. // +kubebuilder:validation:Optional @@ -393,6 +415,9 @@ type RouterNATParameters struct { // Self-links of NAT IPs. Only valid if natIpAllocateOption // is set to MANUAL_ONLY. + // If this field is used alongside with a count created list of address resources google_compute_address.foobar.*.self_link, + // the access level resource for the address resource must have a lifecycle block with create_before_destroy = true so + // the number of resources can be increased/decreased without triggering the resourceInUseByAnotherResource error. 
// +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/compute/v1beta1.Address // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("self_link",true) // +kubebuilder:validation:Optional diff --git a/apis/compute/v1beta2/zz_routerpeer_types.go b/apis/compute/v1beta2/zz_routerpeer_types.go index a524193f1..b48e03944 100755 --- a/apis/compute/v1beta2/zz_routerpeer_types.go +++ b/apis/compute/v1beta2/zz_routerpeer_types.go @@ -109,21 +109,21 @@ type BfdParameters struct { type CustomLearnedIPRangesInitParameters struct { - // The IP range to advertise. The value must be a + // The IP range to learn. The value must be a // CIDR-formatted string. Range *string `json:"range,omitempty" tf:"range,omitempty"` } type CustomLearnedIPRangesObservation struct { - // The IP range to advertise. The value must be a + // The IP range to learn. The value must be a // CIDR-formatted string. Range *string `json:"range,omitempty" tf:"range,omitempty"` } type CustomLearnedIPRangesParameters struct { - // The IP range to advertise. The value must be a + // The IP range to learn. The value must be a // CIDR-formatted string. // +kubebuilder:validation:Optional Range *string `json:"range" tf:"range,omitempty"` @@ -216,8 +216,16 @@ type RouterPeerInitParameters struct { // Structure is documented below. Bfd *BfdInitParameters `json:"bfd,omitempty" tf:"bfd,omitempty"` + // The custom learned route IP address range. Must be a valid CIDR-formatted prefix. + // If an IP address is provided without a subnet mask, it is interpreted as, for IPv4, + // a /32 singular IP address range, and, for IPv6, /128. + // Structure is documented below. CustomLearnedIPRanges []CustomLearnedIPRangesInitParameters `json:"customLearnedIpRanges,omitempty" tf:"custom_learned_ip_ranges,omitempty"` + // The user-defined custom learned route priority for a BGP session. + // This value is applied to all custom learned route ranges for the session. 
+ // You can choose a value from 0 to 65335. If you don't provide a value, + // Google Cloud assigns a priority of 100 to the ranges. CustomLearnedRoutePriority *float64 `json:"customLearnedRoutePriority,omitempty" tf:"custom_learned_route_priority,omitempty"` // The status of the BGP peer connection. If set to false, any active session @@ -353,8 +361,16 @@ type RouterPeerObservation struct { // Structure is documented below. Bfd *BfdObservation `json:"bfd,omitempty" tf:"bfd,omitempty"` + // The custom learned route IP address range. Must be a valid CIDR-formatted prefix. + // If an IP address is provided without a subnet mask, it is interpreted as, for IPv4, + // a /32 singular IP address range, and, for IPv6, /128. + // Structure is documented below. CustomLearnedIPRanges []CustomLearnedIPRangesObservation `json:"customLearnedIpRanges,omitempty" tf:"custom_learned_ip_ranges,omitempty"` + // The user-defined custom learned route priority for a BGP session. + // This value is applied to all custom learned route ranges for the session. + // You can choose a value from 0 to 65335. If you don't provide a value, + // Google Cloud assigns a priority of 100 to the ranges. CustomLearnedRoutePriority *float64 `json:"customLearnedRoutePriority,omitempty" tf:"custom_learned_route_priority,omitempty"` // The status of the BGP peer connection. If set to false, any active session @@ -464,9 +480,17 @@ type RouterPeerParameters struct { // +kubebuilder:validation:Optional Bfd *BfdParameters `json:"bfd,omitempty" tf:"bfd,omitempty"` + // The custom learned route IP address range. Must be a valid CIDR-formatted prefix. + // If an IP address is provided without a subnet mask, it is interpreted as, for IPv4, + // a /32 singular IP address range, and, for IPv6, /128. + // Structure is documented below. 
// +kubebuilder:validation:Optional CustomLearnedIPRanges []CustomLearnedIPRangesParameters `json:"customLearnedIpRanges,omitempty" tf:"custom_learned_ip_ranges,omitempty"` + // The user-defined custom learned route priority for a BGP session. + // This value is applied to all custom learned route ranges for the session. + // You can choose a value from 0 to 65535. If you don't provide a value, + // Google Cloud assigns a priority of 100 to the ranges. // +kubebuilder:validation:Optional CustomLearnedRoutePriority *float64 `json:"customLearnedRoutePriority,omitempty" tf:"custom_learned_route_priority,omitempty"` diff --git a/apis/compute/v1beta2/zz_securitypolicy_types.go b/apis/compute/v1beta2/zz_securitypolicy_types.go index bb259200a..5b1eeb344 100755 --- a/apis/compute/v1beta2/zz_securitypolicy_types.go +++ b/apis/compute/v1beta2/zz_securitypolicy_types.go @@ -144,6 +144,35 @@ type ConfigParameters struct { SrcIPRanges []*string `json:"srcIpRanges" tf:"src_ip_ranges,omitempty"` } +type EnforceOnKeyConfigsInitParameters struct { + + // Rate limit key name applicable only for the following key types: + EnforceOnKeyName *string `json:"enforceOnKeyName,omitempty" tf:"enforce_on_key_name,omitempty"` + + // Determines the key to enforce the rate_limit_threshold on. If not specified, defaults to ALL. + EnforceOnKeyType *string `json:"enforceOnKeyType,omitempty" tf:"enforce_on_key_type,omitempty"` +} + +type EnforceOnKeyConfigsObservation struct { + + // Rate limit key name applicable only for the following key types: + EnforceOnKeyName *string `json:"enforceOnKeyName,omitempty" tf:"enforce_on_key_name,omitempty"` + + // Determines the key to enforce the rate_limit_threshold on. If not specified, defaults to ALL. 
+ EnforceOnKeyType *string `json:"enforceOnKeyType,omitempty" tf:"enforce_on_key_type,omitempty"` +} + +type EnforceOnKeyConfigsParameters struct { + + // Rate limit key name applicable only for the following key types: + // +kubebuilder:validation:Optional + EnforceOnKeyName *string `json:"enforceOnKeyName,omitempty" tf:"enforce_on_key_name,omitempty"` + + // Determines the key to enforce the rate_limit_threshold on. If not specified, defaults to ALL. + // +kubebuilder:validation:Optional + EnforceOnKeyType *string `json:"enforceOnKeyType,omitempty" tf:"enforce_on_key_type,omitempty"` +} + type ExceedRedirectOptionsInitParameters struct { // Target for the redirect action. This is required if the type is EXTERNAL_302 and cannot be specified for GOOGLE_RECAPTCHA. @@ -173,6 +202,78 @@ type ExceedRedirectOptionsParameters struct { Type *string `json:"type" tf:"type,omitempty"` } +type ExclusionInitParameters struct { + + // Request cookie whose value will be excluded from inspection during preconfigured WAF evaluation. Structure is documented below. + RequestCookie []RequestCookieInitParameters `json:"requestCookie,omitempty" tf:"request_cookie,omitempty"` + + // Request header whose value will be excluded from inspection during preconfigured WAF evaluation. Structure is documented below. + RequestHeader []RequestHeaderInitParameters `json:"requestHeader,omitempty" tf:"request_header,omitempty"` + + // Request query parameter whose value will be excluded from inspection during preconfigured WAF evaluation. Note that the parameter can be in the query string or in the POST body. Structure is documented below. + RequestQueryParam []RequestQueryParamInitParameters `json:"requestQueryParam,omitempty" tf:"request_query_param,omitempty"` + + // Request URI from the request line to be excluded from inspection during preconfigured WAF evaluation. When specifying this field, the query or fragment part should be excluded. Structure is documented below. 
+ RequestURI []RequestURIInitParameters `json:"requestUri,omitempty" tf:"request_uri,omitempty"` + + // A list of target rule IDs under the WAF rule set to apply the preconfigured WAF exclusion. If omitted, it refers to all the rule IDs under the WAF rule set. + // +listType=set + TargetRuleIds []*string `json:"targetRuleIds,omitempty" tf:"target_rule_ids,omitempty"` + + // Target WAF rule set to apply the preconfigured WAF exclusion. + TargetRuleSet *string `json:"targetRuleSet,omitempty" tf:"target_rule_set,omitempty"` +} + +type ExclusionObservation struct { + + // Request cookie whose value will be excluded from inspection during preconfigured WAF evaluation. Structure is documented below. + RequestCookie []RequestCookieObservation `json:"requestCookie,omitempty" tf:"request_cookie,omitempty"` + + // Request header whose value will be excluded from inspection during preconfigured WAF evaluation. Structure is documented below. + RequestHeader []RequestHeaderObservation `json:"requestHeader,omitempty" tf:"request_header,omitempty"` + + // Request query parameter whose value will be excluded from inspection during preconfigured WAF evaluation. Note that the parameter can be in the query string or in the POST body. Structure is documented below. + RequestQueryParam []RequestQueryParamObservation `json:"requestQueryParam,omitempty" tf:"request_query_param,omitempty"` + + // Request URI from the request line to be excluded from inspection during preconfigured WAF evaluation. When specifying this field, the query or fragment part should be excluded. Structure is documented below. + RequestURI []RequestURIObservation `json:"requestUri,omitempty" tf:"request_uri,omitempty"` + + // A list of target rule IDs under the WAF rule set to apply the preconfigured WAF exclusion. If omitted, it refers to all the rule IDs under the WAF rule set. 
+ // +listType=set + TargetRuleIds []*string `json:"targetRuleIds,omitempty" tf:"target_rule_ids,omitempty"` + + // Target WAF rule set to apply the preconfigured WAF exclusion. + TargetRuleSet *string `json:"targetRuleSet,omitempty" tf:"target_rule_set,omitempty"` +} + +type ExclusionParameters struct { + + // Request cookie whose value will be excluded from inspection during preconfigured WAF evaluation. Structure is documented below. + // +kubebuilder:validation:Optional + RequestCookie []RequestCookieParameters `json:"requestCookie,omitempty" tf:"request_cookie,omitempty"` + + // Request header whose value will be excluded from inspection during preconfigured WAF evaluation. Structure is documented below. + // +kubebuilder:validation:Optional + RequestHeader []RequestHeaderParameters `json:"requestHeader,omitempty" tf:"request_header,omitempty"` + + // Request query parameter whose value will be excluded from inspection during preconfigured WAF evaluation. Note that the parameter can be in the query string or in the POST body. Structure is documented below. + // +kubebuilder:validation:Optional + RequestQueryParam []RequestQueryParamParameters `json:"requestQueryParam,omitempty" tf:"request_query_param,omitempty"` + + // Request URI from the request line to be excluded from inspection during preconfigured WAF evaluation. When specifying this field, the query or fragment part should be excluded. Structure is documented below. + // +kubebuilder:validation:Optional + RequestURI []RequestURIParameters `json:"requestUri,omitempty" tf:"request_uri,omitempty"` + + // A list of target rule IDs under the WAF rule set to apply the preconfigured WAF exclusion. If omitted, it refers to all the rule IDs under the WAF rule set. + // +kubebuilder:validation:Optional + // +listType=set + TargetRuleIds []*string `json:"targetRuleIds,omitempty" tf:"target_rule_ids,omitempty"` + + // Target WAF rule set to apply the preconfigured WAF exclusion. 
+ // +kubebuilder:validation:Optional + TargetRuleSet *string `json:"targetRuleSet" tf:"target_rule_set,omitempty"` +} + type ExprInitParameters struct { // Textual representation of an expression in Common Expression Language syntax. @@ -255,6 +356,9 @@ type Layer7DdosDefenseConfigInitParameters struct { // Rule visibility can be one of the following: RuleVisibility *string `json:"ruleVisibility,omitempty" tf:"rule_visibility,omitempty"` + + // Configuration options for layer7 adaptive protection for various customizable thresholds. Structure is documented below. + ThresholdConfigs []ThresholdConfigsInitParameters `json:"thresholdConfigs,omitempty" tf:"threshold_configs,omitempty"` } type Layer7DdosDefenseConfigObservation struct { @@ -264,6 +368,9 @@ type Layer7DdosDefenseConfigObservation struct { // Rule visibility can be one of the following: RuleVisibility *string `json:"ruleVisibility,omitempty" tf:"rule_visibility,omitempty"` + + // Configuration options for layer7 adaptive protection for various customizable thresholds. Structure is documented below. + ThresholdConfigs []ThresholdConfigsObservation `json:"thresholdConfigs,omitempty" tf:"threshold_configs,omitempty"` } type Layer7DdosDefenseConfigParameters struct { @@ -275,6 +382,29 @@ type Layer7DdosDefenseConfigParameters struct { // Rule visibility can be one of the following: // +kubebuilder:validation:Optional RuleVisibility *string `json:"ruleVisibility,omitempty" tf:"rule_visibility,omitempty"` + + // Configuration options for layer7 adaptive protection for various customizable thresholds. Structure is documented below. + // +kubebuilder:validation:Optional + ThresholdConfigs []ThresholdConfigsParameters `json:"thresholdConfigs,omitempty" tf:"threshold_configs,omitempty"` +} + +type PreconfiguredWafConfigInitParameters struct { + + // An exclusion to apply during preconfigured WAF evaluation. Structure is documented below. 
+ Exclusion []ExclusionInitParameters `json:"exclusion,omitempty" tf:"exclusion,omitempty"` +} + +type PreconfiguredWafConfigObservation struct { + + // An exclusion to apply during preconfigured WAF evaluation. Structure is documented below. + Exclusion []ExclusionObservation `json:"exclusion,omitempty" tf:"exclusion,omitempty"` +} + +type PreconfiguredWafConfigParameters struct { + + // An exclusion to apply during preconfigured WAF evaluation. Structure is documented below. + // +kubebuilder:validation:Optional + Exclusion []ExclusionParameters `json:"exclusion,omitempty" tf:"exclusion,omitempty"` } type RateLimitOptionsInitParameters struct { @@ -294,6 +424,9 @@ type RateLimitOptionsInitParameters struct { // Determines the key to enforce the rate_limit_threshold on. If not specified, defaults to ALL. EnforceOnKey *string `json:"enforceOnKey,omitempty" tf:"enforce_on_key,omitempty"` + // If specified, any combination of values of enforce_on_key_type/enforce_on_key_name is treated as the key on which rate limit threshold/action is enforced. You can specify up to 3 enforce_on_key_configs. If enforce_on_key_configs is specified, enforce_on_key must be set to an empty string. Structure is documented below. + EnforceOnKeyConfigs []EnforceOnKeyConfigsInitParameters `json:"enforceOnKeyConfigs,omitempty" tf:"enforce_on_key_configs,omitempty"` + // Rate limit key name applicable only for the following key types: EnforceOnKeyName *string `json:"enforceOnKeyName,omitempty" tf:"enforce_on_key_name,omitempty"` @@ -325,6 +458,9 @@ type RateLimitOptionsObservation struct { // Determines the key to enforce the rate_limit_threshold on. If not specified, defaults to ALL. EnforceOnKey *string `json:"enforceOnKey,omitempty" tf:"enforce_on_key,omitempty"` + // If specified, any combination of values of enforce_on_key_type/enforce_on_key_name is treated as the key on which rate limit threshold/action is enforced. You can specify up to 3 enforce_on_key_configs. 
If enforce_on_key_configs is specified, enforce_on_key must be set to an empty string. Structure is documented below. + EnforceOnKeyConfigs []EnforceOnKeyConfigsObservation `json:"enforceOnKeyConfigs,omitempty" tf:"enforce_on_key_configs,omitempty"` + // Rate limit key name applicable only for the following key types: EnforceOnKeyName *string `json:"enforceOnKeyName,omitempty" tf:"enforce_on_key_name,omitempty"` @@ -360,6 +496,10 @@ type RateLimitOptionsParameters struct { // +kubebuilder:validation:Optional EnforceOnKey *string `json:"enforceOnKey,omitempty" tf:"enforce_on_key,omitempty"` + // If specified, any combination of values of enforce_on_key_type/enforce_on_key_name is treated as the key on which rate limit threshold/action is enforced. You can specify up to 3 enforce_on_key_configs. If enforce_on_key_configs is specified, enforce_on_key must be set to an empty string. Structure is documented below. + // +kubebuilder:validation:Optional + EnforceOnKeyConfigs []EnforceOnKeyConfigsParameters `json:"enforceOnKeyConfigs,omitempty" tf:"enforce_on_key_configs,omitempty"` + // Rate limit key name applicable only for the following key types: // +kubebuilder:validation:Optional EnforceOnKeyName *string `json:"enforceOnKeyName,omitempty" tf:"enforce_on_key_name,omitempty"` @@ -484,6 +624,64 @@ type RedirectOptionsParameters struct { Type *string `json:"type" tf:"type,omitempty"` } +type RequestCookieInitParameters struct { + + // You can specify an exact match or a partial match by using a field operator and a field value. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Requests that match this value constitute a granular traffic unit. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type RequestCookieObservation struct { + + // You can specify an exact match or a partial match by using a field operator and a field value. 
+ Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Requests that match this value constitute a granular traffic unit. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type RequestCookieParameters struct { + + // You can specify an exact match or a partial match by using a field operator and a field value. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // Requests that match this value constitute a granular traffic unit. + // +kubebuilder:validation:Optional + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type RequestHeaderInitParameters struct { + + // You can specify an exact match or a partial match by using a field operator and a field value. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Requests that match this value constitute a granular traffic unit. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type RequestHeaderObservation struct { + + // You can specify an exact match or a partial match by using a field operator and a field value. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Requests that match this value constitute a granular traffic unit. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type RequestHeaderParameters struct { + + // You can specify an exact match or a partial match by using a field operator and a field value. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // Requests that match this value constitute a granular traffic unit. + // +kubebuilder:validation:Optional + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + type RequestHeadersToAddsInitParameters struct { // The name of the header to set. 
@@ -513,6 +711,64 @@ type RequestHeadersToAddsParameters struct { HeaderValue *string `json:"headerValue,omitempty" tf:"header_value,omitempty"` } +type RequestQueryParamInitParameters struct { + + // You can specify an exact match or a partial match by using a field operator and a field value. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Requests that match this value constitute a granular traffic unit. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type RequestQueryParamObservation struct { + + // You can specify an exact match or a partial match by using a field operator and a field value. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Requests that match this value constitute a granular traffic unit. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type RequestQueryParamParameters struct { + + // You can specify an exact match or a partial match by using a field operator and a field value. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // Requests that match this value constitute a granular traffic unit. + // +kubebuilder:validation:Optional + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type RequestURIInitParameters struct { + + // You can specify an exact match or a partial match by using a field operator and a field value. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Requests that match this value constitute a granular traffic unit. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type RequestURIObservation struct { + + // You can specify an exact match or a partial match by using a field operator and a field value. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Requests that match this value constitute a granular traffic unit. 
+ Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type RequestURIParameters struct { + + // You can specify an exact match or a partial match by using a field operator and a field value. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // Requests that match this value constitute a granular traffic unit. + // +kubebuilder:validation:Optional + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + type RuleHeaderActionInitParameters struct { // The list of request headers to add or overwrite if they're already present. Structure is documented below. @@ -547,6 +803,9 @@ type RuleInitParameters struct { // If it evaluates to true, the corresponding action is enforced. Structure is documented below. Match *RuleMatchInitParameters `json:"match,omitempty" tf:"match,omitempty"` + // Preconfigured WAF configuration to be applied for the rule. If the rule does not evaluate preconfigured WAF rules, i.e., if evaluatePreconfiguredWaf() is not used, this field will have no effect. Structure is documented below. + PreconfiguredWafConfig *PreconfiguredWafConfigInitParameters `json:"preconfiguredWafConfig,omitempty" tf:"preconfigured_waf_config,omitempty"` + // When set to true, the action specified above is not enforced. // Stackdriver logs for requests that trigger a preview action are annotated as such. Preview *bool `json:"preview,omitempty" tf:"preview,omitempty"` @@ -644,6 +903,9 @@ type RuleObservation struct { // If it evaluates to true, the corresponding action is enforced. Structure is documented below. Match *RuleMatchObservation `json:"match,omitempty" tf:"match,omitempty"` + // Preconfigured WAF configuration to be applied for the rule. If the rule does not evaluate preconfigured WAF rules, i.e., if evaluatePreconfiguredWaf() is not used, this field will have no effect. Structure is documented below. 
+ PreconfiguredWafConfig *PreconfiguredWafConfigObservation `json:"preconfiguredWafConfig,omitempty" tf:"preconfigured_waf_config,omitempty"` + // When set to true, the action specified above is not enforced. // Stackdriver logs for requests that trigger a preview action are annotated as such. Preview *bool `json:"preview,omitempty" tf:"preview,omitempty"` @@ -678,6 +940,10 @@ type RuleParameters struct { // +kubebuilder:validation:Optional Match *RuleMatchParameters `json:"match" tf:"match,omitempty"` + // Preconfigured WAF configuration to be applied for the rule. If the rule does not evaluate preconfigured WAF rules, i.e., if evaluatePreconfiguredWaf() is not used, this field will have no effect. Structure is documented below. + // +kubebuilder:validation:Optional + PreconfiguredWafConfig *PreconfiguredWafConfigParameters `json:"preconfiguredWafConfig,omitempty" tf:"preconfigured_waf_config,omitempty"` + // When set to true, the action specified above is not enforced. // Stackdriver logs for requests that trigger a preview action are annotated as such. // +kubebuilder:validation:Optional @@ -797,6 +1063,144 @@ type SecurityPolicyParameters struct { Type *string `json:"type,omitempty" tf:"type,omitempty"` } +type ThresholdConfigsInitParameters struct { + + // Confidence threshold above which Adaptive Protection's auto-deploy takes actions. + AutoDeployConfidenceThreshold *float64 `json:"autoDeployConfidenceThreshold,omitempty" tf:"auto_deploy_confidence_threshold,omitempty"` + + // Duration over which Adaptive Protection's auto-deployed actions last. + AutoDeployExpirationSec *float64 `json:"autoDeployExpirationSec,omitempty" tf:"auto_deploy_expiration_sec,omitempty"` + + // Impacted baseline threshold below which Adaptive Protection's auto-deploy takes actions. 
+ AutoDeployImpactedBaselineThreshold *float64 `json:"autoDeployImpactedBaselineThreshold,omitempty" tf:"auto_deploy_impacted_baseline_threshold,omitempty"` + + // Load threshold above which Adaptive Protection automatically deploy threshold based on the backend load threshold and detect a new rule during an alerted attack. + AutoDeployLoadThreshold *float64 `json:"autoDeployLoadThreshold,omitempty" tf:"auto_deploy_load_threshold,omitempty"` + + // Detection threshold based on absolute QPS. + DetectionAbsoluteQPS *float64 `json:"detectionAbsoluteQps,omitempty" tf:"detection_absolute_qps,omitempty"` + + // Detection threshold based on the backend service's load. + DetectionLoadThreshold *float64 `json:"detectionLoadThreshold,omitempty" tf:"detection_load_threshold,omitempty"` + + // Detection threshold based on QPS relative to the average of baseline traffic. + DetectionRelativeToBaselineQPS *float64 `json:"detectionRelativeToBaselineQps,omitempty" tf:"detection_relative_to_baseline_qps,omitempty"` + + // The name of config. The name must be 1-63 characters long, and comply with RFC1035. The name must be unique within the security policy. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Configuration options for enabling Adaptive Protection to work on the specified service granularity. Structure is documented below. + TrafficGranularityConfigs []TrafficGranularityConfigsInitParameters `json:"trafficGranularityConfigs,omitempty" tf:"traffic_granularity_configs,omitempty"` +} + +type ThresholdConfigsObservation struct { + + // Confidence threshold above which Adaptive Protection's auto-deploy takes actions. + AutoDeployConfidenceThreshold *float64 `json:"autoDeployConfidenceThreshold,omitempty" tf:"auto_deploy_confidence_threshold,omitempty"` + + // Duration over which Adaptive Protection's auto-deployed actions last. 
+ AutoDeployExpirationSec *float64 `json:"autoDeployExpirationSec,omitempty" tf:"auto_deploy_expiration_sec,omitempty"` + + // Impacted baseline threshold below which Adaptive Protection's auto-deploy takes actions. + AutoDeployImpactedBaselineThreshold *float64 `json:"autoDeployImpactedBaselineThreshold,omitempty" tf:"auto_deploy_impacted_baseline_threshold,omitempty"` + + // Load threshold above which Adaptive Protection automatically deploy threshold based on the backend load threshold and detect a new rule during an alerted attack. + AutoDeployLoadThreshold *float64 `json:"autoDeployLoadThreshold,omitempty" tf:"auto_deploy_load_threshold,omitempty"` + + // Detection threshold based on absolute QPS. + DetectionAbsoluteQPS *float64 `json:"detectionAbsoluteQps,omitempty" tf:"detection_absolute_qps,omitempty"` + + // Detection threshold based on the backend service's load. + DetectionLoadThreshold *float64 `json:"detectionLoadThreshold,omitempty" tf:"detection_load_threshold,omitempty"` + + // Detection threshold based on QPS relative to the average of baseline traffic. + DetectionRelativeToBaselineQPS *float64 `json:"detectionRelativeToBaselineQps,omitempty" tf:"detection_relative_to_baseline_qps,omitempty"` + + // The name of config. The name must be 1-63 characters long, and comply with RFC1035. The name must be unique within the security policy. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Configuration options for enabling Adaptive Protection to work on the specified service granularity. Structure is documented below. + TrafficGranularityConfigs []TrafficGranularityConfigsObservation `json:"trafficGranularityConfigs,omitempty" tf:"traffic_granularity_configs,omitempty"` +} + +type ThresholdConfigsParameters struct { + + // Confidence threshold above which Adaptive Protection's auto-deploy takes actions. 
+ // +kubebuilder:validation:Optional + AutoDeployConfidenceThreshold *float64 `json:"autoDeployConfidenceThreshold,omitempty" tf:"auto_deploy_confidence_threshold,omitempty"` + + // Duration over which Adaptive Protection's auto-deployed actions last. + // +kubebuilder:validation:Optional + AutoDeployExpirationSec *float64 `json:"autoDeployExpirationSec,omitempty" tf:"auto_deploy_expiration_sec,omitempty"` + + // Impacted baseline threshold below which Adaptive Protection's auto-deploy takes actions. + // +kubebuilder:validation:Optional + AutoDeployImpactedBaselineThreshold *float64 `json:"autoDeployImpactedBaselineThreshold,omitempty" tf:"auto_deploy_impacted_baseline_threshold,omitempty"` + + // Load threshold above which Adaptive Protection automatically deploy threshold based on the backend load threshold and detect a new rule during an alerted attack. + // +kubebuilder:validation:Optional + AutoDeployLoadThreshold *float64 `json:"autoDeployLoadThreshold,omitempty" tf:"auto_deploy_load_threshold,omitempty"` + + // Detection threshold based on absolute QPS. + // +kubebuilder:validation:Optional + DetectionAbsoluteQPS *float64 `json:"detectionAbsoluteQps,omitempty" tf:"detection_absolute_qps,omitempty"` + + // Detection threshold based on the backend service's load. + // +kubebuilder:validation:Optional + DetectionLoadThreshold *float64 `json:"detectionLoadThreshold,omitempty" tf:"detection_load_threshold,omitempty"` + + // Detection threshold based on QPS relative to the average of baseline traffic. + // +kubebuilder:validation:Optional + DetectionRelativeToBaselineQPS *float64 `json:"detectionRelativeToBaselineQps,omitempty" tf:"detection_relative_to_baseline_qps,omitempty"` + + // The name of config. The name must be 1-63 characters long, and comply with RFC1035. The name must be unique within the security policy. 
+ // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Configuration options for enabling Adaptive Protection to work on the specified service granularity. Structure is documented below. + // +kubebuilder:validation:Optional + TrafficGranularityConfigs []TrafficGranularityConfigsParameters `json:"trafficGranularityConfigs,omitempty" tf:"traffic_granularity_configs,omitempty"` +} + +type TrafficGranularityConfigsInitParameters struct { + + // If enabled, traffic matching each unique value for the specified type constitutes a separate traffic unit. It can only be set to true if value is empty. + EnableEachUniqueValue *bool `json:"enableEachUniqueValue,omitempty" tf:"enable_each_unique_value,omitempty"` + + // Type of the redirect action. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Requests that match this value constitute a granular traffic unit. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type TrafficGranularityConfigsObservation struct { + + // If enabled, traffic matching each unique value for the specified type constitutes a separate traffic unit. It can only be set to true if value is empty. + EnableEachUniqueValue *bool `json:"enableEachUniqueValue,omitempty" tf:"enable_each_unique_value,omitempty"` + + // Type of the redirect action. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Requests that match this value constitute a granular traffic unit. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type TrafficGranularityConfigsParameters struct { + + // If enabled, traffic matching each unique value for the specified type constitutes a separate traffic unit. It can only be set to true if value is empty. + // +kubebuilder:validation:Optional + EnableEachUniqueValue *bool `json:"enableEachUniqueValue,omitempty" tf:"enable_each_unique_value,omitempty"` + + // Type of the redirect action. 
+ // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` + + // Requests that match this value constitute a granular traffic unit. + // +kubebuilder:validation:Optional + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + // SecurityPolicySpec defines the desired state of SecurityPolicy type SecurityPolicySpec struct { v1.ResourceSpec `json:",inline"` diff --git a/apis/compute/v1beta2/zz_subnetwork_types.go b/apis/compute/v1beta2/zz_subnetwork_types.go index 3a80e36d2..8f502f7f6 100755 --- a/apis/compute/v1beta2/zz_subnetwork_types.go +++ b/apis/compute/v1beta2/zz_subnetwork_types.go @@ -19,13 +19,18 @@ type SecondaryIPRangeInitParameters struct { // range. Provide this property when you create the subnetwork. // Ranges must be unique and non-overlapping with all primary and // secondary IP ranges within a network. Only IPv4 is supported. - IPCidrRange *string `json:"ipCidrRange,omitempty" tf:"ip_cidr_range"` + // Field is optional when reserved_internal_range is defined, otherwise required. + IPCidrRange *string `json:"ipCidrRange,omitempty" tf:"ip_cidr_range,omitempty"` // The name associated with this subnetwork secondary range, used // when adding an alias IP range to a VM instance. The name must // be 1-63 characters long, and comply with RFC1035. The name // must be unique within the subnetwork. - RangeName *string `json:"rangeName,omitempty" tf:"range_name"` + RangeName *string `json:"rangeName,omitempty" tf:"range_name,omitempty"` + + // The ID of the reserved internal range. Must be prefixed with networkconnectivity.googleapis.com + // E.g. networkconnectivity.googleapis.com/projects/{project}/locations/global/internalRanges/{rangeId} + ReservedInternalRange *string `json:"reservedInternalRange,omitempty" tf:"reserved_internal_range,omitempty"` } type SecondaryIPRangeObservation struct { @@ -34,6 +39,7 @@ type SecondaryIPRangeObservation struct { // range. Provide this property when you create the subnetwork. 
// Ranges must be unique and non-overlapping with all primary and // secondary IP ranges within a network. Only IPv4 is supported. + // Field is optional when reserved_internal_range is defined, otherwise required. IPCidrRange *string `json:"ipCidrRange,omitempty" tf:"ip_cidr_range,omitempty"` // The name associated with this subnetwork secondary range, used @@ -41,6 +47,10 @@ type SecondaryIPRangeObservation struct { // be 1-63 characters long, and comply with RFC1035. The name // must be unique within the subnetwork. RangeName *string `json:"rangeName,omitempty" tf:"range_name,omitempty"` + + // The ID of the reserved internal range. Must be prefixed with networkconnectivity.googleapis.com + // E.g. networkconnectivity.googleapis.com/projects/{project}/locations/global/internalRanges/{rangeId} + ReservedInternalRange *string `json:"reservedInternalRange,omitempty" tf:"reserved_internal_range,omitempty"` } type SecondaryIPRangeParameters struct { @@ -49,15 +59,21 @@ type SecondaryIPRangeParameters struct { // range. Provide this property when you create the subnetwork. // Ranges must be unique and non-overlapping with all primary and // secondary IP ranges within a network. Only IPv4 is supported. + // Field is optional when reserved_internal_range is defined, otherwise required. // +kubebuilder:validation:Optional - IPCidrRange *string `json:"ipCidrRange,omitempty" tf:"ip_cidr_range"` + IPCidrRange *string `json:"ipCidrRange,omitempty" tf:"ip_cidr_range,omitempty"` // The name associated with this subnetwork secondary range, used // when adding an alias IP range to a VM instance. The name must // be 1-63 characters long, and comply with RFC1035. The name // must be unique within the subnetwork. // +kubebuilder:validation:Optional - RangeName *string `json:"rangeName,omitempty" tf:"range_name"` + RangeName *string `json:"rangeName" tf:"range_name,omitempty"` + + // The ID of the reserved internal range. 
Must be prefixed with networkconnectivity.googleapis.com + // E.g. networkconnectivity.googleapis.com/projects/{project}/locations/global/internalRanges/{rangeId} + // +kubebuilder:validation:Optional + ReservedInternalRange *string `json:"reservedInternalRange,omitempty" tf:"reserved_internal_range,omitempty"` } type SubnetworkInitParameters_2 struct { @@ -74,6 +90,7 @@ type SubnetworkInitParameters_2 struct { // Provide this property when you create the subnetwork. For example, // 10.0.0.0/8 or 192.168.0.0/16. Ranges must be unique and // non-overlapping within a network. Only IPv4 is supported. + // Field is optional when reserved_internal_range is defined, otherwise required. IPCidrRange *string `json:"ipCidrRange,omitempty" tf:"ip_cidr_range,omitempty"` // The access type of IPv6 address this subnet holds. It's immutable and can only be specified during creation @@ -113,15 +130,19 @@ type SubnetworkInitParameters_2 struct { // If it is not provided, the provider project is used. Project *string `json:"project,omitempty" tf:"project,omitempty"` - // The purpose of the resource. This field can be either PRIVATE_RFC_1918, REGIONAL_MANAGED_PROXY, GLOBAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT or PRIVATE_NAT(Beta). + // The purpose of the resource. This field can be either PRIVATE, REGIONAL_MANAGED_PROXY, GLOBAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT or PRIVATE_NAT(Beta). // A subnet with purpose set to REGIONAL_MANAGED_PROXY is a user-created subnetwork that is reserved for regional Envoy-based load balancers. // A subnetwork in a given region with purpose set to GLOBAL_MANAGED_PROXY is a proxy-only subnet and is shared between all the cross-regional Envoy-based load balancers. // A subnetwork with purpose set to PRIVATE_SERVICE_CONNECT reserves the subnet for hosting a Private Service Connect published service. // A subnetwork with purpose set to PRIVATE_NAT is used as source range for Private NAT gateways. 
// Note that REGIONAL_MANAGED_PROXY is the preferred setting for all regional Envoy load balancers. - // If unspecified, the purpose defaults to PRIVATE_RFC_1918. + // If unspecified, the purpose defaults to PRIVATE. Purpose *string `json:"purpose,omitempty" tf:"purpose,omitempty"` + // The ID of the reserved internal range. Must be prefixed with networkconnectivity.googleapis.com + // E.g. networkconnectivity.googleapis.com/projects/{project}/locations/global/internalRanges/{rangeId} + ReservedInternalRange *string `json:"reservedInternalRange,omitempty" tf:"reserved_internal_range,omitempty"` + // The role of subnetwork. // Currently, this field is only used when purpose is REGIONAL_MANAGED_PROXY. // The value can be set to ACTIVE or BACKUP. @@ -290,6 +311,7 @@ type SubnetworkObservation_2 struct { // Provide this property when you create the subnetwork. For example, // 10.0.0.0/8 or 192.168.0.0/16. Ranges must be unique and // non-overlapping within a network. Only IPv4 is supported. + // Field is optional when reserved_internal_range is defined, otherwise required. IPCidrRange *string `json:"ipCidrRange,omitempty" tf:"ip_cidr_range,omitempty"` // The access type of IPv6 address this subnet holds. It's immutable and can only be specified during creation @@ -326,18 +348,22 @@ type SubnetworkObservation_2 struct { // If it is not provided, the provider project is used. Project *string `json:"project,omitempty" tf:"project,omitempty"` - // The purpose of the resource. This field can be either PRIVATE_RFC_1918, REGIONAL_MANAGED_PROXY, GLOBAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT or PRIVATE_NAT(Beta). + // The purpose of the resource. This field can be either PRIVATE, REGIONAL_MANAGED_PROXY, GLOBAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT or PRIVATE_NAT(Beta). // A subnet with purpose set to REGIONAL_MANAGED_PROXY is a user-created subnetwork that is reserved for regional Envoy-based load balancers. 
// A subnetwork in a given region with purpose set to GLOBAL_MANAGED_PROXY is a proxy-only subnet and is shared between all the cross-regional Envoy-based load balancers. // A subnetwork with purpose set to PRIVATE_SERVICE_CONNECT reserves the subnet for hosting a Private Service Connect published service. // A subnetwork with purpose set to PRIVATE_NAT is used as source range for Private NAT gateways. // Note that REGIONAL_MANAGED_PROXY is the preferred setting for all regional Envoy load balancers. - // If unspecified, the purpose defaults to PRIVATE_RFC_1918. + // If unspecified, the purpose defaults to PRIVATE. Purpose *string `json:"purpose,omitempty" tf:"purpose,omitempty"` // The GCP region for this subnetwork. Region *string `json:"region,omitempty" tf:"region,omitempty"` + // The ID of the reserved internal range. Must be prefixed with networkconnectivity.googleapis.com + // E.g. networkconnectivity.googleapis.com/projects/{project}/locations/global/internalRanges/{rangeId} + ReservedInternalRange *string `json:"reservedInternalRange,omitempty" tf:"reserved_internal_range,omitempty"` + // The role of subnetwork. // Currently, this field is only used when purpose is REGIONAL_MANAGED_PROXY. // The value can be set to ACTIVE or BACKUP. @@ -371,6 +397,9 @@ type SubnetworkObservation_2 struct { // If not specified IPV4_ONLY will be used. // Possible values are: IPV4_ONLY, IPV4_IPV6. StackType *string `json:"stackType,omitempty" tf:"stack_type,omitempty"` + + // The unique identifier number for the resource. This identifier is defined by the server. + SubnetworkID *float64 `json:"subnetworkId,omitempty" tf:"subnetwork_id,omitempty"` } type SubnetworkParameters_2 struct { @@ -389,6 +418,7 @@ type SubnetworkParameters_2 struct { // Provide this property when you create the subnetwork. For example, // 10.0.0.0/8 or 192.168.0.0/16. Ranges must be unique and // non-overlapping within a network. Only IPv4 is supported. 
+ // Field is optional when reserved_internal_range is defined, otherwise required. // +kubebuilder:validation:Optional IPCidrRange *string `json:"ipCidrRange,omitempty" tf:"ip_cidr_range,omitempty"` @@ -435,13 +465,13 @@ type SubnetworkParameters_2 struct { // +kubebuilder:validation:Optional Project *string `json:"project,omitempty" tf:"project,omitempty"` - // The purpose of the resource. This field can be either PRIVATE_RFC_1918, REGIONAL_MANAGED_PROXY, GLOBAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT or PRIVATE_NAT(Beta). + // The purpose of the resource. This field can be either PRIVATE, REGIONAL_MANAGED_PROXY, GLOBAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT or PRIVATE_NAT(Beta). // A subnet with purpose set to REGIONAL_MANAGED_PROXY is a user-created subnetwork that is reserved for regional Envoy-based load balancers. // A subnetwork in a given region with purpose set to GLOBAL_MANAGED_PROXY is a proxy-only subnet and is shared between all the cross-regional Envoy-based load balancers. // A subnetwork with purpose set to PRIVATE_SERVICE_CONNECT reserves the subnet for hosting a Private Service Connect published service. // A subnetwork with purpose set to PRIVATE_NAT is used as source range for Private NAT gateways. // Note that REGIONAL_MANAGED_PROXY is the preferred setting for all regional Envoy load balancers. - // If unspecified, the purpose defaults to PRIVATE_RFC_1918. + // If unspecified, the purpose defaults to PRIVATE. // +kubebuilder:validation:Optional Purpose *string `json:"purpose,omitempty" tf:"purpose,omitempty"` @@ -449,6 +479,11 @@ type SubnetworkParameters_2 struct { // +kubebuilder:validation:Required Region *string `json:"region" tf:"region,omitempty"` + // The ID of the reserved internal range. Must be prefixed with networkconnectivity.googleapis.com + // E.g. 
networkconnectivity.googleapis.com/projects/{project}/locations/global/internalRanges/{rangeId} + // +kubebuilder:validation:Optional + ReservedInternalRange *string `json:"reservedInternalRange,omitempty" tf:"reserved_internal_range,omitempty"` + // The role of subnetwork. // Currently, this field is only used when purpose is REGIONAL_MANAGED_PROXY. // The value can be set to ACTIVE or BACKUP. @@ -520,9 +555,8 @@ type SubnetworkStatus struct { type Subnetwork struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` - // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.ipCidrRange) || (has(self.initProvider) && has(self.initProvider.ipCidrRange))",message="spec.forProvider.ipCidrRange is a required parameter" - Spec SubnetworkSpec `json:"spec"` - Status SubnetworkStatus `json:"status,omitempty"` + Spec SubnetworkSpec `json:"spec"` + Status SubnetworkStatus `json:"status,omitempty"` } // +kubebuilder:object:root=true diff --git a/apis/container/v1beta2/zz_cluster_types.go b/apis/container/v1beta2/zz_cluster_types.go index b51269e0e..d55678833 100755 --- a/apis/container/v1beta2/zz_cluster_types.go +++ b/apis/container/v1beta2/zz_cluster_types.go @@ -123,6 +123,13 @@ type AddonsConfigInitParameters struct { // Defaults to disabled; set disabled = false to enable. NetworkPolicyConfig *NetworkPolicyConfigInitParameters `json:"networkPolicyConfig,omitempty" tf:"network_policy_config,omitempty"` + // The status of the Parallelstore CSI driver addon, + // which allows the usage of a Parallelstore instances as volumes. + // It is disabled by default for Standard clusters; set enabled = true to enable. + // It is enabled by default for Autopilot clusters with version 1.29 or later; set enabled = true to enable it explicitly. + // See Enable the Parallelstore CSI driver for more information. 
+ ParallelstoreCsiDriverConfig *ParallelstoreCsiDriverConfigInitParameters `json:"parallelstoreCsiDriverConfig,omitempty" tf:"parallelstore_csi_driver_config,omitempty"` + // . The status of the Ray Operator // addon. // It is disabled by default. Set enabled = true to enable. The minimum @@ -189,6 +196,13 @@ type AddonsConfigObservation struct { // Defaults to disabled; set disabled = false to enable. NetworkPolicyConfig *NetworkPolicyConfigObservation `json:"networkPolicyConfig,omitempty" tf:"network_policy_config,omitempty"` + // The status of the Parallelstore CSI driver addon, + // which allows the usage of a Parallelstore instances as volumes. + // It is disabled by default for Standard clusters; set enabled = true to enable. + // It is enabled by default for Autopilot clusters with version 1.29 or later; set enabled = true to enable it explicitly. + // See Enable the Parallelstore CSI driver for more information. + ParallelstoreCsiDriverConfig *ParallelstoreCsiDriverConfigObservation `json:"parallelstoreCsiDriverConfig,omitempty" tf:"parallelstore_csi_driver_config,omitempty"` + // . The status of the Ray Operator // addon. // It is disabled by default. Set enabled = true to enable. The minimum @@ -265,6 +279,14 @@ type AddonsConfigParameters struct { // +kubebuilder:validation:Optional NetworkPolicyConfig *NetworkPolicyConfigParameters `json:"networkPolicyConfig,omitempty" tf:"network_policy_config,omitempty"` + // The status of the Parallelstore CSI driver addon, + // which allows the usage of a Parallelstore instances as volumes. + // It is disabled by default for Standard clusters; set enabled = true to enable. + // It is enabled by default for Autopilot clusters with version 1.29 or later; set enabled = true to enable it explicitly. + // See Enable the Parallelstore CSI driver for more information. 
+ // +kubebuilder:validation:Optional + ParallelstoreCsiDriverConfig *ParallelstoreCsiDriverConfigParameters `json:"parallelstoreCsiDriverConfig,omitempty" tf:"parallelstore_csi_driver_config,omitempty"` + // . The status of the Ray Operator // addon. // It is disabled by default. Set enabled = true to enable. The minimum @@ -286,9 +308,6 @@ type AdvancedDatapathObservabilityConfigInitParameters struct { // Whether or not Relay is enabled. EnableRelay *bool `json:"enableRelay,omitempty" tf:"enable_relay,omitempty"` - - // Mode used to make Relay available. Deprecated in favor of enable_relay field. Remove this attribute's configuration as this field will be removed in the next major release and enable_relay will become a required field. - RelayMode *string `json:"relayMode,omitempty" tf:"relay_mode,omitempty"` } type AdvancedDatapathObservabilityConfigObservation struct { @@ -298,9 +317,6 @@ type AdvancedDatapathObservabilityConfigObservation struct { // Whether or not Relay is enabled. EnableRelay *bool `json:"enableRelay,omitempty" tf:"enable_relay,omitempty"` - - // Mode used to make Relay available. Deprecated in favor of enable_relay field. Remove this attribute's configuration as this field will be removed in the next major release and enable_relay will become a required field. - RelayMode *string `json:"relayMode,omitempty" tf:"relay_mode,omitempty"` } type AdvancedDatapathObservabilityConfigParameters struct { @@ -311,11 +327,7 @@ type AdvancedDatapathObservabilityConfigParameters struct { // Whether or not Relay is enabled. // +kubebuilder:validation:Optional - EnableRelay *bool `json:"enableRelay,omitempty" tf:"enable_relay,omitempty"` - - // Mode used to make Relay available. Deprecated in favor of enable_relay field. Remove this attribute's configuration as this field will be removed in the next major release and enable_relay will become a required field. 
- // +kubebuilder:validation:Optional - RelayMode *string `json:"relayMode,omitempty" tf:"relay_mode,omitempty"` + EnableRelay *bool `json:"enableRelay" tf:"enable_relay,omitempty"` } type AdvancedMachineFeaturesInitParameters struct { @@ -858,6 +870,10 @@ type ClusterInitParameters struct { // Configuration for Confidential Nodes feature. Structure is documented below documented below. ConfidentialNodes *ConfidentialNodesInitParameters `json:"confidentialNodes,omitempty" tf:"confidential_nodes,omitempty"` + // Configuration for all of the cluster's control plane endpoints. + // Structure is documented below. + ControlPlaneEndpointsConfig *ControlPlaneEndpointsConfigInitParameters `json:"controlPlaneEndpointsConfig,omitempty" tf:"control_plane_endpoints_config,omitempty"` + // Configuration for the // Cost Allocation feature. // Structure is documented below. @@ -881,8 +897,6 @@ type ClusterInitParameters struct { // GKE SNAT DefaultSnatStatus contains the desired state of whether default sNAT should be disabled on the cluster, API doc. Structure is documented below DefaultSnatStatus *DefaultSnatStatusInitParameters `json:"defaultSnatStatus,omitempty" tf:"default_snat_status,omitempty"` - DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` - // Description of the cluster. Description *string `json:"description,omitempty" tf:"description,omitempty"` @@ -895,6 +909,9 @@ type ClusterInitParameters struct { // Whether CiliumClusterWideNetworkPolicy is enabled on this cluster. Defaults to false. EnableCiliumClusterwideNetworkPolicy *bool `json:"enableCiliumClusterwideNetworkPolicy,omitempty" tf:"enable_cilium_clusterwide_network_policy,omitempty"` + // Whether FQDN Network Policy is enabled on this cluster. Users who enable this feature for existing Standard clusters must restart the GKE Dataplane V2 anetd DaemonSet after enabling it. See the Enable FQDN Network Policy in an existing cluster for more information. 
+ EnableFqdnNetworkPolicy *bool `json:"enableFqdnNetworkPolicy,omitempty" tf:"enable_fqdn_network_policy,omitempty"` + // Whether Intra-node visibility is enabled for this cluster. This makes same node pod to pod traffic visible for VPC network. EnableIntranodeVisibility *bool `json:"enableIntranodeVisibility,omitempty" tf:"enable_intranode_visibility,omitempty"` @@ -926,6 +943,9 @@ type ClusterInitParameters struct { // See the official documentation. EnableTpu *bool `json:"enableTpu,omitempty" tf:"enable_tpu,omitempty"` + // Configuration for [Enterprise edition].(https://cloud.google.com/kubernetes-engine/enterprise/docs/concepts/gke-editions). Structure is documented below. + EnterpriseConfig *EnterpriseConfigInitParameters `json:"enterpriseConfig,omitempty" tf:"enterprise_config,omitempty"` + // Fleet configuration for the cluster. Structure is documented below. Fleet *FleetInitParameters `json:"fleet,omitempty" tf:"fleet,omitempty"` @@ -1088,6 +1108,11 @@ type ClusterInitParameters struct { // Structure is documented below. ResourceUsageExportConfig *ResourceUsageExportConfigInitParameters `json:"resourceUsageExportConfig,omitempty" tf:"resource_usage_export_config,omitempty"` + // Configuration for the + // SecretManagerConfig feature. + // Structure is documented below. + SecretManagerConfig *SecretManagerConfigInitParameters `json:"secretManagerConfig,omitempty" tf:"secret_manager_config,omitempty"` + // Enable/Disable Security Posture API features for the cluster. Structure is documented below. 
SecurityPostureConfig *SecurityPostureConfigInitParameters `json:"securityPostureConfig,omitempty" tf:"security_posture_config,omitempty"` @@ -1108,6 +1133,8 @@ type ClusterInitParameters struct { // +kubebuilder:validation:Optional SubnetworkSelector *v1.Selector `json:"subnetworkSelector,omitempty" tf:"-"` + UserManagedKeysConfig *UserManagedKeysConfigInitParameters `json:"userManagedKeysConfig,omitempty" tf:"user_managed_keys_config,omitempty"` + // Vertical Pod Autoscaling automatically adjusts the resources of pods controlled by it. // Structure is documented below. VerticalPodAutoscaling *VerticalPodAutoscalingInitParameters `json:"verticalPodAutoscaling,omitempty" tf:"vertical_pod_autoscaling,omitempty"` @@ -1154,6 +1181,10 @@ type ClusterObservation struct { // Configuration for Confidential Nodes feature. Structure is documented below documented below. ConfidentialNodes *ConfidentialNodesObservation `json:"confidentialNodes,omitempty" tf:"confidential_nodes,omitempty"` + // Configuration for all of the cluster's control plane endpoints. + // Structure is documented below. + ControlPlaneEndpointsConfig *ControlPlaneEndpointsConfigObservation `json:"controlPlaneEndpointsConfig,omitempty" tf:"control_plane_endpoints_config,omitempty"` + // Configuration for the // Cost Allocation feature. // Structure is documented below. @@ -1182,6 +1213,9 @@ type ClusterObservation struct { // Description of the cluster. Description *string `json:"description,omitempty" tf:"description,omitempty"` + // +mapType=granular + EffectiveLabels map[string]*string `json:"effectiveLabels,omitempty" tf:"effective_labels,omitempty"` + // Enable Autopilot for this cluster. Defaults to false. // Note that when this option is enabled, certain features of Standard GKE are not available. // See the official documentation @@ -1191,6 +1225,9 @@ type ClusterObservation struct { // Whether CiliumClusterWideNetworkPolicy is enabled on this cluster. Defaults to false. 
EnableCiliumClusterwideNetworkPolicy *bool `json:"enableCiliumClusterwideNetworkPolicy,omitempty" tf:"enable_cilium_clusterwide_network_policy,omitempty"` + // Whether FQDN Network Policy is enabled on this cluster. Users who enable this feature for existing Standard clusters must restart the GKE Dataplane V2 anetd DaemonSet after enabling it. See the Enable FQDN Network Policy in an existing cluster for more information. + EnableFqdnNetworkPolicy *bool `json:"enableFqdnNetworkPolicy,omitempty" tf:"enable_fqdn_network_policy,omitempty"` + // Whether Intra-node visibility is enabled for this cluster. This makes same node pod to pod traffic visible for VPC network. EnableIntranodeVisibility *bool `json:"enableIntranodeVisibility,omitempty" tf:"enable_intranode_visibility,omitempty"` @@ -1225,6 +1262,9 @@ type ClusterObservation struct { // The IP address of this cluster's Kubernetes master. Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"` + // Configuration for [Enterprise edition].(https://cloud.google.com/kubernetes-engine/enterprise/docs/concepts/gke-editions). Structure is documented below. + EnterpriseConfig *EnterpriseConfigObservation `json:"enterpriseConfig,omitempty" tf:"enterprise_config,omitempty"` + // Fleet configuration for the cluster. Structure is documented below. Fleet *FleetObservation `json:"fleet,omitempty" tf:"fleet,omitempty"` @@ -1406,6 +1446,11 @@ type ClusterObservation struct { // Structure is documented below. ResourceUsageExportConfig *ResourceUsageExportConfigObservation `json:"resourceUsageExportConfig,omitempty" tf:"resource_usage_export_config,omitempty"` + // Configuration for the + // SecretManagerConfig feature. + // Structure is documented below. + SecretManagerConfig *SecretManagerConfigObservation `json:"secretManagerConfig,omitempty" tf:"secret_manager_config,omitempty"` + // Enable/Disable Security Posture API features for the cluster. Structure is documented below. 
SecurityPostureConfig *SecurityPostureConfigObservation `json:"securityPostureConfig,omitempty" tf:"security_posture_config,omitempty"` @@ -1425,11 +1470,17 @@ type ClusterObservation struct { // subnetwork in which the cluster's instances are launched. Subnetwork *string `json:"subnetwork,omitempty" tf:"subnetwork,omitempty"` + // The combination of labels configured directly on the resource and default labels configured on the provider. + // +mapType=granular + TerraformLabels map[string]*string `json:"terraformLabels,omitempty" tf:"terraform_labels,omitempty"` + // The IP address range of the Cloud TPUs in this cluster, in // CIDR // notation (e.g. 1.2.3.4/29). TpuIPv4CidrBlock *string `json:"tpuIpv4CidrBlock,omitempty" tf:"tpu_ipv4_cidr_block,omitempty"` + UserManagedKeysConfig *UserManagedKeysConfigObservation `json:"userManagedKeysConfig,omitempty" tf:"user_managed_keys_config,omitempty"` + // Vertical Pod Autoscaling automatically adjusts the resources of pods controlled by it. // Structure is documented below. VerticalPodAutoscaling *VerticalPodAutoscalingObservation `json:"verticalPodAutoscaling,omitempty" tf:"vertical_pod_autoscaling,omitempty"` @@ -1483,6 +1534,11 @@ type ClusterParameters struct { // +kubebuilder:validation:Optional ConfidentialNodes *ConfidentialNodesParameters `json:"confidentialNodes,omitempty" tf:"confidential_nodes,omitempty"` + // Configuration for all of the cluster's control plane endpoints. + // Structure is documented below. + // +kubebuilder:validation:Optional + ControlPlaneEndpointsConfig *ControlPlaneEndpointsConfigParameters `json:"controlPlaneEndpointsConfig,omitempty" tf:"control_plane_endpoints_config,omitempty"` + // Configuration for the // Cost Allocation feature. // Structure is documented below. 
@@ -1512,9 +1568,6 @@ type ClusterParameters struct { // +kubebuilder:validation:Optional DefaultSnatStatus *DefaultSnatStatusParameters `json:"defaultSnatStatus,omitempty" tf:"default_snat_status,omitempty"` - // +kubebuilder:validation:Optional - DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` - // Description of the cluster. // +kubebuilder:validation:Optional Description *string `json:"description,omitempty" tf:"description,omitempty"` @@ -1530,6 +1583,10 @@ type ClusterParameters struct { // +kubebuilder:validation:Optional EnableCiliumClusterwideNetworkPolicy *bool `json:"enableCiliumClusterwideNetworkPolicy,omitempty" tf:"enable_cilium_clusterwide_network_policy,omitempty"` + // Whether FQDN Network Policy is enabled on this cluster. Users who enable this feature for existing Standard clusters must restart the GKE Dataplane V2 anetd DaemonSet after enabling it. See the Enable FQDN Network Policy in an existing cluster for more information. + // +kubebuilder:validation:Optional + EnableFqdnNetworkPolicy *bool `json:"enableFqdnNetworkPolicy,omitempty" tf:"enable_fqdn_network_policy,omitempty"` + // Whether Intra-node visibility is enabled for this cluster. This makes same node pod to pod traffic visible for VPC network. // +kubebuilder:validation:Optional EnableIntranodeVisibility *bool `json:"enableIntranodeVisibility,omitempty" tf:"enable_intranode_visibility,omitempty"` @@ -1569,6 +1626,10 @@ type ClusterParameters struct { // +kubebuilder:validation:Optional EnableTpu *bool `json:"enableTpu,omitempty" tf:"enable_tpu,omitempty"` + // Configuration for [Enterprise edition].(https://cloud.google.com/kubernetes-engine/enterprise/docs/concepts/gke-editions). Structure is documented below. + // +kubebuilder:validation:Optional + EnterpriseConfig *EnterpriseConfigParameters `json:"enterpriseConfig,omitempty" tf:"enterprise_config,omitempty"` + // Fleet configuration for the cluster. Structure is documented below. 
// +kubebuilder:validation:Optional Fleet *FleetParameters `json:"fleet,omitempty" tf:"fleet,omitempty"` @@ -1770,6 +1831,12 @@ type ClusterParameters struct { // +kubebuilder:validation:Optional ResourceUsageExportConfig *ResourceUsageExportConfigParameters `json:"resourceUsageExportConfig,omitempty" tf:"resource_usage_export_config,omitempty"` + // Configuration for the + // SecretManagerConfig feature. + // Structure is documented below. + // +kubebuilder:validation:Optional + SecretManagerConfig *SecretManagerConfigParameters `json:"secretManagerConfig,omitempty" tf:"secret_manager_config,omitempty"` + // Enable/Disable Security Posture API features for the cluster. Structure is documented below. // +kubebuilder:validation:Optional SecurityPostureConfig *SecurityPostureConfigParameters `json:"securityPostureConfig,omitempty" tf:"security_posture_config,omitempty"` @@ -1793,6 +1860,9 @@ type ClusterParameters struct { // +kubebuilder:validation:Optional SubnetworkSelector *v1.Selector `json:"subnetworkSelector,omitempty" tf:"-"` + // +kubebuilder:validation:Optional + UserManagedKeysConfig *UserManagedKeysConfigParameters `json:"userManagedKeysConfig,omitempty" tf:"user_managed_keys_config,omitempty"` + // Vertical Pod Autoscaling automatically adjusts the resources of pods controlled by it. // Structure is documented below. // +kubebuilder:validation:Optional @@ -1901,6 +1971,25 @@ type ContainerdConfigPrivateRegistryAccessConfigObservation struct { type ContainerdConfigPrivateRegistryAccessConfigParameters struct { } +type ControlPlaneEndpointsConfigInitParameters struct { + + // DNS endpoint configuration. + DNSEndpointConfig *DNSEndpointConfigInitParameters `json:"dnsEndpointConfig,omitempty" tf:"dns_endpoint_config,omitempty"` +} + +type ControlPlaneEndpointsConfigObservation struct { + + // DNS endpoint configuration. 
+ DNSEndpointConfig *DNSEndpointConfigObservation `json:"dnsEndpointConfig,omitempty" tf:"dns_endpoint_config,omitempty"` +} + +type ControlPlaneEndpointsConfigParameters struct { + + // DNS endpoint configuration. + // +kubebuilder:validation:Optional + DNSEndpointConfig *DNSEndpointConfigParameters `json:"dnsEndpointConfig,omitempty" tf:"dns_endpoint_config,omitempty"` +} + type CostManagementConfigInitParameters struct { // Whether to enable the cost allocation feature. @@ -1941,6 +2030,9 @@ type DNSCacheConfigParameters struct { type DNSConfigInitParameters struct { + // This will enable Cloud DNS additive VPC scope. Must provide a domain name that is unique within the VPC. For this to work cluster_dns = "CLOUD_DNS" and cluster_dns_scope = "CLUSTER_SCOPE" must both be set as well. + AdditiveVPCScopeDNSDomain *string `json:"additiveVpcScopeDnsDomain,omitempty" tf:"additive_vpc_scope_dns_domain,omitempty"` + // Which in-cluster DNS provider should be used. PROVIDER_UNSPECIFIED (default) or PLATFORM_DEFAULT or CLOUD_DNS. ClusterDNS *string `json:"clusterDns,omitempty" tf:"cluster_dns,omitempty"` @@ -1953,6 +2045,9 @@ type DNSConfigInitParameters struct { type DNSConfigObservation struct { + // This will enable Cloud DNS additive VPC scope. Must provide a domain name that is unique within the VPC. For this to work cluster_dns = "CLOUD_DNS" and cluster_dns_scope = "CLUSTER_SCOPE" must both be set as well. + AdditiveVPCScopeDNSDomain *string `json:"additiveVpcScopeDnsDomain,omitempty" tf:"additive_vpc_scope_dns_domain,omitempty"` + // Which in-cluster DNS provider should be used. PROVIDER_UNSPECIFIED (default) or PLATFORM_DEFAULT or CLOUD_DNS. ClusterDNS *string `json:"clusterDns,omitempty" tf:"cluster_dns,omitempty"` @@ -1965,6 +2060,10 @@ type DNSConfigObservation struct { type DNSConfigParameters struct { + // This will enable Cloud DNS additive VPC scope. Must provide a domain name that is unique within the VPC. 
For this to work cluster_dns = "CLOUD_DNS" and cluster_dns_scope = "CLUSTER_SCOPE" must both be set as well. + // +kubebuilder:validation:Optional + AdditiveVPCScopeDNSDomain *string `json:"additiveVpcScopeDnsDomain,omitempty" tf:"additive_vpc_scope_dns_domain,omitempty"` + // Which in-cluster DNS provider should be used. PROVIDER_UNSPECIFIED (default) or PLATFORM_DEFAULT or CLOUD_DNS. // +kubebuilder:validation:Optional ClusterDNS *string `json:"clusterDns,omitempty" tf:"cluster_dns,omitempty"` @@ -1978,6 +2077,35 @@ type DNSConfigParameters struct { ClusterDNSScope *string `json:"clusterDnsScope,omitempty" tf:"cluster_dns_scope,omitempty"` } +type DNSEndpointConfigInitParameters struct { + + // Controls whether user traffic is allowed over this endpoint. Note that GCP-managed services may still use the endpoint even if this is false. + AllowExternalTraffic *bool `json:"allowExternalTraffic,omitempty" tf:"allow_external_traffic,omitempty"` + + // (Output) The cluster's DNS endpoint. + Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"` +} + +type DNSEndpointConfigObservation struct { + + // Controls whether user traffic is allowed over this endpoint. Note that GCP-managed services may still use the endpoint even if this is false. + AllowExternalTraffic *bool `json:"allowExternalTraffic,omitempty" tf:"allow_external_traffic,omitempty"` + + // (Output) The cluster's DNS endpoint. + Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"` +} + +type DNSEndpointConfigParameters struct { + + // Controls whether user traffic is allowed over this endpoint. Note that GCP-managed services may still use the endpoint even if this is false. + // +kubebuilder:validation:Optional + AllowExternalTraffic *bool `json:"allowExternalTraffic,omitempty" tf:"allow_external_traffic,omitempty"` + + // (Output) The cluster's DNS endpoint. 
+ // +kubebuilder:validation:Optional + Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"` +} + type DailyMaintenanceWindowInitParameters struct { StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` } @@ -2086,6 +2214,28 @@ type EnableK8SBetaApisParameters struct { EnabledApis []*string `json:"enabledApis" tf:"enabled_apis,omitempty"` } +type EnterpriseConfigInitParameters struct { + + // Sets the tier of the cluster. Available options include STANDARD and ENTERPRISE. + DesiredTier *string `json:"desiredTier,omitempty" tf:"desired_tier,omitempty"` +} + +type EnterpriseConfigObservation struct { + + // The effective tier of the cluster. + ClusterTier *string `json:"clusterTier,omitempty" tf:"cluster_tier,omitempty"` + + // Sets the tier of the cluster. Available options include STANDARD and ENTERPRISE. + DesiredTier *string `json:"desiredTier,omitempty" tf:"desired_tier,omitempty"` +} + +type EnterpriseConfigParameters struct { + + // Sets the tier of the cluster. Available options include STANDARD and ENTERPRISE. + // +kubebuilder:validation:Optional + DesiredTier *string `json:"desiredTier,omitempty" tf:"desired_tier,omitempty"` +} + type EphemeralStorageLocalSsdConfigInitParameters struct { // The amount of local SSD disks that will be @@ -2327,7 +2477,7 @@ type GpuDriverInstallationConfigInitParameters struct { // Mode for how the GPU driver is installed. // Accepted values are: - GpuDriverVersion *string `json:"gpuDriverVersion,omitempty" tf:"gpu_driver_version"` + GpuDriverVersion *string `json:"gpuDriverVersion,omitempty" tf:"gpu_driver_version,omitempty"` } type GpuDriverInstallationConfigObservation struct { @@ -2342,17 +2492,17 @@ type GpuDriverInstallationConfigParameters struct { // Mode for how the GPU driver is installed. 
// Accepted values are: // +kubebuilder:validation:Optional - GpuDriverVersion *string `json:"gpuDriverVersion,omitempty" tf:"gpu_driver_version"` + GpuDriverVersion *string `json:"gpuDriverVersion" tf:"gpu_driver_version,omitempty"` } type GpuSharingConfigInitParameters struct { // The type of GPU sharing strategy to enable on the GPU node. // Accepted values are: - GpuSharingStrategy *string `json:"gpuSharingStrategy,omitempty" tf:"gpu_sharing_strategy"` + GpuSharingStrategy *string `json:"gpuSharingStrategy,omitempty" tf:"gpu_sharing_strategy,omitempty"` // The maximum number of containers that can share a GPU. - MaxSharedClientsPerGpu *float64 `json:"maxSharedClientsPerGpu,omitempty" tf:"max_shared_clients_per_gpu"` + MaxSharedClientsPerGpu *float64 `json:"maxSharedClientsPerGpu,omitempty" tf:"max_shared_clients_per_gpu,omitempty"` } type GpuSharingConfigObservation struct { @@ -2370,11 +2520,11 @@ type GpuSharingConfigParameters struct { // The type of GPU sharing strategy to enable on the GPU node. // Accepted values are: // +kubebuilder:validation:Optional - GpuSharingStrategy *string `json:"gpuSharingStrategy,omitempty" tf:"gpu_sharing_strategy"` + GpuSharingStrategy *string `json:"gpuSharingStrategy" tf:"gpu_sharing_strategy,omitempty"` // The maximum number of containers that can share a GPU. // +kubebuilder:validation:Optional - MaxSharedClientsPerGpu *float64 `json:"maxSharedClientsPerGpu,omitempty" tf:"max_shared_clients_per_gpu"` + MaxSharedClientsPerGpu *float64 `json:"maxSharedClientsPerGpu" tf:"max_shared_clients_per_gpu,omitempty"` } type GuestAcceleratorGpuDriverInstallationConfigInitParameters struct { @@ -2409,19 +2559,19 @@ type GuestAcceleratorGpuSharingConfigParameters struct { type GuestAcceleratorInitParameters struct { // The number of the guest accelerator cards exposed to this instance. 
- Count *float64 `json:"count,omitempty" tf:"count"` + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` // Configuration for auto installation of GPU driver. Structure is documented below. - GpuDriverInstallationConfig *GpuDriverInstallationConfigInitParameters `json:"gpuDriverInstallationConfig,omitempty" tf:"gpu_driver_installation_config"` + GpuDriverInstallationConfig *GpuDriverInstallationConfigInitParameters `json:"gpuDriverInstallationConfig,omitempty" tf:"gpu_driver_installation_config,omitempty"` // Size of partitions to create on the GPU. Valid values are described in the NVIDIA mig user guide. - GpuPartitionSize *string `json:"gpuPartitionSize,omitempty" tf:"gpu_partition_size"` + GpuPartitionSize *string `json:"gpuPartitionSize,omitempty" tf:"gpu_partition_size,omitempty"` // Configuration for GPU sharing. Structure is documented below. - GpuSharingConfig *GpuSharingConfigInitParameters `json:"gpuSharingConfig,omitempty" tf:"gpu_sharing_config"` + GpuSharingConfig *GpuSharingConfigInitParameters `json:"gpuSharingConfig,omitempty" tf:"gpu_sharing_config,omitempty"` // The accelerator type resource to expose to this instance. E.g. nvidia-tesla-k80. - Type *string `json:"type,omitempty" tf:"type"` + Type *string `json:"type,omitempty" tf:"type,omitempty"` } type GuestAcceleratorObservation struct { @@ -2446,23 +2596,23 @@ type GuestAcceleratorParameters struct { // The number of the guest accelerator cards exposed to this instance. // +kubebuilder:validation:Optional - Count *float64 `json:"count,omitempty" tf:"count"` + Count *float64 `json:"count" tf:"count,omitempty"` // Configuration for auto installation of GPU driver. Structure is documented below. 
// +kubebuilder:validation:Optional - GpuDriverInstallationConfig *GpuDriverInstallationConfigParameters `json:"gpuDriverInstallationConfig,omitempty" tf:"gpu_driver_installation_config"` + GpuDriverInstallationConfig *GpuDriverInstallationConfigParameters `json:"gpuDriverInstallationConfig,omitempty" tf:"gpu_driver_installation_config,omitempty"` // Size of partitions to create on the GPU. Valid values are described in the NVIDIA mig user guide. // +kubebuilder:validation:Optional - GpuPartitionSize *string `json:"gpuPartitionSize,omitempty" tf:"gpu_partition_size"` + GpuPartitionSize *string `json:"gpuPartitionSize,omitempty" tf:"gpu_partition_size,omitempty"` // Configuration for GPU sharing. Structure is documented below. // +kubebuilder:validation:Optional - GpuSharingConfig *GpuSharingConfigParameters `json:"gpuSharingConfig,omitempty" tf:"gpu_sharing_config"` + GpuSharingConfig *GpuSharingConfigParameters `json:"gpuSharingConfig,omitempty" tf:"gpu_sharing_config,omitempty"` // The accelerator type resource to expose to this instance. E.g. nvidia-tesla-k80. // +kubebuilder:validation:Optional - Type *string `json:"type,omitempty" tf:"type"` + Type *string `json:"type" tf:"type,omitempty"` } type GvnicInitParameters struct { @@ -2542,6 +2692,35 @@ type HostMaintenancePolicyParameters struct { MaintenanceInterval *string `json:"maintenanceInterval" tf:"maintenance_interval,omitempty"` } +type HugepagesConfigInitParameters struct { + + // Amount of 1G hugepages. + HugepageSize1G *float64 `json:"hugepageSize1G,omitempty" tf:"hugepage_size_1g,omitempty"` + + // Amount of 2M hugepages. + HugepageSize2M *float64 `json:"hugepageSize2M,omitempty" tf:"hugepage_size_2m,omitempty"` +} + +type HugepagesConfigObservation struct { + + // Amount of 1G hugepages. + HugepageSize1G *float64 `json:"hugepageSize1G,omitempty" tf:"hugepage_size_1g,omitempty"` + + // Amount of 2M hugepages. 
+ HugepageSize2M *float64 `json:"hugepageSize2M,omitempty" tf:"hugepage_size_2m,omitempty"` +} + +type HugepagesConfigParameters struct { + + // Amount of 1G hugepages. + // +kubebuilder:validation:Optional + HugepageSize1G *float64 `json:"hugepageSize1G,omitempty" tf:"hugepage_size_1g,omitempty"` + + // Amount of 2M hugepages. + // +kubebuilder:validation:Optional + HugepageSize2M *float64 `json:"hugepageSize2M,omitempty" tf:"hugepage_size_2m,omitempty"` +} + type IPAllocationPolicyInitParameters struct { // The configuration for additional pod secondary ranges at @@ -2702,10 +2881,12 @@ type KubeletConfigInitParameters struct { // The CPU management policy on the node. See // K8S CPU Management Policies. - // One of "none" or "static". Defaults to none when kubelet_config is unset. + // One of "none" or "static". If unset (or set to the empty string ""), the API will treat the field as if set to "none". + // Prior to the 6.4.0 this field was marked as required. The workaround for the required field + // is setting the empty string "", which will function identically to not setting this field. CPUManagerPolicy *string `json:"cpuManagerPolicy,omitempty" tf:"cpu_manager_policy,omitempty"` - // Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to FALSE. Possible values: TRUE, FALSE. + // only port is enabled for newly created node pools in the cluster. It is strongly recommended to set this to FALSE. Possible values: TRUE, FALSE. InsecureKubeletReadonlyPortEnabled *string `json:"insecureKubeletReadonlyPortEnabled,omitempty" tf:"insecure_kubelet_readonly_port_enabled,omitempty"` // Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304. @@ -2726,10 +2907,12 @@ type KubeletConfigObservation struct { // The CPU management policy on the node. See // K8S CPU Management Policies. - // One of "none" or "static". 
Defaults to none when kubelet_config is unset. + // One of "none" or "static". If unset (or set to the empty string ""), the API will treat the field as if set to "none". + // Prior to the 6.4.0 this field was marked as required. The workaround for the required field + // is setting the empty string "", which will function identically to not setting this field. CPUManagerPolicy *string `json:"cpuManagerPolicy,omitempty" tf:"cpu_manager_policy,omitempty"` - // Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to FALSE. Possible values: TRUE, FALSE. + // only port is enabled for newly created node pools in the cluster. It is strongly recommended to set this to FALSE. Possible values: TRUE, FALSE. InsecureKubeletReadonlyPortEnabled *string `json:"insecureKubeletReadonlyPortEnabled,omitempty" tf:"insecure_kubelet_readonly_port_enabled,omitempty"` // Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304. @@ -2752,11 +2935,13 @@ type KubeletConfigParameters struct { // The CPU management policy on the node. See // K8S CPU Management Policies. - // One of "none" or "static". Defaults to none when kubelet_config is unset. + // One of "none" or "static". If unset (or set to the empty string ""), the API will treat the field as if set to "none". + // Prior to the 6.4.0 this field was marked as required. The workaround for the required field + // is setting the empty string "", which will function identically to not setting this field. // +kubebuilder:validation:Optional - CPUManagerPolicy *string `json:"cpuManagerPolicy" tf:"cpu_manager_policy,omitempty"` + CPUManagerPolicy *string `json:"cpuManagerPolicy,omitempty" tf:"cpu_manager_policy,omitempty"` - // Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to FALSE. Possible values: TRUE, FALSE. 
+ // only port is enabled for newly created node pools in the cluster. It is strongly recommended to set this to FALSE. Possible values: TRUE, FALSE. // +kubebuilder:validation:Optional InsecureKubeletReadonlyPortEnabled *string `json:"insecureKubeletReadonlyPortEnabled,omitempty" tf:"insecure_kubelet_readonly_port_enabled,omitempty"` @@ -2765,12 +2950,30 @@ type KubeletConfigParameters struct { PodPidsLimit *float64 `json:"podPidsLimit,omitempty" tf:"pod_pids_limit,omitempty"` } +type LinuxNodeConfigHugepagesConfigInitParameters struct { +} + +type LinuxNodeConfigHugepagesConfigObservation struct { + + // Amount of 1G hugepages. + HugepageSize1G *float64 `json:"hugepageSize1G,omitempty" tf:"hugepage_size_1g,omitempty"` + + // Amount of 2M hugepages. + HugepageSize2M *float64 `json:"hugepageSize2M,omitempty" tf:"hugepage_size_2m,omitempty"` +} + +type LinuxNodeConfigHugepagesConfigParameters struct { +} + type LinuxNodeConfigInitParameters struct { // Possible cgroup modes that can be used. // Accepted values are: CgroupMode *string `json:"cgroupMode,omitempty" tf:"cgroup_mode,omitempty"` + // Amounts for 2M and 1G hugepages. Structure is documented below. + HugepagesConfig *HugepagesConfigInitParameters `json:"hugepagesConfig,omitempty" tf:"hugepages_config,omitempty"` + // The Linux kernel parameters to be applied to the nodes // and all pods running on the nodes. Specified as a map from the key, such as // net.core.wmem_max, to a string value. Currently supported attributes can be found here. @@ -2785,6 +2988,9 @@ type LinuxNodeConfigObservation struct { // Accepted values are: CgroupMode *string `json:"cgroupMode,omitempty" tf:"cgroup_mode,omitempty"` + // Amounts for 2M and 1G hugepages. Structure is documented below. + HugepagesConfig *HugepagesConfigObservation `json:"hugepagesConfig,omitempty" tf:"hugepages_config,omitempty"` + // The Linux kernel parameters to be applied to the nodes // and all pods running on the nodes. 
Specified as a map from the key, such as // net.core.wmem_max, to a string value. Currently supported attributes can be found here. @@ -2800,6 +3006,10 @@ type LinuxNodeConfigParameters struct { // +kubebuilder:validation:Optional CgroupMode *string `json:"cgroupMode,omitempty" tf:"cgroup_mode,omitempty"` + // Amounts for 2M and 1G hugepages. Structure is documented below. + // +kubebuilder:validation:Optional + HugepagesConfig *HugepagesConfigParameters `json:"hugepagesConfig,omitempty" tf:"hugepages_config,omitempty"` + // The Linux kernel parameters to be applied to the nodes // and all pods running on the nodes. Specified as a map from the key, such as // net.core.wmem_max, to a string value. Currently supported attributes can be found here. @@ -3023,6 +3233,9 @@ type MasterAuthorizedNetworksConfigInitParameters struct { // Whether Kubernetes master is // accessible via Google Compute Engine Public IPs. GCPPublicCidrsAccessEnabled *bool `json:"gcpPublicCidrsAccessEnabled,omitempty" tf:"gcp_public_cidrs_access_enabled,omitempty"` + + // Whether authorized networks is enforced on the private endpoint or not. + PrivateEndpointEnforcementEnabled *bool `json:"privateEndpointEnforcementEnabled,omitempty" tf:"private_endpoint_enforcement_enabled,omitempty"` } type MasterAuthorizedNetworksConfigObservation struct { @@ -3034,6 +3247,9 @@ type MasterAuthorizedNetworksConfigObservation struct { // Whether Kubernetes master is // accessible via Google Compute Engine Public IPs. GCPPublicCidrsAccessEnabled *bool `json:"gcpPublicCidrsAccessEnabled,omitempty" tf:"gcp_public_cidrs_access_enabled,omitempty"` + + // Whether authorized networks is enforced on the private endpoint or not. 
+ PrivateEndpointEnforcementEnabled *bool `json:"privateEndpointEnforcementEnabled,omitempty" tf:"private_endpoint_enforcement_enabled,omitempty"` } type MasterAuthorizedNetworksConfigParameters struct { @@ -3047,6 +3263,10 @@ type MasterAuthorizedNetworksConfigParameters struct { // accessible via Google Compute Engine Public IPs. // +kubebuilder:validation:Optional GCPPublicCidrsAccessEnabled *bool `json:"gcpPublicCidrsAccessEnabled,omitempty" tf:"gcp_public_cidrs_access_enabled,omitempty"` + + // Whether authorized networks is enforced on the private endpoint or not. + // +kubebuilder:validation:Optional + PrivateEndpointEnforcementEnabled *bool `json:"privateEndpointEnforcementEnabled,omitempty" tf:"private_endpoint_enforcement_enabled,omitempty"` } type MasterGlobalAccessConfigInitParameters struct { @@ -3386,11 +3606,33 @@ type NodeConfigDefaultsContainerdConfigPrivateRegistryAccessConfigParameters str Enabled *bool `json:"enabled" tf:"enabled,omitempty"` } +type NodeConfigDefaultsGcfsConfigInitParameters struct { + + // Enables vertical pod autoscaling + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type NodeConfigDefaultsGcfsConfigObservation struct { + + // Enables vertical pod autoscaling + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type NodeConfigDefaultsGcfsConfigParameters struct { + + // Enables vertical pod autoscaling + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` +} + type NodeConfigDefaultsInitParameters struct { // Parameters to customize containerd runtime. Structure is documented below. ContainerdConfig *NodeConfigDefaultsContainerdConfigInitParameters `json:"containerdConfig,omitempty" tf:"containerd_config,omitempty"` + // The default Google Container Filesystem (GCFS) configuration at the cluster level. e.g. enable image streaming across all the node pools within the cluster. Structure is documented below. 
+ GcfsConfig *NodeConfigDefaultsGcfsConfigInitParameters `json:"gcfsConfig,omitempty" tf:"gcfs_config,omitempty"` + // only port is enabled for newly created node pools in the cluster. It is strongly recommended to set this to FALSE. Possible values: TRUE, FALSE. InsecureKubeletReadonlyPortEnabled *string `json:"insecureKubeletReadonlyPortEnabled,omitempty" tf:"insecure_kubelet_readonly_port_enabled,omitempty"` @@ -3403,6 +3645,9 @@ type NodeConfigDefaultsObservation struct { // Parameters to customize containerd runtime. Structure is documented below. ContainerdConfig *NodeConfigDefaultsContainerdConfigObservation `json:"containerdConfig,omitempty" tf:"containerd_config,omitempty"` + // The default Google Container Filesystem (GCFS) configuration at the cluster level. e.g. enable image streaming across all the node pools within the cluster. Structure is documented below. + GcfsConfig *NodeConfigDefaultsGcfsConfigObservation `json:"gcfsConfig,omitempty" tf:"gcfs_config,omitempty"` + // only port is enabled for newly created node pools in the cluster. It is strongly recommended to set this to FALSE. Possible values: TRUE, FALSE. InsecureKubeletReadonlyPortEnabled *string `json:"insecureKubeletReadonlyPortEnabled,omitempty" tf:"insecure_kubelet_readonly_port_enabled,omitempty"` @@ -3416,6 +3661,10 @@ type NodeConfigDefaultsParameters struct { // +kubebuilder:validation:Optional ContainerdConfig *NodeConfigDefaultsContainerdConfigParameters `json:"containerdConfig,omitempty" tf:"containerd_config,omitempty"` + // The default Google Container Filesystem (GCFS) configuration at the cluster level. e.g. enable image streaming across all the node pools within the cluster. Structure is documented below. + // +kubebuilder:validation:Optional + GcfsConfig *NodeConfigDefaultsGcfsConfigParameters `json:"gcfsConfig,omitempty" tf:"gcfs_config,omitempty"` + // only port is enabled for newly created node pools in the cluster. It is strongly recommended to set this to FALSE. 
Possible values: TRUE, FALSE. // +kubebuilder:validation:Optional InsecureKubeletReadonlyPortEnabled *string `json:"insecureKubeletReadonlyPortEnabled,omitempty" tf:"insecure_kubelet_readonly_port_enabled,omitempty"` @@ -3570,8 +3819,11 @@ type NodeConfigInitParameters struct { GcfsConfig *GcfsConfigInitParameters `json:"gcfsConfig,omitempty" tf:"gcfs_config,omitempty"` // List of the type and count of accelerator cards attached to the instance. - // Structure documented below.12 this field is an - // Attribute as Block + // Structure documented below. + // Note: As of 6.0.0, argument syntax + // is no longer supported for this field in favor of block syntax. + // To dynamically set a list of guest accelerators, use dynamic blocks. + // To set an empty list, use a single guest_accelerator block with count = 0. GuestAccelerator []GuestAcceleratorInitParameters `json:"guestAccelerator,omitempty" tf:"guest_accelerator,omitempty"` // Google Virtual NIC (gVNIC) is a virtual network interface. @@ -3608,6 +3860,10 @@ type NodeConfigInitParameters struct { // attached to each cluster node. Defaults to 0. LocalSsdCount *float64 `json:"localSsdCount,omitempty" tf:"local_ssd_count,omitempty"` + // Possible Local SSD encryption modes: + // Accepted values are: + LocalSsdEncryptionMode *string `json:"localSsdEncryptionMode,omitempty" tf:"local_ssd_encryption_mode,omitempty"` + // wide default value. Valid values include DEFAULT and MAX_THROUGHPUT. See Increasing logging agent throughput for more information. LoggingVariant *string `json:"loggingVariant,omitempty" tf:"logging_variant,omitempty"` @@ -3683,6 +3939,9 @@ type NodeConfigInitParameters struct { // for more information. Defaults to false. Spot *bool `json:"spot,omitempty" tf:"spot,omitempty"` + // The list of Storage Pools where boot disks are provisioned. + StoragePools []*string `json:"storagePools,omitempty" tf:"storage_pools,omitempty"` + // The list of instance tags applied to all nodes. 
Tags are used to identify // valid sources or targets for network firewalls. Tags []*string `json:"tags,omitempty" tf:"tags,omitempty"` @@ -3714,10 +3973,12 @@ type NodeConfigKubeletConfigObservation struct { // The CPU management policy on the node. See // K8S CPU Management Policies. - // One of "none" or "static". Defaults to none when kubelet_config is unset. + // One of "none" or "static". If unset (or set to the empty string ""), the API will treat the field as if set to "none". + // Prior to the 6.4.0 this field was marked as required. The workaround for the required field + // is setting the empty string "", which will function identically to not setting this field. CPUManagerPolicy *string `json:"cpuManagerPolicy,omitempty" tf:"cpu_manager_policy,omitempty"` - // Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to FALSE. Possible values: TRUE, FALSE. + // only port is enabled for newly created node pools in the cluster. It is strongly recommended to set this to FALSE. Possible values: TRUE, FALSE. InsecureKubeletReadonlyPortEnabled *string `json:"insecureKubeletReadonlyPortEnabled,omitempty" tf:"insecure_kubelet_readonly_port_enabled,omitempty"` // Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304. @@ -3736,6 +3997,9 @@ type NodeConfigLinuxNodeConfigObservation struct { // Accepted values are: CgroupMode *string `json:"cgroupMode,omitempty" tf:"cgroup_mode,omitempty"` + // Amounts for 2M and 1G hugepages. Structure is documented below. + HugepagesConfig *LinuxNodeConfigHugepagesConfigObservation `json:"hugepagesConfig,omitempty" tf:"hugepages_config,omitempty"` + // The Linux kernel parameters to be applied to the nodes // and all pods running on the nodes. Specified as a map from the key, such as // net.core.wmem_max, to a string value. Currently supported attributes can be found here. 
@@ -3807,8 +4071,11 @@ type NodeConfigObservation struct { GcfsConfig *GcfsConfigObservation `json:"gcfsConfig,omitempty" tf:"gcfs_config,omitempty"` // List of the type and count of accelerator cards attached to the instance. - // Structure documented below.12 this field is an - // Attribute as Block + // Structure documented below. + // Note: As of 6.0.0, argument syntax + // is no longer supported for this field in favor of block syntax. + // To dynamically set a list of guest accelerators, use dynamic blocks. + // To set an empty list, use a single guest_accelerator block with count = 0. GuestAccelerator []GuestAcceleratorObservation `json:"guestAccelerator,omitempty" tf:"guest_accelerator,omitempty"` // Google Virtual NIC (gVNIC) is a virtual network interface. @@ -3845,6 +4112,10 @@ type NodeConfigObservation struct { // attached to each cluster node. Defaults to 0. LocalSsdCount *float64 `json:"localSsdCount,omitempty" tf:"local_ssd_count,omitempty"` + // Possible Local SSD encryption modes: + // Accepted values are: + LocalSsdEncryptionMode *string `json:"localSsdEncryptionMode,omitempty" tf:"local_ssd_encryption_mode,omitempty"` + // wide default value. Valid values include DEFAULT and MAX_THROUGHPUT. See Increasing logging agent throughput for more information. LoggingVariant *string `json:"loggingVariant,omitempty" tf:"logging_variant,omitempty"` @@ -3910,6 +4181,9 @@ type NodeConfigObservation struct { // for more information. Defaults to false. Spot *bool `json:"spot,omitempty" tf:"spot,omitempty"` + // The list of Storage Pools where boot disks are provisioned. + StoragePools []*string `json:"storagePools,omitempty" tf:"storage_pools,omitempty"` + // The list of instance tags applied to all nodes. Tags are used to identify // valid sources or targets for network firewalls. 
Tags []*string `json:"tags,omitempty" tf:"tags,omitempty"` @@ -3978,8 +4252,11 @@ type NodeConfigParameters struct { GcfsConfig *GcfsConfigParameters `json:"gcfsConfig,omitempty" tf:"gcfs_config,omitempty"` // List of the type and count of accelerator cards attached to the instance. - // Structure documented below.12 this field is an - // Attribute as Block + // Structure documented below. + // Note: As of 6.0.0, argument syntax + // is no longer supported for this field in favor of block syntax. + // To dynamically set a list of guest accelerators, use dynamic blocks. + // To set an empty list, use a single guest_accelerator block with count = 0. // +kubebuilder:validation:Optional GuestAccelerator []GuestAcceleratorParameters `json:"guestAccelerator,omitempty" tf:"guest_accelerator,omitempty"` @@ -4025,6 +4302,11 @@ type NodeConfigParameters struct { // +kubebuilder:validation:Optional LocalSsdCount *float64 `json:"localSsdCount,omitempty" tf:"local_ssd_count,omitempty"` + // Possible Local SSD encryption modes: + // Accepted values are: + // +kubebuilder:validation:Optional + LocalSsdEncryptionMode *string `json:"localSsdEncryptionMode,omitempty" tf:"local_ssd_encryption_mode,omitempty"` + // wide default value. Valid values include DEFAULT and MAX_THROUGHPUT. See Increasing logging agent throughput for more information. // +kubebuilder:validation:Optional LoggingVariant *string `json:"loggingVariant,omitempty" tf:"logging_variant,omitempty"` @@ -4115,6 +4397,10 @@ type NodeConfigParameters struct { // +kubebuilder:validation:Optional Spot *bool `json:"spot,omitempty" tf:"spot,omitempty"` + // The list of Storage Pools where boot disks are provisioned. + // +kubebuilder:validation:Optional + StoragePools []*string `json:"storagePools,omitempty" tf:"storage_pools,omitempty"` + // The list of instance tags applied to all nodes. Tags are used to identify // valid sources or targets for network firewalls. 
// +kubebuilder:validation:Optional @@ -4240,25 +4526,28 @@ type NodeConfigWorkloadMetadataConfigParameters struct { type NodeKubeletConfigInitParameters struct { - // Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to FALSE. Possible values: TRUE, FALSE. + // only port is enabled for newly created node pools in the cluster. It is strongly recommended to set this to FALSE. Possible values: TRUE, FALSE. InsecureKubeletReadonlyPortEnabled *string `json:"insecureKubeletReadonlyPortEnabled,omitempty" tf:"insecure_kubelet_readonly_port_enabled,omitempty"` } type NodeKubeletConfigObservation struct { - // Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to FALSE. Possible values: TRUE, FALSE. + // only port is enabled for newly created node pools in the cluster. It is strongly recommended to set this to FALSE. Possible values: TRUE, FALSE. InsecureKubeletReadonlyPortEnabled *string `json:"insecureKubeletReadonlyPortEnabled,omitempty" tf:"insecure_kubelet_readonly_port_enabled,omitempty"` } type NodeKubeletConfigParameters struct { - // Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to FALSE. Possible values: TRUE, FALSE. + // only port is enabled for newly created node pools in the cluster. It is strongly recommended to set this to FALSE. Possible values: TRUE, FALSE. // +kubebuilder:validation:Optional InsecureKubeletReadonlyPortEnabled *string `json:"insecureKubeletReadonlyPortEnabled,omitempty" tf:"insecure_kubelet_readonly_port_enabled,omitempty"` } type NodePoolAutoConfigInitParameters struct { + // Linux system configuration for the cluster's automatically provisioned node pools. Only cgroup_mode field is supported in node_pool_auto_config. Structure is documented below. 
+ LinuxNodeConfig *NodePoolAutoConfigLinuxNodeConfigInitParameters `json:"linuxNodeConfig,omitempty" tf:"linux_node_config,omitempty"` + // The network tag config for the cluster's automatically provisioned node pools. Structure is documented below. NetworkTags *NetworkTagsInitParameters `json:"networkTags,omitempty" tf:"network_tags,omitempty"` @@ -4271,8 +4560,33 @@ type NodePoolAutoConfigInitParameters struct { ResourceManagerTags map[string]*string `json:"resourceManagerTags,omitempty" tf:"resource_manager_tags,omitempty"` } +type NodePoolAutoConfigLinuxNodeConfigInitParameters struct { + + // Possible cgroup modes that can be used. + // Accepted values are: + CgroupMode *string `json:"cgroupMode,omitempty" tf:"cgroup_mode,omitempty"` +} + +type NodePoolAutoConfigLinuxNodeConfigObservation struct { + + // Possible cgroup modes that can be used. + // Accepted values are: + CgroupMode *string `json:"cgroupMode,omitempty" tf:"cgroup_mode,omitempty"` +} + +type NodePoolAutoConfigLinuxNodeConfigParameters struct { + + // Possible cgroup modes that can be used. + // Accepted values are: + // +kubebuilder:validation:Optional + CgroupMode *string `json:"cgroupMode,omitempty" tf:"cgroup_mode,omitempty"` +} + type NodePoolAutoConfigObservation struct { + // Linux system configuration for the cluster's automatically provisioned node pools. Only cgroup_mode field is supported in node_pool_auto_config. Structure is documented below. + LinuxNodeConfig *NodePoolAutoConfigLinuxNodeConfigObservation `json:"linuxNodeConfig,omitempty" tf:"linux_node_config,omitempty"` + // The network tag config for the cluster's automatically provisioned node pools. Structure is documented below. NetworkTags *NetworkTagsObservation `json:"networkTags,omitempty" tf:"network_tags,omitempty"` @@ -4287,6 +4601,10 @@ type NodePoolAutoConfigObservation struct { type NodePoolAutoConfigParameters struct { + // Linux system configuration for the cluster's automatically provisioned node pools. 
Only cgroup_mode field is supported in node_pool_auto_config. Structure is documented below. + // +kubebuilder:validation:Optional + LinuxNodeConfig *NodePoolAutoConfigLinuxNodeConfigParameters `json:"linuxNodeConfig,omitempty" tf:"linux_node_config,omitempty"` + // The network tag config for the cluster's automatically provisioned node pools. Structure is documented below. // +kubebuilder:validation:Optional NetworkTags *NetworkTagsParameters `json:"networkTags,omitempty" tf:"network_tags,omitempty"` @@ -4396,8 +4714,11 @@ type NodePoolNodeConfigObservation struct { GcfsConfig *NodeConfigGcfsConfigObservation `json:"gcfsConfig,omitempty" tf:"gcfs_config,omitempty"` // List of the type and count of accelerator cards attached to the instance. - // Structure documented below.12 this field is an - // Attribute as Block + // Structure documented below. + // Note: As of 6.0.0, argument syntax + // is no longer supported for this field in favor of block syntax. + // To dynamically set a list of guest accelerators, use dynamic blocks. + // To set an empty list, use a single guest_accelerator block with count = 0. GuestAccelerator []NodeConfigGuestAcceleratorObservation `json:"guestAccelerator,omitempty" tf:"guest_accelerator,omitempty"` // Google Virtual NIC (gVNIC) is a virtual network interface. @@ -4424,7 +4745,7 @@ type NodePoolNodeConfigObservation struct { // +mapType=granular Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` - // Parameters that can be configured on Linux nodes. Structure is documented below. + // Linux system configuration for the cluster's automatically provisioned node pools. Only cgroup_mode field is supported in node_pool_auto_config. Structure is documented below. LinuxNodeConfig *NodeConfigLinuxNodeConfigObservation `json:"linuxNodeConfig,omitempty" tf:"linux_node_config,omitempty"` // Parameters for the local NVMe SSDs. Structure is documented below. 
@@ -4434,6 +4755,10 @@ type NodePoolNodeConfigObservation struct { // attached to each cluster node. Defaults to 0. LocalSsdCount *float64 `json:"localSsdCount,omitempty" tf:"local_ssd_count,omitempty"` + // Possible Local SSD encryption modes: + // Accepted values are: + LocalSsdEncryptionMode *string `json:"localSsdEncryptionMode,omitempty" tf:"local_ssd_encryption_mode,omitempty"` + // The type of logging agent that is deployed by default for newly created node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT. See Increasing logging agent throughput for more information. LoggingVariant *string `json:"loggingVariant,omitempty" tf:"logging_variant,omitempty"` @@ -4498,6 +4823,9 @@ type NodePoolNodeConfigObservation struct { // for more information. Defaults to false. Spot *bool `json:"spot,omitempty" tf:"spot,omitempty"` + // The list of Storage Pools where boot disks are provisioned. + StoragePools []*string `json:"storagePools,omitempty" tf:"storage_pools,omitempty"` + // The list of instance tags applied to all nodes. Tags are used to identify // valid sources or targets for network firewalls. 
Tags []*string `json:"tags,omitempty" tf:"tags,omitempty"` @@ -4623,6 +4951,25 @@ type NotificationConfigParameters struct { Pubsub *PubsubParameters `json:"pubsub" tf:"pubsub,omitempty"` } +type ParallelstoreCsiDriverConfigInitParameters struct { + + // Enables vertical pod autoscaling + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type ParallelstoreCsiDriverConfigObservation struct { + + // Enables vertical pod autoscaling + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type ParallelstoreCsiDriverConfigParameters struct { + + // Enables vertical pod autoscaling + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` +} + type PlacementPolicyInitParameters struct { } @@ -5205,6 +5552,25 @@ type SecondaryBootDisksParameters struct { Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` } +type SecretManagerConfigInitParameters struct { + + // Enable the Secret Manager add-on for this cluster. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type SecretManagerConfigObservation struct { + + // Enable the Secret Manager add-on for this cluster. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type SecretManagerConfigParameters struct { + + // Enable the Secret Manager add-on for this cluster. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` +} + type SecurityPostureConfigInitParameters struct { // Sets the mode of the Kubernetes security posture API's off-cluster features. Available options include DISABLED, BASIC, and ENTERPRISE. 
@@ -5488,6 +5854,75 @@ type UpgradeSettingsParameters struct { Strategy *string `json:"strategy,omitempty" tf:"strategy,omitempty"` } +type UserManagedKeysConfigInitParameters struct { + AggregationCA *string `json:"aggregationCa,omitempty" tf:"aggregation_ca,omitempty"` + + ClusterCA *string `json:"clusterCa,omitempty" tf:"cluster_ca,omitempty"` + + ControlPlaneDiskEncryptionKey *string `json:"controlPlaneDiskEncryptionKey,omitempty" tf:"control_plane_disk_encryption_key,omitempty"` + + EtcdAPICA *string `json:"etcdApiCa,omitempty" tf:"etcd_api_ca,omitempty"` + + EtcdPeerCA *string `json:"etcdPeerCa,omitempty" tf:"etcd_peer_ca,omitempty"` + + GkeopsEtcdBackupEncryptionKey *string `json:"gkeopsEtcdBackupEncryptionKey,omitempty" tf:"gkeops_etcd_backup_encryption_key,omitempty"` + + // +listType=set + ServiceAccountSigningKeys []*string `json:"serviceAccountSigningKeys,omitempty" tf:"service_account_signing_keys,omitempty"` + + // +listType=set + ServiceAccountVerificationKeys []*string `json:"serviceAccountVerificationKeys,omitempty" tf:"service_account_verification_keys,omitempty"` +} + +type UserManagedKeysConfigObservation struct { + AggregationCA *string `json:"aggregationCa,omitempty" tf:"aggregation_ca,omitempty"` + + ClusterCA *string `json:"clusterCa,omitempty" tf:"cluster_ca,omitempty"` + + ControlPlaneDiskEncryptionKey *string `json:"controlPlaneDiskEncryptionKey,omitempty" tf:"control_plane_disk_encryption_key,omitempty"` + + EtcdAPICA *string `json:"etcdApiCa,omitempty" tf:"etcd_api_ca,omitempty"` + + EtcdPeerCA *string `json:"etcdPeerCa,omitempty" tf:"etcd_peer_ca,omitempty"` + + GkeopsEtcdBackupEncryptionKey *string `json:"gkeopsEtcdBackupEncryptionKey,omitempty" tf:"gkeops_etcd_backup_encryption_key,omitempty"` + + // +listType=set + ServiceAccountSigningKeys []*string `json:"serviceAccountSigningKeys,omitempty" tf:"service_account_signing_keys,omitempty"` + + // +listType=set + ServiceAccountVerificationKeys []*string 
`json:"serviceAccountVerificationKeys,omitempty" tf:"service_account_verification_keys,omitempty"` +} + +type UserManagedKeysConfigParameters struct { + + // +kubebuilder:validation:Optional + AggregationCA *string `json:"aggregationCa,omitempty" tf:"aggregation_ca,omitempty"` + + // +kubebuilder:validation:Optional + ClusterCA *string `json:"clusterCa,omitempty" tf:"cluster_ca,omitempty"` + + // +kubebuilder:validation:Optional + ControlPlaneDiskEncryptionKey *string `json:"controlPlaneDiskEncryptionKey,omitempty" tf:"control_plane_disk_encryption_key,omitempty"` + + // +kubebuilder:validation:Optional + EtcdAPICA *string `json:"etcdApiCa,omitempty" tf:"etcd_api_ca,omitempty"` + + // +kubebuilder:validation:Optional + EtcdPeerCA *string `json:"etcdPeerCa,omitempty" tf:"etcd_peer_ca,omitempty"` + + // +kubebuilder:validation:Optional + GkeopsEtcdBackupEncryptionKey *string `json:"gkeopsEtcdBackupEncryptionKey,omitempty" tf:"gkeops_etcd_backup_encryption_key,omitempty"` + + // +kubebuilder:validation:Optional + // +listType=set + ServiceAccountSigningKeys []*string `json:"serviceAccountSigningKeys,omitempty" tf:"service_account_signing_keys,omitempty"` + + // +kubebuilder:validation:Optional + // +listType=set + ServiceAccountVerificationKeys []*string `json:"serviceAccountVerificationKeys,omitempty" tf:"service_account_verification_keys,omitempty"` +} + type VerticalPodAutoscalingInitParameters struct { // Enables vertical pod autoscaling diff --git a/apis/container/v1beta2/zz_generated.deepcopy.go b/apis/container/v1beta2/zz_generated.deepcopy.go index 561334236..3d64f3ade 100644 --- a/apis/container/v1beta2/zz_generated.deepcopy.go +++ b/apis/container/v1beta2/zz_generated.deepcopy.go @@ -259,6 +259,11 @@ func (in *AddonsConfigInitParameters) DeepCopyInto(out *AddonsConfigInitParamete *out = new(NetworkPolicyConfigInitParameters) (*in).DeepCopyInto(*out) } + if in.ParallelstoreCsiDriverConfig != nil { + in, out := &in.ParallelstoreCsiDriverConfig, 
&out.ParallelstoreCsiDriverConfig + *out = new(ParallelstoreCsiDriverConfigInitParameters) + (*in).DeepCopyInto(*out) + } if in.RayOperatorConfig != nil { in, out := &in.RayOperatorConfig, &out.RayOperatorConfig *out = make([]RayOperatorConfigInitParameters, len(*in)) @@ -336,6 +341,11 @@ func (in *AddonsConfigObservation) DeepCopyInto(out *AddonsConfigObservation) { *out = new(NetworkPolicyConfigObservation) (*in).DeepCopyInto(*out) } + if in.ParallelstoreCsiDriverConfig != nil { + in, out := &in.ParallelstoreCsiDriverConfig, &out.ParallelstoreCsiDriverConfig + *out = new(ParallelstoreCsiDriverConfigObservation) + (*in).DeepCopyInto(*out) + } if in.RayOperatorConfig != nil { in, out := &in.RayOperatorConfig, &out.RayOperatorConfig *out = make([]RayOperatorConfigObservation, len(*in)) @@ -413,6 +423,11 @@ func (in *AddonsConfigParameters) DeepCopyInto(out *AddonsConfigParameters) { *out = new(NetworkPolicyConfigParameters) (*in).DeepCopyInto(*out) } + if in.ParallelstoreCsiDriverConfig != nil { + in, out := &in.ParallelstoreCsiDriverConfig, &out.ParallelstoreCsiDriverConfig + *out = new(ParallelstoreCsiDriverConfigParameters) + (*in).DeepCopyInto(*out) + } if in.RayOperatorConfig != nil { in, out := &in.RayOperatorConfig, &out.RayOperatorConfig *out = make([]RayOperatorConfigParameters, len(*in)) @@ -450,11 +465,6 @@ func (in *AdvancedDatapathObservabilityConfigInitParameters) DeepCopyInto(out *A *out = new(bool) **out = **in } - if in.RelayMode != nil { - in, out := &in.RelayMode, &out.RelayMode - *out = new(string) - **out = **in - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdvancedDatapathObservabilityConfigInitParameters. 
@@ -480,11 +490,6 @@ func (in *AdvancedDatapathObservabilityConfigObservation) DeepCopyInto(out *Adva *out = new(bool) **out = **in } - if in.RelayMode != nil { - in, out := &in.RelayMode, &out.RelayMode - *out = new(string) - **out = **in - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdvancedDatapathObservabilityConfigObservation. @@ -510,11 +515,6 @@ func (in *AdvancedDatapathObservabilityConfigParameters) DeepCopyInto(out *Advan *out = new(bool) **out = **in } - if in.RelayMode != nil { - in, out := &in.RelayMode, &out.RelayMode - *out = new(string) - **out = **in - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdvancedDatapathObservabilityConfigParameters. @@ -1777,6 +1777,11 @@ func (in *ClusterInitParameters) DeepCopyInto(out *ClusterInitParameters) { *out = new(ConfidentialNodesInitParameters) (*in).DeepCopyInto(*out) } + if in.ControlPlaneEndpointsConfig != nil { + in, out := &in.ControlPlaneEndpointsConfig, &out.ControlPlaneEndpointsConfig + *out = new(ControlPlaneEndpointsConfigInitParameters) + (*in).DeepCopyInto(*out) + } if in.CostManagementConfig != nil { in, out := &in.CostManagementConfig, &out.CostManagementConfig *out = new(CostManagementConfigInitParameters) @@ -1807,11 +1812,6 @@ func (in *ClusterInitParameters) DeepCopyInto(out *ClusterInitParameters) { *out = new(DefaultSnatStatusInitParameters) (*in).DeepCopyInto(*out) } - if in.DeletionProtection != nil { - in, out := &in.DeletionProtection, &out.DeletionProtection - *out = new(bool) - **out = **in - } if in.Description != nil { in, out := &in.Description, &out.Description *out = new(string) @@ -1827,6 +1827,11 @@ func (in *ClusterInitParameters) DeepCopyInto(out *ClusterInitParameters) { *out = new(bool) **out = **in } + if in.EnableFqdnNetworkPolicy != nil { + in, out := &in.EnableFqdnNetworkPolicy, &out.EnableFqdnNetworkPolicy + *out = new(bool) + **out = **in + } if in.EnableIntranodeVisibility 
!= nil { in, out := &in.EnableIntranodeVisibility, &out.EnableIntranodeVisibility *out = new(bool) @@ -1867,6 +1872,11 @@ func (in *ClusterInitParameters) DeepCopyInto(out *ClusterInitParameters) { *out = new(bool) **out = **in } + if in.EnterpriseConfig != nil { + in, out := &in.EnterpriseConfig, &out.EnterpriseConfig + *out = new(EnterpriseConfigInitParameters) + (*in).DeepCopyInto(*out) + } if in.Fleet != nil { in, out := &in.Fleet, &out.Fleet *out = new(FleetInitParameters) @@ -2044,6 +2054,11 @@ func (in *ClusterInitParameters) DeepCopyInto(out *ClusterInitParameters) { *out = new(ResourceUsageExportConfigInitParameters) (*in).DeepCopyInto(*out) } + if in.SecretManagerConfig != nil { + in, out := &in.SecretManagerConfig, &out.SecretManagerConfig + *out = new(SecretManagerConfigInitParameters) + (*in).DeepCopyInto(*out) + } if in.SecurityPostureConfig != nil { in, out := &in.SecurityPostureConfig, &out.SecurityPostureConfig *out = new(SecurityPostureConfigInitParameters) @@ -2069,6 +2084,11 @@ func (in *ClusterInitParameters) DeepCopyInto(out *ClusterInitParameters) { *out = new(v1.Selector) (*in).DeepCopyInto(*out) } + if in.UserManagedKeysConfig != nil { + in, out := &in.UserManagedKeysConfig, &out.UserManagedKeysConfig + *out = new(UserManagedKeysConfigInitParameters) + (*in).DeepCopyInto(*out) + } if in.VerticalPodAutoscaling != nil { in, out := &in.VerticalPodAutoscaling, &out.VerticalPodAutoscaling *out = new(VerticalPodAutoscalingInitParameters) @@ -2161,6 +2181,11 @@ func (in *ClusterObservation) DeepCopyInto(out *ClusterObservation) { *out = new(ConfidentialNodesObservation) (*in).DeepCopyInto(*out) } + if in.ControlPlaneEndpointsConfig != nil { + in, out := &in.ControlPlaneEndpointsConfig, &out.ControlPlaneEndpointsConfig + *out = new(ControlPlaneEndpointsConfigObservation) + (*in).DeepCopyInto(*out) + } if in.CostManagementConfig != nil { in, out := &in.CostManagementConfig, &out.CostManagementConfig *out = new(CostManagementConfigObservation) @@ 
-2201,6 +2226,22 @@ func (in *ClusterObservation) DeepCopyInto(out *ClusterObservation) { *out = new(string) **out = **in } + if in.EffectiveLabels != nil { + in, out := &in.EffectiveLabels, &out.EffectiveLabels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } if in.EnableAutopilot != nil { in, out := &in.EnableAutopilot, &out.EnableAutopilot *out = new(bool) @@ -2211,6 +2252,11 @@ func (in *ClusterObservation) DeepCopyInto(out *ClusterObservation) { *out = new(bool) **out = **in } + if in.EnableFqdnNetworkPolicy != nil { + in, out := &in.EnableFqdnNetworkPolicy, &out.EnableFqdnNetworkPolicy + *out = new(bool) + **out = **in + } if in.EnableIntranodeVisibility != nil { in, out := &in.EnableIntranodeVisibility, &out.EnableIntranodeVisibility *out = new(bool) @@ -2256,6 +2302,11 @@ func (in *ClusterObservation) DeepCopyInto(out *ClusterObservation) { *out = new(string) **out = **in } + if in.EnterpriseConfig != nil { + in, out := &in.EnterpriseConfig, &out.EnterpriseConfig + *out = new(EnterpriseConfigObservation) + (*in).DeepCopyInto(*out) + } if in.Fleet != nil { in, out := &in.Fleet, &out.Fleet *out = new(FleetObservation) @@ -2455,6 +2506,11 @@ func (in *ClusterObservation) DeepCopyInto(out *ClusterObservation) { *out = new(ResourceUsageExportConfigObservation) (*in).DeepCopyInto(*out) } + if in.SecretManagerConfig != nil { + in, out := &in.SecretManagerConfig, &out.SecretManagerConfig + *out = new(SecretManagerConfigObservation) + (*in).DeepCopyInto(*out) + } if in.SecurityPostureConfig != nil { in, out := &in.SecurityPostureConfig, &out.SecurityPostureConfig *out = new(SecurityPostureConfigObservation) @@ -2480,11 +2536,32 @@ func (in *ClusterObservation) DeepCopyInto(out *ClusterObservation) { *out = new(string) **out = **in } + if 
in.TerraformLabels != nil { + in, out := &in.TerraformLabels, &out.TerraformLabels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } if in.TpuIPv4CidrBlock != nil { in, out := &in.TpuIPv4CidrBlock, &out.TpuIPv4CidrBlock *out = new(string) **out = **in } + if in.UserManagedKeysConfig != nil { + in, out := &in.UserManagedKeysConfig, &out.UserManagedKeysConfig + *out = new(UserManagedKeysConfigObservation) + (*in).DeepCopyInto(*out) + } if in.VerticalPodAutoscaling != nil { in, out := &in.VerticalPodAutoscaling, &out.VerticalPodAutoscaling *out = new(VerticalPodAutoscalingObservation) @@ -2545,6 +2622,11 @@ func (in *ClusterParameters) DeepCopyInto(out *ClusterParameters) { *out = new(ConfidentialNodesParameters) (*in).DeepCopyInto(*out) } + if in.ControlPlaneEndpointsConfig != nil { + in, out := &in.ControlPlaneEndpointsConfig, &out.ControlPlaneEndpointsConfig + *out = new(ControlPlaneEndpointsConfigParameters) + (*in).DeepCopyInto(*out) + } if in.CostManagementConfig != nil { in, out := &in.CostManagementConfig, &out.CostManagementConfig *out = new(CostManagementConfigParameters) @@ -2575,11 +2657,6 @@ func (in *ClusterParameters) DeepCopyInto(out *ClusterParameters) { *out = new(DefaultSnatStatusParameters) (*in).DeepCopyInto(*out) } - if in.DeletionProtection != nil { - in, out := &in.DeletionProtection, &out.DeletionProtection - *out = new(bool) - **out = **in - } if in.Description != nil { in, out := &in.Description, &out.Description *out = new(string) @@ -2595,6 +2672,11 @@ func (in *ClusterParameters) DeepCopyInto(out *ClusterParameters) { *out = new(bool) **out = **in } + if in.EnableFqdnNetworkPolicy != nil { + in, out := &in.EnableFqdnNetworkPolicy, &out.EnableFqdnNetworkPolicy + *out = new(bool) + **out = **in + } if 
in.EnableIntranodeVisibility != nil { in, out := &in.EnableIntranodeVisibility, &out.EnableIntranodeVisibility *out = new(bool) @@ -2635,6 +2717,11 @@ func (in *ClusterParameters) DeepCopyInto(out *ClusterParameters) { *out = new(bool) **out = **in } + if in.EnterpriseConfig != nil { + in, out := &in.EnterpriseConfig, &out.EnterpriseConfig + *out = new(EnterpriseConfigParameters) + (*in).DeepCopyInto(*out) + } if in.Fleet != nil { in, out := &in.Fleet, &out.Fleet *out = new(FleetParameters) @@ -2817,6 +2904,11 @@ func (in *ClusterParameters) DeepCopyInto(out *ClusterParameters) { *out = new(ResourceUsageExportConfigParameters) (*in).DeepCopyInto(*out) } + if in.SecretManagerConfig != nil { + in, out := &in.SecretManagerConfig, &out.SecretManagerConfig + *out = new(SecretManagerConfigParameters) + (*in).DeepCopyInto(*out) + } if in.SecurityPostureConfig != nil { in, out := &in.SecurityPostureConfig, &out.SecurityPostureConfig *out = new(SecurityPostureConfigParameters) @@ -2842,6 +2934,11 @@ func (in *ClusterParameters) DeepCopyInto(out *ClusterParameters) { *out = new(v1.Selector) (*in).DeepCopyInto(*out) } + if in.UserManagedKeysConfig != nil { + in, out := &in.UserManagedKeysConfig, &out.UserManagedKeysConfig + *out = new(UserManagedKeysConfigParameters) + (*in).DeepCopyInto(*out) + } if in.VerticalPodAutoscaling != nil { in, out := &in.VerticalPodAutoscaling, &out.VerticalPodAutoscaling *out = new(VerticalPodAutoscalingParameters) @@ -3289,6 +3386,66 @@ func (in *ContainerdConfigPrivateRegistryAccessConfigParameters) DeepCopy() *Con return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ControlPlaneEndpointsConfigInitParameters) DeepCopyInto(out *ControlPlaneEndpointsConfigInitParameters) { + *out = *in + if in.DNSEndpointConfig != nil { + in, out := &in.DNSEndpointConfig, &out.DNSEndpointConfig + *out = new(DNSEndpointConfigInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneEndpointsConfigInitParameters. +func (in *ControlPlaneEndpointsConfigInitParameters) DeepCopy() *ControlPlaneEndpointsConfigInitParameters { + if in == nil { + return nil + } + out := new(ControlPlaneEndpointsConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControlPlaneEndpointsConfigObservation) DeepCopyInto(out *ControlPlaneEndpointsConfigObservation) { + *out = *in + if in.DNSEndpointConfig != nil { + in, out := &in.DNSEndpointConfig, &out.DNSEndpointConfig + *out = new(DNSEndpointConfigObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneEndpointsConfigObservation. +func (in *ControlPlaneEndpointsConfigObservation) DeepCopy() *ControlPlaneEndpointsConfigObservation { + if in == nil { + return nil + } + out := new(ControlPlaneEndpointsConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControlPlaneEndpointsConfigParameters) DeepCopyInto(out *ControlPlaneEndpointsConfigParameters) { + *out = *in + if in.DNSEndpointConfig != nil { + in, out := &in.DNSEndpointConfig, &out.DNSEndpointConfig + *out = new(DNSEndpointConfigParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneEndpointsConfigParameters. 
+func (in *ControlPlaneEndpointsConfigParameters) DeepCopy() *ControlPlaneEndpointsConfigParameters { + if in == nil { + return nil + } + out := new(ControlPlaneEndpointsConfigParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CostManagementConfigInitParameters) DeepCopyInto(out *CostManagementConfigInitParameters) { *out = *in @@ -3412,6 +3569,11 @@ func (in *DNSCacheConfigParameters) DeepCopy() *DNSCacheConfigParameters { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DNSConfigInitParameters) DeepCopyInto(out *DNSConfigInitParameters) { *out = *in + if in.AdditiveVPCScopeDNSDomain != nil { + in, out := &in.AdditiveVPCScopeDNSDomain, &out.AdditiveVPCScopeDNSDomain + *out = new(string) + **out = **in + } if in.ClusterDNS != nil { in, out := &in.ClusterDNS, &out.ClusterDNS *out = new(string) @@ -3442,6 +3604,11 @@ func (in *DNSConfigInitParameters) DeepCopy() *DNSConfigInitParameters { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DNSConfigObservation) DeepCopyInto(out *DNSConfigObservation) { *out = *in + if in.AdditiveVPCScopeDNSDomain != nil { + in, out := &in.AdditiveVPCScopeDNSDomain, &out.AdditiveVPCScopeDNSDomain + *out = new(string) + **out = **in + } if in.ClusterDNS != nil { in, out := &in.ClusterDNS, &out.ClusterDNS *out = new(string) @@ -3472,6 +3639,11 @@ func (in *DNSConfigObservation) DeepCopy() *DNSConfigObservation { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *DNSConfigParameters) DeepCopyInto(out *DNSConfigParameters) { *out = *in + if in.AdditiveVPCScopeDNSDomain != nil { + in, out := &in.AdditiveVPCScopeDNSDomain, &out.AdditiveVPCScopeDNSDomain + *out = new(string) + **out = **in + } if in.ClusterDNS != nil { in, out := &in.ClusterDNS, &out.ClusterDNS *out = new(string) @@ -3499,6 +3671,81 @@ func (in *DNSConfigParameters) DeepCopy() *DNSConfigParameters { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSEndpointConfigInitParameters) DeepCopyInto(out *DNSEndpointConfigInitParameters) { + *out = *in + if in.AllowExternalTraffic != nil { + in, out := &in.AllowExternalTraffic, &out.AllowExternalTraffic + *out = new(bool) + **out = **in + } + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSEndpointConfigInitParameters. +func (in *DNSEndpointConfigInitParameters) DeepCopy() *DNSEndpointConfigInitParameters { + if in == nil { + return nil + } + out := new(DNSEndpointConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSEndpointConfigObservation) DeepCopyInto(out *DNSEndpointConfigObservation) { + *out = *in + if in.AllowExternalTraffic != nil { + in, out := &in.AllowExternalTraffic, &out.AllowExternalTraffic + *out = new(bool) + **out = **in + } + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSEndpointConfigObservation. 
+func (in *DNSEndpointConfigObservation) DeepCopy() *DNSEndpointConfigObservation { + if in == nil { + return nil + } + out := new(DNSEndpointConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSEndpointConfigParameters) DeepCopyInto(out *DNSEndpointConfigParameters) { + *out = *in + if in.AllowExternalTraffic != nil { + in, out := &in.AllowExternalTraffic, &out.AllowExternalTraffic + *out = new(bool) + **out = **in + } + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSEndpointConfigParameters. +func (in *DNSEndpointConfigParameters) DeepCopy() *DNSEndpointConfigParameters { + if in == nil { + return nil + } + out := new(DNSEndpointConfigParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DailyMaintenanceWindowInitParameters) DeepCopyInto(out *DailyMaintenanceWindowInitParameters) { *out = *in @@ -3837,6 +4084,71 @@ func (in *EnableK8SBetaApisParameters) DeepCopy() *EnableK8SBetaApisParameters { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnterpriseConfigInitParameters) DeepCopyInto(out *EnterpriseConfigInitParameters) { + *out = *in + if in.DesiredTier != nil { + in, out := &in.DesiredTier, &out.DesiredTier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnterpriseConfigInitParameters. 
+func (in *EnterpriseConfigInitParameters) DeepCopy() *EnterpriseConfigInitParameters { + if in == nil { + return nil + } + out := new(EnterpriseConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnterpriseConfigObservation) DeepCopyInto(out *EnterpriseConfigObservation) { + *out = *in + if in.ClusterTier != nil { + in, out := &in.ClusterTier, &out.ClusterTier + *out = new(string) + **out = **in + } + if in.DesiredTier != nil { + in, out := &in.DesiredTier, &out.DesiredTier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnterpriseConfigObservation. +func (in *EnterpriseConfigObservation) DeepCopy() *EnterpriseConfigObservation { + if in == nil { + return nil + } + out := new(EnterpriseConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnterpriseConfigParameters) DeepCopyInto(out *EnterpriseConfigParameters) { + *out = *in + if in.DesiredTier != nil { + in, out := &in.DesiredTier, &out.DesiredTier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnterpriseConfigParameters. +func (in *EnterpriseConfigParameters) DeepCopy() *EnterpriseConfigParameters { + if in == nil { + return nil + } + out := new(EnterpriseConfigParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *EphemeralStorageLocalSsdConfigInitParameters) DeepCopyInto(out *EphemeralStorageLocalSsdConfigInitParameters) { *out = *in @@ -5196,29 +5508,104 @@ func (in *HostMaintenancePolicyParameters) DeepCopy() *HostMaintenancePolicyPara } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IPAllocationPolicyInitParameters) DeepCopyInto(out *IPAllocationPolicyInitParameters) { +func (in *HugepagesConfigInitParameters) DeepCopyInto(out *HugepagesConfigInitParameters) { *out = *in - if in.AdditionalPodRangesConfig != nil { - in, out := &in.AdditionalPodRangesConfig, &out.AdditionalPodRangesConfig - *out = new(AdditionalPodRangesConfigInitParameters) - (*in).DeepCopyInto(*out) - } - if in.ClusterIPv4CidrBlock != nil { - in, out := &in.ClusterIPv4CidrBlock, &out.ClusterIPv4CidrBlock - *out = new(string) + if in.HugepageSize1G != nil { + in, out := &in.HugepageSize1G, &out.HugepageSize1G + *out = new(float64) **out = **in } - if in.ClusterSecondaryRangeName != nil { - in, out := &in.ClusterSecondaryRangeName, &out.ClusterSecondaryRangeName - *out = new(string) + if in.HugepageSize2M != nil { + in, out := &in.HugepageSize2M, &out.HugepageSize2M + *out = new(float64) **out = **in } - if in.PodCidrOverprovisionConfig != nil { - in, out := &in.PodCidrOverprovisionConfig, &out.PodCidrOverprovisionConfig - *out = new(PodCidrOverprovisionConfigInitParameters) - (*in).DeepCopyInto(*out) - } - if in.ServicesIPv4CidrBlock != nil { +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HugepagesConfigInitParameters. +func (in *HugepagesConfigInitParameters) DeepCopy() *HugepagesConfigInitParameters { + if in == nil { + return nil + } + out := new(HugepagesConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HugepagesConfigObservation) DeepCopyInto(out *HugepagesConfigObservation) { + *out = *in + if in.HugepageSize1G != nil { + in, out := &in.HugepageSize1G, &out.HugepageSize1G + *out = new(float64) + **out = **in + } + if in.HugepageSize2M != nil { + in, out := &in.HugepageSize2M, &out.HugepageSize2M + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HugepagesConfigObservation. +func (in *HugepagesConfigObservation) DeepCopy() *HugepagesConfigObservation { + if in == nil { + return nil + } + out := new(HugepagesConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HugepagesConfigParameters) DeepCopyInto(out *HugepagesConfigParameters) { + *out = *in + if in.HugepageSize1G != nil { + in, out := &in.HugepageSize1G, &out.HugepageSize1G + *out = new(float64) + **out = **in + } + if in.HugepageSize2M != nil { + in, out := &in.HugepageSize2M, &out.HugepageSize2M + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HugepagesConfigParameters. +func (in *HugepagesConfigParameters) DeepCopy() *HugepagesConfigParameters { + if in == nil { + return nil + } + out := new(HugepagesConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IPAllocationPolicyInitParameters) DeepCopyInto(out *IPAllocationPolicyInitParameters) { + *out = *in + if in.AdditionalPodRangesConfig != nil { + in, out := &in.AdditionalPodRangesConfig, &out.AdditionalPodRangesConfig + *out = new(AdditionalPodRangesConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ClusterIPv4CidrBlock != nil { + in, out := &in.ClusterIPv4CidrBlock, &out.ClusterIPv4CidrBlock + *out = new(string) + **out = **in + } + if in.ClusterSecondaryRangeName != nil { + in, out := &in.ClusterSecondaryRangeName, &out.ClusterSecondaryRangeName + *out = new(string) + **out = **in + } + if in.PodCidrOverprovisionConfig != nil { + in, out := &in.PodCidrOverprovisionConfig, &out.PodCidrOverprovisionConfig + *out = new(PodCidrOverprovisionConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ServicesIPv4CidrBlock != nil { in, out := &in.ServicesIPv4CidrBlock, &out.ServicesIPv4CidrBlock *out = new(string) **out = **in @@ -5525,6 +5912,61 @@ func (in *KubeletConfigParameters) DeepCopy() *KubeletConfigParameters { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxNodeConfigHugepagesConfigInitParameters) DeepCopyInto(out *LinuxNodeConfigHugepagesConfigInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxNodeConfigHugepagesConfigInitParameters. +func (in *LinuxNodeConfigHugepagesConfigInitParameters) DeepCopy() *LinuxNodeConfigHugepagesConfigInitParameters { + if in == nil { + return nil + } + out := new(LinuxNodeConfigHugepagesConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxNodeConfigHugepagesConfigObservation) DeepCopyInto(out *LinuxNodeConfigHugepagesConfigObservation) { + *out = *in + if in.HugepageSize1G != nil { + in, out := &in.HugepageSize1G, &out.HugepageSize1G + *out = new(float64) + **out = **in + } + if in.HugepageSize2M != nil { + in, out := &in.HugepageSize2M, &out.HugepageSize2M + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxNodeConfigHugepagesConfigObservation. +func (in *LinuxNodeConfigHugepagesConfigObservation) DeepCopy() *LinuxNodeConfigHugepagesConfigObservation { + if in == nil { + return nil + } + out := new(LinuxNodeConfigHugepagesConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxNodeConfigHugepagesConfigParameters) DeepCopyInto(out *LinuxNodeConfigHugepagesConfigParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxNodeConfigHugepagesConfigParameters. +func (in *LinuxNodeConfigHugepagesConfigParameters) DeepCopy() *LinuxNodeConfigHugepagesConfigParameters { + if in == nil { + return nil + } + out := new(LinuxNodeConfigHugepagesConfigParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *LinuxNodeConfigInitParameters) DeepCopyInto(out *LinuxNodeConfigInitParameters) { *out = *in @@ -5533,6 +5975,11 @@ func (in *LinuxNodeConfigInitParameters) DeepCopyInto(out *LinuxNodeConfigInitPa *out = new(string) **out = **in } + if in.HugepagesConfig != nil { + in, out := &in.HugepagesConfig, &out.HugepagesConfig + *out = new(HugepagesConfigInitParameters) + (*in).DeepCopyInto(*out) + } if in.Sysctls != nil { in, out := &in.Sysctls, &out.Sysctls *out = make(map[string]*string, len(*in)) @@ -5569,6 +6016,11 @@ func (in *LinuxNodeConfigObservation) DeepCopyInto(out *LinuxNodeConfigObservati *out = new(string) **out = **in } + if in.HugepagesConfig != nil { + in, out := &in.HugepagesConfig, &out.HugepagesConfig + *out = new(HugepagesConfigObservation) + (*in).DeepCopyInto(*out) + } if in.Sysctls != nil { in, out := &in.Sysctls, &out.Sysctls *out = make(map[string]*string, len(*in)) @@ -5605,6 +6057,11 @@ func (in *LinuxNodeConfigParameters) DeepCopyInto(out *LinuxNodeConfigParameters *out = new(string) **out = **in } + if in.HugepagesConfig != nil { + in, out := &in.HugepagesConfig, &out.HugepagesConfig + *out = new(HugepagesConfigParameters) + (*in).DeepCopyInto(*out) + } if in.Sysctls != nil { in, out := &in.Sysctls, &out.Sysctls *out = make(map[string]*string, len(*in)) @@ -6199,6 +6656,11 @@ func (in *MasterAuthorizedNetworksConfigInitParameters) DeepCopyInto(out *Master *out = new(bool) **out = **in } + if in.PrivateEndpointEnforcementEnabled != nil { + in, out := &in.PrivateEndpointEnforcementEnabled, &out.PrivateEndpointEnforcementEnabled + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterAuthorizedNetworksConfigInitParameters. 
@@ -6226,6 +6688,11 @@ func (in *MasterAuthorizedNetworksConfigObservation) DeepCopyInto(out *MasterAut *out = new(bool) **out = **in } + if in.PrivateEndpointEnforcementEnabled != nil { + in, out := &in.PrivateEndpointEnforcementEnabled, &out.PrivateEndpointEnforcementEnabled + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterAuthorizedNetworksConfigObservation. @@ -6253,6 +6720,11 @@ func (in *MasterAuthorizedNetworksConfigParameters) DeepCopyInto(out *MasterAuth *out = new(bool) **out = **in } + if in.PrivateEndpointEnforcementEnabled != nil { + in, out := &in.PrivateEndpointEnforcementEnabled, &out.PrivateEndpointEnforcementEnabled + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterAuthorizedNetworksConfigParameters. @@ -7708,6 +8180,66 @@ func (in *NodeConfigDefaultsContainerdConfigPrivateRegistryAccessConfigParameter return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeConfigDefaultsGcfsConfigInitParameters) DeepCopyInto(out *NodeConfigDefaultsGcfsConfigInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeConfigDefaultsGcfsConfigInitParameters. +func (in *NodeConfigDefaultsGcfsConfigInitParameters) DeepCopy() *NodeConfigDefaultsGcfsConfigInitParameters { + if in == nil { + return nil + } + out := new(NodeConfigDefaultsGcfsConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodeConfigDefaultsGcfsConfigObservation) DeepCopyInto(out *NodeConfigDefaultsGcfsConfigObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeConfigDefaultsGcfsConfigObservation. +func (in *NodeConfigDefaultsGcfsConfigObservation) DeepCopy() *NodeConfigDefaultsGcfsConfigObservation { + if in == nil { + return nil + } + out := new(NodeConfigDefaultsGcfsConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeConfigDefaultsGcfsConfigParameters) DeepCopyInto(out *NodeConfigDefaultsGcfsConfigParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeConfigDefaultsGcfsConfigParameters. +func (in *NodeConfigDefaultsGcfsConfigParameters) DeepCopy() *NodeConfigDefaultsGcfsConfigParameters { + if in == nil { + return nil + } + out := new(NodeConfigDefaultsGcfsConfigParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *NodeConfigDefaultsInitParameters) DeepCopyInto(out *NodeConfigDefaultsInitParameters) { *out = *in @@ -7716,6 +8248,11 @@ func (in *NodeConfigDefaultsInitParameters) DeepCopyInto(out *NodeConfigDefaults *out = new(NodeConfigDefaultsContainerdConfigInitParameters) (*in).DeepCopyInto(*out) } + if in.GcfsConfig != nil { + in, out := &in.GcfsConfig, &out.GcfsConfig + *out = new(NodeConfigDefaultsGcfsConfigInitParameters) + (*in).DeepCopyInto(*out) + } if in.InsecureKubeletReadonlyPortEnabled != nil { in, out := &in.InsecureKubeletReadonlyPortEnabled, &out.InsecureKubeletReadonlyPortEnabled *out = new(string) @@ -7746,6 +8283,11 @@ func (in *NodeConfigDefaultsObservation) DeepCopyInto(out *NodeConfigDefaultsObs *out = new(NodeConfigDefaultsContainerdConfigObservation) (*in).DeepCopyInto(*out) } + if in.GcfsConfig != nil { + in, out := &in.GcfsConfig, &out.GcfsConfig + *out = new(NodeConfigDefaultsGcfsConfigObservation) + (*in).DeepCopyInto(*out) + } if in.InsecureKubeletReadonlyPortEnabled != nil { in, out := &in.InsecureKubeletReadonlyPortEnabled, &out.InsecureKubeletReadonlyPortEnabled *out = new(string) @@ -7776,6 +8318,11 @@ func (in *NodeConfigDefaultsParameters) DeepCopyInto(out *NodeConfigDefaultsPara *out = new(NodeConfigDefaultsContainerdConfigParameters) (*in).DeepCopyInto(*out) } + if in.GcfsConfig != nil { + in, out := &in.GcfsConfig, &out.GcfsConfig + *out = new(NodeConfigDefaultsGcfsConfigParameters) + (*in).DeepCopyInto(*out) + } if in.InsecureKubeletReadonlyPortEnabled != nil { in, out := &in.InsecureKubeletReadonlyPortEnabled, &out.InsecureKubeletReadonlyPortEnabled *out = new(string) @@ -8424,6 +8971,11 @@ func (in *NodeConfigInitParameters) DeepCopyInto(out *NodeConfigInitParameters) *out = new(float64) **out = **in } + if in.LocalSsdEncryptionMode != nil { + in, out := &in.LocalSsdEncryptionMode, &out.LocalSsdEncryptionMode + *out = new(string) + **out = **in + } if in.LoggingVariant != nil { in, out := &in.LoggingVariant, 
&out.LoggingVariant *out = new(string) @@ -8550,6 +9102,17 @@ func (in *NodeConfigInitParameters) DeepCopyInto(out *NodeConfigInitParameters) *out = new(bool) **out = **in } + if in.StoragePools != nil { + in, out := &in.StoragePools, &out.StoragePools + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.Tags != nil { in, out := &in.Tags, &out.Tags *out = make([]*string, len(*in)) @@ -8655,6 +9218,81 @@ func (in *NodeConfigKubeletConfigParameters) DeepCopy() *NodeConfigKubeletConfig return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeConfigLinuxNodeConfigHugepagesConfigInitParameters) DeepCopyInto(out *NodeConfigLinuxNodeConfigHugepagesConfigInitParameters) { + *out = *in + if in.HugepageSize1G != nil { + in, out := &in.HugepageSize1G, &out.HugepageSize1G + *out = new(float64) + **out = **in + } + if in.HugepageSize2M != nil { + in, out := &in.HugepageSize2M, &out.HugepageSize2M + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeConfigLinuxNodeConfigHugepagesConfigInitParameters. +func (in *NodeConfigLinuxNodeConfigHugepagesConfigInitParameters) DeepCopy() *NodeConfigLinuxNodeConfigHugepagesConfigInitParameters { + if in == nil { + return nil + } + out := new(NodeConfigLinuxNodeConfigHugepagesConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodeConfigLinuxNodeConfigHugepagesConfigObservation) DeepCopyInto(out *NodeConfigLinuxNodeConfigHugepagesConfigObservation) { + *out = *in + if in.HugepageSize1G != nil { + in, out := &in.HugepageSize1G, &out.HugepageSize1G + *out = new(float64) + **out = **in + } + if in.HugepageSize2M != nil { + in, out := &in.HugepageSize2M, &out.HugepageSize2M + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeConfigLinuxNodeConfigHugepagesConfigObservation. +func (in *NodeConfigLinuxNodeConfigHugepagesConfigObservation) DeepCopy() *NodeConfigLinuxNodeConfigHugepagesConfigObservation { + if in == nil { + return nil + } + out := new(NodeConfigLinuxNodeConfigHugepagesConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeConfigLinuxNodeConfigHugepagesConfigParameters) DeepCopyInto(out *NodeConfigLinuxNodeConfigHugepagesConfigParameters) { + *out = *in + if in.HugepageSize1G != nil { + in, out := &in.HugepageSize1G, &out.HugepageSize1G + *out = new(float64) + **out = **in + } + if in.HugepageSize2M != nil { + in, out := &in.HugepageSize2M, &out.HugepageSize2M + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeConfigLinuxNodeConfigHugepagesConfigParameters. +func (in *NodeConfigLinuxNodeConfigHugepagesConfigParameters) DeepCopy() *NodeConfigLinuxNodeConfigHugepagesConfigParameters { + if in == nil { + return nil + } + out := new(NodeConfigLinuxNodeConfigHugepagesConfigParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *NodeConfigLinuxNodeConfigInitParameters) DeepCopyInto(out *NodeConfigLinuxNodeConfigInitParameters) { *out = *in @@ -8678,6 +9316,11 @@ func (in *NodeConfigLinuxNodeConfigObservation) DeepCopyInto(out *NodeConfigLinu *out = new(string) **out = **in } + if in.HugepagesConfig != nil { + in, out := &in.HugepagesConfig, &out.HugepagesConfig + *out = new(LinuxNodeConfigHugepagesConfigObservation) + (*in).DeepCopyInto(*out) + } if in.Sysctls != nil { in, out := &in.Sysctls, &out.Sysctls *out = make(map[string]*string, len(*in)) @@ -8889,6 +9532,11 @@ func (in *NodeConfigObservation) DeepCopyInto(out *NodeConfigObservation) { *out = new(float64) **out = **in } + if in.LocalSsdEncryptionMode != nil { + in, out := &in.LocalSsdEncryptionMode, &out.LocalSsdEncryptionMode + *out = new(string) + **out = **in + } if in.LoggingVariant != nil { in, out := &in.LoggingVariant, &out.LoggingVariant *out = new(string) @@ -9005,6 +9653,17 @@ func (in *NodeConfigObservation) DeepCopyInto(out *NodeConfigObservation) { *out = new(bool) **out = **in } + if in.StoragePools != nil { + in, out := &in.StoragePools, &out.StoragePools + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.Tags != nil { in, out := &in.Tags, &out.Tags *out = make([]*string, len(*in)) @@ -9151,6 +9810,11 @@ func (in *NodeConfigParameters) DeepCopyInto(out *NodeConfigParameters) { *out = new(float64) **out = **in } + if in.LocalSsdEncryptionMode != nil { + in, out := &in.LocalSsdEncryptionMode, &out.LocalSsdEncryptionMode + *out = new(string) + **out = **in + } if in.LoggingVariant != nil { in, out := &in.LoggingVariant, &out.LoggingVariant *out = new(string) @@ -9277,6 +9941,17 @@ func (in *NodeConfigParameters) DeepCopyInto(out *NodeConfigParameters) { *out = new(bool) **out = **in } + if in.StoragePools != nil { + in, out := &in.StoragePools, &out.StoragePools + *out = make([]*string, 
len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.Tags != nil { in, out := &in.Tags, &out.Tags *out = make([]*string, len(*in)) @@ -9868,6 +10543,11 @@ func (in *NodePool) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NodePoolAutoConfigInitParameters) DeepCopyInto(out *NodePoolAutoConfigInitParameters) { *out = *in + if in.LinuxNodeConfig != nil { + in, out := &in.LinuxNodeConfig, &out.LinuxNodeConfig + *out = new(NodePoolAutoConfigLinuxNodeConfigInitParameters) + (*in).DeepCopyInto(*out) + } if in.NetworkTags != nil { in, out := &in.NetworkTags, &out.NetworkTags *out = new(NetworkTagsInitParameters) @@ -9906,9 +10586,74 @@ func (in *NodePoolAutoConfigInitParameters) DeepCopy() *NodePoolAutoConfigInitPa return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodePoolAutoConfigLinuxNodeConfigInitParameters) DeepCopyInto(out *NodePoolAutoConfigLinuxNodeConfigInitParameters) { + *out = *in + if in.CgroupMode != nil { + in, out := &in.CgroupMode, &out.CgroupMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePoolAutoConfigLinuxNodeConfigInitParameters. +func (in *NodePoolAutoConfigLinuxNodeConfigInitParameters) DeepCopy() *NodePoolAutoConfigLinuxNodeConfigInitParameters { + if in == nil { + return nil + } + out := new(NodePoolAutoConfigLinuxNodeConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodePoolAutoConfigLinuxNodeConfigObservation) DeepCopyInto(out *NodePoolAutoConfigLinuxNodeConfigObservation) { + *out = *in + if in.CgroupMode != nil { + in, out := &in.CgroupMode, &out.CgroupMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePoolAutoConfigLinuxNodeConfigObservation. +func (in *NodePoolAutoConfigLinuxNodeConfigObservation) DeepCopy() *NodePoolAutoConfigLinuxNodeConfigObservation { + if in == nil { + return nil + } + out := new(NodePoolAutoConfigLinuxNodeConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodePoolAutoConfigLinuxNodeConfigParameters) DeepCopyInto(out *NodePoolAutoConfigLinuxNodeConfigParameters) { + *out = *in + if in.CgroupMode != nil { + in, out := &in.CgroupMode, &out.CgroupMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePoolAutoConfigLinuxNodeConfigParameters. +func (in *NodePoolAutoConfigLinuxNodeConfigParameters) DeepCopy() *NodePoolAutoConfigLinuxNodeConfigParameters { + if in == nil { + return nil + } + out := new(NodePoolAutoConfigLinuxNodeConfigParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *NodePoolAutoConfigObservation) DeepCopyInto(out *NodePoolAutoConfigObservation) { *out = *in + if in.LinuxNodeConfig != nil { + in, out := &in.LinuxNodeConfig, &out.LinuxNodeConfig + *out = new(NodePoolAutoConfigLinuxNodeConfigObservation) + (*in).DeepCopyInto(*out) + } if in.NetworkTags != nil { in, out := &in.NetworkTags, &out.NetworkTags *out = new(NetworkTagsObservation) @@ -9950,6 +10695,11 @@ func (in *NodePoolAutoConfigObservation) DeepCopy() *NodePoolAutoConfigObservati // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NodePoolAutoConfigParameters) DeepCopyInto(out *NodePoolAutoConfigParameters) { *out = *in + if in.LinuxNodeConfig != nil { + in, out := &in.LinuxNodeConfig, &out.LinuxNodeConfig + *out = new(NodePoolAutoConfigLinuxNodeConfigParameters) + (*in).DeepCopyInto(*out) + } if in.NetworkTags != nil { in, out := &in.NetworkTags, &out.NetworkTags *out = new(NetworkTagsParameters) @@ -11519,6 +12269,11 @@ func (in *NodePoolNodeConfigInitParameters_2) DeepCopyInto(out *NodePoolNodeConf *out = new(float64) **out = **in } + if in.LocalSsdEncryptionMode != nil { + in, out := &in.LocalSsdEncryptionMode, &out.LocalSsdEncryptionMode + *out = new(string) + **out = **in + } if in.LoggingVariant != nil { in, out := &in.LoggingVariant, &out.LoggingVariant *out = new(string) @@ -11645,6 +12400,17 @@ func (in *NodePoolNodeConfigInitParameters_2) DeepCopyInto(out *NodePoolNodeConf *out = new(bool) **out = **in } + if in.StoragePools != nil { + in, out := &in.StoragePools, &out.StoragePools + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.Tags != nil { in, out := &in.Tags, &out.Tags *out = make([]*string, len(*in)) @@ -11808,6 +12574,11 @@ func (in *NodePoolNodeConfigLinuxNodeConfigInitParameters) DeepCopyInto(out *Nod *out = new(string) **out = **in } + if 
in.HugepagesConfig != nil { + in, out := &in.HugepagesConfig, &out.HugepagesConfig + *out = new(NodeConfigLinuxNodeConfigHugepagesConfigInitParameters) + (*in).DeepCopyInto(*out) + } if in.Sysctls != nil { in, out := &in.Sysctls, &out.Sysctls *out = make(map[string]*string, len(*in)) @@ -11844,6 +12615,11 @@ func (in *NodePoolNodeConfigLinuxNodeConfigObservation) DeepCopyInto(out *NodePo *out = new(string) **out = **in } + if in.HugepagesConfig != nil { + in, out := &in.HugepagesConfig, &out.HugepagesConfig + *out = new(NodeConfigLinuxNodeConfigHugepagesConfigObservation) + (*in).DeepCopyInto(*out) + } if in.Sysctls != nil { in, out := &in.Sysctls, &out.Sysctls *out = make(map[string]*string, len(*in)) @@ -11880,6 +12656,11 @@ func (in *NodePoolNodeConfigLinuxNodeConfigParameters) DeepCopyInto(out *NodePoo *out = new(string) **out = **in } + if in.HugepagesConfig != nil { + in, out := &in.HugepagesConfig, &out.HugepagesConfig + *out = new(NodeConfigLinuxNodeConfigHugepagesConfigParameters) + (*in).DeepCopyInto(*out) + } if in.Sysctls != nil { in, out := &in.Sysctls, &out.Sysctls *out = make(map[string]*string, len(*in)) @@ -12086,6 +12867,11 @@ func (in *NodePoolNodeConfigObservation) DeepCopyInto(out *NodePoolNodeConfigObs *out = new(float64) **out = **in } + if in.LocalSsdEncryptionMode != nil { + in, out := &in.LocalSsdEncryptionMode, &out.LocalSsdEncryptionMode + *out = new(string) + **out = **in + } if in.LoggingVariant != nil { in, out := &in.LoggingVariant, &out.LoggingVariant *out = new(string) @@ -12202,6 +12988,17 @@ func (in *NodePoolNodeConfigObservation) DeepCopyInto(out *NodePoolNodeConfigObs *out = new(bool) **out = **in } + if in.StoragePools != nil { + in, out := &in.StoragePools, &out.StoragePools + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.Tags != nil { in, out := &in.Tags, &out.Tags *out = make([]*string, len(*in)) @@ 
-12355,6 +13152,11 @@ func (in *NodePoolNodeConfigObservation_2) DeepCopyInto(out *NodePoolNodeConfigO *out = new(float64) **out = **in } + if in.LocalSsdEncryptionMode != nil { + in, out := &in.LocalSsdEncryptionMode, &out.LocalSsdEncryptionMode + *out = new(string) + **out = **in + } if in.LoggingVariant != nil { in, out := &in.LoggingVariant, &out.LoggingVariant *out = new(string) @@ -12471,6 +13273,17 @@ func (in *NodePoolNodeConfigObservation_2) DeepCopyInto(out *NodePoolNodeConfigO *out = new(bool) **out = **in } + if in.StoragePools != nil { + in, out := &in.StoragePools, &out.StoragePools + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.Tags != nil { in, out := &in.Tags, &out.Tags *out = make([]*string, len(*in)) @@ -12632,6 +13445,11 @@ func (in *NodePoolNodeConfigParameters_2) DeepCopyInto(out *NodePoolNodeConfigPa *out = new(float64) **out = **in } + if in.LocalSsdEncryptionMode != nil { + in, out := &in.LocalSsdEncryptionMode, &out.LocalSsdEncryptionMode + *out = new(string) + **out = **in + } if in.LoggingVariant != nil { in, out := &in.LoggingVariant, &out.LoggingVariant *out = new(string) @@ -12758,6 +13576,17 @@ func (in *NodePoolNodeConfigParameters_2) DeepCopyInto(out *NodePoolNodeConfigPa *out = new(bool) **out = **in } + if in.StoragePools != nil { + in, out := &in.StoragePools, &out.StoragePools + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.Tags != nil { in, out := &in.Tags, &out.Tags *out = make([]*string, len(*in)) @@ -14174,6 +15003,66 @@ func (in *NotificationConfigParameters) DeepCopy() *NotificationConfigParameters return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ParallelstoreCsiDriverConfigInitParameters) DeepCopyInto(out *ParallelstoreCsiDriverConfigInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParallelstoreCsiDriverConfigInitParameters. +func (in *ParallelstoreCsiDriverConfigInitParameters) DeepCopy() *ParallelstoreCsiDriverConfigInitParameters { + if in == nil { + return nil + } + out := new(ParallelstoreCsiDriverConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParallelstoreCsiDriverConfigObservation) DeepCopyInto(out *ParallelstoreCsiDriverConfigObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParallelstoreCsiDriverConfigObservation. +func (in *ParallelstoreCsiDriverConfigObservation) DeepCopy() *ParallelstoreCsiDriverConfigObservation { + if in == nil { + return nil + } + out := new(ParallelstoreCsiDriverConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParallelstoreCsiDriverConfigParameters) DeepCopyInto(out *ParallelstoreCsiDriverConfigParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParallelstoreCsiDriverConfigParameters. 
+func (in *ParallelstoreCsiDriverConfigParameters) DeepCopy() *ParallelstoreCsiDriverConfigParameters { + if in == nil { + return nil + } + out := new(ParallelstoreCsiDriverConfigParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PlacementPolicyInitParameters) DeepCopyInto(out *PlacementPolicyInitParameters) { *out = *in @@ -15514,6 +16403,66 @@ func (in *SecondaryBootDisksParameters) DeepCopy() *SecondaryBootDisksParameters return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretManagerConfigInitParameters) DeepCopyInto(out *SecretManagerConfigInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretManagerConfigInitParameters. +func (in *SecretManagerConfigInitParameters) DeepCopy() *SecretManagerConfigInitParameters { + if in == nil { + return nil + } + out := new(SecretManagerConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretManagerConfigObservation) DeepCopyInto(out *SecretManagerConfigObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretManagerConfigObservation. 
+func (in *SecretManagerConfigObservation) DeepCopy() *SecretManagerConfigObservation { + if in == nil { + return nil + } + out := new(SecretManagerConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretManagerConfigParameters) DeepCopyInto(out *SecretManagerConfigParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretManagerConfigParameters. +func (in *SecretManagerConfigParameters) DeepCopy() *SecretManagerConfigParameters { + if in == nil { + return nil + } + out := new(SecretManagerConfigParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SecurityPostureConfigInitParameters) DeepCopyInto(out *SecurityPostureConfigInitParameters) { *out = *in @@ -16401,6 +17350,207 @@ func (in *UpgradeSettingsParameters) DeepCopy() *UpgradeSettingsParameters { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserManagedKeysConfigInitParameters) DeepCopyInto(out *UserManagedKeysConfigInitParameters) { + *out = *in + if in.AggregationCA != nil { + in, out := &in.AggregationCA, &out.AggregationCA + *out = new(string) + **out = **in + } + if in.ClusterCA != nil { + in, out := &in.ClusterCA, &out.ClusterCA + *out = new(string) + **out = **in + } + if in.ControlPlaneDiskEncryptionKey != nil { + in, out := &in.ControlPlaneDiskEncryptionKey, &out.ControlPlaneDiskEncryptionKey + *out = new(string) + **out = **in + } + if in.EtcdAPICA != nil { + in, out := &in.EtcdAPICA, &out.EtcdAPICA + *out = new(string) + **out = **in + } + if in.EtcdPeerCA != nil { + in, out := &in.EtcdPeerCA, &out.EtcdPeerCA + *out = new(string) + **out = **in + } + if in.GkeopsEtcdBackupEncryptionKey != nil { + in, out := &in.GkeopsEtcdBackupEncryptionKey, &out.GkeopsEtcdBackupEncryptionKey + *out = new(string) + **out = **in + } + if in.ServiceAccountSigningKeys != nil { + in, out := &in.ServiceAccountSigningKeys, &out.ServiceAccountSigningKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ServiceAccountVerificationKeys != nil { + in, out := &in.ServiceAccountVerificationKeys, &out.ServiceAccountVerificationKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserManagedKeysConfigInitParameters. +func (in *UserManagedKeysConfigInitParameters) DeepCopy() *UserManagedKeysConfigInitParameters { + if in == nil { + return nil + } + out := new(UserManagedKeysConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserManagedKeysConfigObservation) DeepCopyInto(out *UserManagedKeysConfigObservation) { + *out = *in + if in.AggregationCA != nil { + in, out := &in.AggregationCA, &out.AggregationCA + *out = new(string) + **out = **in + } + if in.ClusterCA != nil { + in, out := &in.ClusterCA, &out.ClusterCA + *out = new(string) + **out = **in + } + if in.ControlPlaneDiskEncryptionKey != nil { + in, out := &in.ControlPlaneDiskEncryptionKey, &out.ControlPlaneDiskEncryptionKey + *out = new(string) + **out = **in + } + if in.EtcdAPICA != nil { + in, out := &in.EtcdAPICA, &out.EtcdAPICA + *out = new(string) + **out = **in + } + if in.EtcdPeerCA != nil { + in, out := &in.EtcdPeerCA, &out.EtcdPeerCA + *out = new(string) + **out = **in + } + if in.GkeopsEtcdBackupEncryptionKey != nil { + in, out := &in.GkeopsEtcdBackupEncryptionKey, &out.GkeopsEtcdBackupEncryptionKey + *out = new(string) + **out = **in + } + if in.ServiceAccountSigningKeys != nil { + in, out := &in.ServiceAccountSigningKeys, &out.ServiceAccountSigningKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ServiceAccountVerificationKeys != nil { + in, out := &in.ServiceAccountVerificationKeys, &out.ServiceAccountVerificationKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserManagedKeysConfigObservation. +func (in *UserManagedKeysConfigObservation) DeepCopy() *UserManagedKeysConfigObservation { + if in == nil { + return nil + } + out := new(UserManagedKeysConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserManagedKeysConfigParameters) DeepCopyInto(out *UserManagedKeysConfigParameters) { + *out = *in + if in.AggregationCA != nil { + in, out := &in.AggregationCA, &out.AggregationCA + *out = new(string) + **out = **in + } + if in.ClusterCA != nil { + in, out := &in.ClusterCA, &out.ClusterCA + *out = new(string) + **out = **in + } + if in.ControlPlaneDiskEncryptionKey != nil { + in, out := &in.ControlPlaneDiskEncryptionKey, &out.ControlPlaneDiskEncryptionKey + *out = new(string) + **out = **in + } + if in.EtcdAPICA != nil { + in, out := &in.EtcdAPICA, &out.EtcdAPICA + *out = new(string) + **out = **in + } + if in.EtcdPeerCA != nil { + in, out := &in.EtcdPeerCA, &out.EtcdPeerCA + *out = new(string) + **out = **in + } + if in.GkeopsEtcdBackupEncryptionKey != nil { + in, out := &in.GkeopsEtcdBackupEncryptionKey, &out.GkeopsEtcdBackupEncryptionKey + *out = new(string) + **out = **in + } + if in.ServiceAccountSigningKeys != nil { + in, out := &in.ServiceAccountSigningKeys, &out.ServiceAccountSigningKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ServiceAccountVerificationKeys != nil { + in, out := &in.ServiceAccountVerificationKeys, &out.ServiceAccountVerificationKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserManagedKeysConfigParameters. +func (in *UserManagedKeysConfigParameters) DeepCopy() *UserManagedKeysConfigParameters { + if in == nil { + return nil + } + out := new(UserManagedKeysConfigParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *VerticalPodAutoscalingInitParameters) DeepCopyInto(out *VerticalPodAutoscalingInitParameters) { *out = *in diff --git a/apis/container/v1beta2/zz_nodepool_types.go b/apis/container/v1beta2/zz_nodepool_types.go index f31a6e890..1195f35a7 100755 --- a/apis/container/v1beta2/zz_nodepool_types.go +++ b/apis/container/v1beta2/zz_nodepool_types.go @@ -176,7 +176,7 @@ type NodeConfigGuestAcceleratorGpuDriverInstallationConfigInitParameters struct // The Kubernetes version for the nodes in this pool. Note that if this field // and auto_upgrade are both specified, they will fight each other for what the node version should // be, so setting both is highly discouraged. - GpuDriverVersion *string `json:"gpuDriverVersion,omitempty" tf:"gpu_driver_version"` + GpuDriverVersion *string `json:"gpuDriverVersion,omitempty" tf:"gpu_driver_version,omitempty"` } type NodeConfigGuestAcceleratorGpuDriverInstallationConfigObservation struct { @@ -193,13 +193,13 @@ type NodeConfigGuestAcceleratorGpuDriverInstallationConfigParameters struct { // and auto_upgrade are both specified, they will fight each other for what the node version should // be, so setting both is highly discouraged. 
// +kubebuilder:validation:Optional - GpuDriverVersion *string `json:"gpuDriverVersion,omitempty" tf:"gpu_driver_version"` + GpuDriverVersion *string `json:"gpuDriverVersion" tf:"gpu_driver_version,omitempty"` } type NodeConfigGuestAcceleratorGpuSharingConfigInitParameters struct { - GpuSharingStrategy *string `json:"gpuSharingStrategy,omitempty" tf:"gpu_sharing_strategy"` + GpuSharingStrategy *string `json:"gpuSharingStrategy,omitempty" tf:"gpu_sharing_strategy,omitempty"` - MaxSharedClientsPerGpu *float64 `json:"maxSharedClientsPerGpu,omitempty" tf:"max_shared_clients_per_gpu"` + MaxSharedClientsPerGpu *float64 `json:"maxSharedClientsPerGpu,omitempty" tf:"max_shared_clients_per_gpu,omitempty"` } type NodeConfigGuestAcceleratorGpuSharingConfigObservation struct { @@ -211,10 +211,31 @@ type NodeConfigGuestAcceleratorGpuSharingConfigObservation struct { type NodeConfigGuestAcceleratorGpuSharingConfigParameters struct { // +kubebuilder:validation:Optional - GpuSharingStrategy *string `json:"gpuSharingStrategy,omitempty" tf:"gpu_sharing_strategy"` + GpuSharingStrategy *string `json:"gpuSharingStrategy" tf:"gpu_sharing_strategy,omitempty"` + + // +kubebuilder:validation:Optional + MaxSharedClientsPerGpu *float64 `json:"maxSharedClientsPerGpu" tf:"max_shared_clients_per_gpu,omitempty"` +} + +type NodeConfigLinuxNodeConfigHugepagesConfigInitParameters struct { + HugepageSize1G *float64 `json:"hugepageSize1G,omitempty" tf:"hugepage_size_1g,omitempty"` + + HugepageSize2M *float64 `json:"hugepageSize2M,omitempty" tf:"hugepage_size_2m,omitempty"` +} + +type NodeConfigLinuxNodeConfigHugepagesConfigObservation struct { + HugepageSize1G *float64 `json:"hugepageSize1G,omitempty" tf:"hugepage_size_1g,omitempty"` + + HugepageSize2M *float64 `json:"hugepageSize2M,omitempty" tf:"hugepage_size_2m,omitempty"` +} + +type NodeConfigLinuxNodeConfigHugepagesConfigParameters struct { + + // +kubebuilder:validation:Optional + HugepageSize1G *float64 `json:"hugepageSize1G,omitempty" 
tf:"hugepage_size_1g,omitempty"` // +kubebuilder:validation:Optional - MaxSharedClientsPerGpu *float64 `json:"maxSharedClientsPerGpu,omitempty" tf:"max_shared_clients_per_gpu"` + HugepageSize2M *float64 `json:"hugepageSize2M,omitempty" tf:"hugepage_size_2m,omitempty"` } type NodeConfigSoleTenantConfigNodeAffinityInitParameters struct { @@ -664,18 +685,18 @@ type NodePoolNodeConfigGcfsConfigParameters struct { } type NodePoolNodeConfigGuestAcceleratorInitParameters struct { - Count *float64 `json:"count,omitempty" tf:"count"` + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` - GpuDriverInstallationConfig *NodeConfigGuestAcceleratorGpuDriverInstallationConfigInitParameters `json:"gpuDriverInstallationConfig,omitempty" tf:"gpu_driver_installation_config"` + GpuDriverInstallationConfig *NodeConfigGuestAcceleratorGpuDriverInstallationConfigInitParameters `json:"gpuDriverInstallationConfig,omitempty" tf:"gpu_driver_installation_config,omitempty"` - GpuPartitionSize *string `json:"gpuPartitionSize,omitempty" tf:"gpu_partition_size"` + GpuPartitionSize *string `json:"gpuPartitionSize,omitempty" tf:"gpu_partition_size,omitempty"` - GpuSharingConfig *NodeConfigGuestAcceleratorGpuSharingConfigInitParameters `json:"gpuSharingConfig,omitempty" tf:"gpu_sharing_config"` + GpuSharingConfig *NodeConfigGuestAcceleratorGpuSharingConfigInitParameters `json:"gpuSharingConfig,omitempty" tf:"gpu_sharing_config,omitempty"` // The type of the policy. Supports a single value: COMPACT. // Specifying COMPACT placement policy type places node pool's nodes in a closer // physical proximity in order to reduce network latency between nodes. 
- Type *string `json:"type,omitempty" tf:"type"` + Type *string `json:"type,omitempty" tf:"type,omitempty"` } type NodePoolNodeConfigGuestAcceleratorObservation struct { @@ -696,22 +717,22 @@ type NodePoolNodeConfigGuestAcceleratorObservation struct { type NodePoolNodeConfigGuestAcceleratorParameters struct { // +kubebuilder:validation:Optional - Count *float64 `json:"count,omitempty" tf:"count"` + Count *float64 `json:"count" tf:"count,omitempty"` // +kubebuilder:validation:Optional - GpuDriverInstallationConfig *NodeConfigGuestAcceleratorGpuDriverInstallationConfigParameters `json:"gpuDriverInstallationConfig,omitempty" tf:"gpu_driver_installation_config"` + GpuDriverInstallationConfig *NodeConfigGuestAcceleratorGpuDriverInstallationConfigParameters `json:"gpuDriverInstallationConfig,omitempty" tf:"gpu_driver_installation_config,omitempty"` // +kubebuilder:validation:Optional - GpuPartitionSize *string `json:"gpuPartitionSize,omitempty" tf:"gpu_partition_size"` + GpuPartitionSize *string `json:"gpuPartitionSize,omitempty" tf:"gpu_partition_size,omitempty"` // +kubebuilder:validation:Optional - GpuSharingConfig *NodeConfigGuestAcceleratorGpuSharingConfigParameters `json:"gpuSharingConfig,omitempty" tf:"gpu_sharing_config"` + GpuSharingConfig *NodeConfigGuestAcceleratorGpuSharingConfigParameters `json:"gpuSharingConfig,omitempty" tf:"gpu_sharing_config,omitempty"` // The type of the policy. Supports a single value: COMPACT. // Specifying COMPACT placement policy type places node pool's nodes in a closer // physical proximity in order to reduce network latency between nodes. 
// +kubebuilder:validation:Optional - Type *string `json:"type,omitempty" tf:"type"` + Type *string `json:"type" tf:"type,omitempty"` } type NodePoolNodeConfigGvnicInitParameters struct { @@ -789,6 +810,10 @@ type NodePoolNodeConfigInitParameters_2 struct { LocalSsdCount *float64 `json:"localSsdCount,omitempty" tf:"local_ssd_count,omitempty"` + // Possible Local SSD encryption modes: + // Accepted values are: + LocalSsdEncryptionMode *string `json:"localSsdEncryptionMode,omitempty" tf:"local_ssd_encryption_mode,omitempty"` + LoggingVariant *string `json:"loggingVariant,omitempty" tf:"logging_variant,omitempty"` MachineType *string `json:"machineType,omitempty" tf:"machine_type,omitempty"` @@ -835,6 +860,8 @@ type NodePoolNodeConfigInitParameters_2 struct { Spot *bool `json:"spot,omitempty" tf:"spot,omitempty"` + StoragePools []*string `json:"storagePools,omitempty" tf:"storage_pools,omitempty"` + Tags []*string `json:"tags,omitempty" tf:"tags,omitempty"` Taint []NodePoolNodeConfigTaintInitParameters `json:"taint,omitempty" tf:"taint,omitempty"` @@ -875,7 +902,7 @@ type NodePoolNodeConfigKubeletConfigParameters struct { CPUCfsQuotaPeriod *string `json:"cpuCfsQuotaPeriod,omitempty" tf:"cpu_cfs_quota_period,omitempty"` // +kubebuilder:validation:Optional - CPUManagerPolicy *string `json:"cpuManagerPolicy" tf:"cpu_manager_policy,omitempty"` + CPUManagerPolicy *string `json:"cpuManagerPolicy,omitempty" tf:"cpu_manager_policy,omitempty"` // +kubebuilder:validation:Optional InsecureKubeletReadonlyPortEnabled *string `json:"insecureKubeletReadonlyPortEnabled,omitempty" tf:"insecure_kubelet_readonly_port_enabled,omitempty"` @@ -887,6 +914,8 @@ type NodePoolNodeConfigKubeletConfigParameters struct { type NodePoolNodeConfigLinuxNodeConfigInitParameters struct { CgroupMode *string `json:"cgroupMode,omitempty" tf:"cgroup_mode,omitempty"` + HugepagesConfig *NodeConfigLinuxNodeConfigHugepagesConfigInitParameters `json:"hugepagesConfig,omitempty" tf:"hugepages_config,omitempty"` + 
// +mapType=granular Sysctls map[string]*string `json:"sysctls,omitempty" tf:"sysctls,omitempty"` } @@ -894,6 +923,8 @@ type NodePoolNodeConfigLinuxNodeConfigInitParameters struct { type NodePoolNodeConfigLinuxNodeConfigObservation struct { CgroupMode *string `json:"cgroupMode,omitempty" tf:"cgroup_mode,omitempty"` + HugepagesConfig *NodeConfigLinuxNodeConfigHugepagesConfigObservation `json:"hugepagesConfig,omitempty" tf:"hugepages_config,omitempty"` + // +mapType=granular Sysctls map[string]*string `json:"sysctls,omitempty" tf:"sysctls,omitempty"` } @@ -903,6 +934,9 @@ type NodePoolNodeConfigLinuxNodeConfigParameters struct { // +kubebuilder:validation:Optional CgroupMode *string `json:"cgroupMode,omitempty" tf:"cgroup_mode,omitempty"` + // +kubebuilder:validation:Optional + HugepagesConfig *NodeConfigLinuxNodeConfigHugepagesConfigParameters `json:"hugepagesConfig,omitempty" tf:"hugepages_config,omitempty"` + // +kubebuilder:validation:Optional // +mapType=granular Sysctls map[string]*string `json:"sysctls,omitempty" tf:"sysctls,omitempty"` @@ -966,6 +1000,10 @@ type NodePoolNodeConfigObservation_2 struct { LocalSsdCount *float64 `json:"localSsdCount,omitempty" tf:"local_ssd_count,omitempty"` + // Possible Local SSD encryption modes: + // Accepted values are: + LocalSsdEncryptionMode *string `json:"localSsdEncryptionMode,omitempty" tf:"local_ssd_encryption_mode,omitempty"` + LoggingVariant *string `json:"loggingVariant,omitempty" tf:"logging_variant,omitempty"` MachineType *string `json:"machineType,omitempty" tf:"machine_type,omitempty"` @@ -1002,6 +1040,8 @@ type NodePoolNodeConfigObservation_2 struct { Spot *bool `json:"spot,omitempty" tf:"spot,omitempty"` + StoragePools []*string `json:"storagePools,omitempty" tf:"storage_pools,omitempty"` + Tags []*string `json:"tags,omitempty" tf:"tags,omitempty"` Taint []NodePoolNodeConfigTaintObservation `json:"taint,omitempty" tf:"taint,omitempty"` @@ -1071,6 +1111,11 @@ type NodePoolNodeConfigParameters_2 struct { // 
+kubebuilder:validation:Optional LocalSsdCount *float64 `json:"localSsdCount,omitempty" tf:"local_ssd_count,omitempty"` + // Possible Local SSD encryption modes: + // Accepted values are: + // +kubebuilder:validation:Optional + LocalSsdEncryptionMode *string `json:"localSsdEncryptionMode,omitempty" tf:"local_ssd_encryption_mode,omitempty"` + // +kubebuilder:validation:Optional LoggingVariant *string `json:"loggingVariant,omitempty" tf:"logging_variant,omitempty"` @@ -1132,6 +1177,9 @@ type NodePoolNodeConfigParameters_2 struct { // +kubebuilder:validation:Optional Spot *bool `json:"spot,omitempty" tf:"spot,omitempty"` + // +kubebuilder:validation:Optional + StoragePools []*string `json:"storagePools,omitempty" tf:"storage_pools,omitempty"` + // +kubebuilder:validation:Optional Tags []*string `json:"tags,omitempty" tf:"tags,omitempty"` diff --git a/apis/containerattached/v1beta2/zz_cluster_types.go b/apis/containerattached/v1beta2/zz_cluster_types.go index 201d1d4ec..bdd25a57b 100755 --- a/apis/containerattached/v1beta2/zz_cluster_types.go +++ b/apis/containerattached/v1beta2/zz_cluster_types.go @@ -107,7 +107,7 @@ type ClusterInitParameters struct { // Structure is documented below. BinaryAuthorization *BinaryAuthorizationInitParameters `json:"binaryAuthorization,omitempty" tf:"binary_authorization,omitempty"` - // Policy to determine what flags to send on delete. + // Policy to determine what flags to send on delete. Possible values: DELETE, DELETE_IGNORE_ERRORS DeletionPolicy *string `json:"deletionPolicy,omitempty" tf:"deletion_policy,omitempty"` // A human readable description of this attached cluster. Cannot be longer @@ -152,6 +152,10 @@ type ClusterInitParameters struct { // Support for proxy configuration. // Structure is documented below. ProxyConfig *ProxyConfigInitParameters `json:"proxyConfig,omitempty" tf:"proxy_config,omitempty"` + + // Enable/Disable Security Posture API features for the cluster. + // Structure is documented below. 
+ SecurityPostureConfig *SecurityPostureConfigInitParameters `json:"securityPostureConfig,omitempty" tf:"security_posture_config,omitempty"` } type ClusterObservation struct { @@ -181,7 +185,7 @@ type ClusterObservation struct { // Output only. The time at which this cluster was created. CreateTime *string `json:"createTime,omitempty" tf:"create_time,omitempty"` - // Policy to determine what flags to send on delete. + // Policy to determine what flags to send on delete. Possible values: DELETE, DELETE_IGNORE_ERRORS DeletionPolicy *string `json:"deletionPolicy,omitempty" tf:"deletion_policy,omitempty"` // A human readable description of this attached cluster. Cannot be longer @@ -246,6 +250,10 @@ type ClusterObservation struct { // If set, there are currently changes in flight to the cluster. Reconciling *bool `json:"reconciling,omitempty" tf:"reconciling,omitempty"` + // Enable/Disable Security Posture API features for the cluster. + // Structure is documented below. + SecurityPostureConfig *SecurityPostureConfigObservation `json:"securityPostureConfig,omitempty" tf:"security_posture_config,omitempty"` + // The current state of the cluster. Possible values: // STATE_UNSPECIFIED, PROVISIONING, RUNNING, RECONCILING, STOPPING, ERROR, // DEGRADED @@ -284,7 +292,7 @@ type ClusterParameters struct { // +kubebuilder:validation:Optional BinaryAuthorization *BinaryAuthorizationParameters `json:"binaryAuthorization,omitempty" tf:"binary_authorization,omitempty"` - // Policy to determine what flags to send on delete. + // Policy to determine what flags to send on delete. Possible values: DELETE, DELETE_IGNORE_ERRORS // +kubebuilder:validation:Optional DeletionPolicy *string `json:"deletionPolicy,omitempty" tf:"deletion_policy,omitempty"` @@ -343,6 +351,11 @@ type ClusterParameters struct { // Structure is documented below. 
// +kubebuilder:validation:Optional ProxyConfig *ProxyConfigParameters `json:"proxyConfig,omitempty" tf:"proxy_config,omitempty"` + + // Enable/Disable Security Posture API features for the cluster. + // Structure is documented below. + // +kubebuilder:validation:Optional + SecurityPostureConfig *SecurityPostureConfigParameters `json:"securityPostureConfig,omitempty" tf:"security_posture_config,omitempty"` } type ComponentConfigInitParameters struct { @@ -547,6 +560,28 @@ type ProxyConfigParameters struct { KubernetesSecret *KubernetesSecretParameters `json:"kubernetesSecret,omitempty" tf:"kubernetes_secret,omitempty"` } +type SecurityPostureConfigInitParameters struct { + + // Sets the mode of the Kubernetes security posture API's workload vulnerability scanning. + // Possible values are: VULNERABILITY_DISABLED, VULNERABILITY_ENTERPRISE. + VulnerabilityMode *string `json:"vulnerabilityMode,omitempty" tf:"vulnerability_mode,omitempty"` +} + +type SecurityPostureConfigObservation struct { + + // Sets the mode of the Kubernetes security posture API's workload vulnerability scanning. + // Possible values are: VULNERABILITY_DISABLED, VULNERABILITY_ENTERPRISE. + VulnerabilityMode *string `json:"vulnerabilityMode,omitempty" tf:"vulnerability_mode,omitempty"` +} + +type SecurityPostureConfigParameters struct { + + // Sets the mode of the Kubernetes security posture API's workload vulnerability scanning. + // Possible values are: VULNERABILITY_DISABLED, VULNERABILITY_ENTERPRISE. 
+ // +kubebuilder:validation:Optional + VulnerabilityMode *string `json:"vulnerabilityMode" tf:"vulnerability_mode,omitempty"` +} + type WorkloadIdentityConfigInitParameters struct { } diff --git a/apis/containerattached/v1beta2/zz_generated.deepcopy.go b/apis/containerattached/v1beta2/zz_generated.deepcopy.go index 56e2c0d1f..af353f11d 100644 --- a/apis/containerattached/v1beta2/zz_generated.deepcopy.go +++ b/apis/containerattached/v1beta2/zz_generated.deepcopy.go @@ -289,6 +289,11 @@ func (in *ClusterInitParameters) DeepCopyInto(out *ClusterInitParameters) { *out = new(ProxyConfigInitParameters) (*in).DeepCopyInto(*out) } + if in.SecurityPostureConfig != nil { + in, out := &in.SecurityPostureConfig, &out.SecurityPostureConfig + *out = new(SecurityPostureConfigInitParameters) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterInitParameters. @@ -465,6 +470,11 @@ func (in *ClusterObservation) DeepCopyInto(out *ClusterObservation) { *out = new(bool) **out = **in } + if in.SecurityPostureConfig != nil { + in, out := &in.SecurityPostureConfig, &out.SecurityPostureConfig + *out = new(SecurityPostureConfigObservation) + (*in).DeepCopyInto(*out) + } if in.State != nil { in, out := &in.State, &out.State *out = new(string) @@ -583,6 +593,11 @@ func (in *ClusterParameters) DeepCopyInto(out *ClusterParameters) { *out = new(ProxyConfigParameters) (*in).DeepCopyInto(*out) } + if in.SecurityPostureConfig != nil { + in, out := &in.SecurityPostureConfig, &out.SecurityPostureConfig + *out = new(SecurityPostureConfigParameters) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterParameters. @@ -1213,6 +1228,66 @@ func (in *ProxyConfigParameters) DeepCopy() *ProxyConfigParameters { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SecurityPostureConfigInitParameters) DeepCopyInto(out *SecurityPostureConfigInitParameters) { + *out = *in + if in.VulnerabilityMode != nil { + in, out := &in.VulnerabilityMode, &out.VulnerabilityMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityPostureConfigInitParameters. +func (in *SecurityPostureConfigInitParameters) DeepCopy() *SecurityPostureConfigInitParameters { + if in == nil { + return nil + } + out := new(SecurityPostureConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityPostureConfigObservation) DeepCopyInto(out *SecurityPostureConfigObservation) { + *out = *in + if in.VulnerabilityMode != nil { + in, out := &in.VulnerabilityMode, &out.VulnerabilityMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityPostureConfigObservation. +func (in *SecurityPostureConfigObservation) DeepCopy() *SecurityPostureConfigObservation { + if in == nil { + return nil + } + out := new(SecurityPostureConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityPostureConfigParameters) DeepCopyInto(out *SecurityPostureConfigParameters) { + *out = *in + if in.VulnerabilityMode != nil { + in, out := &in.VulnerabilityMode, &out.VulnerabilityMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityPostureConfigParameters. 
+func (in *SecurityPostureConfigParameters) DeepCopy() *SecurityPostureConfigParameters { + if in == nil { + return nil + } + out := new(SecurityPostureConfigParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *WorkloadIdentityConfigInitParameters) DeepCopyInto(out *WorkloadIdentityConfigInitParameters) { *out = *in diff --git a/apis/containeraws/v1beta2/zz_generated.deepcopy.go b/apis/containeraws/v1beta2/zz_generated.deepcopy.go index 57a03c0e5..b7f16aacb 100644 --- a/apis/containeraws/v1beta2/zz_generated.deepcopy.go +++ b/apis/containeraws/v1beta2/zz_generated.deepcopy.go @@ -2012,6 +2012,111 @@ func (in *FleetParameters) DeepCopy() *FleetParameters { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeletConfigInitParameters) DeepCopyInto(out *KubeletConfigInitParameters) { + *out = *in + if in.CPUCfsQuota != nil { + in, out := &in.CPUCfsQuota, &out.CPUCfsQuota + *out = new(bool) + **out = **in + } + if in.CPUCfsQuotaPeriod != nil { + in, out := &in.CPUCfsQuotaPeriod, &out.CPUCfsQuotaPeriod + *out = new(string) + **out = **in + } + if in.CPUManagerPolicy != nil { + in, out := &in.CPUManagerPolicy, &out.CPUManagerPolicy + *out = new(string) + **out = **in + } + if in.PodPidsLimit != nil { + in, out := &in.PodPidsLimit, &out.PodPidsLimit + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletConfigInitParameters. +func (in *KubeletConfigInitParameters) DeepCopy() *KubeletConfigInitParameters { + if in == nil { + return nil + } + out := new(KubeletConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubeletConfigObservation) DeepCopyInto(out *KubeletConfigObservation) { + *out = *in + if in.CPUCfsQuota != nil { + in, out := &in.CPUCfsQuota, &out.CPUCfsQuota + *out = new(bool) + **out = **in + } + if in.CPUCfsQuotaPeriod != nil { + in, out := &in.CPUCfsQuotaPeriod, &out.CPUCfsQuotaPeriod + *out = new(string) + **out = **in + } + if in.CPUManagerPolicy != nil { + in, out := &in.CPUManagerPolicy, &out.CPUManagerPolicy + *out = new(string) + **out = **in + } + if in.PodPidsLimit != nil { + in, out := &in.PodPidsLimit, &out.PodPidsLimit + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletConfigObservation. +func (in *KubeletConfigObservation) DeepCopy() *KubeletConfigObservation { + if in == nil { + return nil + } + out := new(KubeletConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeletConfigParameters) DeepCopyInto(out *KubeletConfigParameters) { + *out = *in + if in.CPUCfsQuota != nil { + in, out := &in.CPUCfsQuota, &out.CPUCfsQuota + *out = new(bool) + **out = **in + } + if in.CPUCfsQuotaPeriod != nil { + in, out := &in.CPUCfsQuotaPeriod, &out.CPUCfsQuotaPeriod + *out = new(string) + **out = **in + } + if in.CPUManagerPolicy != nil { + in, out := &in.CPUManagerPolicy, &out.CPUManagerPolicy + *out = new(string) + **out = **in + } + if in.PodPidsLimit != nil { + in, out := &in.PodPidsLimit, &out.PodPidsLimit + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletConfigParameters. 
+func (in *KubeletConfigParameters) DeepCopy() *KubeletConfigParameters { + if in == nil { + return nil + } + out := new(KubeletConfigParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MainVolumeInitParameters) DeepCopyInto(out *MainVolumeInitParameters) { *out = *in @@ -2449,6 +2554,11 @@ func (in *NodePoolInitParameters) DeepCopyInto(out *NodePoolInitParameters) { *out = new(ConfigInitParameters) (*in).DeepCopyInto(*out) } + if in.KubeletConfig != nil { + in, out := &in.KubeletConfig, &out.KubeletConfig + *out = new(KubeletConfigInitParameters) + (*in).DeepCopyInto(*out) + } if in.Management != nil { in, out := &in.Management, &out.Management *out = new(ManagementInitParameters) @@ -2588,6 +2698,11 @@ func (in *NodePoolObservation) DeepCopyInto(out *NodePoolObservation) { *out = new(string) **out = **in } + if in.KubeletConfig != nil { + in, out := &in.KubeletConfig, &out.KubeletConfig + *out = new(KubeletConfigObservation) + (*in).DeepCopyInto(*out) + } if in.Location != nil { in, out := &in.Location, &out.Location *out = new(string) @@ -2699,6 +2814,11 @@ func (in *NodePoolParameters) DeepCopyInto(out *NodePoolParameters) { *out = new(ConfigParameters) (*in).DeepCopyInto(*out) } + if in.KubeletConfig != nil { + in, out := &in.KubeletConfig, &out.KubeletConfig + *out = new(KubeletConfigParameters) + (*in).DeepCopyInto(*out) + } if in.Location != nil { in, out := &in.Location, &out.Location *out = new(string) diff --git a/apis/containeraws/v1beta2/zz_nodepool_types.go b/apis/containeraws/v1beta2/zz_nodepool_types.go index a565163e4..caddfa80e 100755 --- a/apis/containeraws/v1beta2/zz_nodepool_types.go +++ b/apis/containeraws/v1beta2/zz_nodepool_types.go @@ -322,6 +322,55 @@ type ConfigSSHConfigParameters struct { EC2KeyPair *string `json:"ec2KeyPair" tf:"ec2_key_pair,omitempty"` } +type KubeletConfigInitParameters struct { + + // Whether 
or not to enable CPU CFS quota. Defaults to true. + CPUCfsQuota *bool `json:"cpuCfsQuota,omitempty" tf:"cpu_cfs_quota,omitempty"` + + // Optional. The CPU CFS quota period to use for the node. Defaults to "100ms". + CPUCfsQuotaPeriod *string `json:"cpuCfsQuotaPeriod,omitempty" tf:"cpu_cfs_quota_period,omitempty"` + + // The CpuManagerPolicy to use for the node. Defaults to "none". + CPUManagerPolicy *string `json:"cpuManagerPolicy,omitempty" tf:"cpu_manager_policy,omitempty"` + + // Optional. The maximum number of PIDs in each pod running on the node. The limit scales automatically based on underlying machine size if left unset. + PodPidsLimit *float64 `json:"podPidsLimit,omitempty" tf:"pod_pids_limit,omitempty"` +} + +type KubeletConfigObservation struct { + + // Whether or not to enable CPU CFS quota. Defaults to true. + CPUCfsQuota *bool `json:"cpuCfsQuota,omitempty" tf:"cpu_cfs_quota,omitempty"` + + // Optional. The CPU CFS quota period to use for the node. Defaults to "100ms". + CPUCfsQuotaPeriod *string `json:"cpuCfsQuotaPeriod,omitempty" tf:"cpu_cfs_quota_period,omitempty"` + + // The CpuManagerPolicy to use for the node. Defaults to "none". + CPUManagerPolicy *string `json:"cpuManagerPolicy,omitempty" tf:"cpu_manager_policy,omitempty"` + + // Optional. The maximum number of PIDs in each pod running on the node. The limit scales automatically based on underlying machine size if left unset. + PodPidsLimit *float64 `json:"podPidsLimit,omitempty" tf:"pod_pids_limit,omitempty"` +} + +type KubeletConfigParameters struct { + + // Whether or not to enable CPU CFS quota. Defaults to true. + // +kubebuilder:validation:Optional + CPUCfsQuota *bool `json:"cpuCfsQuota,omitempty" tf:"cpu_cfs_quota,omitempty"` + + // Optional. The CPU CFS quota period to use for the node. Defaults to "100ms". 
+ // +kubebuilder:validation:Optional + CPUCfsQuotaPeriod *string `json:"cpuCfsQuotaPeriod,omitempty" tf:"cpu_cfs_quota_period,omitempty"` + + // The CpuManagerPolicy to use for the node. Defaults to "none". + // +kubebuilder:validation:Optional + CPUManagerPolicy *string `json:"cpuManagerPolicy,omitempty" tf:"cpu_manager_policy,omitempty"` + + // Optional. The maximum number of PIDs in each pod running on the node. The limit scales automatically based on underlying machine size if left unset. + // +kubebuilder:validation:Optional + PodPidsLimit *float64 `json:"podPidsLimit,omitempty" tf:"pod_pids_limit,omitempty"` +} + type ManagementInitParameters struct { // Optional. Whether or not the nodes will be automatically repaired. @@ -372,6 +421,9 @@ type NodePoolInitParameters struct { // The configuration of the node pool. Config *ConfigInitParameters `json:"config,omitempty" tf:"config,omitempty"` + // The kubelet configuration for the node pool. + KubeletConfig *KubeletConfigInitParameters `json:"kubeletConfig,omitempty" tf:"kubelet_config,omitempty"` + // The Management configuration for this node pool. Management *ManagementInitParameters `json:"management,omitempty" tf:"management,omitempty"` @@ -418,6 +470,9 @@ type NodePoolObservation struct { // an identifier for the resource with format projects/{{project}}/locations/{{location}}/awsClusters/{{cluster}}/awsNodePools/{{name}} ID *string `json:"id,omitempty" tf:"id,omitempty"` + // The kubelet configuration for the node pool. + KubeletConfig *KubeletConfigObservation `json:"kubeletConfig,omitempty" tf:"kubelet_config,omitempty"` + // The location for the resource Location *string `json:"location,omitempty" tf:"location,omitempty"` @@ -480,6 +535,10 @@ type NodePoolParameters struct { // +kubebuilder:validation:Optional Config *ConfigParameters `json:"config,omitempty" tf:"config,omitempty"` + // The kubelet configuration for the node pool. 
+ // +kubebuilder:validation:Optional + KubeletConfig *KubeletConfigParameters `json:"kubeletConfig,omitempty" tf:"kubelet_config,omitempty"` + // The location for the resource // +kubebuilder:validation:Required Location *string `json:"location" tf:"location,omitempty"` diff --git a/apis/dataproc/v1beta2/zz_cluster_types.go b/apis/dataproc/v1beta2/zz_cluster_types.go index a8e8fe5d5..7f33ebda5 100755 --- a/apis/dataproc/v1beta2/zz_cluster_types.go +++ b/apis/dataproc/v1beta2/zz_cluster_types.go @@ -656,6 +656,25 @@ type ClusterParameters struct { VirtualClusterConfig *VirtualClusterConfigParameters `json:"virtualClusterConfig,omitempty" tf:"virtual_cluster_config,omitempty"` } +type ConfidentialInstanceConfigInitParameters struct { + + // Defines whether the instance should have confidential compute enabled. + EnableConfidentialCompute *bool `json:"enableConfidentialCompute,omitempty" tf:"enable_confidential_compute,omitempty"` +} + +type ConfidentialInstanceConfigObservation struct { + + // Defines whether the instance should have confidential compute enabled. + EnableConfidentialCompute *bool `json:"enableConfidentialCompute,omitempty" tf:"enable_confidential_compute,omitempty"` +} + +type ConfidentialInstanceConfigParameters struct { + + // Defines whether the instance should have confidential compute enabled. 
+ // +kubebuilder:validation:Optional + EnableConfidentialCompute *bool `json:"enableConfidentialCompute,omitempty" tf:"enable_confidential_compute,omitempty"` +} + type ConfigInitParameters struct { // The number of local SSD disks to attach to the node, @@ -876,6 +895,9 @@ type EndpointConfigParameters struct { type GceClusterConfigInitParameters struct { + // Confidential Instance Config for clusters using Confidential VMs + ConfidentialInstanceConfig *ConfidentialInstanceConfigInitParameters `json:"confidentialInstanceConfig,omitempty" tf:"confidential_instance_config,omitempty"` + // By default, clusters are not restricted to internal IP addresses, // and will have ephemeral external IP addresses assigned to each instance. If set to true, all // instances in the cluster will only have internal IP addresses. Note: Private Google Access @@ -945,6 +967,9 @@ type GceClusterConfigInitParameters struct { type GceClusterConfigObservation struct { + // Confidential Instance Config for clusters using Confidential VMs + ConfidentialInstanceConfig *ConfidentialInstanceConfigObservation `json:"confidentialInstanceConfig,omitempty" tf:"confidential_instance_config,omitempty"` + // By default, clusters are not restricted to internal IP addresses, // and will have ephemeral external IP addresses assigned to each instance. If set to true, all // instances in the cluster will only have internal IP addresses. Note: Private Google Access @@ -1004,6 +1029,10 @@ type GceClusterConfigObservation struct { type GceClusterConfigParameters struct { + // Confidential Instance Config for clusters using Confidential VMs + // +kubebuilder:validation:Optional + ConfidentialInstanceConfig *ConfidentialInstanceConfigParameters `json:"confidentialInstanceConfig,omitempty" tf:"confidential_instance_config,omitempty"` + // By default, clusters are not restricted to internal IP addresses, // and will have ephemeral external IP addresses assigned to each instance. 
If set to true, all // instances in the cluster will only have internal IP addresses. Note: Private Google Access @@ -1162,6 +1191,8 @@ type InstanceFlexibilityPolicyInitParameters struct { // List of instance selection options that the group will use when creating new VMs. InstanceSelectionList []InstanceSelectionListInitParameters `json:"instanceSelectionList,omitempty" tf:"instance_selection_list,omitempty"` + + ProvisioningModelMix *ProvisioningModelMixInitParameters `json:"provisioningModelMix,omitempty" tf:"provisioning_model_mix,omitempty"` } type InstanceFlexibilityPolicyObservation struct { @@ -1170,6 +1201,8 @@ type InstanceFlexibilityPolicyObservation struct { InstanceSelectionList []InstanceSelectionListObservation `json:"instanceSelectionList,omitempty" tf:"instance_selection_list,omitempty"` InstanceSelectionResults []InstanceSelectionResultsObservation `json:"instanceSelectionResults,omitempty" tf:"instance_selection_results,omitempty"` + + ProvisioningModelMix *ProvisioningModelMixObservation `json:"provisioningModelMix,omitempty" tf:"provisioning_model_mix,omitempty"` } type InstanceFlexibilityPolicyParameters struct { @@ -1177,6 +1210,9 @@ type InstanceFlexibilityPolicyParameters struct { // List of instance selection options that the group will use when creating new VMs. 
// +kubebuilder:validation:Optional InstanceSelectionList []InstanceSelectionListParameters `json:"instanceSelectionList,omitempty" tf:"instance_selection_list,omitempty"` + + // +kubebuilder:validation:Optional + ProvisioningModelMix *ProvisioningModelMixParameters `json:"provisioningModelMix,omitempty" tf:"provisioning_model_mix,omitempty"` } type InstanceSelectionListInitParameters struct { @@ -2174,6 +2210,27 @@ type PreemptibleWorkerConfigParameters struct { Preemptibility *string `json:"preemptibility,omitempty" tf:"preemptibility,omitempty"` } +type ProvisioningModelMixInitParameters struct { + StandardCapacityBase *float64 `json:"standardCapacityBase,omitempty" tf:"standard_capacity_base,omitempty"` + + StandardCapacityPercentAboveBase *float64 `json:"standardCapacityPercentAboveBase,omitempty" tf:"standard_capacity_percent_above_base,omitempty"` +} + +type ProvisioningModelMixObservation struct { + StandardCapacityBase *float64 `json:"standardCapacityBase,omitempty" tf:"standard_capacity_base,omitempty"` + + StandardCapacityPercentAboveBase *float64 `json:"standardCapacityPercentAboveBase,omitempty" tf:"standard_capacity_percent_above_base,omitempty"` +} + +type ProvisioningModelMixParameters struct { + + // +kubebuilder:validation:Optional + StandardCapacityBase *float64 `json:"standardCapacityBase,omitempty" tf:"standard_capacity_base,omitempty"` + + // +kubebuilder:validation:Optional + StandardCapacityPercentAboveBase *float64 `json:"standardCapacityPercentAboveBase,omitempty" tf:"standard_capacity_percent_above_base,omitempty"` +} + type ReservationAffinityInitParameters struct { // Corresponds to the type of reservation consumption. 
diff --git a/apis/dataproc/v1beta2/zz_generated.deepcopy.go b/apis/dataproc/v1beta2/zz_generated.deepcopy.go index f8fc0cb7f..0c8fe0541 100644 --- a/apis/dataproc/v1beta2/zz_generated.deepcopy.go +++ b/apis/dataproc/v1beta2/zz_generated.deepcopy.go @@ -1747,6 +1747,66 @@ func (in *ClusterStatus) DeepCopy() *ClusterStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfidentialInstanceConfigInitParameters) DeepCopyInto(out *ConfidentialInstanceConfigInitParameters) { + *out = *in + if in.EnableConfidentialCompute != nil { + in, out := &in.EnableConfidentialCompute, &out.EnableConfidentialCompute + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfidentialInstanceConfigInitParameters. +func (in *ConfidentialInstanceConfigInitParameters) DeepCopy() *ConfidentialInstanceConfigInitParameters { + if in == nil { + return nil + } + out := new(ConfidentialInstanceConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfidentialInstanceConfigObservation) DeepCopyInto(out *ConfidentialInstanceConfigObservation) { + *out = *in + if in.EnableConfidentialCompute != nil { + in, out := &in.EnableConfidentialCompute, &out.EnableConfidentialCompute + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfidentialInstanceConfigObservation. +func (in *ConfidentialInstanceConfigObservation) DeepCopy() *ConfidentialInstanceConfigObservation { + if in == nil { + return nil + } + out := new(ConfidentialInstanceConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfidentialInstanceConfigParameters) DeepCopyInto(out *ConfidentialInstanceConfigParameters) { + *out = *in + if in.EnableConfidentialCompute != nil { + in, out := &in.EnableConfidentialCompute, &out.EnableConfidentialCompute + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfidentialInstanceConfigParameters. +func (in *ConfidentialInstanceConfigParameters) DeepCopy() *ConfidentialInstanceConfigParameters { + if in == nil { + return nil + } + out := new(ConfidentialInstanceConfigParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ConfigAutoscalingConfigInitParameters) DeepCopyInto(out *ConfigAutoscalingConfigInitParameters) { *out = *in @@ -3975,6 +4035,11 @@ func (in *EndpointConfigParameters) DeepCopy() *EndpointConfigParameters { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GceClusterConfigInitParameters) DeepCopyInto(out *GceClusterConfigInitParameters) { *out = *in + if in.ConfidentialInstanceConfig != nil { + in, out := &in.ConfidentialInstanceConfig, &out.ConfidentialInstanceConfig + *out = new(ConfidentialInstanceConfigInitParameters) + (*in).DeepCopyInto(*out) + } if in.InternalIPOnly != nil { in, out := &in.InternalIPOnly, &out.InternalIPOnly *out = new(bool) @@ -4138,6 +4203,11 @@ func (in *GceClusterConfigNodeGroupAffinityParameters) DeepCopy() *GceClusterCon // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *GceClusterConfigObservation) DeepCopyInto(out *GceClusterConfigObservation) { *out = *in + if in.ConfidentialInstanceConfig != nil { + in, out := &in.ConfidentialInstanceConfig, &out.ConfidentialInstanceConfig + *out = new(ConfidentialInstanceConfigObservation) + (*in).DeepCopyInto(*out) + } if in.InternalIPOnly != nil { in, out := &in.InternalIPOnly, &out.InternalIPOnly *out = new(bool) @@ -4231,6 +4301,11 @@ func (in *GceClusterConfigObservation) DeepCopy() *GceClusterConfigObservation { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GceClusterConfigParameters) DeepCopyInto(out *GceClusterConfigParameters) { *out = *in + if in.ConfidentialInstanceConfig != nil { + in, out := &in.ConfidentialInstanceConfig, &out.ConfidentialInstanceConfig + *out = new(ConfidentialInstanceConfigParameters) + (*in).DeepCopyInto(*out) + } if in.InternalIPOnly != nil { in, out := &in.InternalIPOnly, &out.InternalIPOnly *out = new(bool) @@ -6108,6 +6183,11 @@ func (in *InstanceFlexibilityPolicyInitParameters) DeepCopyInto(out *InstanceFle (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.ProvisioningModelMix != nil { + in, out := &in.ProvisioningModelMix, &out.ProvisioningModelMix + *out = new(ProvisioningModelMixInitParameters) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceFlexibilityPolicyInitParameters. @@ -6137,6 +6217,11 @@ func (in *InstanceFlexibilityPolicyObservation) DeepCopyInto(out *InstanceFlexib (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.ProvisioningModelMix != nil { + in, out := &in.ProvisioningModelMix, &out.ProvisioningModelMix + *out = new(ProvisioningModelMixObservation) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceFlexibilityPolicyObservation. 
@@ -6159,6 +6244,11 @@ func (in *InstanceFlexibilityPolicyParameters) DeepCopyInto(out *InstanceFlexibi (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.ProvisioningModelMix != nil { + in, out := &in.ProvisioningModelMix, &out.ProvisioningModelMix + *out = new(ProvisioningModelMixParameters) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceFlexibilityPolicyParameters. @@ -9064,6 +9154,11 @@ func (in *MetastoreServiceObservation) DeepCopyInto(out *MetastoreServiceObserva *out = new(string) **out = **in } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } if in.EffectiveLabels != nil { in, out := &in.EffectiveLabels, &out.EffectiveLabels *out = make(map[string]*string, len(*in)) @@ -11898,6 +11993,81 @@ func (in *PrestoJobQueryListParameters) DeepCopy() *PrestoJobQueryListParameters return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProvisioningModelMixInitParameters) DeepCopyInto(out *ProvisioningModelMixInitParameters) { + *out = *in + if in.StandardCapacityBase != nil { + in, out := &in.StandardCapacityBase, &out.StandardCapacityBase + *out = new(float64) + **out = **in + } + if in.StandardCapacityPercentAboveBase != nil { + in, out := &in.StandardCapacityPercentAboveBase, &out.StandardCapacityPercentAboveBase + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProvisioningModelMixInitParameters. +func (in *ProvisioningModelMixInitParameters) DeepCopy() *ProvisioningModelMixInitParameters { + if in == nil { + return nil + } + out := new(ProvisioningModelMixInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProvisioningModelMixObservation) DeepCopyInto(out *ProvisioningModelMixObservation) { + *out = *in + if in.StandardCapacityBase != nil { + in, out := &in.StandardCapacityBase, &out.StandardCapacityBase + *out = new(float64) + **out = **in + } + if in.StandardCapacityPercentAboveBase != nil { + in, out := &in.StandardCapacityPercentAboveBase, &out.StandardCapacityPercentAboveBase + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProvisioningModelMixObservation. +func (in *ProvisioningModelMixObservation) DeepCopy() *ProvisioningModelMixObservation { + if in == nil { + return nil + } + out := new(ProvisioningModelMixObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProvisioningModelMixParameters) DeepCopyInto(out *ProvisioningModelMixParameters) { + *out = *in + if in.StandardCapacityBase != nil { + in, out := &in.StandardCapacityBase, &out.StandardCapacityBase + *out = new(float64) + **out = **in + } + if in.StandardCapacityPercentAboveBase != nil { + in, out := &in.StandardCapacityPercentAboveBase, &out.StandardCapacityPercentAboveBase + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProvisioningModelMixParameters. +func (in *ProvisioningModelMixParameters) DeepCopy() *ProvisioningModelMixParameters { + if in == nil { + return nil + } + out := new(ProvisioningModelMixParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PysparkConfigInitParameters) DeepCopyInto(out *PysparkConfigInitParameters) { *out = *in diff --git a/apis/dataproc/v1beta2/zz_metastoreservice_types.go b/apis/dataproc/v1beta2/zz_metastoreservice_types.go index 2def9ec37..1547a92e9 100755 --- a/apis/dataproc/v1beta2/zz_metastoreservice_types.go +++ b/apis/dataproc/v1beta2/zz_metastoreservice_types.go @@ -456,6 +456,9 @@ type MetastoreServiceObservation struct { // Possible values are: MYSQL, SPANNER. DatabaseType *string `json:"databaseType,omitempty" tf:"database_type,omitempty"` + // Indicates if the dataproc metastore should be protected against accidental deletions. + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + // +mapType=granular EffectiveLabels map[string]*string `json:"effectiveLabels,omitempty" tf:"effective_labels,omitempty"` diff --git a/apis/dataproc/v1beta2/zz_workflowtemplate_types.go b/apis/dataproc/v1beta2/zz_workflowtemplate_types.go index 63b878cb2..0539e64b5 100755 --- a/apis/dataproc/v1beta2/zz_workflowtemplate_types.go +++ b/apis/dataproc/v1beta2/zz_workflowtemplate_types.go @@ -111,7 +111,7 @@ type ConfigGceClusterConfigInitParameters struct { // If true, all instances in the cluster will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. This internal_ip_only restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses. InternalIPOnly *bool `json:"internalIpOnly,omitempty" tf:"internal_ip_only,omitempty"` - // The Compute Engine metadata entries to add to all instances (see (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)). + // The Compute Engine metadata entries to add to all instances (see About VM metadata). 
// +mapType=granular Metadata map[string]*string `json:"metadata,omitempty" tf:"metadata,omitempty"` @@ -139,7 +139,7 @@ type ConfigGceClusterConfigInitParameters struct { // The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri. A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects//regions/us-east1/subnetworks/sub0 * sub0 Subnetwork *string `json:"subnetwork,omitempty" tf:"subnetwork,omitempty"` - // The Compute Engine tags to add to all instances (see (https://cloud.google.com/compute/docs/label-or-tag-resources#tags)). + // The Compute Engine tags to add to all instances (see Manage tags for resources). // +listType=set Tags []*string `json:"tags,omitempty" tf:"tags,omitempty"` @@ -152,7 +152,7 @@ type ConfigGceClusterConfigObservation struct { // If true, all instances in the cluster will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. This internal_ip_only restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses. InternalIPOnly *bool `json:"internalIpOnly,omitempty" tf:"internal_ip_only,omitempty"` - // The Compute Engine metadata entries to add to all instances (see (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)). + // The Compute Engine metadata entries to add to all instances (see About VM metadata). // +mapType=granular Metadata map[string]*string `json:"metadata,omitempty" tf:"metadata,omitempty"` @@ -180,7 +180,7 @@ type ConfigGceClusterConfigObservation struct { // The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri. A full URL, partial URI, or short name are valid. 
Examples: * https://www.googleapis.com/compute/v1/projects//regions/us-east1/subnetworks/sub0 * sub0 Subnetwork *string `json:"subnetwork,omitempty" tf:"subnetwork,omitempty"` - // The Compute Engine tags to add to all instances (see (https://cloud.google.com/compute/docs/label-or-tag-resources#tags)). + // The Compute Engine tags to add to all instances (see Manage tags for resources). // +listType=set Tags []*string `json:"tags,omitempty" tf:"tags,omitempty"` @@ -194,7 +194,7 @@ type ConfigGceClusterConfigParameters struct { // +kubebuilder:validation:Optional InternalIPOnly *bool `json:"internalIpOnly,omitempty" tf:"internal_ip_only,omitempty"` - // The Compute Engine metadata entries to add to all instances (see (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)). + // The Compute Engine metadata entries to add to all instances (see About VM metadata). // +kubebuilder:validation:Optional // +mapType=granular Metadata map[string]*string `json:"metadata,omitempty" tf:"metadata,omitempty"` @@ -231,7 +231,7 @@ type ConfigGceClusterConfigParameters struct { // +kubebuilder:validation:Optional Subnetwork *string `json:"subnetwork,omitempty" tf:"subnetwork,omitempty"` - // The Compute Engine tags to add to all instances (see (https://cloud.google.com/compute/docs/label-or-tag-resources#tags)). + // The Compute Engine tags to add to all instances (see Manage tags for resources). // +kubebuilder:validation:Optional // +listType=set Tags []*string `json:"tags,omitempty" tf:"tags,omitempty"` @@ -243,42 +243,42 @@ type ConfigGceClusterConfigParameters struct { type ConfigLifecycleConfigInitParameters struct { - // The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + // The lifetime duration of cluster. 
The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of JSON Mapping - Language Guide (proto 3)). AutoDeleteTTL *string `json:"autoDeleteTtl,omitempty" tf:"auto_delete_ttl,omitempty"` - // The time when cluster will be auto-deleted (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + // The time when cluster will be auto-deleted (see JSON representation of JSON Mapping - Language Guide (proto 3)). AutoDeleteTime *string `json:"autoDeleteTime,omitempty" tf:"auto_delete_time,omitempty"` - // The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json). + // The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of JSON Mapping - Language Guide (proto 3). IdleDeleteTTL *string `json:"idleDeleteTtl,omitempty" tf:"idle_delete_ttl,omitempty"` } type ConfigLifecycleConfigObservation struct { - // The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + // The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of JSON Mapping - Language Guide (proto 3)). 
AutoDeleteTTL *string `json:"autoDeleteTtl,omitempty" tf:"auto_delete_ttl,omitempty"` - // The time when cluster will be auto-deleted (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + // The time when cluster will be auto-deleted (see JSON representation of JSON Mapping - Language Guide (proto 3)). AutoDeleteTime *string `json:"autoDeleteTime,omitempty" tf:"auto_delete_time,omitempty"` - // The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json). + // The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of JSON Mapping - Language Guide (proto 3). IdleDeleteTTL *string `json:"idleDeleteTtl,omitempty" tf:"idle_delete_ttl,omitempty"` - // Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + // Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of JSON Mapping - Language Guide (proto 3)). IdleStartTime *string `json:"idleStartTime,omitempty" tf:"idle_start_time,omitempty"` } type ConfigLifecycleConfigParameters struct { - // The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + // The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. 
Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of JSON Mapping - Language Guide (proto 3)). // +kubebuilder:validation:Optional AutoDeleteTTL *string `json:"autoDeleteTtl,omitempty" tf:"auto_delete_ttl,omitempty"` - // The time when cluster will be auto-deleted (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + // The time when cluster will be auto-deleted (see JSON representation of JSON Mapping - Language Guide (proto 3)). // +kubebuilder:validation:Optional AutoDeleteTime *string `json:"autoDeleteTime,omitempty" tf:"auto_delete_time,omitempty"` - // The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json). + // The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of JSON Mapping - Language Guide (proto 3). // +kubebuilder:validation:Optional IdleDeleteTTL *string `json:"idleDeleteTtl,omitempty" tf:"idle_delete_ttl,omitempty"` } @@ -365,7 +365,7 @@ type ConfigMasterConfigInitParameters struct { // The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects/(https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, n1-standard-2`. MachineType *string `json:"machineType,omitempty" tf:"machine_type,omitempty"` - // Specifies the minimum cpu platform for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). 
+ // Specifies the minimum cpu platform for the Instance Group. See Minimum CPU platform. MinCPUPlatform *string `json:"minCpuPlatform,omitempty" tf:"min_cpu_platform,omitempty"` // The number of VM instances in the instance group. For master instance groups, must be set to 1. @@ -398,7 +398,7 @@ type ConfigMasterConfigObservation struct { // Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups. ManagedGroupConfig []ManagedGroupConfigObservation `json:"managedGroupConfig,omitempty" tf:"managed_group_config,omitempty"` - // Specifies the minimum cpu platform for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + // Specifies the minimum cpu platform for the Instance Group. See Minimum CPU platform. MinCPUPlatform *string `json:"minCpuPlatform,omitempty" tf:"min_cpu_platform,omitempty"` // The number of VM instances in the instance group. For master instance groups, must be set to 1. @@ -426,7 +426,7 @@ type ConfigMasterConfigParameters struct { // +kubebuilder:validation:Optional MachineType *string `json:"machineType,omitempty" tf:"machine_type,omitempty"` - // Specifies the minimum cpu platform for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + // Specifies the minimum cpu platform for the Instance Group. See Minimum CPU platform. // +kubebuilder:validation:Optional MinCPUPlatform *string `json:"minCpuPlatform,omitempty" tf:"min_cpu_platform,omitempty"` @@ -453,7 +453,7 @@ type ConfigSecondaryWorkerConfigInitParameters struct { // The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. 
Examples: * https://www.googleapis.com/compute/v1/projects/(https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, n1-standard-2`. MachineType *string `json:"machineType,omitempty" tf:"machine_type,omitempty"` - // Specifies the minimum cpu platform for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + // Specifies the minimum cpu platform for the Instance Group. See Minimum CPU platform. MinCPUPlatform *string `json:"minCpuPlatform,omitempty" tf:"min_cpu_platform,omitempty"` // The number of VM instances in the instance group. For master instance groups, must be set to 1. @@ -486,7 +486,7 @@ type ConfigSecondaryWorkerConfigObservation struct { // Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups. ManagedGroupConfig []SecondaryWorkerConfigManagedGroupConfigObservation `json:"managedGroupConfig,omitempty" tf:"managed_group_config,omitempty"` - // Specifies the minimum cpu platform for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + // Specifies the minimum cpu platform for the Instance Group. See Minimum CPU platform. MinCPUPlatform *string `json:"minCpuPlatform,omitempty" tf:"min_cpu_platform,omitempty"` // The number of VM instances in the instance group. For master instance groups, must be set to 1. @@ -514,7 +514,7 @@ type ConfigSecondaryWorkerConfigParameters struct { // +kubebuilder:validation:Optional MachineType *string `json:"machineType,omitempty" tf:"machine_type,omitempty"` - // Specifies the minimum cpu platform for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + // Specifies the minimum cpu platform for the Instance Group. See Minimum CPU platform. 
// +kubebuilder:validation:Optional MinCPUPlatform *string `json:"minCpuPlatform,omitempty" tf:"min_cpu_platform,omitempty"` @@ -670,7 +670,7 @@ type ConfigWorkerConfigInitParameters struct { // The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects/(https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, n1-standard-2`. MachineType *string `json:"machineType,omitempty" tf:"machine_type,omitempty"` - // Specifies the minimum cpu platform for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + // Specifies the minimum cpu platform for the Instance Group. See Minimum CPU platform. MinCPUPlatform *string `json:"minCpuPlatform,omitempty" tf:"min_cpu_platform,omitempty"` // The number of VM instances in the instance group. For master instance groups, must be set to 1. @@ -703,7 +703,7 @@ type ConfigWorkerConfigObservation struct { // Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups. ManagedGroupConfig []WorkerConfigManagedGroupConfigObservation `json:"managedGroupConfig,omitempty" tf:"managed_group_config,omitempty"` - // Specifies the minimum cpu platform for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + // Specifies the minimum cpu platform for the Instance Group. See Minimum CPU platform. MinCPUPlatform *string `json:"minCpuPlatform,omitempty" tf:"min_cpu_platform,omitempty"` // The number of VM instances in the instance group. For master instance groups, must be set to 1. 
@@ -731,7 +731,7 @@ type ConfigWorkerConfigParameters struct { // +kubebuilder:validation:Optional MachineType *string `json:"machineType,omitempty" tf:"machine_type,omitempty"` - // Specifies the minimum cpu platform for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + // Specifies the minimum cpu platform for the Instance Group. See Minimum CPU platform. // +kubebuilder:validation:Optional MinCPUPlatform *string `json:"minCpuPlatform,omitempty" tf:"min_cpu_platform,omitempty"` @@ -1035,7 +1035,7 @@ type InitializationActionsInitParameters struct { // Required. Cloud Storage URI of executable file. ExecutableFile *string `json:"executableFile,omitempty" tf:"executable_file,omitempty"` - // Amount of time executable has to complete. Default is 10 minutes (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. + // Amount of time executable has to complete. Default is 10 minutes (see JSON representation of JSON Mapping - Language Guide (proto 3)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. ExecutionTimeout *string `json:"executionTimeout,omitempty" tf:"execution_timeout,omitempty"` } @@ -1044,7 +1044,7 @@ type InitializationActionsObservation struct { // Required. Cloud Storage URI of executable file. ExecutableFile *string `json:"executableFile,omitempty" tf:"executable_file,omitempty"` - // Amount of time executable has to complete. Default is 10 minutes (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). 
Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. + // Amount of time executable has to complete. Default is 10 minutes (see JSON representation of JSON Mapping - Language Guide (proto 3)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. ExecutionTimeout *string `json:"executionTimeout,omitempty" tf:"execution_timeout,omitempty"` } @@ -1054,7 +1054,7 @@ type InitializationActionsParameters struct { // +kubebuilder:validation:Optional ExecutableFile *string `json:"executableFile,omitempty" tf:"executable_file,omitempty"` - // Amount of time executable has to complete. Default is 10 minutes (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. + // Amount of time executable has to complete. Default is 10 minutes (see JSON representation of JSON Mapping - Language Guide (proto 3)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. // +kubebuilder:validation:Optional ExecutionTimeout *string `json:"executionTimeout,omitempty" tf:"execution_timeout,omitempty"` } @@ -1252,7 +1252,7 @@ type ManagedClusterConfigInitParameters struct { // The config settings for software inside the cluster. 
SoftwareConfig *ConfigSoftwareConfigInitParameters `json:"softwareConfig,omitempty" tf:"software_config,omitempty"` - // A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). + // A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets). StagingBucket *string `json:"stagingBucket,omitempty" tf:"staging_bucket,omitempty"` // A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket. @@ -1294,7 +1294,7 @@ type ManagedClusterConfigObservation struct { // The config settings for software inside the cluster. SoftwareConfig *ConfigSoftwareConfigObservation `json:"softwareConfig,omitempty" tf:"software_config,omitempty"` - // A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. 
If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). + // A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets). StagingBucket *string `json:"stagingBucket,omitempty" tf:"staging_bucket,omitempty"` // A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket. @@ -1346,7 +1346,7 @@ type ManagedClusterConfigParameters struct { // +kubebuilder:validation:Optional SoftwareConfig *ConfigSoftwareConfigParameters `json:"softwareConfig,omitempty" tf:"software_config,omitempty"` - // A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. 
If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). + // A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets). // +kubebuilder:validation:Optional StagingBucket *string `json:"stagingBucket,omitempty" tf:"staging_bucket,omitempty"` diff --git a/apis/datastore/v1beta1/zz_generated.conversion_hubs.go b/apis/datastore/v1beta1/zz_generated.conversion_hubs.go deleted file mode 100755 index 083a481d1..000000000 --- a/apis/datastore/v1beta1/zz_generated.conversion_hubs.go +++ /dev/null @@ -1,10 +0,0 @@ -// SPDX-FileCopyrightText: 2024 The Crossplane Authors -// -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by upjet. DO NOT EDIT. - -package v1beta1 - -// Hub marks this type as a conversion hub. -func (tr *Index) Hub() {} diff --git a/apis/datastore/v1beta1/zz_generated.deepcopy.go b/apis/datastore/v1beta1/zz_generated.deepcopy.go deleted file mode 100644 index a5df7eae4..000000000 --- a/apis/datastore/v1beta1/zz_generated.deepcopy.go +++ /dev/null @@ -1,303 +0,0 @@ -//go:build !ignore_autogenerated - -// SPDX-FileCopyrightText: 2024 The Crossplane Authors -// -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by controller-gen. DO NOT EDIT. 
- -package v1beta1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Index) DeepCopyInto(out *Index) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Index. -func (in *Index) DeepCopy() *Index { - if in == nil { - return nil - } - out := new(Index) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Index) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IndexInitParameters) DeepCopyInto(out *IndexInitParameters) { - *out = *in - if in.Ancestor != nil { - in, out := &in.Ancestor, &out.Ancestor - *out = new(string) - **out = **in - } - if in.Kind != nil { - in, out := &in.Kind, &out.Kind - *out = new(string) - **out = **in - } - if in.Project != nil { - in, out := &in.Project, &out.Project - *out = new(string) - **out = **in - } - if in.Properties != nil { - in, out := &in.Properties, &out.Properties - *out = make([]PropertiesInitParameters, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexInitParameters. -func (in *IndexInitParameters) DeepCopy() *IndexInitParameters { - if in == nil { - return nil - } - out := new(IndexInitParameters) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *IndexList) DeepCopyInto(out *IndexList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Index, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexList. -func (in *IndexList) DeepCopy() *IndexList { - if in == nil { - return nil - } - out := new(IndexList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *IndexList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IndexObservation) DeepCopyInto(out *IndexObservation) { - *out = *in - if in.Ancestor != nil { - in, out := &in.Ancestor, &out.Ancestor - *out = new(string) - **out = **in - } - if in.ID != nil { - in, out := &in.ID, &out.ID - *out = new(string) - **out = **in - } - if in.IndexID != nil { - in, out := &in.IndexID, &out.IndexID - *out = new(string) - **out = **in - } - if in.Kind != nil { - in, out := &in.Kind, &out.Kind - *out = new(string) - **out = **in - } - if in.Project != nil { - in, out := &in.Project, &out.Project - *out = new(string) - **out = **in - } - if in.Properties != nil { - in, out := &in.Properties, &out.Properties - *out = make([]PropertiesObservation, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexObservation. 
-func (in *IndexObservation) DeepCopy() *IndexObservation { - if in == nil { - return nil - } - out := new(IndexObservation) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IndexParameters) DeepCopyInto(out *IndexParameters) { - *out = *in - if in.Ancestor != nil { - in, out := &in.Ancestor, &out.Ancestor - *out = new(string) - **out = **in - } - if in.Kind != nil { - in, out := &in.Kind, &out.Kind - *out = new(string) - **out = **in - } - if in.Project != nil { - in, out := &in.Project, &out.Project - *out = new(string) - **out = **in - } - if in.Properties != nil { - in, out := &in.Properties, &out.Properties - *out = make([]PropertiesParameters, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexParameters. -func (in *IndexParameters) DeepCopy() *IndexParameters { - if in == nil { - return nil - } - out := new(IndexParameters) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IndexSpec) DeepCopyInto(out *IndexSpec) { - *out = *in - in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) - in.ForProvider.DeepCopyInto(&out.ForProvider) - in.InitProvider.DeepCopyInto(&out.InitProvider) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexSpec. -func (in *IndexSpec) DeepCopy() *IndexSpec { - if in == nil { - return nil - } - out := new(IndexSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *IndexStatus) DeepCopyInto(out *IndexStatus) { - *out = *in - in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) - in.AtProvider.DeepCopyInto(&out.AtProvider) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexStatus. -func (in *IndexStatus) DeepCopy() *IndexStatus { - if in == nil { - return nil - } - out := new(IndexStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PropertiesInitParameters) DeepCopyInto(out *PropertiesInitParameters) { - *out = *in - if in.Direction != nil { - in, out := &in.Direction, &out.Direction - *out = new(string) - **out = **in - } - if in.Name != nil { - in, out := &in.Name, &out.Name - *out = new(string) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PropertiesInitParameters. -func (in *PropertiesInitParameters) DeepCopy() *PropertiesInitParameters { - if in == nil { - return nil - } - out := new(PropertiesInitParameters) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PropertiesObservation) DeepCopyInto(out *PropertiesObservation) { - *out = *in - if in.Direction != nil { - in, out := &in.Direction, &out.Direction - *out = new(string) - **out = **in - } - if in.Name != nil { - in, out := &in.Name, &out.Name - *out = new(string) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PropertiesObservation. -func (in *PropertiesObservation) DeepCopy() *PropertiesObservation { - if in == nil { - return nil - } - out := new(PropertiesObservation) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PropertiesParameters) DeepCopyInto(out *PropertiesParameters) { - *out = *in - if in.Direction != nil { - in, out := &in.Direction, &out.Direction - *out = new(string) - **out = **in - } - if in.Name != nil { - in, out := &in.Name, &out.Name - *out = new(string) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PropertiesParameters. -func (in *PropertiesParameters) DeepCopy() *PropertiesParameters { - if in == nil { - return nil - } - out := new(PropertiesParameters) - in.DeepCopyInto(out) - return out -} diff --git a/apis/datastore/v1beta1/zz_generated.managed.go b/apis/datastore/v1beta1/zz_generated.managed.go deleted file mode 100644 index a8d8461d1..000000000 --- a/apis/datastore/v1beta1/zz_generated.managed.go +++ /dev/null @@ -1,68 +0,0 @@ -// SPDX-FileCopyrightText: 2024 The Crossplane Authors -// -// SPDX-License-Identifier: Apache-2.0 -// Code generated by angryjet. DO NOT EDIT. - -package v1beta1 - -import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - -// GetCondition of this Index. -func (mg *Index) GetCondition(ct xpv1.ConditionType) xpv1.Condition { - return mg.Status.GetCondition(ct) -} - -// GetDeletionPolicy of this Index. -func (mg *Index) GetDeletionPolicy() xpv1.DeletionPolicy { - return mg.Spec.DeletionPolicy -} - -// GetManagementPolicies of this Index. -func (mg *Index) GetManagementPolicies() xpv1.ManagementPolicies { - return mg.Spec.ManagementPolicies -} - -// GetProviderConfigReference of this Index. -func (mg *Index) GetProviderConfigReference() *xpv1.Reference { - return mg.Spec.ProviderConfigReference -} - -// GetPublishConnectionDetailsTo of this Index. -func (mg *Index) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { - return mg.Spec.PublishConnectionDetailsTo -} - -// GetWriteConnectionSecretToReference of this Index. 
-func (mg *Index) GetWriteConnectionSecretToReference() *xpv1.SecretReference { - return mg.Spec.WriteConnectionSecretToReference -} - -// SetConditions of this Index. -func (mg *Index) SetConditions(c ...xpv1.Condition) { - mg.Status.SetConditions(c...) -} - -// SetDeletionPolicy of this Index. -func (mg *Index) SetDeletionPolicy(r xpv1.DeletionPolicy) { - mg.Spec.DeletionPolicy = r -} - -// SetManagementPolicies of this Index. -func (mg *Index) SetManagementPolicies(r xpv1.ManagementPolicies) { - mg.Spec.ManagementPolicies = r -} - -// SetProviderConfigReference of this Index. -func (mg *Index) SetProviderConfigReference(r *xpv1.Reference) { - mg.Spec.ProviderConfigReference = r -} - -// SetPublishConnectionDetailsTo of this Index. -func (mg *Index) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { - mg.Spec.PublishConnectionDetailsTo = r -} - -// SetWriteConnectionSecretToReference of this Index. -func (mg *Index) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { - mg.Spec.WriteConnectionSecretToReference = r -} diff --git a/apis/datastore/v1beta1/zz_generated.managedlist.go b/apis/datastore/v1beta1/zz_generated.managedlist.go deleted file mode 100644 index 30838c27c..000000000 --- a/apis/datastore/v1beta1/zz_generated.managedlist.go +++ /dev/null @@ -1,17 +0,0 @@ -// SPDX-FileCopyrightText: 2024 The Crossplane Authors -// -// SPDX-License-Identifier: Apache-2.0 -// Code generated by angryjet. DO NOT EDIT. - -package v1beta1 - -import resource "github.com/crossplane/crossplane-runtime/pkg/resource" - -// GetItems of this IndexList. 
-func (l *IndexList) GetItems() []resource.Managed { - items := make([]resource.Managed, len(l.Items)) - for i := range l.Items { - items[i] = &l.Items[i] - } - return items -} diff --git a/apis/datastore/v1beta1/zz_groupversion_info.go b/apis/datastore/v1beta1/zz_groupversion_info.go deleted file mode 100755 index 238412cbc..000000000 --- a/apis/datastore/v1beta1/zz_groupversion_info.go +++ /dev/null @@ -1,32 +0,0 @@ -// SPDX-FileCopyrightText: 2024 The Crossplane Authors -// -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by upjet. DO NOT EDIT. - -// +kubebuilder:object:generate=true -// +groupName=datastore.gcp.upbound.io -// +versionName=v1beta1 -package v1beta1 - -import ( - "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/controller-runtime/pkg/scheme" -) - -// Package type metadata. -const ( - CRDGroup = "datastore.gcp.upbound.io" - CRDVersion = "v1beta1" -) - -var ( - // CRDGroupVersion is the API Group Version used to register the objects - CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} - - // SchemeBuilder is used to add go types to the GroupVersionKind scheme - SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} - - // AddToScheme adds the types in this group-version to the given scheme. - AddToScheme = SchemeBuilder.AddToScheme -) diff --git a/apis/datastore/v1beta1/zz_index_types.go b/apis/datastore/v1beta1/zz_index_types.go deleted file mode 100755 index 292874da2..000000000 --- a/apis/datastore/v1beta1/zz_index_types.go +++ /dev/null @@ -1,175 +0,0 @@ -// SPDX-FileCopyrightText: 2024 The Crossplane Authors -// -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by upjet. DO NOT EDIT. - -package v1beta1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - - v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" -) - -type IndexInitParameters struct { - - // Policy for including ancestors in the index. 
- // Default value is NONE. - // Possible values are: NONE, ALL_ANCESTORS. - Ancestor *string `json:"ancestor,omitempty" tf:"ancestor,omitempty"` - - // The entity kind which the index applies to. - Kind *string `json:"kind,omitempty" tf:"kind,omitempty"` - - // The ID of the project in which the resource belongs. - // If it is not provided, the provider project is used. - Project *string `json:"project,omitempty" tf:"project,omitempty"` - - // An ordered list of properties to index on. - // Structure is documented below. - Properties []PropertiesInitParameters `json:"properties,omitempty" tf:"properties,omitempty"` -} - -type IndexObservation struct { - - // Policy for including ancestors in the index. - // Default value is NONE. - // Possible values are: NONE, ALL_ANCESTORS. - Ancestor *string `json:"ancestor,omitempty" tf:"ancestor,omitempty"` - - // an identifier for the resource with format projects/{{project}}/indexes/{{index_id}} - ID *string `json:"id,omitempty" tf:"id,omitempty"` - - // The index id. - IndexID *string `json:"indexId,omitempty" tf:"index_id,omitempty"` - - // The entity kind which the index applies to. - Kind *string `json:"kind,omitempty" tf:"kind,omitempty"` - - // The ID of the project in which the resource belongs. - // If it is not provided, the provider project is used. - Project *string `json:"project,omitempty" tf:"project,omitempty"` - - // An ordered list of properties to index on. - // Structure is documented below. - Properties []PropertiesObservation `json:"properties,omitempty" tf:"properties,omitempty"` -} - -type IndexParameters struct { - - // Policy for including ancestors in the index. - // Default value is NONE. - // Possible values are: NONE, ALL_ANCESTORS. - // +kubebuilder:validation:Optional - Ancestor *string `json:"ancestor,omitempty" tf:"ancestor,omitempty"` - - // The entity kind which the index applies to. 
- // +kubebuilder:validation:Optional - Kind *string `json:"kind,omitempty" tf:"kind,omitempty"` - - // The ID of the project in which the resource belongs. - // If it is not provided, the provider project is used. - // +kubebuilder:validation:Optional - Project *string `json:"project,omitempty" tf:"project,omitempty"` - - // An ordered list of properties to index on. - // Structure is documented below. - // +kubebuilder:validation:Optional - Properties []PropertiesParameters `json:"properties,omitempty" tf:"properties,omitempty"` -} - -type PropertiesInitParameters struct { - - // The direction the index should optimize for sorting. - // Possible values are: ASCENDING, DESCENDING. - Direction *string `json:"direction,omitempty" tf:"direction,omitempty"` - - // The property name to index. - Name *string `json:"name,omitempty" tf:"name,omitempty"` -} - -type PropertiesObservation struct { - - // The direction the index should optimize for sorting. - // Possible values are: ASCENDING, DESCENDING. - Direction *string `json:"direction,omitempty" tf:"direction,omitempty"` - - // The property name to index. - Name *string `json:"name,omitempty" tf:"name,omitempty"` -} - -type PropertiesParameters struct { - - // The direction the index should optimize for sorting. - // Possible values are: ASCENDING, DESCENDING. - // +kubebuilder:validation:Optional - Direction *string `json:"direction" tf:"direction,omitempty"` - - // The property name to index. - // +kubebuilder:validation:Optional - Name *string `json:"name" tf:"name,omitempty"` -} - -// IndexSpec defines the desired state of Index -type IndexSpec struct { - v1.ResourceSpec `json:",inline"` - ForProvider IndexParameters `json:"forProvider"` - // THIS IS A BETA FIELD. It will be honored - // unless the Management Policies feature flag is disabled. - // InitProvider holds the same fields as ForProvider, with the exception - // of Identifier and other resource reference fields. 
The fields that are - // in InitProvider are merged into ForProvider when the resource is created. - // The same fields are also added to the terraform ignore_changes hook, to - // avoid updating them after creation. This is useful for fields that are - // required on creation, but we do not desire to update them after creation, - // for example because of an external controller is managing them, like an - // autoscaler. - InitProvider IndexInitParameters `json:"initProvider,omitempty"` -} - -// IndexStatus defines the observed state of Index. -type IndexStatus struct { - v1.ResourceStatus `json:",inline"` - AtProvider IndexObservation `json:"atProvider,omitempty"` -} - -// +kubebuilder:object:root=true -// +kubebuilder:subresource:status -// +kubebuilder:storageversion - -// Index is the Schema for the Indexs API. Describes a composite index for Firestore in Datastore Mode. -// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" -// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" -// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" -// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" -// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,gcp} -type Index struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.kind) || (has(self.initProvider) && has(self.initProvider.kind))",message="spec.forProvider.kind is a required parameter" - Spec IndexSpec `json:"spec"` - Status IndexStatus `json:"status,omitempty"` -} - -// +kubebuilder:object:root=true - -// IndexList contains a list of Indexs -type IndexList 
struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []Index `json:"items"` -} - -// Repository type metadata. -var ( - Index_Kind = "Index" - Index_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Index_Kind}.String() - Index_KindAPIVersion = Index_Kind + "." + CRDGroupVersion.String() - Index_GroupVersionKind = CRDGroupVersion.WithKind(Index_Kind) -) - -func init() { - SchemeBuilder.Register(&Index{}, &IndexList{}) -} diff --git a/apis/dialogflowcx/v1beta2/zz_agent_types.go b/apis/dialogflowcx/v1beta2/zz_agent_types.go index 95babb204..3ef89622a 100755 --- a/apis/dialogflowcx/v1beta2/zz_agent_types.go +++ b/apis/dialogflowcx/v1beta2/zz_agent_types.go @@ -20,6 +20,12 @@ type AdvancedSettingsInitParameters struct { // Define behaviors for DTMF (dual tone multi frequency). DTMF settings does not override each other. DTMF settings set at different levels define DTMF detections running in parallel. Exposed at the following levels: DtmfSettings *DtmfSettingsInitParameters `json:"dtmfSettings,omitempty" tf:"dtmf_settings,omitempty"` + + // Settings for logging. Settings for Dialogflow History, Contact Center messages, StackDriver logs, and speech logging. Exposed at the following levels: + LoggingSettings *LoggingSettingsInitParameters `json:"loggingSettings,omitempty" tf:"logging_settings,omitempty"` + + // Settings for speech to text detection. Exposed at the following levels: + SpeechSettings *SpeechSettingsInitParameters `json:"speechSettings,omitempty" tf:"speech_settings,omitempty"` } type AdvancedSettingsObservation struct { @@ -29,6 +35,12 @@ type AdvancedSettingsObservation struct { // Define behaviors for DTMF (dual tone multi frequency). DTMF settings does not override each other. DTMF settings set at different levels define DTMF detections running in parallel. 
Exposed at the following levels: DtmfSettings *DtmfSettingsObservation `json:"dtmfSettings,omitempty" tf:"dtmf_settings,omitempty"` + + // Settings for logging. Settings for Dialogflow History, Contact Center messages, StackDriver logs, and speech logging. Exposed at the following levels: + LoggingSettings *LoggingSettingsObservation `json:"loggingSettings,omitempty" tf:"logging_settings,omitempty"` + + // Settings for speech to text detection. Exposed at the following levels: + SpeechSettings *SpeechSettingsObservation `json:"speechSettings,omitempty" tf:"speech_settings,omitempty"` } type AdvancedSettingsParameters struct { @@ -40,6 +52,14 @@ type AdvancedSettingsParameters struct { // Define behaviors for DTMF (dual tone multi frequency). DTMF settings does not override each other. DTMF settings set at different levels define DTMF detections running in parallel. Exposed at the following levels: // +kubebuilder:validation:Optional DtmfSettings *DtmfSettingsParameters `json:"dtmfSettings,omitempty" tf:"dtmf_settings,omitempty"` + + // Settings for logging. Settings for Dialogflow History, Contact Center messages, StackDriver logs, and speech logging. Exposed at the following levels: + // +kubebuilder:validation:Optional + LoggingSettings *LoggingSettingsParameters `json:"loggingSettings,omitempty" tf:"logging_settings,omitempty"` + + // Settings for speech to text detection. 
Exposed at the following levels: + // +kubebuilder:validation:Optional + SpeechSettings *SpeechSettingsParameters `json:"speechSettings,omitempty" tf:"speech_settings,omitempty"` } type AgentInitParameters struct { @@ -384,6 +404,103 @@ type GithubSettingsParameters struct { TrackingBranch *string `json:"trackingBranch,omitempty" tf:"tracking_branch,omitempty"` } +type LoggingSettingsInitParameters struct { + + // Enables consent-based end-user input redaction, if true, a pre-defined session parameter $session.params.conversation-redaction will be used to determine if the utterance should be redacted. + EnableConsentBasedRedaction *bool `json:"enableConsentBasedRedaction,omitempty" tf:"enable_consent_based_redaction,omitempty"` + + // Enables DF Interaction logging. + EnableInteractionLogging *bool `json:"enableInteractionLogging,omitempty" tf:"enable_interaction_logging,omitempty"` + + // Enables Google Cloud Logging. + EnableStackdriverLogging *bool `json:"enableStackdriverLogging,omitempty" tf:"enable_stackdriver_logging,omitempty"` +} + +type LoggingSettingsObservation struct { + + // Enables consent-based end-user input redaction, if true, a pre-defined session parameter $session.params.conversation-redaction will be used to determine if the utterance should be redacted. + EnableConsentBasedRedaction *bool `json:"enableConsentBasedRedaction,omitempty" tf:"enable_consent_based_redaction,omitempty"` + + // Enables DF Interaction logging. + EnableInteractionLogging *bool `json:"enableInteractionLogging,omitempty" tf:"enable_interaction_logging,omitempty"` + + // Enables Google Cloud Logging. + EnableStackdriverLogging *bool `json:"enableStackdriverLogging,omitempty" tf:"enable_stackdriver_logging,omitempty"` +} + +type LoggingSettingsParameters struct { + + // Enables consent-based end-user input redaction, if true, a pre-defined session parameter $session.params.conversation-redaction will be used to determine if the utterance should be redacted. 
+ // +kubebuilder:validation:Optional + EnableConsentBasedRedaction *bool `json:"enableConsentBasedRedaction,omitempty" tf:"enable_consent_based_redaction,omitempty"` + + // Enables DF Interaction logging. + // +kubebuilder:validation:Optional + EnableInteractionLogging *bool `json:"enableInteractionLogging,omitempty" tf:"enable_interaction_logging,omitempty"` + + // Enables Google Cloud Logging. + // +kubebuilder:validation:Optional + EnableStackdriverLogging *bool `json:"enableStackdriverLogging,omitempty" tf:"enable_stackdriver_logging,omitempty"` +} + +type SpeechSettingsInitParameters struct { + + // Sensitivity of the speech model that detects the end of speech. Scale from 0 to 100. + EndpointerSensitivity *float64 `json:"endpointerSensitivity,omitempty" tf:"endpointer_sensitivity,omitempty"` + + // Mapping from language to Speech-to-Text model. The mapped Speech-to-Text model will be selected for requests from its corresponding language. For more information, see Speech models. + // An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. + // +mapType=granular + Models map[string]*string `json:"models,omitempty" tf:"models,omitempty"` + + // Timeout before detecting no speech. + // A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". + NoSpeechTimeout *string `json:"noSpeechTimeout,omitempty" tf:"no_speech_timeout,omitempty"` + + // Use timeout based endpointing, interpreting endpointer sensitivity as seconds of timeout value. + UseTimeoutBasedEndpointing *bool `json:"useTimeoutBasedEndpointing,omitempty" tf:"use_timeout_based_endpointing,omitempty"` +} + +type SpeechSettingsObservation struct { + + // Sensitivity of the speech model that detects the end of speech. Scale from 0 to 100. + EndpointerSensitivity *float64 `json:"endpointerSensitivity,omitempty" tf:"endpointer_sensitivity,omitempty"` + + // Mapping from language to Speech-to-Text model. 
The mapped Speech-to-Text model will be selected for requests from its corresponding language. For more information, see Speech models. + // An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. + // +mapType=granular + Models map[string]*string `json:"models,omitempty" tf:"models,omitempty"` + + // Timeout before detecting no speech. + // A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". + NoSpeechTimeout *string `json:"noSpeechTimeout,omitempty" tf:"no_speech_timeout,omitempty"` + + // Use timeout based endpointing, interpreting endpointer sensitivity as seconds of timeout value. + UseTimeoutBasedEndpointing *bool `json:"useTimeoutBasedEndpointing,omitempty" tf:"use_timeout_based_endpointing,omitempty"` +} + +type SpeechSettingsParameters struct { + + // Sensitivity of the speech model that detects the end of speech. Scale from 0 to 100. + // +kubebuilder:validation:Optional + EndpointerSensitivity *float64 `json:"endpointerSensitivity,omitempty" tf:"endpointer_sensitivity,omitempty"` + + // Mapping from language to Speech-to-Text model. The mapped Speech-to-Text model will be selected for requests from its corresponding language. For more information, see Speech models. + // An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. + // +kubebuilder:validation:Optional + // +mapType=granular + Models map[string]*string `json:"models,omitempty" tf:"models,omitempty"` + + // Timeout before detecting no speech. + // A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". + // +kubebuilder:validation:Optional + NoSpeechTimeout *string `json:"noSpeechTimeout,omitempty" tf:"no_speech_timeout,omitempty"` + + // Use timeout based endpointing, interpreting endpointer sensitivity as seconds of timeout value. 
+ // +kubebuilder:validation:Optional + UseTimeoutBasedEndpointing *bool `json:"useTimeoutBasedEndpointing,omitempty" tf:"use_timeout_based_endpointing,omitempty"` +} + type SpeechToTextSettingsInitParameters struct { // Whether to use speech adaptation for speech recognition. diff --git a/apis/dialogflowcx/v1beta2/zz_flow_types.go b/apis/dialogflowcx/v1beta2/zz_flow_types.go index dd520915b..b2ebbbd29 100755 --- a/apis/dialogflowcx/v1beta2/zz_flow_types.go +++ b/apis/dialogflowcx/v1beta2/zz_flow_types.go @@ -74,6 +74,103 @@ type AdvancedSettingsDtmfSettingsParameters struct { MaxDigits *float64 `json:"maxDigits,omitempty" tf:"max_digits,omitempty"` } +type AdvancedSettingsLoggingSettingsInitParameters struct { + + // Enables consent-based end-user input redaction, if true, a pre-defined session parameter $session.params.conversation-redaction will be used to determine if the utterance should be redacted. + EnableConsentBasedRedaction *bool `json:"enableConsentBasedRedaction,omitempty" tf:"enable_consent_based_redaction,omitempty"` + + // Enables DF Interaction logging. + EnableInteractionLogging *bool `json:"enableInteractionLogging,omitempty" tf:"enable_interaction_logging,omitempty"` + + // Enables Google Cloud Logging. + EnableStackdriverLogging *bool `json:"enableStackdriverLogging,omitempty" tf:"enable_stackdriver_logging,omitempty"` +} + +type AdvancedSettingsLoggingSettingsObservation struct { + + // Enables consent-based end-user input redaction, if true, a pre-defined session parameter $session.params.conversation-redaction will be used to determine if the utterance should be redacted. + EnableConsentBasedRedaction *bool `json:"enableConsentBasedRedaction,omitempty" tf:"enable_consent_based_redaction,omitempty"` + + // Enables DF Interaction logging. + EnableInteractionLogging *bool `json:"enableInteractionLogging,omitempty" tf:"enable_interaction_logging,omitempty"` + + // Enables Google Cloud Logging. 
+ EnableStackdriverLogging *bool `json:"enableStackdriverLogging,omitempty" tf:"enable_stackdriver_logging,omitempty"` +} + +type AdvancedSettingsLoggingSettingsParameters struct { + + // Enables consent-based end-user input redaction, if true, a pre-defined session parameter $session.params.conversation-redaction will be used to determine if the utterance should be redacted. + // +kubebuilder:validation:Optional + EnableConsentBasedRedaction *bool `json:"enableConsentBasedRedaction,omitempty" tf:"enable_consent_based_redaction,omitempty"` + + // Enables DF Interaction logging. + // +kubebuilder:validation:Optional + EnableInteractionLogging *bool `json:"enableInteractionLogging,omitempty" tf:"enable_interaction_logging,omitempty"` + + // Enables Google Cloud Logging. + // +kubebuilder:validation:Optional + EnableStackdriverLogging *bool `json:"enableStackdriverLogging,omitempty" tf:"enable_stackdriver_logging,omitempty"` +} + +type AdvancedSettingsSpeechSettingsInitParameters struct { + + // Sensitivity of the speech model that detects the end of speech. Scale from 0 to 100. + EndpointerSensitivity *float64 `json:"endpointerSensitivity,omitempty" tf:"endpointer_sensitivity,omitempty"` + + // Mapping from language to Speech-to-Text model. The mapped Speech-to-Text model will be selected for requests from its corresponding language. For more information, see Speech models. + // An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. + // +mapType=granular + Models map[string]*string `json:"models,omitempty" tf:"models,omitempty"` + + // Timeout before detecting no speech. + // A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". + NoSpeechTimeout *string `json:"noSpeechTimeout,omitempty" tf:"no_speech_timeout,omitempty"` + + // Use timeout based endpointing, interpreting endpointer sensitivity as seconds of timeout value. 
+ UseTimeoutBasedEndpointing *bool `json:"useTimeoutBasedEndpointing,omitempty" tf:"use_timeout_based_endpointing,omitempty"` +} + +type AdvancedSettingsSpeechSettingsObservation struct { + + // Sensitivity of the speech model that detects the end of speech. Scale from 0 to 100. + EndpointerSensitivity *float64 `json:"endpointerSensitivity,omitempty" tf:"endpointer_sensitivity,omitempty"` + + // Mapping from language to Speech-to-Text model. The mapped Speech-to-Text model will be selected for requests from its corresponding language. For more information, see Speech models. + // An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. + // +mapType=granular + Models map[string]*string `json:"models,omitempty" tf:"models,omitempty"` + + // Timeout before detecting no speech. + // A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". + NoSpeechTimeout *string `json:"noSpeechTimeout,omitempty" tf:"no_speech_timeout,omitempty"` + + // Use timeout based endpointing, interpreting endpointer sensitivity as seconds of timeout value. + UseTimeoutBasedEndpointing *bool `json:"useTimeoutBasedEndpointing,omitempty" tf:"use_timeout_based_endpointing,omitempty"` +} + +type AdvancedSettingsSpeechSettingsParameters struct { + + // Sensitivity of the speech model that detects the end of speech. Scale from 0 to 100. + // +kubebuilder:validation:Optional + EndpointerSensitivity *float64 `json:"endpointerSensitivity,omitempty" tf:"endpointer_sensitivity,omitempty"` + + // Mapping from language to Speech-to-Text model. The mapped Speech-to-Text model will be selected for requests from its corresponding language. For more information, see Speech models. + // An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Models map[string]*string `json:"models,omitempty" tf:"models,omitempty"` + + // Timeout before detecting no speech. + // A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". + // +kubebuilder:validation:Optional + NoSpeechTimeout *string `json:"noSpeechTimeout,omitempty" tf:"no_speech_timeout,omitempty"` + + // Use timeout based endpointing, interpreting endpointer sensitivity as seconds of timeout value. + // +kubebuilder:validation:Optional + UseTimeoutBasedEndpointing *bool `json:"useTimeoutBasedEndpointing,omitempty" tf:"use_timeout_based_endpointing,omitempty"` +} + type ConditionalCasesInitParameters struct { // A JSON encoded list of cascading if-else conditions. Cases are mutually exclusive. The first one with a matching condition is selected, all the rest ignored. @@ -184,6 +281,12 @@ type FlowAdvancedSettingsInitParameters struct { // Define behaviors for DTMF (dual tone multi frequency). DTMF settings does not override each other. DTMF settings set at different levels define DTMF detections running in parallel. Exposed at the following levels: DtmfSettings *AdvancedSettingsDtmfSettingsInitParameters `json:"dtmfSettings,omitempty" tf:"dtmf_settings,omitempty"` + + // Settings for logging. Settings for Dialogflow History, Contact Center messages, StackDriver logs, and speech logging. Exposed at the following levels: + LoggingSettings *AdvancedSettingsLoggingSettingsInitParameters `json:"loggingSettings,omitempty" tf:"logging_settings,omitempty"` + + // Settings for speech to text detection. Exposed at the following levels: + SpeechSettings *AdvancedSettingsSpeechSettingsInitParameters `json:"speechSettings,omitempty" tf:"speech_settings,omitempty"` } type FlowAdvancedSettingsObservation struct { @@ -193,6 +296,12 @@ type FlowAdvancedSettingsObservation struct { // Define behaviors for DTMF (dual tone multi frequency). 
DTMF settings does not override each other. DTMF settings set at different levels define DTMF detections running in parallel. Exposed at the following levels: DtmfSettings *AdvancedSettingsDtmfSettingsObservation `json:"dtmfSettings,omitempty" tf:"dtmf_settings,omitempty"` + + // Settings for logging. Settings for Dialogflow History, Contact Center messages, StackDriver logs, and speech logging. Exposed at the following levels: + LoggingSettings *AdvancedSettingsLoggingSettingsObservation `json:"loggingSettings,omitempty" tf:"logging_settings,omitempty"` + + // Settings for speech to text detection. Exposed at the following levels: + SpeechSettings *AdvancedSettingsSpeechSettingsObservation `json:"speechSettings,omitempty" tf:"speech_settings,omitempty"` } type FlowAdvancedSettingsParameters struct { @@ -204,6 +313,14 @@ type FlowAdvancedSettingsParameters struct { // Define behaviors for DTMF (dual tone multi frequency). DTMF settings does not override each other. DTMF settings set at different levels define DTMF detections running in parallel. Exposed at the following levels: // +kubebuilder:validation:Optional DtmfSettings *AdvancedSettingsDtmfSettingsParameters `json:"dtmfSettings,omitempty" tf:"dtmf_settings,omitempty"` + + // Settings for logging. Settings for Dialogflow History, Contact Center messages, StackDriver logs, and speech logging. Exposed at the following levels: + // +kubebuilder:validation:Optional + LoggingSettings *AdvancedSettingsLoggingSettingsParameters `json:"loggingSettings,omitempty" tf:"logging_settings,omitempty"` + + // Settings for speech to text detection. 
Exposed at the following levels: + // +kubebuilder:validation:Optional + SpeechSettings *AdvancedSettingsSpeechSettingsParameters `json:"speechSettings,omitempty" tf:"speech_settings,omitempty"` } type FlowInitParameters struct { diff --git a/apis/dialogflowcx/v1beta2/zz_generated.deepcopy.go b/apis/dialogflowcx/v1beta2/zz_generated.deepcopy.go index d712aabe5..b3064730a 100644 --- a/apis/dialogflowcx/v1beta2/zz_generated.deepcopy.go +++ b/apis/dialogflowcx/v1beta2/zz_generated.deepcopy.go @@ -176,6 +176,16 @@ func (in *AdvancedSettingsInitParameters) DeepCopyInto(out *AdvancedSettingsInit *out = new(DtmfSettingsInitParameters) (*in).DeepCopyInto(*out) } + if in.LoggingSettings != nil { + in, out := &in.LoggingSettings, &out.LoggingSettings + *out = new(LoggingSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SpeechSettings != nil { + in, out := &in.SpeechSettings, &out.SpeechSettings + *out = new(SpeechSettingsInitParameters) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdvancedSettingsInitParameters. @@ -188,6 +198,96 @@ func (in *AdvancedSettingsInitParameters) DeepCopy() *AdvancedSettingsInitParame return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AdvancedSettingsLoggingSettingsInitParameters) DeepCopyInto(out *AdvancedSettingsLoggingSettingsInitParameters) { + *out = *in + if in.EnableConsentBasedRedaction != nil { + in, out := &in.EnableConsentBasedRedaction, &out.EnableConsentBasedRedaction + *out = new(bool) + **out = **in + } + if in.EnableInteractionLogging != nil { + in, out := &in.EnableInteractionLogging, &out.EnableInteractionLogging + *out = new(bool) + **out = **in + } + if in.EnableStackdriverLogging != nil { + in, out := &in.EnableStackdriverLogging, &out.EnableStackdriverLogging + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdvancedSettingsLoggingSettingsInitParameters. +func (in *AdvancedSettingsLoggingSettingsInitParameters) DeepCopy() *AdvancedSettingsLoggingSettingsInitParameters { + if in == nil { + return nil + } + out := new(AdvancedSettingsLoggingSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdvancedSettingsLoggingSettingsObservation) DeepCopyInto(out *AdvancedSettingsLoggingSettingsObservation) { + *out = *in + if in.EnableConsentBasedRedaction != nil { + in, out := &in.EnableConsentBasedRedaction, &out.EnableConsentBasedRedaction + *out = new(bool) + **out = **in + } + if in.EnableInteractionLogging != nil { + in, out := &in.EnableInteractionLogging, &out.EnableInteractionLogging + *out = new(bool) + **out = **in + } + if in.EnableStackdriverLogging != nil { + in, out := &in.EnableStackdriverLogging, &out.EnableStackdriverLogging + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdvancedSettingsLoggingSettingsObservation. 
+func (in *AdvancedSettingsLoggingSettingsObservation) DeepCopy() *AdvancedSettingsLoggingSettingsObservation { + if in == nil { + return nil + } + out := new(AdvancedSettingsLoggingSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdvancedSettingsLoggingSettingsParameters) DeepCopyInto(out *AdvancedSettingsLoggingSettingsParameters) { + *out = *in + if in.EnableConsentBasedRedaction != nil { + in, out := &in.EnableConsentBasedRedaction, &out.EnableConsentBasedRedaction + *out = new(bool) + **out = **in + } + if in.EnableInteractionLogging != nil { + in, out := &in.EnableInteractionLogging, &out.EnableInteractionLogging + *out = new(bool) + **out = **in + } + if in.EnableStackdriverLogging != nil { + in, out := &in.EnableStackdriverLogging, &out.EnableStackdriverLogging + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdvancedSettingsLoggingSettingsParameters. +func (in *AdvancedSettingsLoggingSettingsParameters) DeepCopy() *AdvancedSettingsLoggingSettingsParameters { + if in == nil { + return nil + } + out := new(AdvancedSettingsLoggingSettingsParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *AdvancedSettingsObservation) DeepCopyInto(out *AdvancedSettingsObservation) { *out = *in @@ -201,6 +301,16 @@ func (in *AdvancedSettingsObservation) DeepCopyInto(out *AdvancedSettingsObserva *out = new(DtmfSettingsObservation) (*in).DeepCopyInto(*out) } + if in.LoggingSettings != nil { + in, out := &in.LoggingSettings, &out.LoggingSettings + *out = new(LoggingSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.SpeechSettings != nil { + in, out := &in.SpeechSettings, &out.SpeechSettings + *out = new(SpeechSettingsObservation) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdvancedSettingsObservation. @@ -226,6 +336,16 @@ func (in *AdvancedSettingsParameters) DeepCopyInto(out *AdvancedSettingsParamete *out = new(DtmfSettingsParameters) (*in).DeepCopyInto(*out) } + if in.LoggingSettings != nil { + in, out := &in.LoggingSettings, &out.LoggingSettings + *out = new(LoggingSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.SpeechSettings != nil { + in, out := &in.SpeechSettings, &out.SpeechSettings + *out = new(SpeechSettingsParameters) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdvancedSettingsParameters. @@ -238,6 +358,144 @@ func (in *AdvancedSettingsParameters) DeepCopy() *AdvancedSettingsParameters { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AdvancedSettingsSpeechSettingsInitParameters) DeepCopyInto(out *AdvancedSettingsSpeechSettingsInitParameters) { + *out = *in + if in.EndpointerSensitivity != nil { + in, out := &in.EndpointerSensitivity, &out.EndpointerSensitivity + *out = new(float64) + **out = **in + } + if in.Models != nil { + in, out := &in.Models, &out.Models + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.NoSpeechTimeout != nil { + in, out := &in.NoSpeechTimeout, &out.NoSpeechTimeout + *out = new(string) + **out = **in + } + if in.UseTimeoutBasedEndpointing != nil { + in, out := &in.UseTimeoutBasedEndpointing, &out.UseTimeoutBasedEndpointing + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdvancedSettingsSpeechSettingsInitParameters. +func (in *AdvancedSettingsSpeechSettingsInitParameters) DeepCopy() *AdvancedSettingsSpeechSettingsInitParameters { + if in == nil { + return nil + } + out := new(AdvancedSettingsSpeechSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AdvancedSettingsSpeechSettingsObservation) DeepCopyInto(out *AdvancedSettingsSpeechSettingsObservation) { + *out = *in + if in.EndpointerSensitivity != nil { + in, out := &in.EndpointerSensitivity, &out.EndpointerSensitivity + *out = new(float64) + **out = **in + } + if in.Models != nil { + in, out := &in.Models, &out.Models + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.NoSpeechTimeout != nil { + in, out := &in.NoSpeechTimeout, &out.NoSpeechTimeout + *out = new(string) + **out = **in + } + if in.UseTimeoutBasedEndpointing != nil { + in, out := &in.UseTimeoutBasedEndpointing, &out.UseTimeoutBasedEndpointing + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdvancedSettingsSpeechSettingsObservation. +func (in *AdvancedSettingsSpeechSettingsObservation) DeepCopy() *AdvancedSettingsSpeechSettingsObservation { + if in == nil { + return nil + } + out := new(AdvancedSettingsSpeechSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AdvancedSettingsSpeechSettingsParameters) DeepCopyInto(out *AdvancedSettingsSpeechSettingsParameters) { + *out = *in + if in.EndpointerSensitivity != nil { + in, out := &in.EndpointerSensitivity, &out.EndpointerSensitivity + *out = new(float64) + **out = **in + } + if in.Models != nil { + in, out := &in.Models, &out.Models + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.NoSpeechTimeout != nil { + in, out := &in.NoSpeechTimeout, &out.NoSpeechTimeout + *out = new(string) + **out = **in + } + if in.UseTimeoutBasedEndpointing != nil { + in, out := &in.UseTimeoutBasedEndpointing, &out.UseTimeoutBasedEndpointing + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdvancedSettingsSpeechSettingsParameters. +func (in *AdvancedSettingsSpeechSettingsParameters) DeepCopy() *AdvancedSettingsSpeechSettingsParameters { + if in == nil { + return nil + } + out := new(AdvancedSettingsSpeechSettingsParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Agent) DeepCopyInto(out *Agent) { *out = *in @@ -2450,6 +2708,16 @@ func (in *FlowAdvancedSettingsInitParameters) DeepCopyInto(out *FlowAdvancedSett *out = new(AdvancedSettingsDtmfSettingsInitParameters) (*in).DeepCopyInto(*out) } + if in.LoggingSettings != nil { + in, out := &in.LoggingSettings, &out.LoggingSettings + *out = new(AdvancedSettingsLoggingSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SpeechSettings != nil { + in, out := &in.SpeechSettings, &out.SpeechSettings + *out = new(AdvancedSettingsSpeechSettingsInitParameters) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowAdvancedSettingsInitParameters. @@ -2475,6 +2743,16 @@ func (in *FlowAdvancedSettingsObservation) DeepCopyInto(out *FlowAdvancedSetting *out = new(AdvancedSettingsDtmfSettingsObservation) (*in).DeepCopyInto(*out) } + if in.LoggingSettings != nil { + in, out := &in.LoggingSettings, &out.LoggingSettings + *out = new(AdvancedSettingsLoggingSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.SpeechSettings != nil { + in, out := &in.SpeechSettings, &out.SpeechSettings + *out = new(AdvancedSettingsSpeechSettingsObservation) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowAdvancedSettingsObservation. 
@@ -2500,6 +2778,16 @@ func (in *FlowAdvancedSettingsParameters) DeepCopyInto(out *FlowAdvancedSettings *out = new(AdvancedSettingsDtmfSettingsParameters) (*in).DeepCopyInto(*out) } + if in.LoggingSettings != nil { + in, out := &in.LoggingSettings, &out.LoggingSettings + *out = new(AdvancedSettingsLoggingSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.SpeechSettings != nil { + in, out := &in.SpeechSettings, &out.SpeechSettings + *out = new(AdvancedSettingsSpeechSettingsParameters) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowAdvancedSettingsParameters. @@ -4155,6 +4443,96 @@ func (in *LiveAgentHandoffParameters) DeepCopy() *LiveAgentHandoffParameters { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingSettingsInitParameters) DeepCopyInto(out *LoggingSettingsInitParameters) { + *out = *in + if in.EnableConsentBasedRedaction != nil { + in, out := &in.EnableConsentBasedRedaction, &out.EnableConsentBasedRedaction + *out = new(bool) + **out = **in + } + if in.EnableInteractionLogging != nil { + in, out := &in.EnableInteractionLogging, &out.EnableInteractionLogging + *out = new(bool) + **out = **in + } + if in.EnableStackdriverLogging != nil { + in, out := &in.EnableStackdriverLogging, &out.EnableStackdriverLogging + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingSettingsInitParameters. +func (in *LoggingSettingsInitParameters) DeepCopy() *LoggingSettingsInitParameters { + if in == nil { + return nil + } + out := new(LoggingSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LoggingSettingsObservation) DeepCopyInto(out *LoggingSettingsObservation) { + *out = *in + if in.EnableConsentBasedRedaction != nil { + in, out := &in.EnableConsentBasedRedaction, &out.EnableConsentBasedRedaction + *out = new(bool) + **out = **in + } + if in.EnableInteractionLogging != nil { + in, out := &in.EnableInteractionLogging, &out.EnableInteractionLogging + *out = new(bool) + **out = **in + } + if in.EnableStackdriverLogging != nil { + in, out := &in.EnableStackdriverLogging, &out.EnableStackdriverLogging + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingSettingsObservation. +func (in *LoggingSettingsObservation) DeepCopy() *LoggingSettingsObservation { + if in == nil { + return nil + } + out := new(LoggingSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingSettingsParameters) DeepCopyInto(out *LoggingSettingsParameters) { + *out = *in + if in.EnableConsentBasedRedaction != nil { + in, out := &in.EnableConsentBasedRedaction, &out.EnableConsentBasedRedaction + *out = new(bool) + **out = **in + } + if in.EnableInteractionLogging != nil { + in, out := &in.EnableInteractionLogging, &out.EnableInteractionLogging + *out = new(bool) + **out = **in + } + if in.EnableStackdriverLogging != nil { + in, out := &in.EnableStackdriverLogging, &out.EnableStackdriverLogging + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingSettingsParameters. +func (in *LoggingSettingsParameters) DeepCopy() *LoggingSettingsParameters { + if in == nil { + return nil + } + out := new(LoggingSettingsParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *MessagesConversationSuccessInitParameters) DeepCopyInto(out *MessagesConversationSuccessInitParameters) { *out = *in @@ -7492,6 +7870,144 @@ func (in *SetParameterActionsParameters) DeepCopy() *SetParameterActionsParamete return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpeechSettingsInitParameters) DeepCopyInto(out *SpeechSettingsInitParameters) { + *out = *in + if in.EndpointerSensitivity != nil { + in, out := &in.EndpointerSensitivity, &out.EndpointerSensitivity + *out = new(float64) + **out = **in + } + if in.Models != nil { + in, out := &in.Models, &out.Models + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.NoSpeechTimeout != nil { + in, out := &in.NoSpeechTimeout, &out.NoSpeechTimeout + *out = new(string) + **out = **in + } + if in.UseTimeoutBasedEndpointing != nil { + in, out := &in.UseTimeoutBasedEndpointing, &out.UseTimeoutBasedEndpointing + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpeechSettingsInitParameters. +func (in *SpeechSettingsInitParameters) DeepCopy() *SpeechSettingsInitParameters { + if in == nil { + return nil + } + out := new(SpeechSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpeechSettingsObservation) DeepCopyInto(out *SpeechSettingsObservation) { + *out = *in + if in.EndpointerSensitivity != nil { + in, out := &in.EndpointerSensitivity, &out.EndpointerSensitivity + *out = new(float64) + **out = **in + } + if in.Models != nil { + in, out := &in.Models, &out.Models + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.NoSpeechTimeout != nil { + in, out := &in.NoSpeechTimeout, &out.NoSpeechTimeout + *out = new(string) + **out = **in + } + if in.UseTimeoutBasedEndpointing != nil { + in, out := &in.UseTimeoutBasedEndpointing, &out.UseTimeoutBasedEndpointing + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpeechSettingsObservation. +func (in *SpeechSettingsObservation) DeepCopy() *SpeechSettingsObservation { + if in == nil { + return nil + } + out := new(SpeechSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpeechSettingsParameters) DeepCopyInto(out *SpeechSettingsParameters) { + *out = *in + if in.EndpointerSensitivity != nil { + in, out := &in.EndpointerSensitivity, &out.EndpointerSensitivity + *out = new(float64) + **out = **in + } + if in.Models != nil { + in, out := &in.Models, &out.Models + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.NoSpeechTimeout != nil { + in, out := &in.NoSpeechTimeout, &out.NoSpeechTimeout + *out = new(string) + **out = **in + } + if in.UseTimeoutBasedEndpointing != nil { + in, out := &in.UseTimeoutBasedEndpointing, &out.UseTimeoutBasedEndpointing + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpeechSettingsParameters. +func (in *SpeechSettingsParameters) DeepCopy() *SpeechSettingsParameters { + if in == nil { + return nil + } + out := new(SpeechSettingsParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SpeechToTextSettingsInitParameters) DeepCopyInto(out *SpeechToTextSettingsInitParameters) { *out = *in diff --git a/apis/dns/v1beta2/zz_generated.deepcopy.go b/apis/dns/v1beta2/zz_generated.deepcopy.go index d17271491..a7ecdec00 100644 --- a/apis/dns/v1beta2/zz_generated.deepcopy.go +++ b/apis/dns/v1beta2/zz_generated.deepcopy.go @@ -157,6 +157,17 @@ func (in *AlternativeNameServerConfigTargetNameServersParameters) DeepCopy() *Al // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *BackupGeoHealthCheckedTargetsInitParameters) DeepCopyInto(out *BackupGeoHealthCheckedTargetsInitParameters) { *out = *in + if in.ExternalEndpoints != nil { + in, out := &in.ExternalEndpoints, &out.ExternalEndpoints + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.InternalLoadBalancers != nil { in, out := &in.InternalLoadBalancers, &out.InternalLoadBalancers *out = make([]HealthCheckedTargetsInternalLoadBalancersInitParameters, len(*in)) @@ -179,6 +190,17 @@ func (in *BackupGeoHealthCheckedTargetsInitParameters) DeepCopy() *BackupGeoHeal // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BackupGeoHealthCheckedTargetsObservation) DeepCopyInto(out *BackupGeoHealthCheckedTargetsObservation) { *out = *in + if in.ExternalEndpoints != nil { + in, out := &in.ExternalEndpoints, &out.ExternalEndpoints + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.InternalLoadBalancers != nil { in, out := &in.InternalLoadBalancers, &out.InternalLoadBalancers *out = make([]HealthCheckedTargetsInternalLoadBalancersObservation, len(*in)) @@ -201,6 +223,17 @@ func (in *BackupGeoHealthCheckedTargetsObservation) DeepCopy() *BackupGeoHealthC // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *BackupGeoHealthCheckedTargetsParameters) DeepCopyInto(out *BackupGeoHealthCheckedTargetsParameters) { *out = *in + if in.ExternalEndpoints != nil { + in, out := &in.ExternalEndpoints, &out.ExternalEndpoints + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.InternalLoadBalancers != nil { in, out := &in.InternalLoadBalancers, &out.InternalLoadBalancers *out = make([]HealthCheckedTargetsInternalLoadBalancersParameters, len(*in)) @@ -951,6 +984,17 @@ func (in *GkeClustersParameters) DeepCopy() *GkeClustersParameters { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *HealthCheckedTargetsInitParameters) DeepCopyInto(out *HealthCheckedTargetsInitParameters) { *out = *in + if in.ExternalEndpoints != nil { + in, out := &in.ExternalEndpoints, &out.ExternalEndpoints + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.InternalLoadBalancers != nil { in, out := &in.InternalLoadBalancers, &out.InternalLoadBalancers *out = make([]InternalLoadBalancersInitParameters, len(*in)) @@ -1123,6 +1167,17 @@ func (in *HealthCheckedTargetsInternalLoadBalancersParameters) DeepCopy() *Healt // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *HealthCheckedTargetsObservation) DeepCopyInto(out *HealthCheckedTargetsObservation) { *out = *in + if in.ExternalEndpoints != nil { + in, out := &in.ExternalEndpoints, &out.ExternalEndpoints + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.InternalLoadBalancers != nil { in, out := &in.InternalLoadBalancers, &out.InternalLoadBalancers *out = make([]InternalLoadBalancersObservation, len(*in)) @@ -1145,6 +1200,17 @@ func (in *HealthCheckedTargetsObservation) DeepCopy() *HealthCheckedTargetsObser // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *HealthCheckedTargetsParameters) DeepCopyInto(out *HealthCheckedTargetsParameters) { *out = *in + if in.ExternalEndpoints != nil { + in, out := &in.ExternalEndpoints, &out.ExternalEndpoints + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.InternalLoadBalancers != nil { in, out := &in.InternalLoadBalancers, &out.InternalLoadBalancers *out = make([]InternalLoadBalancersParameters, len(*in)) @@ -2507,6 +2573,17 @@ func (in *PrimaryBackupParameters) DeepCopy() *PrimaryBackupParameters { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PrimaryInitParameters) DeepCopyInto(out *PrimaryInitParameters) { *out = *in + if in.ExternalEndpoints != nil { + in, out := &in.ExternalEndpoints, &out.ExternalEndpoints + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.InternalLoadBalancers != nil { in, out := &in.InternalLoadBalancers, &out.InternalLoadBalancers *out = make([]PrimaryInternalLoadBalancersInitParameters, len(*in)) @@ -2759,6 +2836,17 @@ func (in *PrimaryInternalLoadBalancersParameters) DeepCopy() *PrimaryInternalLoa // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PrimaryObservation) DeepCopyInto(out *PrimaryObservation) { *out = *in + if in.ExternalEndpoints != nil { + in, out := &in.ExternalEndpoints, &out.ExternalEndpoints + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.InternalLoadBalancers != nil { in, out := &in.InternalLoadBalancers, &out.InternalLoadBalancers *out = make([]PrimaryInternalLoadBalancersObservation, len(*in)) @@ -2781,6 +2869,17 @@ func (in *PrimaryObservation) DeepCopy() *PrimaryObservation { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PrimaryParameters) DeepCopyInto(out *PrimaryParameters) { *out = *in + if in.ExternalEndpoints != nil { + in, out := &in.ExternalEndpoints, &out.ExternalEndpoints + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.InternalLoadBalancers != nil { in, out := &in.InternalLoadBalancers, &out.InternalLoadBalancers *out = make([]PrimaryInternalLoadBalancersParameters, len(*in)) @@ -3189,6 +3288,21 @@ func (in *RoutingPolicyInitParameters) DeepCopyInto(out *RoutingPolicyInitParame (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.HealthCheck != nil { + in, out := &in.HealthCheck, &out.HealthCheck + *out = new(string) + **out = **in + } + if in.HealthCheckRef != nil { + in, out := &in.HealthCheckRef, &out.HealthCheckRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.HealthCheckSelector != nil { + in, out := &in.HealthCheckSelector, &out.HealthCheckSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } if in.PrimaryBackup != nil { in, out := &in.PrimaryBackup, &out.PrimaryBackup *out = new(PrimaryBackupInitParameters) @@ -3228,6 +3342,11 @@ func (in *RoutingPolicyObservation) DeepCopyInto(out *RoutingPolicyObservation) (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.HealthCheck != nil { + in, out := &in.HealthCheck, &out.HealthCheck + *out = new(string) + **out = **in + } if in.PrimaryBackup != nil { in, out := &in.PrimaryBackup, &out.PrimaryBackup *out = new(PrimaryBackupObservation) @@ -3267,6 +3386,21 @@ func (in *RoutingPolicyParameters) DeepCopyInto(out *RoutingPolicyParameters) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.HealthCheck != nil { + in, out := &in.HealthCheck, &out.HealthCheck + *out = new(string) + **out = **in + } + if in.HealthCheckRef != nil { + in, out := &in.HealthCheckRef, &out.HealthCheckRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.HealthCheckSelector != nil { + in, out 
:= &in.HealthCheckSelector, &out.HealthCheckSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } if in.PrimaryBackup != nil { in, out := &in.PrimaryBackup, &out.PrimaryBackup *out = new(PrimaryBackupParameters) @@ -3449,6 +3583,17 @@ func (in *TargetNetworkParameters) DeepCopy() *TargetNetworkParameters { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *WrrHealthCheckedTargetsInitParameters) DeepCopyInto(out *WrrHealthCheckedTargetsInitParameters) { *out = *in + if in.ExternalEndpoints != nil { + in, out := &in.ExternalEndpoints, &out.ExternalEndpoints + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.InternalLoadBalancers != nil { in, out := &in.InternalLoadBalancers, &out.InternalLoadBalancers *out = make([]WrrHealthCheckedTargetsInternalLoadBalancersInitParameters, len(*in)) @@ -3621,6 +3766,17 @@ func (in *WrrHealthCheckedTargetsInternalLoadBalancersParameters) DeepCopy() *Wr // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *WrrHealthCheckedTargetsObservation) DeepCopyInto(out *WrrHealthCheckedTargetsObservation) { *out = *in + if in.ExternalEndpoints != nil { + in, out := &in.ExternalEndpoints, &out.ExternalEndpoints + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.InternalLoadBalancers != nil { in, out := &in.InternalLoadBalancers, &out.InternalLoadBalancers *out = make([]WrrHealthCheckedTargetsInternalLoadBalancersObservation, len(*in)) @@ -3643,6 +3799,17 @@ func (in *WrrHealthCheckedTargetsObservation) DeepCopy() *WrrHealthCheckedTarget // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *WrrHealthCheckedTargetsParameters) DeepCopyInto(out *WrrHealthCheckedTargetsParameters) { *out = *in + if in.ExternalEndpoints != nil { + in, out := &in.ExternalEndpoints, &out.ExternalEndpoints + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.InternalLoadBalancers != nil { in, out := &in.InternalLoadBalancers, &out.InternalLoadBalancers *out = make([]WrrHealthCheckedTargetsInternalLoadBalancersParameters, len(*in)) diff --git a/apis/dns/v1beta2/zz_generated.resolvers.go b/apis/dns/v1beta2/zz_generated.resolvers.go index 25bc939ee..b6a253522 100644 --- a/apis/dns/v1beta2/zz_generated.resolvers.go +++ b/apis/dns/v1beta2/zz_generated.resolvers.go @@ -252,6 +252,27 @@ func (mg *RecordSet) ResolveReferences(ctx context.Context, c client.Reader) err mg.Spec.ForProvider.ManagedZone = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ManagedZoneRef = rsp.ResolvedReference + if mg.Spec.ForProvider.RoutingPolicy != nil { + { + m, l, err = apisresolver.GetManagedResource("compute.gcp.upbound.io", "v1beta2", "HealthCheck", "HealthCheckList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RoutingPolicy.HealthCheck), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.RoutingPolicy.HealthCheckRef, + Selector: mg.Spec.ForProvider.RoutingPolicy.HealthCheckSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RoutingPolicy.HealthCheck") + } + mg.Spec.ForProvider.RoutingPolicy.HealthCheck = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RoutingPolicy.HealthCheckRef = rsp.ResolvedReference + + } if mg.Spec.ForProvider.RoutingPolicy 
!= nil { if mg.Spec.ForProvider.RoutingPolicy.PrimaryBackup != nil { if mg.Spec.ForProvider.RoutingPolicy.PrimaryBackup.Primary != nil { @@ -379,6 +400,27 @@ func (mg *RecordSet) ResolveReferences(ctx context.Context, c client.Reader) err mg.Spec.InitProvider.ManagedZone = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.ManagedZoneRef = rsp.ResolvedReference + if mg.Spec.InitProvider.RoutingPolicy != nil { + { + m, l, err = apisresolver.GetManagedResource("compute.gcp.upbound.io", "v1beta2", "HealthCheck", "HealthCheckList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.RoutingPolicy.HealthCheck), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.RoutingPolicy.HealthCheckRef, + Selector: mg.Spec.InitProvider.RoutingPolicy.HealthCheckSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.RoutingPolicy.HealthCheck") + } + mg.Spec.InitProvider.RoutingPolicy.HealthCheck = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.RoutingPolicy.HealthCheckRef = rsp.ResolvedReference + + } if mg.Spec.InitProvider.RoutingPolicy != nil { if mg.Spec.InitProvider.RoutingPolicy.PrimaryBackup != nil { if mg.Spec.InitProvider.RoutingPolicy.PrimaryBackup.Primary != nil { diff --git a/apis/dns/v1beta2/zz_recordset_types.go b/apis/dns/v1beta2/zz_recordset_types.go index a483521e0..3431836b2 100755 --- a/apis/dns/v1beta2/zz_recordset_types.go +++ b/apis/dns/v1beta2/zz_recordset_types.go @@ -15,6 +15,9 @@ import ( type BackupGeoHealthCheckedTargetsInitParameters struct { + // The list of external endpoint addresses to health check. 
+ ExternalEndpoints []*string `json:"externalEndpoints,omitempty" tf:"external_endpoints,omitempty"` + // The list of internal load balancers to health check. // Structure is documented below. InternalLoadBalancers []HealthCheckedTargetsInternalLoadBalancersInitParameters `json:"internalLoadBalancers,omitempty" tf:"internal_load_balancers,omitempty"` @@ -22,6 +25,9 @@ type BackupGeoHealthCheckedTargetsInitParameters struct { type BackupGeoHealthCheckedTargetsObservation struct { + // The list of external endpoint addresses to health check. + ExternalEndpoints []*string `json:"externalEndpoints,omitempty" tf:"external_endpoints,omitempty"` + // The list of internal load balancers to health check. // Structure is documented below. InternalLoadBalancers []HealthCheckedTargetsInternalLoadBalancersObservation `json:"internalLoadBalancers,omitempty" tf:"internal_load_balancers,omitempty"` @@ -29,10 +35,14 @@ type BackupGeoHealthCheckedTargetsObservation struct { type BackupGeoHealthCheckedTargetsParameters struct { + // The list of external endpoint addresses to health check. + // +kubebuilder:validation:Optional + ExternalEndpoints []*string `json:"externalEndpoints,omitempty" tf:"external_endpoints,omitempty"` + // The list of internal load balancers to health check. // Structure is documented below. // +kubebuilder:validation:Optional - InternalLoadBalancers []HealthCheckedTargetsInternalLoadBalancersParameters `json:"internalLoadBalancers" tf:"internal_load_balancers,omitempty"` + InternalLoadBalancers []HealthCheckedTargetsInternalLoadBalancersParameters `json:"internalLoadBalancers,omitempty" tf:"internal_load_balancers,omitempty"` } type BackupGeoInitParameters struct { @@ -121,6 +131,9 @@ type GeoParameters struct { type HealthCheckedTargetsInitParameters struct { + // The list of external endpoint addresses to health check. 
+ ExternalEndpoints []*string `json:"externalEndpoints,omitempty" tf:"external_endpoints,omitempty"` + // The list of internal load balancers to health check. // Structure is documented below. InternalLoadBalancers []InternalLoadBalancersInitParameters `json:"internalLoadBalancers,omitempty" tf:"internal_load_balancers,omitempty"` @@ -210,6 +223,9 @@ type HealthCheckedTargetsInternalLoadBalancersParameters struct { type HealthCheckedTargetsObservation struct { + // The list of external endpoint addresses to health check. + ExternalEndpoints []*string `json:"externalEndpoints,omitempty" tf:"external_endpoints,omitempty"` + // The list of internal load balancers to health check. // Structure is documented below. InternalLoadBalancers []InternalLoadBalancersObservation `json:"internalLoadBalancers,omitempty" tf:"internal_load_balancers,omitempty"` @@ -217,10 +233,14 @@ type HealthCheckedTargetsObservation struct { type HealthCheckedTargetsParameters struct { + // The list of external endpoint addresses to health check. + // +kubebuilder:validation:Optional + ExternalEndpoints []*string `json:"externalEndpoints,omitempty" tf:"external_endpoints,omitempty"` + // The list of internal load balancers to health check. // Structure is documented below. // +kubebuilder:validation:Optional - InternalLoadBalancers []InternalLoadBalancersParameters `json:"internalLoadBalancers" tf:"internal_load_balancers,omitempty"` + InternalLoadBalancers []InternalLoadBalancersParameters `json:"internalLoadBalancers,omitempty" tf:"internal_load_balancers,omitempty"` } type InternalLoadBalancersInitParameters struct { @@ -362,6 +382,9 @@ type PrimaryBackupParameters struct { type PrimaryInitParameters struct { + // The list of external endpoint addresses to health check. + ExternalEndpoints []*string `json:"externalEndpoints,omitempty" tf:"external_endpoints,omitempty"` + // The list of internal load balancers to health check. // Structure is documented below. 
InternalLoadBalancers []PrimaryInternalLoadBalancersInitParameters `json:"internalLoadBalancers,omitempty" tf:"internal_load_balancers,omitempty"` @@ -531,6 +554,9 @@ type PrimaryInternalLoadBalancersParameters struct { type PrimaryObservation struct { + // The list of external endpoint addresses to health check. + ExternalEndpoints []*string `json:"externalEndpoints,omitempty" tf:"external_endpoints,omitempty"` + // The list of internal load balancers to health check. // Structure is documented below. InternalLoadBalancers []PrimaryInternalLoadBalancersObservation `json:"internalLoadBalancers,omitempty" tf:"internal_load_balancers,omitempty"` @@ -538,10 +564,14 @@ type PrimaryObservation struct { type PrimaryParameters struct { + // The list of external endpoint addresses to health check. + // +kubebuilder:validation:Optional + ExternalEndpoints []*string `json:"externalEndpoints,omitempty" tf:"external_endpoints,omitempty"` + // The list of internal load balancers to health check. // Structure is documented below. // +kubebuilder:validation:Optional - InternalLoadBalancers []PrimaryInternalLoadBalancersParameters `json:"internalLoadBalancers" tf:"internal_load_balancers,omitempty"` + InternalLoadBalancers []PrimaryInternalLoadBalancersParameters `json:"internalLoadBalancers,omitempty" tf:"internal_load_balancers,omitempty"` } type RecordSetInitParameters struct { @@ -668,6 +698,19 @@ type RoutingPolicyInitParameters struct { // Structure is documented below. Geo []GeoInitParameters `json:"geo,omitempty" tf:"geo,omitempty"` + // Specifies the health check (used with external endpoints). + // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/compute/v1beta2.HealthCheck + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + HealthCheck *string `json:"healthCheck,omitempty" tf:"health_check,omitempty"` + + // Reference to a HealthCheck in compute to populate healthCheck. 
+ // +kubebuilder:validation:Optional + HealthCheckRef *v1.Reference `json:"healthCheckRef,omitempty" tf:"-"` + + // Selector for a HealthCheck in compute to populate healthCheck. + // +kubebuilder:validation:Optional + HealthCheckSelector *v1.Selector `json:"healthCheckSelector,omitempty" tf:"-"` + // The configuration for a failover policy with global to regional failover. Queries are responded to with the global primary targets, but if none of the primary targets are healthy, then we fallback to a regional failover policy. // Structure is documented below. PrimaryBackup *PrimaryBackupInitParameters `json:"primaryBackup,omitempty" tf:"primary_backup,omitempty"` @@ -686,6 +729,9 @@ type RoutingPolicyObservation struct { // Structure is documented below. Geo []GeoObservation `json:"geo,omitempty" tf:"geo,omitempty"` + // Specifies the health check (used with external endpoints). + HealthCheck *string `json:"healthCheck,omitempty" tf:"health_check,omitempty"` + // The configuration for a failover policy with global to regional failover. Queries are responded to with the global primary targets, but if none of the primary targets are healthy, then we fallback to a regional failover policy. // Structure is documented below. PrimaryBackup *PrimaryBackupObservation `json:"primaryBackup,omitempty" tf:"primary_backup,omitempty"` @@ -706,6 +752,20 @@ type RoutingPolicyParameters struct { // +kubebuilder:validation:Optional Geo []GeoParameters `json:"geo,omitempty" tf:"geo,omitempty"` + // Specifies the health check (used with external endpoints). + // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/compute/v1beta2.HealthCheck + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + HealthCheck *string `json:"healthCheck,omitempty" tf:"health_check,omitempty"` + + // Reference to a HealthCheck in compute to populate healthCheck. 
+ // +kubebuilder:validation:Optional + HealthCheckRef *v1.Reference `json:"healthCheckRef,omitempty" tf:"-"` + + // Selector for a HealthCheck in compute to populate healthCheck. + // +kubebuilder:validation:Optional + HealthCheckSelector *v1.Selector `json:"healthCheckSelector,omitempty" tf:"-"` + // The configuration for a failover policy with global to regional failover. Queries are responded to with the global primary targets, but if none of the primary targets are healthy, then we fallback to a regional failover policy. // Structure is documented below. // +kubebuilder:validation:Optional @@ -719,6 +779,9 @@ type RoutingPolicyParameters struct { type WrrHealthCheckedTargetsInitParameters struct { + // The list of external endpoint addresses to health check. + ExternalEndpoints []*string `json:"externalEndpoints,omitempty" tf:"external_endpoints,omitempty"` + // The list of internal load balancers to health check. // Structure is documented below. InternalLoadBalancers []WrrHealthCheckedTargetsInternalLoadBalancersInitParameters `json:"internalLoadBalancers,omitempty" tf:"internal_load_balancers,omitempty"` @@ -808,6 +871,9 @@ type WrrHealthCheckedTargetsInternalLoadBalancersParameters struct { type WrrHealthCheckedTargetsObservation struct { + // The list of external endpoint addresses to health check. + ExternalEndpoints []*string `json:"externalEndpoints,omitempty" tf:"external_endpoints,omitempty"` + // The list of internal load balancers to health check. // Structure is documented below. InternalLoadBalancers []WrrHealthCheckedTargetsInternalLoadBalancersObservation `json:"internalLoadBalancers,omitempty" tf:"internal_load_balancers,omitempty"` @@ -815,10 +881,14 @@ type WrrHealthCheckedTargetsObservation struct { type WrrHealthCheckedTargetsParameters struct { + // The list of external endpoint addresses to health check. 
+ // +kubebuilder:validation:Optional + ExternalEndpoints []*string `json:"externalEndpoints,omitempty" tf:"external_endpoints,omitempty"` + // The list of internal load balancers to health check. // Structure is documented below. // +kubebuilder:validation:Optional - InternalLoadBalancers []WrrHealthCheckedTargetsInternalLoadBalancersParameters `json:"internalLoadBalancers" tf:"internal_load_balancers,omitempty"` + InternalLoadBalancers []WrrHealthCheckedTargetsInternalLoadBalancersParameters `json:"internalLoadBalancers,omitempty" tf:"internal_load_balancers,omitempty"` } type WrrInitParameters struct { diff --git a/apis/filestore/v1beta1/zz_backup_types.go b/apis/filestore/v1beta1/zz_backup_types.go index 7aae114c9..d1620f7d5 100755 --- a/apis/filestore/v1beta1/zz_backup_types.go +++ b/apis/filestore/v1beta1/zz_backup_types.go @@ -41,6 +41,13 @@ type BackupInitParameters struct { // Selector for a Instance in filestore to populate sourceInstance. // +kubebuilder:validation:Optional SourceInstanceSelector *v1.Selector `json:"sourceInstanceSelector,omitempty" tf:"-"` + + // A map of resource manager tags. + // Resource manager tag keys and values have the same definition as resource manager tags. + // Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/{tag_value_id}. + // The field is ignored (both PUT & PATCH) when empty. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` } type BackupObservation struct { @@ -93,6 +100,13 @@ type BackupObservation struct { // The size of the storage used by the backup. As backups share storage, this number is expected to change with backup creation/deletion. StorageBytes *string `json:"storageBytes,omitempty" tf:"storage_bytes,omitempty"` + // A map of resource manager tags. + // Resource manager tag keys and values have the same definition as resource manager tags. 
+ // Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/{tag_value_id}. + // The field is ignored (both PUT & PATCH) when empty. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + // The combination of labels configured directly on the resource // and default labels configured on the provider. // +mapType=granular @@ -136,6 +150,14 @@ type BackupParameters struct { // Selector for a Instance in filestore to populate sourceInstance. // +kubebuilder:validation:Optional SourceInstanceSelector *v1.Selector `json:"sourceInstanceSelector,omitempty" tf:"-"` + + // A map of resource manager tags. + // Resource manager tag keys and values have the same definition as resource manager tags. + // Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/{tag_value_id}. + // The field is ignored (both PUT & PATCH) when empty. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` } // BackupSpec defines the desired state of Backup diff --git a/apis/filestore/v1beta1/zz_generated.deepcopy.go b/apis/filestore/v1beta1/zz_generated.deepcopy.go index 1bdd80993..84d65f60a 100644 --- a/apis/filestore/v1beta1/zz_generated.deepcopy.go +++ b/apis/filestore/v1beta1/zz_generated.deepcopy.go @@ -89,6 +89,22 @@ func (in *BackupInitParameters) DeepCopyInto(out *BackupInitParameters) { *out = new(v1.Selector) (*in).DeepCopyInto(*out) } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupInitParameters. 
@@ -233,6 +249,22 @@ func (in *BackupObservation) DeepCopyInto(out *BackupObservation) { *out = new(string) **out = **in } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } if in.TerraformLabels != nil { in, out := &in.TerraformLabels, &out.TerraformLabels *out = make(map[string]*string, len(*in)) @@ -315,6 +347,22 @@ func (in *BackupParameters) DeepCopyInto(out *BackupParameters) { *out = new(v1.Selector) (*in).DeepCopyInto(*out) } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupParameters. diff --git a/apis/filestore/v1beta2/zz_generated.deepcopy.go b/apis/filestore/v1beta2/zz_generated.deepcopy.go index 5ca03e2e4..ed6225226 100644 --- a/apis/filestore/v1beta2/zz_generated.deepcopy.go +++ b/apis/filestore/v1beta2/zz_generated.deepcopy.go @@ -124,6 +124,66 @@ func (in *FileSharesParameters) DeepCopy() *FileSharesParameters { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FixedIopsInitParameters) DeepCopyInto(out *FixedIopsInitParameters) { + *out = *in + if in.MaxIops != nil { + in, out := &in.MaxIops, &out.MaxIops + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FixedIopsInitParameters. 
+func (in *FixedIopsInitParameters) DeepCopy() *FixedIopsInitParameters { + if in == nil { + return nil + } + out := new(FixedIopsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FixedIopsObservation) DeepCopyInto(out *FixedIopsObservation) { + *out = *in + if in.MaxIops != nil { + in, out := &in.MaxIops, &out.MaxIops + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FixedIopsObservation. +func (in *FixedIopsObservation) DeepCopy() *FixedIopsObservation { + if in == nil { + return nil + } + out := new(FixedIopsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FixedIopsParameters) DeepCopyInto(out *FixedIopsParameters) { + *out = *in + if in.MaxIops != nil { + in, out := &in.MaxIops, &out.MaxIops + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FixedIopsParameters. +func (in *FixedIopsParameters) DeepCopy() *FixedIopsParameters { + if in == nil { + return nil + } + out := new(FixedIopsParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Instance) DeepCopyInto(out *Instance) { *out = *in @@ -154,6 +214,16 @@ func (in *Instance) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *InstanceInitParameters) DeepCopyInto(out *InstanceInitParameters) { *out = *in + if in.DeletionProtectionEnabled != nil { + in, out := &in.DeletionProtectionEnabled, &out.DeletionProtectionEnabled + *out = new(bool) + **out = **in + } + if in.DeletionProtectionReason != nil { + in, out := &in.DeletionProtectionReason, &out.DeletionProtectionReason + *out = new(string) + **out = **in + } if in.Description != nil { in, out := &in.Description, &out.Description *out = new(string) @@ -202,11 +272,21 @@ func (in *InstanceInitParameters) DeepCopyInto(out *InstanceInitParameters) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.PerformanceConfig != nil { + in, out := &in.PerformanceConfig, &out.PerformanceConfig + *out = new(PerformanceConfigInitParameters) + (*in).DeepCopyInto(*out) + } if in.Project != nil { in, out := &in.Project, &out.Project *out = new(string) **out = **in } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } if in.Tier != nil { in, out := &in.Tier, &out.Tier *out = new(string) @@ -269,6 +349,16 @@ func (in *InstanceObservation) DeepCopyInto(out *InstanceObservation) { *out = new(string) **out = **in } + if in.DeletionProtectionEnabled != nil { + in, out := &in.DeletionProtectionEnabled, &out.DeletionProtectionEnabled + *out = new(bool) + **out = **in + } + if in.DeletionProtectionReason != nil { + in, out := &in.DeletionProtectionReason, &out.DeletionProtectionReason + *out = new(string) + **out = **in + } if in.Description != nil { in, out := &in.Description, &out.Description *out = new(string) @@ -338,11 +428,21 @@ func (in *InstanceObservation) DeepCopyInto(out *InstanceObservation) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.PerformanceConfig != nil { + in, out := &in.PerformanceConfig, &out.PerformanceConfig + *out = new(PerformanceConfigObservation) + (*in).DeepCopyInto(*out) + } if in.Project != nil { in, out := &in.Project, &out.Project *out = new(string) **out = **in } + if 
in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } if in.TerraformLabels != nil { in, out := &in.TerraformLabels, &out.TerraformLabels *out = make(map[string]*string, len(*in)) @@ -384,6 +484,16 @@ func (in *InstanceObservation) DeepCopy() *InstanceObservation { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *InstanceParameters) DeepCopyInto(out *InstanceParameters) { *out = *in + if in.DeletionProtectionEnabled != nil { + in, out := &in.DeletionProtectionEnabled, &out.DeletionProtectionEnabled + *out = new(bool) + **out = **in + } + if in.DeletionProtectionReason != nil { + in, out := &in.DeletionProtectionReason, &out.DeletionProtectionReason + *out = new(string) + **out = **in + } if in.Description != nil { in, out := &in.Description, &out.Description *out = new(string) @@ -437,11 +547,21 @@ func (in *InstanceParameters) DeepCopyInto(out *InstanceParameters) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.PerformanceConfig != nil { + in, out := &in.PerformanceConfig, &out.PerformanceConfig + *out = new(PerformanceConfigParameters) + (*in).DeepCopyInto(*out) + } if in.Project != nil { in, out := &in.Project, &out.Project *out = new(string) **out = **in } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } if in.Tier != nil { in, out := &in.Tier, &out.Tier *out = new(string) @@ -499,6 +619,66 @@ func (in *InstanceStatus) DeepCopy() *InstanceStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IopsPerTbInitParameters) DeepCopyInto(out *IopsPerTbInitParameters) { + *out = *in + if in.MaxIopsPerTb != nil { + in, out := &in.MaxIopsPerTb, &out.MaxIopsPerTb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IopsPerTbInitParameters. +func (in *IopsPerTbInitParameters) DeepCopy() *IopsPerTbInitParameters { + if in == nil { + return nil + } + out := new(IopsPerTbInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IopsPerTbObservation) DeepCopyInto(out *IopsPerTbObservation) { + *out = *in + if in.MaxIopsPerTb != nil { + in, out := &in.MaxIopsPerTb, &out.MaxIopsPerTb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IopsPerTbObservation. +func (in *IopsPerTbObservation) DeepCopy() *IopsPerTbObservation { + if in == nil { + return nil + } + out := new(IopsPerTbObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IopsPerTbParameters) DeepCopyInto(out *IopsPerTbParameters) { + *out = *in + if in.MaxIopsPerTb != nil { + in, out := &in.MaxIopsPerTb, &out.MaxIopsPerTb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IopsPerTbParameters. +func (in *IopsPerTbParameters) DeepCopy() *IopsPerTbParameters { + if in == nil { + return nil + } + out := new(IopsPerTbParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *NFSExportOptionsInitParameters) DeepCopyInto(out *NFSExportOptionsInitParameters) { *out = *in @@ -770,3 +950,78 @@ func (in *NetworksParameters) DeepCopy() *NetworksParameters { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PerformanceConfigInitParameters) DeepCopyInto(out *PerformanceConfigInitParameters) { + *out = *in + if in.FixedIops != nil { + in, out := &in.FixedIops, &out.FixedIops + *out = new(FixedIopsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.IopsPerTb != nil { + in, out := &in.IopsPerTb, &out.IopsPerTb + *out = new(IopsPerTbInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerformanceConfigInitParameters. +func (in *PerformanceConfigInitParameters) DeepCopy() *PerformanceConfigInitParameters { + if in == nil { + return nil + } + out := new(PerformanceConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PerformanceConfigObservation) DeepCopyInto(out *PerformanceConfigObservation) { + *out = *in + if in.FixedIops != nil { + in, out := &in.FixedIops, &out.FixedIops + *out = new(FixedIopsObservation) + (*in).DeepCopyInto(*out) + } + if in.IopsPerTb != nil { + in, out := &in.IopsPerTb, &out.IopsPerTb + *out = new(IopsPerTbObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerformanceConfigObservation. +func (in *PerformanceConfigObservation) DeepCopy() *PerformanceConfigObservation { + if in == nil { + return nil + } + out := new(PerformanceConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *PerformanceConfigParameters) DeepCopyInto(out *PerformanceConfigParameters) { + *out = *in + if in.FixedIops != nil { + in, out := &in.FixedIops, &out.FixedIops + *out = new(FixedIopsParameters) + (*in).DeepCopyInto(*out) + } + if in.IopsPerTb != nil { + in, out := &in.IopsPerTb, &out.IopsPerTb + *out = new(IopsPerTbParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerformanceConfigParameters. +func (in *PerformanceConfigParameters) DeepCopy() *PerformanceConfigParameters { + if in == nil { + return nil + } + out := new(PerformanceConfigParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/filestore/v1beta2/zz_instance_types.go b/apis/filestore/v1beta2/zz_instance_types.go index 17c2b2e6c..4cc3026e7 100755 --- a/apis/filestore/v1beta2/zz_instance_types.go +++ b/apis/filestore/v1beta2/zz_instance_types.go @@ -74,8 +74,36 @@ type FileSharesParameters struct { SourceBackup *string `json:"sourceBackup,omitempty" tf:"source_backup,omitempty"` } +type FixedIopsInitParameters struct { + + // The number of IOPS to provision for the instance. + // max_iops must be in multiple of 1000. + MaxIops *float64 `json:"maxIops,omitempty" tf:"max_iops,omitempty"` +} + +type FixedIopsObservation struct { + + // The number of IOPS to provision for the instance. + // max_iops must be in multiple of 1000. + MaxIops *float64 `json:"maxIops,omitempty" tf:"max_iops,omitempty"` +} + +type FixedIopsParameters struct { + + // The number of IOPS to provision for the instance. + // max_iops must be in multiple of 1000. + // +kubebuilder:validation:Optional + MaxIops *float64 `json:"maxIops,omitempty" tf:"max_iops,omitempty"` +} + type InstanceInitParameters struct { + // Indicates whether the instance is protected against deletion. 
+ DeletionProtectionEnabled *bool `json:"deletionProtectionEnabled,omitempty" tf:"deletion_protection_enabled,omitempty"` + + // The reason for enabling deletion protection. + DeletionProtectionReason *string `json:"deletionProtectionReason,omitempty" tf:"deletion_protection_reason,omitempty"` + // A description of the instance. Description *string `json:"description,omitempty" tf:"description,omitempty"` @@ -106,10 +134,23 @@ type InstanceInitParameters struct { // Structure is documented below. Networks []NetworksInitParameters `json:"networks,omitempty" tf:"networks,omitempty"` + // Performance configuration for the instance. If not provided, + // the default performance settings will be used. + // Structure is documented below. + PerformanceConfig *PerformanceConfigInitParameters `json:"performanceConfig,omitempty" tf:"performance_config,omitempty"` + // The ID of the project in which the resource belongs. // If it is not provided, the provider project is used. Project *string `json:"project,omitempty" tf:"project,omitempty"` + // Either NFSv3, for using NFS version 3 as file sharing protocol, + // or NFSv4.1, for using NFS version 4.1 as file sharing protocol. + // NFSv4.1 can be used with HIGH_SCALE_SSD, ZONAL, REGIONAL and ENTERPRISE. + // The default is NFSv3. + // Default value is NFS_V3. + // Possible values are: NFS_V3, NFS_V4_1. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + // The service tier of the instance. // Possible values include: STANDARD, PREMIUM, BASIC_HDD, BASIC_SSD, HIGH_SCALE_SSD, ZONAL, REGIONAL and ENTERPRISE Tier *string `json:"tier,omitempty" tf:"tier,omitempty"` @@ -123,6 +164,12 @@ type InstanceObservation struct { // Creation timestamp in RFC3339 text format. CreateTime *string `json:"createTime,omitempty" tf:"create_time,omitempty"` + // Indicates whether the instance is protected against deletion. 
+ DeletionProtectionEnabled *bool `json:"deletionProtectionEnabled,omitempty" tf:"deletion_protection_enabled,omitempty"` + + // The reason for enabling deletion protection. + DeletionProtectionReason *string `json:"deletionProtectionReason,omitempty" tf:"deletion_protection_reason,omitempty"` + // A description of the instance. Description *string `json:"description,omitempty" tf:"description,omitempty"` @@ -156,10 +203,23 @@ type InstanceObservation struct { // Structure is documented below. Networks []NetworksObservation `json:"networks,omitempty" tf:"networks,omitempty"` + // Performance configuration for the instance. If not provided, + // the default performance settings will be used. + // Structure is documented below. + PerformanceConfig *PerformanceConfigObservation `json:"performanceConfig,omitempty" tf:"performance_config,omitempty"` + // The ID of the project in which the resource belongs. // If it is not provided, the provider project is used. Project *string `json:"project,omitempty" tf:"project,omitempty"` + // Either NFSv3, for using NFS version 3 as file sharing protocol, + // or NFSv4.1, for using NFS version 4.1 as file sharing protocol. + // NFSv4.1 can be used with HIGH_SCALE_SSD, ZONAL, REGIONAL and ENTERPRISE. + // The default is NFSv3. + // Default value is NFS_V3. + // Possible values are: NFS_V3, NFS_V4_1. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + // The combination of labels configured directly on the resource // and default labels configured on the provider. // +mapType=granular @@ -175,6 +235,14 @@ type InstanceObservation struct { type InstanceParameters struct { + // Indicates whether the instance is protected against deletion. + // +kubebuilder:validation:Optional + DeletionProtectionEnabled *bool `json:"deletionProtectionEnabled,omitempty" tf:"deletion_protection_enabled,omitempty"` + + // The reason for enabling deletion protection. 
+ // +kubebuilder:validation:Optional + DeletionProtectionReason *string `json:"deletionProtectionReason,omitempty" tf:"deletion_protection_reason,omitempty"` + // A description of the instance. // +kubebuilder:validation:Optional Description *string `json:"description,omitempty" tf:"description,omitempty"` @@ -214,11 +282,26 @@ type InstanceParameters struct { // +kubebuilder:validation:Optional Networks []NetworksParameters `json:"networks,omitempty" tf:"networks,omitempty"` + // Performance configuration for the instance. If not provided, + // the default performance settings will be used. + // Structure is documented below. + // +kubebuilder:validation:Optional + PerformanceConfig *PerformanceConfigParameters `json:"performanceConfig,omitempty" tf:"performance_config,omitempty"` + // The ID of the project in which the resource belongs. // If it is not provided, the provider project is used. // +kubebuilder:validation:Optional Project *string `json:"project,omitempty" tf:"project,omitempty"` + // Either NFSv3, for using NFS version 3 as file sharing protocol, + // or NFSv4.1, for using NFS version 4.1 as file sharing protocol. + // NFSv4.1 can be used with HIGH_SCALE_SSD, ZONAL, REGIONAL and ENTERPRISE. + // The default is NFSv3. + // Default value is NFS_V3. + // Possible values are: NFS_V3, NFS_V4_1. + // +kubebuilder:validation:Optional + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + // The service tier of the instance. // Possible values include: STANDARD, PREMIUM, BASIC_HDD, BASIC_SSD, HIGH_SCALE_SSD, ZONAL, REGIONAL and ENTERPRISE // +kubebuilder:validation:Optional @@ -229,6 +312,37 @@ type InstanceParameters struct { Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` } +type IopsPerTbInitParameters struct { + + // The instance max IOPS will be calculated by multiplying + // the capacity of the instance (TB) by max_iops_per_tb, + // and rounding to the nearest 1000. 
The instance max IOPS + // will be changed dynamically based on the instance + // capacity. + MaxIopsPerTb *float64 `json:"maxIopsPerTb,omitempty" tf:"max_iops_per_tb,omitempty"` +} + +type IopsPerTbObservation struct { + + // The instance max IOPS will be calculated by multiplying + // the capacity of the instance (TB) by max_iops_per_tb, + // and rounding to the nearest 1000. The instance max IOPS + // will be changed dynamically based on the instance + // capacity. + MaxIopsPerTb *float64 `json:"maxIopsPerTb,omitempty" tf:"max_iops_per_tb,omitempty"` +} + +type IopsPerTbParameters struct { + + // The instance max IOPS will be calculated by multiplying + // the capacity of the instance (TB) by max_iops_per_tb, + // and rounding to the nearest 1000. The instance max IOPS + // will be changed dynamically based on the instance + // capacity. + // +kubebuilder:validation:Optional + MaxIopsPerTb *float64 `json:"maxIopsPerTb,omitempty" tf:"max_iops_per_tb,omitempty"` +} + type NFSExportOptionsInitParameters struct { // Either READ_ONLY, for allowing only read requests on the exported directory, @@ -401,6 +515,50 @@ type NetworksParameters struct { ReservedIPRange *string `json:"reservedIpRange,omitempty" tf:"reserved_ip_range,omitempty"` } +type PerformanceConfigInitParameters struct { + + // The instance will have a fixed provisioned IOPS value, + // which will remain constant regardless of instance + // capacity. + // Structure is documented below. + FixedIops *FixedIopsInitParameters `json:"fixedIops,omitempty" tf:"fixed_iops,omitempty"` + + // The instance provisioned IOPS will change dynamically + // based on the capacity of the instance. + // Structure is documented below. + IopsPerTb *IopsPerTbInitParameters `json:"iopsPerTb,omitempty" tf:"iops_per_tb,omitempty"` +} + +type PerformanceConfigObservation struct { + + // The instance will have a fixed provisioned IOPS value, + // which will remain constant regardless of instance + // capacity. 
+ // Structure is documented below. + FixedIops *FixedIopsObservation `json:"fixedIops,omitempty" tf:"fixed_iops,omitempty"` + + // The instance provisioned IOPS will change dynamically + // based on the capacity of the instance. + // Structure is documented below. + IopsPerTb *IopsPerTbObservation `json:"iopsPerTb,omitempty" tf:"iops_per_tb,omitempty"` +} + +type PerformanceConfigParameters struct { + + // The instance will have a fixed provisioned IOPS value, + // which will remain constant regardless of instance + // capacity. + // Structure is documented below. + // +kubebuilder:validation:Optional + FixedIops *FixedIopsParameters `json:"fixedIops,omitempty" tf:"fixed_iops,omitempty"` + + // The instance provisioned IOPS will change dynamically + // based on the capacity of the instance. + // Structure is documented below. + // +kubebuilder:validation:Optional + IopsPerTb *IopsPerTbParameters `json:"iopsPerTb,omitempty" tf:"iops_per_tb,omitempty"` +} + // InstanceSpec defines the desired state of Instance type InstanceSpec struct { v1.ResourceSpec `json:",inline"` diff --git a/apis/gkehub/v1beta2/zz_membership_types.go b/apis/gkehub/v1beta2/zz_membership_types.go index 95e1cea42..1fb99764f 100755 --- a/apis/gkehub/v1beta2/zz_membership_types.go +++ b/apis/gkehub/v1beta2/zz_membership_types.go @@ -16,21 +16,21 @@ import ( type AuthorityInitParameters struct { // A JSON Web Token (JWT) issuer URI. issuer must start with https:// and // be a valid - // with length <2000 characters. For example: https://container.googleapis.com/v1/projects/my-project/locations/us-west1/clusters/my-cluster (must be locations rather than zones).googleapis.com/v1/${google_container_cluster.my-cluster.id}". + // with length <2000 characters. For example: https://container.googleapis.com/v1/projects/my-project/locations/us-west1/clusters/my-cluster.googleapis.com/v1/${google_container_cluster.my-cluster.id}". 
Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` } type AuthorityObservation struct { // A JSON Web Token (JWT) issuer URI. issuer must start with https:// and // be a valid - // with length <2000 characters. For example: https://container.googleapis.com/v1/projects/my-project/locations/us-west1/clusters/my-cluster (must be locations rather than zones).googleapis.com/v1/${google_container_cluster.my-cluster.id}". + // with length <2000 characters. For example: https://container.googleapis.com/v1/projects/my-project/locations/us-west1/clusters/my-cluster.googleapis.com/v1/${google_container_cluster.my-cluster.id}". Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` } type AuthorityParameters struct { // A JSON Web Token (JWT) issuer URI. issuer must start with https:// and // be a valid - // with length <2000 characters. For example: https://container.googleapis.com/v1/projects/my-project/locations/us-west1/clusters/my-cluster (must be locations rather than zones).googleapis.com/v1/${google_container_cluster.my-cluster.id}". + // with length <2000 characters. For example: https://container.googleapis.com/v1/projects/my-project/locations/us-west1/clusters/my-cluster.googleapis.com/v1/${google_container_cluster.my-cluster.id}". // +kubebuilder:validation:Optional Issuer *string `json:"issuer" tf:"issuer,omitempty"` } @@ -60,7 +60,7 @@ type EndpointParameters struct { type GkeClusterInitParameters struct { // Self-link of the GCP resource for the GKE cluster. - // For example: //container.googleapis.com/projects/my-project/zones/us-west1-a/clusters/my-cluster. + // For example: //container.googleapis.com/projects/my-project/locations/us-west1-a/clusters/my-cluster. // It can be at the most 1000 characters in length.googleapis.com/${google_container_cluster.my-cluster.id}" or // google_container_cluster.my-cluster.id. 
// +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/container/v1beta2.Cluster @@ -79,7 +79,7 @@ type GkeClusterInitParameters struct { type GkeClusterObservation struct { // Self-link of the GCP resource for the GKE cluster. - // For example: //container.googleapis.com/projects/my-project/zones/us-west1-a/clusters/my-cluster. + // For example: //container.googleapis.com/projects/my-project/locations/us-west1-a/clusters/my-cluster. // It can be at the most 1000 characters in length.googleapis.com/${google_container_cluster.my-cluster.id}" or // google_container_cluster.my-cluster.id. ResourceLink *string `json:"resourceLink,omitempty" tf:"resource_link,omitempty"` @@ -88,7 +88,7 @@ type GkeClusterObservation struct { type GkeClusterParameters struct { // Self-link of the GCP resource for the GKE cluster. - // For example: //container.googleapis.com/projects/my-project/zones/us-west1-a/clusters/my-cluster. + // For example: //container.googleapis.com/projects/my-project/locations/us-west1-a/clusters/my-cluster. // It can be at the most 1000 characters in length.googleapis.com/${google_container_cluster.my-cluster.id}" or // google_container_cluster.my-cluster.id. // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/container/v1beta2.Cluster diff --git a/apis/healthcare/v1beta1/zz_dataset_types.go b/apis/healthcare/v1beta1/zz_dataset_types.go index f68cf8a0b..eaba365d3 100755 --- a/apis/healthcare/v1beta1/zz_dataset_types.go +++ b/apis/healthcare/v1beta1/zz_dataset_types.go @@ -15,7 +15,7 @@ import ( type DatasetInitParameters struct { - // A nested object resource + // A nested object resource. // Structure is documented below. EncryptionSpec *EncryptionSpecInitParameters `json:"encryptionSpec,omitempty" tf:"encryption_spec,omitempty"` @@ -37,7 +37,7 @@ type DatasetInitParameters struct { type DatasetObservation struct { - // A nested object resource + // A nested object resource. // Structure is documented below. 
EncryptionSpec *EncryptionSpecObservation `json:"encryptionSpec,omitempty" tf:"encryption_spec,omitempty"` @@ -65,7 +65,7 @@ type DatasetObservation struct { type DatasetParameters struct { - // A nested object resource + // A nested object resource. // Structure is documented below. // +kubebuilder:validation:Optional EncryptionSpec *EncryptionSpecParameters `json:"encryptionSpec,omitempty" tf:"encryption_spec,omitempty"` diff --git a/apis/iam/v1beta2/zz_generated.deepcopy.go b/apis/iam/v1beta2/zz_generated.deepcopy.go index 9bc4f2957..b0de89a61 100644 --- a/apis/iam/v1beta2/zz_generated.deepcopy.go +++ b/apis/iam/v1beta2/zz_generated.deepcopy.go @@ -73,6 +73,66 @@ func (in *AwsParameters) DeepCopy() *AwsParameters { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntermediateCasInitParameters) DeepCopyInto(out *IntermediateCasInitParameters) { + *out = *in + if in.PemCertificate != nil { + in, out := &in.PemCertificate, &out.PemCertificate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntermediateCasInitParameters. +func (in *IntermediateCasInitParameters) DeepCopy() *IntermediateCasInitParameters { + if in == nil { + return nil + } + out := new(IntermediateCasInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntermediateCasObservation) DeepCopyInto(out *IntermediateCasObservation) { + *out = *in + if in.PemCertificate != nil { + in, out := &in.PemCertificate, &out.PemCertificate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntermediateCasObservation. 
+func (in *IntermediateCasObservation) DeepCopy() *IntermediateCasObservation { + if in == nil { + return nil + } + out := new(IntermediateCasObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntermediateCasParameters) DeepCopyInto(out *IntermediateCasParameters) { + *out = *in + if in.PemCertificate != nil { + in, out := &in.PemCertificate, &out.PemCertificate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntermediateCasParameters. +func (in *IntermediateCasParameters) DeepCopy() *IntermediateCasParameters { + if in == nil { + return nil + } + out := new(IntermediateCasParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *OidcInitParameters) DeepCopyInto(out *OidcInitParameters) { *out = *in @@ -241,6 +301,153 @@ func (in *SAMLParameters) DeepCopy() *SAMLParameters { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrustAnchorsInitParameters) DeepCopyInto(out *TrustAnchorsInitParameters) { + *out = *in + if in.PemCertificate != nil { + in, out := &in.PemCertificate, &out.PemCertificate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrustAnchorsInitParameters. +func (in *TrustAnchorsInitParameters) DeepCopy() *TrustAnchorsInitParameters { + if in == nil { + return nil + } + out := new(TrustAnchorsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TrustAnchorsObservation) DeepCopyInto(out *TrustAnchorsObservation) { + *out = *in + if in.PemCertificate != nil { + in, out := &in.PemCertificate, &out.PemCertificate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrustAnchorsObservation. +func (in *TrustAnchorsObservation) DeepCopy() *TrustAnchorsObservation { + if in == nil { + return nil + } + out := new(TrustAnchorsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrustAnchorsParameters) DeepCopyInto(out *TrustAnchorsParameters) { + *out = *in + if in.PemCertificate != nil { + in, out := &in.PemCertificate, &out.PemCertificate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrustAnchorsParameters. +func (in *TrustAnchorsParameters) DeepCopy() *TrustAnchorsParameters { + if in == nil { + return nil + } + out := new(TrustAnchorsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrustStoreInitParameters) DeepCopyInto(out *TrustStoreInitParameters) { + *out = *in + if in.IntermediateCas != nil { + in, out := &in.IntermediateCas, &out.IntermediateCas + *out = make([]IntermediateCasInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TrustAnchors != nil { + in, out := &in.TrustAnchors, &out.TrustAnchors + *out = make([]TrustAnchorsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrustStoreInitParameters. 
+func (in *TrustStoreInitParameters) DeepCopy() *TrustStoreInitParameters { + if in == nil { + return nil + } + out := new(TrustStoreInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrustStoreObservation) DeepCopyInto(out *TrustStoreObservation) { + *out = *in + if in.IntermediateCas != nil { + in, out := &in.IntermediateCas, &out.IntermediateCas + *out = make([]IntermediateCasObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TrustAnchors != nil { + in, out := &in.TrustAnchors, &out.TrustAnchors + *out = make([]TrustAnchorsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrustStoreObservation. +func (in *TrustStoreObservation) DeepCopy() *TrustStoreObservation { + if in == nil { + return nil + } + out := new(TrustStoreObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrustStoreParameters) DeepCopyInto(out *TrustStoreParameters) { + *out = *in + if in.IntermediateCas != nil { + in, out := &in.IntermediateCas, &out.IntermediateCas + *out = make([]IntermediateCasParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TrustAnchors != nil { + in, out := &in.TrustAnchors, &out.TrustAnchors + *out = make([]TrustAnchorsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrustStoreParameters. 
+func (in *TrustStoreParameters) DeepCopy() *TrustStoreParameters { + if in == nil { + return nil + } + out := new(TrustStoreParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *WorkloadIdentityPoolProvider) DeepCopyInto(out *WorkloadIdentityPoolProvider) { *out = *in @@ -327,6 +534,11 @@ func (in *WorkloadIdentityPoolProviderInitParameters) DeepCopyInto(out *Workload *out = new(SAMLInitParameters) (*in).DeepCopyInto(*out) } + if in.X509 != nil { + in, out := &in.X509, &out.X509 + *out = new(X509InitParameters) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadIdentityPoolProviderInitParameters. @@ -450,6 +662,11 @@ func (in *WorkloadIdentityPoolProviderObservation) DeepCopyInto(out *WorkloadIde *out = new(string) **out = **in } + if in.X509 != nil { + in, out := &in.X509, &out.X509 + *out = new(X509Observation) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadIdentityPoolProviderObservation. @@ -536,6 +753,11 @@ func (in *WorkloadIdentityPoolProviderParameters) DeepCopyInto(out *WorkloadIden *out = new(v1.Selector) (*in).DeepCopyInto(*out) } + if in.X509 != nil { + in, out := &in.X509, &out.X509 + *out = new(X509Parameters) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadIdentityPoolProviderParameters. @@ -582,3 +804,63 @@ func (in *WorkloadIdentityPoolProviderStatus) DeepCopy() *WorkloadIdentityPoolPr in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *X509InitParameters) DeepCopyInto(out *X509InitParameters) { + *out = *in + if in.TrustStore != nil { + in, out := &in.TrustStore, &out.TrustStore + *out = new(TrustStoreInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new X509InitParameters. +func (in *X509InitParameters) DeepCopy() *X509InitParameters { + if in == nil { + return nil + } + out := new(X509InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *X509Observation) DeepCopyInto(out *X509Observation) { + *out = *in + if in.TrustStore != nil { + in, out := &in.TrustStore, &out.TrustStore + *out = new(TrustStoreObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new X509Observation. +func (in *X509Observation) DeepCopy() *X509Observation { + if in == nil { + return nil + } + out := new(X509Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *X509Parameters) DeepCopyInto(out *X509Parameters) { + *out = *in + if in.TrustStore != nil { + in, out := &in.TrustStore, &out.TrustStore + *out = new(TrustStoreParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new X509Parameters. 
+func (in *X509Parameters) DeepCopy() *X509Parameters { + if in == nil { + return nil + } + out := new(X509Parameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/iam/v1beta2/zz_workloadidentitypoolprovider_types.go b/apis/iam/v1beta2/zz_workloadidentitypoolprovider_types.go index fe48c4500..7b54358b4 100755 --- a/apis/iam/v1beta2/zz_workloadidentitypoolprovider_types.go +++ b/apis/iam/v1beta2/zz_workloadidentitypoolprovider_types.go @@ -32,6 +32,28 @@ type AwsParameters struct { AccountID *string `json:"accountId" tf:"account_id,omitempty"` } +type IntermediateCasInitParameters struct { + + // PEM certificate of the PKI used for validation. Must only contain one + // ca certificate(either root or intermediate cert). + PemCertificate *string `json:"pemCertificate,omitempty" tf:"pem_certificate,omitempty"` +} + +type IntermediateCasObservation struct { + + // PEM certificate of the PKI used for validation. Must only contain one + // ca certificate(either root or intermediate cert). + PemCertificate *string `json:"pemCertificate,omitempty" tf:"pem_certificate,omitempty"` +} + +type IntermediateCasParameters struct { + + // PEM certificate of the PKI used for validation. Must only contain one + // ca certificate(either root or intermediate cert). + // +kubebuilder:validation:Optional + PemCertificate *string `json:"pemCertificate,omitempty" tf:"pem_certificate,omitempty"` +} + type OidcInitParameters struct { // Acceptable values for the aud field (audience) in the OIDC token. Token exchange @@ -123,6 +145,75 @@ type SAMLParameters struct { IdPMetadataXML *string `json:"idpMetadataXml" tf:"idp_metadata_xml,omitempty"` } +type TrustAnchorsInitParameters struct { + + // PEM certificate of the PKI used for validation. Must only contain one + // ca certificate(either root or intermediate cert). 
+ PemCertificate *string `json:"pemCertificate,omitempty" tf:"pem_certificate,omitempty"` +} + +type TrustAnchorsObservation struct { + + // PEM certificate of the PKI used for validation. Must only contain one + // ca certificate(either root or intermediate cert). + PemCertificate *string `json:"pemCertificate,omitempty" tf:"pem_certificate,omitempty"` +} + +type TrustAnchorsParameters struct { + + // PEM certificate of the PKI used for validation. Must only contain one + // ca certificate(either root or intermediate cert). + // +kubebuilder:validation:Optional + PemCertificate *string `json:"pemCertificate,omitempty" tf:"pem_certificate,omitempty"` +} + +type TrustStoreInitParameters struct { + + // Set of intermediate CA certificates used for building the trust chain to + // trust anchor. + // IMPORTANT: Intermediate CAs are only supported when configuring x509 federation. + // Structure is documented below. + IntermediateCas []IntermediateCasInitParameters `json:"intermediateCas,omitempty" tf:"intermediate_cas,omitempty"` + + // List of Trust Anchors to be used while performing validation + // against a given TrustStore. The incoming end entity's certificate + // must be chained up to one of the trust anchors here. + // Structure is documented below. + TrustAnchors []TrustAnchorsInitParameters `json:"trustAnchors,omitempty" tf:"trust_anchors,omitempty"` +} + +type TrustStoreObservation struct { + + // Set of intermediate CA certificates used for building the trust chain to + // trust anchor. + // IMPORTANT: Intermediate CAs are only supported when configuring x509 federation. + // Structure is documented below. + IntermediateCas []IntermediateCasObservation `json:"intermediateCas,omitempty" tf:"intermediate_cas,omitempty"` + + // List of Trust Anchors to be used while performing validation + // against a given TrustStore. The incoming end entity's certificate + // must be chained up to one of the trust anchors here. + // Structure is documented below. 
+ TrustAnchors []TrustAnchorsObservation `json:"trustAnchors,omitempty" tf:"trust_anchors,omitempty"` +} + +type TrustStoreParameters struct { + + // Set of intermediate CA certificates used for building the trust chain to + // trust anchor. + // IMPORTANT: Intermediate CAs are only supported when configuring x509 federation. + // Structure is documented below. + // +kubebuilder:validation:Optional + IntermediateCas []IntermediateCasParameters `json:"intermediateCas,omitempty" tf:"intermediate_cas,omitempty"` + + // List of Trust Anchors to be used while performing validation + // against a given TrustStore. The incoming end entity's certificate + // must be chained up to one of the trust anchors here. + // Structure is documented below. + // +kubebuilder:validation:Optional + TrustAnchors []TrustAnchorsParameters `json:"trustAnchors" tf:"trust_anchors,omitempty"` +} + type WorkloadIdentityPoolProviderInitParameters struct { // A Common Expression Language expression, in @@ -164,6 +255,11 @@ type WorkloadIdentityPoolProviderInitParameters struct { // An SAML 2.0 identity provider. Not compatible with the property oidc or aws. // Structure is documented below. SAML *SAMLInitParameters `json:"saml,omitempty" tf:"saml,omitempty"` + + // An X.509-type identity provider represents a CA. It is trusted to assert a + // client identity if the client has a certificate that chains up to this CA. + // Structure is documented below. + X509 *X509InitParameters `json:"x509,omitempty" tf:"x509,omitempty"` } type WorkloadIdentityPoolProviderObservation struct { @@ -222,6 +318,11 @@ type WorkloadIdentityPoolProviderObservation struct { // value should be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix // gcp- is reserved for use by Google, and may not be specified. WorkloadIdentityPoolID *string `json:"workloadIdentityPoolId,omitempty" tf:"workload_identity_pool_id,omitempty"` + + // An X.509-type identity provider represents a CA. 
It is trusted to assert a + // client identity if the client has a certificate that chains up to this CA. + // Structure is documented below. + X509 *X509Observation `json:"x509,omitempty" tf:"x509,omitempty"` } type WorkloadIdentityPoolProviderParameters struct { @@ -289,6 +390,46 @@ type WorkloadIdentityPoolProviderParameters struct { // Selector for a WorkloadIdentityPool in iam to populate workloadIdentityPoolId. // +kubebuilder:validation:Optional WorkloadIdentityPoolIDSelector *v1.Selector `json:"workloadIdentityPoolIdSelector,omitempty" tf:"-"` + + // An X.509-type identity provider represents a CA. It is trusted to assert a + // client identity if the client has a certificate that chains up to this CA. + // Structure is documented below. + // +kubebuilder:validation:Optional + X509 *X509Parameters `json:"x509,omitempty" tf:"x509,omitempty"` +} + +type X509InitParameters struct { + + // A Trust store, use this trust store as a wrapper to config the trust + // anchor and optional intermediate cas to help build the trust chain for + // the incoming end entity certificate. Follow the x509 guidelines to + // define those PEM encoded certs. Only 1 trust store is currently + // supported. + // Structure is documented below. + TrustStore *TrustStoreInitParameters `json:"trustStore,omitempty" tf:"trust_store,omitempty"` +} + +type X509Observation struct { + + // A Trust store, use this trust store as a wrapper to config the trust + // anchor and optional intermediate cas to help build the trust chain for + // the incoming end entity certificate. Follow the x509 guidelines to + // define those PEM encoded certs. Only 1 trust store is currently + // supported. + // Structure is documented below. 
+ TrustStore *TrustStoreObservation `json:"trustStore,omitempty" tf:"trust_store,omitempty"` +} + +type X509Parameters struct { + + // A Trust store, use this trust store as a wrapper to config the trust + // anchor and optional intermediate cas to help build the trust chain for + // the incoming end entity certificate. Follow the x509 guidelines to + // define those PEM encoded certs. Only 1 trust store is currently + // supported. + // Structure is documented below. + // +kubebuilder:validation:Optional + TrustStore *TrustStoreParameters `json:"trustStore" tf:"trust_store,omitempty"` } // WorkloadIdentityPoolProviderSpec defines the desired state of WorkloadIdentityPoolProvider diff --git a/apis/datastore/v1beta1/zz_index_terraformed.go b/apis/identityplatform/v1beta1/zz_config_terraformed.go similarity index 69% rename from apis/datastore/v1beta1/zz_index_terraformed.go rename to apis/identityplatform/v1beta1/zz_config_terraformed.go index cbd9ee161..fd7de5d09 100755 --- a/apis/datastore/v1beta1/zz_index_terraformed.go +++ b/apis/identityplatform/v1beta1/zz_config_terraformed.go @@ -14,18 +14,18 @@ import ( "github.com/crossplane/upjet/pkg/resource/json" ) -// GetTerraformResourceType returns Terraform resource type for this Index -func (mg *Index) GetTerraformResourceType() string { - return "google_datastore_index" +// GetTerraformResourceType returns Terraform resource type for this Config +func (mg *Config) GetTerraformResourceType() string { + return "google_identity_platform_config" } -// GetConnectionDetailsMapping for this Index -func (tr *Index) GetConnectionDetailsMapping() map[string]string { - return nil +// GetConnectionDetailsMapping for this Config +func (tr *Config) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"client[*].api_key": "status.atProvider.client[*].apiKey"} } -// GetObservation of this Index -func (tr *Index) GetObservation() (map[string]any, error) { +// GetObservation of this Config +func (tr *Config) 
GetObservation() (map[string]any, error) { o, err := json.TFParser.Marshal(tr.Status.AtProvider) if err != nil { return nil, err @@ -34,8 +34,8 @@ func (tr *Index) GetObservation() (map[string]any, error) { return base, json.TFParser.Unmarshal(o, &base) } -// SetObservation for this Index -func (tr *Index) SetObservation(obs map[string]any) error { +// SetObservation for this Config +func (tr *Config) SetObservation(obs map[string]any) error { p, err := json.TFParser.Marshal(obs) if err != nil { return err @@ -43,16 +43,16 @@ func (tr *Index) SetObservation(obs map[string]any) error { return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) } -// GetID returns ID of underlying Terraform resource of this Index -func (tr *Index) GetID() string { +// GetID returns ID of underlying Terraform resource of this Config +func (tr *Config) GetID() string { if tr.Status.AtProvider.ID == nil { return "" } return *tr.Status.AtProvider.ID } -// GetParameters of this Index -func (tr *Index) GetParameters() (map[string]any, error) { +// GetParameters of this Config +func (tr *Config) GetParameters() (map[string]any, error) { p, err := json.TFParser.Marshal(tr.Spec.ForProvider) if err != nil { return nil, err @@ -61,8 +61,8 @@ func (tr *Index) GetParameters() (map[string]any, error) { return base, json.TFParser.Unmarshal(p, &base) } -// SetParameters for this Index -func (tr *Index) SetParameters(params map[string]any) error { +// SetParameters for this Config +func (tr *Config) SetParameters(params map[string]any) error { p, err := json.TFParser.Marshal(params) if err != nil { return err @@ -70,8 +70,8 @@ func (tr *Index) SetParameters(params map[string]any) error { return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) } -// GetInitParameters of this Index -func (tr *Index) GetInitParameters() (map[string]any, error) { +// GetInitParameters of this Config +func (tr *Config) GetInitParameters() (map[string]any, error) { p, err := json.TFParser.Marshal(tr.Spec.InitProvider) if err 
!= nil { return nil, err @@ -80,8 +80,8 @@ func (tr *Index) GetInitParameters() (map[string]any, error) { return base, json.TFParser.Unmarshal(p, &base) } -// GetInitParameters of this Index -func (tr *Index) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { +// GetInitParameters of this Config +func (tr *Config) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { params, err := tr.GetParameters() if err != nil { return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) @@ -110,10 +110,10 @@ func (tr *Index) GetMergedParameters(shouldMergeInitProvider bool) (map[string]a return params, nil } -// LateInitialize this Index using its observed tfState. +// LateInitialize this Config using its observed tfState. // returns True if there are any spec changes for the resource. -func (tr *Index) LateInitialize(attrs []byte) (bool, error) { - params := &IndexParameters{} +func (tr *Config) LateInitialize(attrs []byte) (bool, error) { + params := &ConfigParameters{} if err := json.TFParser.Unmarshal(attrs, params); err != nil { return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") } @@ -124,6 +124,6 @@ func (tr *Index) LateInitialize(attrs []byte) (bool, error) { } // GetTerraformSchemaVersion returns the associated Terraform schema version -func (tr *Index) GetTerraformSchemaVersion() int { +func (tr *Config) GetTerraformSchemaVersion() int { return 0 } diff --git a/apis/identityplatform/v1beta1/zz_config_types.go b/apis/identityplatform/v1beta1/zz_config_types.go new file mode 100755 index 000000000..add76c1ae --- /dev/null +++ b/apis/identityplatform/v1beta1/zz_config_types.go @@ -0,0 +1,902 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AllowByDefaultInitParameters struct { + + // Two letter unicode region codes to disallow as defined by https://cldr.unicode.org/ The full list of these region codes is here: https://github.com/unicode-cldr/cldr-localenames-full/blob/master/main/en/territories.json + DisallowedRegions []*string `json:"disallowedRegions,omitempty" tf:"disallowed_regions,omitempty"` +} + +type AllowByDefaultObservation struct { + + // Two letter unicode region codes to disallow as defined by https://cldr.unicode.org/ The full list of these region codes is here: https://github.com/unicode-cldr/cldr-localenames-full/blob/master/main/en/territories.json + DisallowedRegions []*string `json:"disallowedRegions,omitempty" tf:"disallowed_regions,omitempty"` +} + +type AllowByDefaultParameters struct { + + // Two letter unicode region codes to disallow as defined by https://cldr.unicode.org/ The full list of these region codes is here: https://github.com/unicode-cldr/cldr-localenames-full/blob/master/main/en/territories.json + // +kubebuilder:validation:Optional + DisallowedRegions []*string `json:"disallowedRegions,omitempty" tf:"disallowed_regions,omitempty"` +} + +type AllowlistOnlyInitParameters struct { + + // Two letter unicode region codes to allow as defined by https://cldr.unicode.org/ The full list of these region codes is here: https://github.com/unicode-cldr/cldr-localenames-full/blob/master/main/en/territories.json + AllowedRegions []*string `json:"allowedRegions,omitempty" tf:"allowed_regions,omitempty"` +} + +type AllowlistOnlyObservation struct { + + // Two letter unicode region codes to allow as defined by https://cldr.unicode.org/ The full list of these region codes is here: https://github.com/unicode-cldr/cldr-localenames-full/blob/master/main/en/territories.json + AllowedRegions 
[]*string `json:"allowedRegions,omitempty" tf:"allowed_regions,omitempty"` +} + +type AllowlistOnlyParameters struct { + + // Two letter unicode region codes to allow as defined by https://cldr.unicode.org/ The full list of these region codes is here: https://github.com/unicode-cldr/cldr-localenames-full/blob/master/main/en/territories.json + // +kubebuilder:validation:Optional + AllowedRegions []*string `json:"allowedRegions,omitempty" tf:"allowed_regions,omitempty"` +} + +type AnonymousInitParameters struct { + + // Whether phone number auth is enabled for the project or not. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type AnonymousObservation struct { + + // Whether phone number auth is enabled for the project or not. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type AnonymousParameters struct { + + // Whether phone number auth is enabled for the project or not. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` +} + +type BlockingFunctionsInitParameters struct { + + // The user credentials to include in the JWT payload that is sent to the registered Blocking Functions. + // Structure is documented below. + ForwardInboundCredentials *ForwardInboundCredentialsInitParameters `json:"forwardInboundCredentials,omitempty" tf:"forward_inbound_credentials,omitempty"` + + // Map of Trigger to event type. Key should be one of the supported event types: "beforeCreate", "beforeSignIn". + // Structure is documented below. + Triggers []TriggersInitParameters `json:"triggers,omitempty" tf:"triggers,omitempty"` +} + +type BlockingFunctionsObservation struct { + + // The user credentials to include in the JWT payload that is sent to the registered Blocking Functions. + // Structure is documented below. 
+ ForwardInboundCredentials *ForwardInboundCredentialsObservation `json:"forwardInboundCredentials,omitempty" tf:"forward_inbound_credentials,omitempty"` + + // Map of Trigger to event type. Key should be one of the supported event types: "beforeCreate", "beforeSignIn". + // Structure is documented below. + Triggers []TriggersObservation `json:"triggers,omitempty" tf:"triggers,omitempty"` +} + +type BlockingFunctionsParameters struct { + + // The user credentials to include in the JWT payload that is sent to the registered Blocking Functions. + // Structure is documented below. + // +kubebuilder:validation:Optional + ForwardInboundCredentials *ForwardInboundCredentialsParameters `json:"forwardInboundCredentials,omitempty" tf:"forward_inbound_credentials,omitempty"` + + // Map of Trigger to event type. Key should be one of the supported event types: "beforeCreate", "beforeSignIn". + // Structure is documented below. + // +kubebuilder:validation:Optional + Triggers []TriggersParameters `json:"triggers" tf:"triggers,omitempty"` +} + +type ClientInitParameters struct { + + // Configuration related to restricting a user's ability to affect their account. + // Structure is documented below. + Permissions *PermissionsInitParameters `json:"permissions,omitempty" tf:"permissions,omitempty"` +} + +type ClientObservation struct { + + // (Output) + // Firebase subdomain. + FirebaseSubdomain *string `json:"firebaseSubdomain,omitempty" tf:"firebase_subdomain,omitempty"` + + // Configuration related to restricting a user's ability to affect their account. + // Structure is documented below. + Permissions *PermissionsObservation `json:"permissions,omitempty" tf:"permissions,omitempty"` +} + +type ClientParameters struct { + + // Configuration related to restricting a user's ability to affect their account. + // Structure is documented below. 
+ // +kubebuilder:validation:Optional + Permissions *PermissionsParameters `json:"permissions,omitempty" tf:"permissions,omitempty"` +} + +type ConfigInitParameters struct { + + // List of domains authorized for OAuth redirects. + AuthorizedDomains []*string `json:"authorizedDomains,omitempty" tf:"authorized_domains,omitempty"` + + // Whether anonymous users will be auto-deleted after a period of 30 days + AutodeleteAnonymousUsers *bool `json:"autodeleteAnonymousUsers,omitempty" tf:"autodelete_anonymous_users,omitempty"` + + // Configuration related to blocking functions. + // Structure is documented below. + BlockingFunctions *BlockingFunctionsInitParameters `json:"blockingFunctions,omitempty" tf:"blocking_functions,omitempty"` + + // Options related to how clients making requests on behalf of a project should be configured. + // Structure is documented below. + Client *ClientInitParameters `json:"client,omitempty" tf:"client,omitempty"` + + // Options related to how clients making requests on behalf of a project should be configured. + // Structure is documented below. + Mfa *MfaInitParameters `json:"mfa,omitempty" tf:"mfa,omitempty"` + + // Configuration related to monitoring project activity. + // Structure is documented below. + Monitoring *MonitoringInitParameters `json:"monitoring,omitempty" tf:"monitoring,omitempty"` + + // Configuration related to multi-tenant functionality. + // Structure is documented below. + MultiTenant *MultiTenantInitParameters `json:"multiTenant,omitempty" tf:"multi_tenant,omitempty"` + + // The ID of the project in which the resource belongs. + // If it is not provided, the provider project is used. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/cloudplatform/v1beta1.Project + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("project_id",false) + Project *string `json:"project,omitempty" tf:"project,omitempty"` + + // Reference to a Project in cloudplatform to populate project. + // +kubebuilder:validation:Optional + ProjectRef *v1.Reference `json:"projectRef,omitempty" tf:"-"` + + // Selector for a Project in cloudplatform to populate project. + // +kubebuilder:validation:Optional + ProjectSelector *v1.Selector `json:"projectSelector,omitempty" tf:"-"` + + // Configuration related to quotas. + // Structure is documented below. + Quota *QuotaInitParameters `json:"quota,omitempty" tf:"quota,omitempty"` + + // Configures the regions where users are allowed to send verification SMS for the project or tenant. This is based on the calling code of the destination phone number. + // Structure is documented below. + SMSRegionConfig *SMSRegionConfigInitParameters `json:"smsRegionConfig,omitempty" tf:"sms_region_config,omitempty"` + + // Configuration related to local sign in methods. + // Structure is documented below. + SignIn *SignInInitParameters `json:"signIn,omitempty" tf:"sign_in,omitempty"` +} + +type ConfigObservation struct { + + // List of domains authorized for OAuth redirects. + AuthorizedDomains []*string `json:"authorizedDomains,omitempty" tf:"authorized_domains,omitempty"` + + // Whether anonymous users will be auto-deleted after a period of 30 days + AutodeleteAnonymousUsers *bool `json:"autodeleteAnonymousUsers,omitempty" tf:"autodelete_anonymous_users,omitempty"` + + // Configuration related to blocking functions. + // Structure is documented below. + BlockingFunctions *BlockingFunctionsObservation `json:"blockingFunctions,omitempty" tf:"blocking_functions,omitempty"` + + // Options related to how clients making requests on behalf of a project should be configured. 
+ // Structure is documented below. + Client *ClientObservation `json:"client,omitempty" tf:"client,omitempty"` + + // an identifier for the resource with format projects/{{project}}/config + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Options related to how clients making requests on behalf of a project should be configured. + // Structure is documented below. + Mfa *MfaObservation `json:"mfa,omitempty" tf:"mfa,omitempty"` + + // Configuration related to monitoring project activity. + // Structure is documented below. + Monitoring *MonitoringObservation `json:"monitoring,omitempty" tf:"monitoring,omitempty"` + + // Configuration related to multi-tenant functionality. + // Structure is documented below. + MultiTenant *MultiTenantObservation `json:"multiTenant,omitempty" tf:"multi_tenant,omitempty"` + + // The name of the Config resource + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The ID of the project in which the resource belongs. + // If it is not provided, the provider project is used. + Project *string `json:"project,omitempty" tf:"project,omitempty"` + + // Configuration related to quotas. + // Structure is documented below. + Quota *QuotaObservation `json:"quota,omitempty" tf:"quota,omitempty"` + + // Configures the regions where users are allowed to send verification SMS for the project or tenant. This is based on the calling code of the destination phone number. + // Structure is documented below. + SMSRegionConfig *SMSRegionConfigObservation `json:"smsRegionConfig,omitempty" tf:"sms_region_config,omitempty"` + + // Configuration related to local sign in methods. + // Structure is documented below. + SignIn *SignInObservation `json:"signIn,omitempty" tf:"sign_in,omitempty"` +} + +type ConfigParameters struct { + + // List of domains authorized for OAuth redirects. 
+ // +kubebuilder:validation:Optional + AuthorizedDomains []*string `json:"authorizedDomains,omitempty" tf:"authorized_domains,omitempty"` + + // Whether anonymous users will be auto-deleted after a period of 30 days + // +kubebuilder:validation:Optional + AutodeleteAnonymousUsers *bool `json:"autodeleteAnonymousUsers,omitempty" tf:"autodelete_anonymous_users,omitempty"` + + // Configuration related to blocking functions. + // Structure is documented below. + // +kubebuilder:validation:Optional + BlockingFunctions *BlockingFunctionsParameters `json:"blockingFunctions,omitempty" tf:"blocking_functions,omitempty"` + + // Options related to how clients making requests on behalf of a project should be configured. + // Structure is documented below. + // +kubebuilder:validation:Optional + Client *ClientParameters `json:"client,omitempty" tf:"client,omitempty"` + + // Options related to how clients making requests on behalf of a project should be configured. + // Structure is documented below. + // +kubebuilder:validation:Optional + Mfa *MfaParameters `json:"mfa,omitempty" tf:"mfa,omitempty"` + + // Configuration related to monitoring project activity. + // Structure is documented below. + // +kubebuilder:validation:Optional + Monitoring *MonitoringParameters `json:"monitoring,omitempty" tf:"monitoring,omitempty"` + + // Configuration related to multi-tenant functionality. + // Structure is documented below. + // +kubebuilder:validation:Optional + MultiTenant *MultiTenantParameters `json:"multiTenant,omitempty" tf:"multi_tenant,omitempty"` + + // The ID of the project in which the resource belongs. + // If it is not provided, the provider project is used. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/cloudplatform/v1beta1.Project + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("project_id",false) + // +kubebuilder:validation:Optional + Project *string `json:"project,omitempty" tf:"project,omitempty"` + + // Reference to a Project in cloudplatform to populate project. + // +kubebuilder:validation:Optional + ProjectRef *v1.Reference `json:"projectRef,omitempty" tf:"-"` + + // Selector for a Project in cloudplatform to populate project. + // +kubebuilder:validation:Optional + ProjectSelector *v1.Selector `json:"projectSelector,omitempty" tf:"-"` + + // Configuration related to quotas. + // Structure is documented below. + // +kubebuilder:validation:Optional + Quota *QuotaParameters `json:"quota,omitempty" tf:"quota,omitempty"` + + // Configures the regions where users are allowed to send verification SMS for the project or tenant. This is based on the calling code of the destination phone number. + // Structure is documented below. + // +kubebuilder:validation:Optional + SMSRegionConfig *SMSRegionConfigParameters `json:"smsRegionConfig,omitempty" tf:"sms_region_config,omitempty"` + + // Configuration related to local sign in methods. + // Structure is documented below. + // +kubebuilder:validation:Optional + SignIn *SignInParameters `json:"signIn,omitempty" tf:"sign_in,omitempty"` +} + +type EmailInitParameters struct { + + // Whether phone number auth is enabled for the project or not. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Whether a password is required for email auth or not. If true, both an email and + // password must be provided to sign in. If false, a user may sign in via either + // email/password or email link. 
+ PasswordRequired *bool `json:"passwordRequired,omitempty" tf:"password_required,omitempty"` +} + +type EmailObservation struct { + + // Whether phone number auth is enabled for the project or not. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Whether a password is required for email auth or not. If true, both an email and + // password must be provided to sign in. If false, a user may sign in via either + // email/password or email link. + PasswordRequired *bool `json:"passwordRequired,omitempty" tf:"password_required,omitempty"` +} + +type EmailParameters struct { + + // Whether phone number auth is enabled for the project or not. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` + + // Whether a password is required for email auth or not. If true, both an email and + // password must be provided to sign in. If false, a user may sign in via either + // email/password or email link. + // +kubebuilder:validation:Optional + PasswordRequired *bool `json:"passwordRequired,omitempty" tf:"password_required,omitempty"` +} + +type ForwardInboundCredentialsInitParameters struct { + + // Whether to pass the user's OAuth identity provider's access token. + AccessToken *bool `json:"accessToken,omitempty" tf:"access_token,omitempty"` + + // Whether to pass the user's OIDC identity provider's ID token. + IDToken *bool `json:"idToken,omitempty" tf:"id_token,omitempty"` + + // Whether to pass the user's OAuth identity provider's refresh token. + RefreshToken *bool `json:"refreshToken,omitempty" tf:"refresh_token,omitempty"` +} + +type ForwardInboundCredentialsObservation struct { + + // Whether to pass the user's OAuth identity provider's access token. + AccessToken *bool `json:"accessToken,omitempty" tf:"access_token,omitempty"` + + // Whether to pass the user's OIDC identity provider's ID token. 
+ IDToken *bool `json:"idToken,omitempty" tf:"id_token,omitempty"` + + // Whether to pass the user's OAuth identity provider's refresh token. + RefreshToken *bool `json:"refreshToken,omitempty" tf:"refresh_token,omitempty"` +} + +type ForwardInboundCredentialsParameters struct { + + // Whether to pass the user's OAuth identity provider's access token. + // +kubebuilder:validation:Optional + AccessToken *bool `json:"accessToken,omitempty" tf:"access_token,omitempty"` + + // Whether to pass the user's OIDC identity provider's ID token. + // +kubebuilder:validation:Optional + IDToken *bool `json:"idToken,omitempty" tf:"id_token,omitempty"` + + // Whether to pass the user's OAuth identity provider's refresh token. + // +kubebuilder:validation:Optional + RefreshToken *bool `json:"refreshToken,omitempty" tf:"refresh_token,omitempty"` +} + +type HashConfigInitParameters struct { +} + +type HashConfigObservation struct { + + // (Output) + // Different password hash algorithms used in Identity Toolkit. + Algorithm *string `json:"algorithm,omitempty" tf:"algorithm,omitempty"` + + // (Output) + // Memory cost for hash calculation. Used by scrypt and other similar password derivation algorithms. See https://tools.ietf.org/html/rfc7914 for explanation of field. + MemoryCost *float64 `json:"memoryCost,omitempty" tf:"memory_cost,omitempty"` + + // (Output) + // How many rounds for hash calculation. Used by scrypt and other similar password derivation algorithms. + Rounds *float64 `json:"rounds,omitempty" tf:"rounds,omitempty"` + + // (Output) + // Non-printable character to be inserted between the salt and plain text password in base64. + SaltSeparator *string `json:"saltSeparator,omitempty" tf:"salt_separator,omitempty"` + + // (Output) + // Signer key in base64. + SignerKey *string `json:"signerKey,omitempty" tf:"signer_key,omitempty"` +} + +type HashConfigParameters struct { +} + +type MfaInitParameters struct { + + // A list of usable second factors for this project. 
+ // Each value may be one of: PHONE_SMS. + EnabledProviders []*string `json:"enabledProviders,omitempty" tf:"enabled_providers,omitempty"` + + // A list of usable second factors for this project along with their configurations. + // This field does not support phone based MFA, for that use the 'enabledProviders' field. + // Structure is documented below. + ProviderConfigs []ProviderConfigsInitParameters `json:"providerConfigs,omitempty" tf:"provider_configs,omitempty"` + + // Whether MultiFactor Authentication has been enabled for this project. + // Possible values are: DISABLED, ENABLED, MANDATORY. + State *string `json:"state,omitempty" tf:"state,omitempty"` +} + +type MfaObservation struct { + + // A list of usable second factors for this project. + // Each value may be one of: PHONE_SMS. + EnabledProviders []*string `json:"enabledProviders,omitempty" tf:"enabled_providers,omitempty"` + + // A list of usable second factors for this project along with their configurations. + // This field does not support phone based MFA, for that use the 'enabledProviders' field. + // Structure is documented below. + ProviderConfigs []ProviderConfigsObservation `json:"providerConfigs,omitempty" tf:"provider_configs,omitempty"` + + // Whether MultiFactor Authentication has been enabled for this project. + // Possible values are: DISABLED, ENABLED, MANDATORY. + State *string `json:"state,omitempty" tf:"state,omitempty"` +} + +type MfaParameters struct { + + // A list of usable second factors for this project. + // Each value may be one of: PHONE_SMS. + // +kubebuilder:validation:Optional + EnabledProviders []*string `json:"enabledProviders,omitempty" tf:"enabled_providers,omitempty"` + + // A list of usable second factors for this project along with their configurations. + // This field does not support phone based MFA, for that use the 'enabledProviders' field. + // Structure is documented below. 
+ // +kubebuilder:validation:Optional + ProviderConfigs []ProviderConfigsParameters `json:"providerConfigs,omitempty" tf:"provider_configs,omitempty"` + + // Whether MultiFactor Authentication has been enabled for this project. + // Possible values are: DISABLED, ENABLED, MANDATORY. + // +kubebuilder:validation:Optional + State *string `json:"state,omitempty" tf:"state,omitempty"` +} + +type MonitoringInitParameters struct { + + // Configuration for logging requests made to this project to Stackdriver Logging + // Structure is documented below. + RequestLogging *RequestLoggingInitParameters `json:"requestLogging,omitempty" tf:"request_logging,omitempty"` +} + +type MonitoringObservation struct { + + // Configuration for logging requests made to this project to Stackdriver Logging + // Structure is documented below. + RequestLogging *RequestLoggingObservation `json:"requestLogging,omitempty" tf:"request_logging,omitempty"` +} + +type MonitoringParameters struct { + + // Configuration for logging requests made to this project to Stackdriver Logging + // Structure is documented below. + // +kubebuilder:validation:Optional + RequestLogging *RequestLoggingParameters `json:"requestLogging,omitempty" tf:"request_logging,omitempty"` +} + +type MultiTenantInitParameters struct { + + // Whether this project can have tenants or not. + AllowTenants *bool `json:"allowTenants,omitempty" tf:"allow_tenants,omitempty"` + + // The default cloud parent org or folder that the tenant project should be created under. + // The parent resource name should be in the format of "/", such as "folders/123" or "organizations/456". + // If the value is not set, the tenant will be created under the same organization or folder as the agent project. + DefaultTenantLocation *string `json:"defaultTenantLocation,omitempty" tf:"default_tenant_location,omitempty"` +} + +type MultiTenantObservation struct { + + // Whether this project can have tenants or not. 
+ AllowTenants *bool `json:"allowTenants,omitempty" tf:"allow_tenants,omitempty"` + + // The default cloud parent org or folder that the tenant project should be created under. + // The parent resource name should be in the format of "/", such as "folders/123" or "organizations/456". + // If the value is not set, the tenant will be created under the same organization or folder as the agent project. + DefaultTenantLocation *string `json:"defaultTenantLocation,omitempty" tf:"default_tenant_location,omitempty"` +} + +type MultiTenantParameters struct { + + // Whether this project can have tenants or not. + // +kubebuilder:validation:Optional + AllowTenants *bool `json:"allowTenants,omitempty" tf:"allow_tenants,omitempty"` + + // The default cloud parent org or folder that the tenant project should be created under. + // The parent resource name should be in the format of "/", such as "folders/123" or "organizations/456". + // If the value is not set, the tenant will be created under the same organization or folder as the agent project. 
+ // +kubebuilder:validation:Optional + DefaultTenantLocation *string `json:"defaultTenantLocation,omitempty" tf:"default_tenant_location,omitempty"` +} + +type PermissionsInitParameters struct { + + // When true, end users cannot delete their account on the associated project through any of our API methods + DisabledUserDeletion *bool `json:"disabledUserDeletion,omitempty" tf:"disabled_user_deletion,omitempty"` + + // When true, end users cannot sign up for a new account on the associated project through any of our API methods + DisabledUserSignup *bool `json:"disabledUserSignup,omitempty" tf:"disabled_user_signup,omitempty"` +} + +type PermissionsObservation struct { + + // When true, end users cannot delete their account on the associated project through any of our API methods + DisabledUserDeletion *bool `json:"disabledUserDeletion,omitempty" tf:"disabled_user_deletion,omitempty"` + + // When true, end users cannot sign up for a new account on the associated project through any of our API methods + DisabledUserSignup *bool `json:"disabledUserSignup,omitempty" tf:"disabled_user_signup,omitempty"` +} + +type PermissionsParameters struct { + + // When true, end users cannot delete their account on the associated project through any of our API methods + // +kubebuilder:validation:Optional + DisabledUserDeletion *bool `json:"disabledUserDeletion,omitempty" tf:"disabled_user_deletion,omitempty"` + + // When true, end users cannot sign up for a new account on the associated project through any of our API methods + // +kubebuilder:validation:Optional + DisabledUserSignup *bool `json:"disabledUserSignup,omitempty" tf:"disabled_user_signup,omitempty"` +} + +type PhoneNumberInitParameters struct { + + // Whether phone number auth is enabled for the project or not. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A map of that can be used for phone auth testing. 
+ // +mapType=granular + TestPhoneNumbers map[string]*string `json:"testPhoneNumbers,omitempty" tf:"test_phone_numbers,omitempty"` +} + +type PhoneNumberObservation struct { + + // Whether phone number auth is enabled for the project or not. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A map of that can be used for phone auth testing. + // +mapType=granular + TestPhoneNumbers map[string]*string `json:"testPhoneNumbers,omitempty" tf:"test_phone_numbers,omitempty"` +} + +type PhoneNumberParameters struct { + + // Whether phone number auth is enabled for the project or not. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` + + // A map of that can be used for phone auth testing. + // +kubebuilder:validation:Optional + // +mapType=granular + TestPhoneNumbers map[string]*string `json:"testPhoneNumbers,omitempty" tf:"test_phone_numbers,omitempty"` +} + +type ProviderConfigsInitParameters struct { + + // Whether MultiFactor Authentication has been enabled for this project. + // Possible values are: DISABLED, ENABLED, MANDATORY. + State *string `json:"state,omitempty" tf:"state,omitempty"` + + // TOTP MFA provider config for this project. + // Structure is documented below. + TotpProviderConfig *TotpProviderConfigInitParameters `json:"totpProviderConfig,omitempty" tf:"totp_provider_config,omitempty"` +} + +type ProviderConfigsObservation struct { + + // Whether MultiFactor Authentication has been enabled for this project. + // Possible values are: DISABLED, ENABLED, MANDATORY. + State *string `json:"state,omitempty" tf:"state,omitempty"` + + // TOTP MFA provider config for this project. + // Structure is documented below. + TotpProviderConfig *TotpProviderConfigObservation `json:"totpProviderConfig,omitempty" tf:"totp_provider_config,omitempty"` +} + +type ProviderConfigsParameters struct { + + // Whether MultiFactor Authentication has been enabled for this project. 
+ // Possible values are: DISABLED, ENABLED, MANDATORY. + // +kubebuilder:validation:Optional + State *string `json:"state,omitempty" tf:"state,omitempty"` + + // TOTP MFA provider config for this project. + // Structure is documented below. + // +kubebuilder:validation:Optional + TotpProviderConfig *TotpProviderConfigParameters `json:"totpProviderConfig,omitempty" tf:"totp_provider_config,omitempty"` +} + +type QuotaInitParameters struct { + + // Quota for the Signup endpoint, if overwritten. Signup quota is measured in sign ups per project per hour per IP. None of quota, startTime, or quotaDuration can be skipped. + // Structure is documented below. + SignUpQuotaConfig *SignUpQuotaConfigInitParameters `json:"signUpQuotaConfig,omitempty" tf:"sign_up_quota_config,omitempty"` +} + +type QuotaObservation struct { + + // Quota for the Signup endpoint, if overwritten. Signup quota is measured in sign ups per project per hour per IP. None of quota, startTime, or quotaDuration can be skipped. + // Structure is documented below. + SignUpQuotaConfig *SignUpQuotaConfigObservation `json:"signUpQuotaConfig,omitempty" tf:"sign_up_quota_config,omitempty"` +} + +type QuotaParameters struct { + + // Quota for the Signup endpoint, if overwritten. Signup quota is measured in sign ups per project per hour per IP. None of quota, startTime, or quotaDuration can be skipped. + // Structure is documented below. + // +kubebuilder:validation:Optional + SignUpQuotaConfig *SignUpQuotaConfigParameters `json:"signUpQuotaConfig,omitempty" tf:"sign_up_quota_config,omitempty"` +} + +type RequestLoggingInitParameters struct { + + // Whether logging is enabled for this project or not. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type RequestLoggingObservation struct { + + // Whether logging is enabled for this project or not. 
+ Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type RequestLoggingParameters struct { + + // Whether logging is enabled for this project or not. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type SMSRegionConfigInitParameters struct { + + // A policy of allowing SMS to every region by default and adding disallowed regions to a disallow list. + // Structure is documented below. + AllowByDefault *AllowByDefaultInitParameters `json:"allowByDefault,omitempty" tf:"allow_by_default,omitempty"` + + // A policy of only allowing regions by explicitly adding them to an allowlist. + // Structure is documented below. + AllowlistOnly *AllowlistOnlyInitParameters `json:"allowlistOnly,omitempty" tf:"allowlist_only,omitempty"` +} + +type SMSRegionConfigObservation struct { + + // A policy of allowing SMS to every region by default and adding disallowed regions to a disallow list. + // Structure is documented below. + AllowByDefault *AllowByDefaultObservation `json:"allowByDefault,omitempty" tf:"allow_by_default,omitempty"` + + // A policy of only allowing regions by explicitly adding them to an allowlist. + // Structure is documented below. + AllowlistOnly *AllowlistOnlyObservation `json:"allowlistOnly,omitempty" tf:"allowlist_only,omitempty"` +} + +type SMSRegionConfigParameters struct { + + // A policy of allowing SMS to every region by default and adding disallowed regions to a disallow list. + // Structure is documented below. + // +kubebuilder:validation:Optional + AllowByDefault *AllowByDefaultParameters `json:"allowByDefault,omitempty" tf:"allow_by_default,omitempty"` + + // A policy of only allowing regions by explicitly adding them to an allowlist. + // Structure is documented below. 
+ // +kubebuilder:validation:Optional + AllowlistOnly *AllowlistOnlyParameters `json:"allowlistOnly,omitempty" tf:"allowlist_only,omitempty"` +} + +type SignInInitParameters struct { + + // Whether to allow more than one account to have the same email. + AllowDuplicateEmails *bool `json:"allowDuplicateEmails,omitempty" tf:"allow_duplicate_emails,omitempty"` + + // Configuration options related to authenticating an anonymous user. + // Structure is documented below. + Anonymous *AnonymousInitParameters `json:"anonymous,omitempty" tf:"anonymous,omitempty"` + + // Configuration options related to authenticating a user by their email address. + // Structure is documented below. + Email *EmailInitParameters `json:"email,omitempty" tf:"email,omitempty"` + + // Configuration options related to authenticated a user by their phone number. + // Structure is documented below. + PhoneNumber *PhoneNumberInitParameters `json:"phoneNumber,omitempty" tf:"phone_number,omitempty"` +} + +type SignInObservation struct { + + // Whether to allow more than one account to have the same email. + AllowDuplicateEmails *bool `json:"allowDuplicateEmails,omitempty" tf:"allow_duplicate_emails,omitempty"` + + // Configuration options related to authenticating an anonymous user. + // Structure is documented below. + Anonymous *AnonymousObservation `json:"anonymous,omitempty" tf:"anonymous,omitempty"` + + // Configuration options related to authenticating a user by their email address. + // Structure is documented below. + Email *EmailObservation `json:"email,omitempty" tf:"email,omitempty"` + + // (Output) + // Output only. Hash config information. + // Structure is documented below. + HashConfig []HashConfigObservation `json:"hashConfig,omitempty" tf:"hash_config,omitempty"` + + // Configuration options related to authenticated a user by their phone number. + // Structure is documented below. 
+ PhoneNumber *PhoneNumberObservation `json:"phoneNumber,omitempty" tf:"phone_number,omitempty"` +} + +type SignInParameters struct { + + // Whether to allow more than one account to have the same email. + // +kubebuilder:validation:Optional + AllowDuplicateEmails *bool `json:"allowDuplicateEmails,omitempty" tf:"allow_duplicate_emails,omitempty"` + + // Configuration options related to authenticating an anonymous user. + // Structure is documented below. + // +kubebuilder:validation:Optional + Anonymous *AnonymousParameters `json:"anonymous,omitempty" tf:"anonymous,omitempty"` + + // Configuration options related to authenticating a user by their email address. + // Structure is documented below. + // +kubebuilder:validation:Optional + Email *EmailParameters `json:"email,omitempty" tf:"email,omitempty"` + + // Configuration options related to authenticated a user by their phone number. + // Structure is documented below. + // +kubebuilder:validation:Optional + PhoneNumber *PhoneNumberParameters `json:"phoneNumber,omitempty" tf:"phone_number,omitempty"` +} + +type SignUpQuotaConfigInitParameters struct { + + // A sign up APIs quota that customers can override temporarily. Value can be in between 1 and 1000. + Quota *float64 `json:"quota,omitempty" tf:"quota,omitempty"` + + // How long this quota will be active for. It is measurred in seconds, e.g., Example: "9.615s". + QuotaDuration *string `json:"quotaDuration,omitempty" tf:"quota_duration,omitempty"` + + // When this quota will take affect. + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` +} + +type SignUpQuotaConfigObservation struct { + + // A sign up APIs quota that customers can override temporarily. Value can be in between 1 and 1000. + Quota *float64 `json:"quota,omitempty" tf:"quota,omitempty"` + + // How long this quota will be active for. It is measurred in seconds, e.g., Example: "9.615s". 
+ QuotaDuration *string `json:"quotaDuration,omitempty" tf:"quota_duration,omitempty"` + + // When this quota will take affect. + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` +} + +type SignUpQuotaConfigParameters struct { + + // A sign up APIs quota that customers can override temporarily. Value can be in between 1 and 1000. + // +kubebuilder:validation:Optional + Quota *float64 `json:"quota,omitempty" tf:"quota,omitempty"` + + // How long this quota will be active for. It is measurred in seconds, e.g., Example: "9.615s". + // +kubebuilder:validation:Optional + QuotaDuration *string `json:"quotaDuration,omitempty" tf:"quota_duration,omitempty"` + + // When this quota will take affect. + // +kubebuilder:validation:Optional + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` +} + +type TotpProviderConfigInitParameters struct { + + // The allowed number of adjacent intervals that will be used for verification to avoid clock skew. + AdjacentIntervals *float64 `json:"adjacentIntervals,omitempty" tf:"adjacent_intervals,omitempty"` +} + +type TotpProviderConfigObservation struct { + + // The allowed number of adjacent intervals that will be used for verification to avoid clock skew. + AdjacentIntervals *float64 `json:"adjacentIntervals,omitempty" tf:"adjacent_intervals,omitempty"` +} + +type TotpProviderConfigParameters struct { + + // The allowed number of adjacent intervals that will be used for verification to avoid clock skew. + // +kubebuilder:validation:Optional + AdjacentIntervals *float64 `json:"adjacentIntervals,omitempty" tf:"adjacent_intervals,omitempty"` +} + +type TriggersInitParameters struct { + + // The identifier for this object. Format specified above. + EventType *string `json:"eventType,omitempty" tf:"event_type,omitempty"` + + // HTTP URI trigger for the Cloud Function. 
+ FunctionURI *string `json:"functionUri,omitempty" tf:"function_uri,omitempty"` +} + +type TriggersObservation struct { + + // The identifier for this object. Format specified above. + EventType *string `json:"eventType,omitempty" tf:"event_type,omitempty"` + + // HTTP URI trigger for the Cloud Function. + FunctionURI *string `json:"functionUri,omitempty" tf:"function_uri,omitempty"` + + // (Output) + // When the trigger was changed. + UpdateTime *string `json:"updateTime,omitempty" tf:"update_time,omitempty"` +} + +type TriggersParameters struct { + + // The identifier for this object. Format specified above. + // +kubebuilder:validation:Optional + EventType *string `json:"eventType" tf:"event_type,omitempty"` + + // HTTP URI trigger for the Cloud Function. + // +kubebuilder:validation:Optional + FunctionURI *string `json:"functionUri" tf:"function_uri,omitempty"` +} + +// ConfigSpec defines the desired state of Config +type ConfigSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ConfigParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ConfigInitParameters `json:"initProvider,omitempty"` +} + +// ConfigStatus defines the observed state of Config. 
+type ConfigStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ConfigObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// Config is the Schema for the Configs API. Identity Platform configuration for a Cloud project. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,gcp} +type Config struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec ConfigSpec `json:"spec"` + Status ConfigStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ConfigList contains a list of Configs +type ConfigList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Config `json:"items"` +} + +// Repository type metadata. +var ( + Config_Kind = "Config" + Config_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Config_Kind}.String() + Config_KindAPIVersion = Config_Kind + "." 
+ CRDGroupVersion.String() + Config_GroupVersionKind = CRDGroupVersion.WithKind(Config_Kind) +) + +func init() { + SchemeBuilder.Register(&Config{}, &ConfigList{}) +} diff --git a/apis/identityplatform/v1beta1/zz_generated.conversion_hubs.go b/apis/identityplatform/v1beta1/zz_generated.conversion_hubs.go index 0ab6572a9..663ed675e 100755 --- a/apis/identityplatform/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/identityplatform/v1beta1/zz_generated.conversion_hubs.go @@ -6,6 +6,9 @@ package v1beta1 +// Hub marks this type as a conversion hub. +func (tr *Config) Hub() {} + // Hub marks this type as a conversion hub. func (tr *DefaultSupportedIdPConfig) Hub() {} diff --git a/apis/identityplatform/v1beta1/zz_generated.conversion_spokes.go b/apis/identityplatform/v1beta1/zz_generated.conversion_spokes.go index 5c8d76c9f..e634344f0 100755 --- a/apis/identityplatform/v1beta1/zz_generated.conversion_spokes.go +++ b/apis/identityplatform/v1beta1/zz_generated.conversion_spokes.go @@ -33,26 +33,6 @@ func (tr *InboundSAMLConfig) ConvertFrom(srcRaw conversion.Hub) error { return nil } -// ConvertTo converts this ProjectDefaultConfig to the hub type. -func (tr *ProjectDefaultConfig) ConvertTo(dstRaw conversion.Hub) error { - spokeVersion := tr.GetObjectKind().GroupVersionKind().Version - hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version - if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { - return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) - } - return nil -} - -// ConvertFrom converts from the hub type to the ProjectDefaultConfig type. 
-func (tr *ProjectDefaultConfig) ConvertFrom(srcRaw conversion.Hub) error { - spokeVersion := tr.GetObjectKind().GroupVersionKind().Version - hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version - if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { - return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) - } - return nil -} - // ConvertTo converts this TenantInboundSAMLConfig to the hub type. func (tr *TenantInboundSAMLConfig) ConvertTo(dstRaw conversion.Hub) error { spokeVersion := tr.GetObjectKind().GroupVersionKind().Version diff --git a/apis/identityplatform/v1beta1/zz_generated.deepcopy.go b/apis/identityplatform/v1beta1/zz_generated.deepcopy.go index 95ebee0d6..6c29f1e91 100644 --- a/apis/identityplatform/v1beta1/zz_generated.deepcopy.go +++ b/apis/identityplatform/v1beta1/zz_generated.deepcopy.go @@ -13,6 +13,162 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllowByDefaultInitParameters) DeepCopyInto(out *AllowByDefaultInitParameters) { + *out = *in + if in.DisallowedRegions != nil { + in, out := &in.DisallowedRegions, &out.DisallowedRegions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowByDefaultInitParameters. +func (in *AllowByDefaultInitParameters) DeepCopy() *AllowByDefaultInitParameters { + if in == nil { + return nil + } + out := new(AllowByDefaultInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AllowByDefaultObservation) DeepCopyInto(out *AllowByDefaultObservation) { + *out = *in + if in.DisallowedRegions != nil { + in, out := &in.DisallowedRegions, &out.DisallowedRegions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowByDefaultObservation. +func (in *AllowByDefaultObservation) DeepCopy() *AllowByDefaultObservation { + if in == nil { + return nil + } + out := new(AllowByDefaultObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllowByDefaultParameters) DeepCopyInto(out *AllowByDefaultParameters) { + *out = *in + if in.DisallowedRegions != nil { + in, out := &in.DisallowedRegions, &out.DisallowedRegions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowByDefaultParameters. +func (in *AllowByDefaultParameters) DeepCopy() *AllowByDefaultParameters { + if in == nil { + return nil + } + out := new(AllowByDefaultParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AllowlistOnlyInitParameters) DeepCopyInto(out *AllowlistOnlyInitParameters) { + *out = *in + if in.AllowedRegions != nil { + in, out := &in.AllowedRegions, &out.AllowedRegions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowlistOnlyInitParameters. +func (in *AllowlistOnlyInitParameters) DeepCopy() *AllowlistOnlyInitParameters { + if in == nil { + return nil + } + out := new(AllowlistOnlyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllowlistOnlyObservation) DeepCopyInto(out *AllowlistOnlyObservation) { + *out = *in + if in.AllowedRegions != nil { + in, out := &in.AllowedRegions, &out.AllowedRegions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowlistOnlyObservation. +func (in *AllowlistOnlyObservation) DeepCopy() *AllowlistOnlyObservation { + if in == nil { + return nil + } + out := new(AllowlistOnlyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AllowlistOnlyParameters) DeepCopyInto(out *AllowlistOnlyParameters) { + *out = *in + if in.AllowedRegions != nil { + in, out := &in.AllowedRegions, &out.AllowedRegions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowlistOnlyParameters. +func (in *AllowlistOnlyParameters) DeepCopy() *AllowlistOnlyParameters { + if in == nil { + return nil + } + out := new(AllowlistOnlyParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AnonymousInitParameters) DeepCopyInto(out *AnonymousInitParameters) { *out = *in @@ -73,6 +229,504 @@ func (in *AnonymousParameters) DeepCopy() *AnonymousParameters { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BlockingFunctionsInitParameters) DeepCopyInto(out *BlockingFunctionsInitParameters) { + *out = *in + if in.ForwardInboundCredentials != nil { + in, out := &in.ForwardInboundCredentials, &out.ForwardInboundCredentials + *out = new(ForwardInboundCredentialsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Triggers != nil { + in, out := &in.Triggers, &out.Triggers + *out = make([]TriggersInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlockingFunctionsInitParameters. 
+func (in *BlockingFunctionsInitParameters) DeepCopy() *BlockingFunctionsInitParameters { + if in == nil { + return nil + } + out := new(BlockingFunctionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BlockingFunctionsObservation) DeepCopyInto(out *BlockingFunctionsObservation) { + *out = *in + if in.ForwardInboundCredentials != nil { + in, out := &in.ForwardInboundCredentials, &out.ForwardInboundCredentials + *out = new(ForwardInboundCredentialsObservation) + (*in).DeepCopyInto(*out) + } + if in.Triggers != nil { + in, out := &in.Triggers, &out.Triggers + *out = make([]TriggersObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlockingFunctionsObservation. +func (in *BlockingFunctionsObservation) DeepCopy() *BlockingFunctionsObservation { + if in == nil { + return nil + } + out := new(BlockingFunctionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BlockingFunctionsParameters) DeepCopyInto(out *BlockingFunctionsParameters) { + *out = *in + if in.ForwardInboundCredentials != nil { + in, out := &in.ForwardInboundCredentials, &out.ForwardInboundCredentials + *out = new(ForwardInboundCredentialsParameters) + (*in).DeepCopyInto(*out) + } + if in.Triggers != nil { + in, out := &in.Triggers, &out.Triggers + *out = make([]TriggersParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlockingFunctionsParameters. 
+func (in *BlockingFunctionsParameters) DeepCopy() *BlockingFunctionsParameters { + if in == nil { + return nil + } + out := new(BlockingFunctionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientInitParameters) DeepCopyInto(out *ClientInitParameters) { + *out = *in + if in.Permissions != nil { + in, out := &in.Permissions, &out.Permissions + *out = new(PermissionsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientInitParameters. +func (in *ClientInitParameters) DeepCopy() *ClientInitParameters { + if in == nil { + return nil + } + out := new(ClientInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientObservation) DeepCopyInto(out *ClientObservation) { + *out = *in + if in.FirebaseSubdomain != nil { + in, out := &in.FirebaseSubdomain, &out.FirebaseSubdomain + *out = new(string) + **out = **in + } + if in.Permissions != nil { + in, out := &in.Permissions, &out.Permissions + *out = new(PermissionsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientObservation. +func (in *ClientObservation) DeepCopy() *ClientObservation { + if in == nil { + return nil + } + out := new(ClientObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClientParameters) DeepCopyInto(out *ClientParameters) { + *out = *in + if in.Permissions != nil { + in, out := &in.Permissions, &out.Permissions + *out = new(PermissionsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientParameters. +func (in *ClientParameters) DeepCopy() *ClientParameters { + if in == nil { + return nil + } + out := new(ClientParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Config) DeepCopyInto(out *Config) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config. +func (in *Config) DeepCopy() *Config { + if in == nil { + return nil + } + out := new(Config) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Config) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigInitParameters) DeepCopyInto(out *ConfigInitParameters) { + *out = *in + if in.AuthorizedDomains != nil { + in, out := &in.AuthorizedDomains, &out.AuthorizedDomains + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AutodeleteAnonymousUsers != nil { + in, out := &in.AutodeleteAnonymousUsers, &out.AutodeleteAnonymousUsers + *out = new(bool) + **out = **in + } + if in.BlockingFunctions != nil { + in, out := &in.BlockingFunctions, &out.BlockingFunctions + *out = new(BlockingFunctionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Client != nil { + in, out := &in.Client, &out.Client + *out = new(ClientInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Mfa != nil { + in, out := &in.Mfa, &out.Mfa + *out = new(MfaInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Monitoring != nil { + in, out := &in.Monitoring, &out.Monitoring + *out = new(MonitoringInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MultiTenant != nil { + in, out := &in.MultiTenant, &out.MultiTenant + *out = new(MultiTenantInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Project != nil { + in, out := &in.Project, &out.Project + *out = new(string) + **out = **in + } + if in.ProjectRef != nil { + in, out := &in.ProjectRef, &out.ProjectRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ProjectSelector != nil { + in, out := &in.ProjectSelector, &out.ProjectSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Quota != nil { + in, out := &in.Quota, &out.Quota + *out = new(QuotaInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SMSRegionConfig != nil { + in, out := &in.SMSRegionConfig, &out.SMSRegionConfig + *out = new(SMSRegionConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SignIn != nil { + in, out := &in.SignIn, &out.SignIn + *out = new(SignInInitParameters) + (*in).DeepCopyInto(*out) + } +} + 
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigInitParameters. +func (in *ConfigInitParameters) DeepCopy() *ConfigInitParameters { + if in == nil { + return nil + } + out := new(ConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigList) DeepCopyInto(out *ConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Config, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigList. +func (in *ConfigList) DeepCopy() *ConfigList { + if in == nil { + return nil + } + out := new(ConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigObservation) DeepCopyInto(out *ConfigObservation) { + *out = *in + if in.AuthorizedDomains != nil { + in, out := &in.AuthorizedDomains, &out.AuthorizedDomains + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AutodeleteAnonymousUsers != nil { + in, out := &in.AutodeleteAnonymousUsers, &out.AutodeleteAnonymousUsers + *out = new(bool) + **out = **in + } + if in.BlockingFunctions != nil { + in, out := &in.BlockingFunctions, &out.BlockingFunctions + *out = new(BlockingFunctionsObservation) + (*in).DeepCopyInto(*out) + } + if in.Client != nil { + in, out := &in.Client, &out.Client + *out = new(ClientObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Mfa != nil { + in, out := &in.Mfa, &out.Mfa + *out = new(MfaObservation) + (*in).DeepCopyInto(*out) + } + if in.Monitoring != nil { + in, out := &in.Monitoring, &out.Monitoring + *out = new(MonitoringObservation) + (*in).DeepCopyInto(*out) + } + if in.MultiTenant != nil { + in, out := &in.MultiTenant, &out.MultiTenant + *out = new(MultiTenantObservation) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Project != nil { + in, out := &in.Project, &out.Project + *out = new(string) + **out = **in + } + if in.Quota != nil { + in, out := &in.Quota, &out.Quota + *out = new(QuotaObservation) + (*in).DeepCopyInto(*out) + } + if in.SMSRegionConfig != nil { + in, out := &in.SMSRegionConfig, &out.SMSRegionConfig + *out = new(SMSRegionConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.SignIn != nil { + in, out := &in.SignIn, &out.SignIn + *out = new(SignInObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigObservation. 
+func (in *ConfigObservation) DeepCopy() *ConfigObservation { + if in == nil { + return nil + } + out := new(ConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigParameters) DeepCopyInto(out *ConfigParameters) { + *out = *in + if in.AuthorizedDomains != nil { + in, out := &in.AuthorizedDomains, &out.AuthorizedDomains + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AutodeleteAnonymousUsers != nil { + in, out := &in.AutodeleteAnonymousUsers, &out.AutodeleteAnonymousUsers + *out = new(bool) + **out = **in + } + if in.BlockingFunctions != nil { + in, out := &in.BlockingFunctions, &out.BlockingFunctions + *out = new(BlockingFunctionsParameters) + (*in).DeepCopyInto(*out) + } + if in.Client != nil { + in, out := &in.Client, &out.Client + *out = new(ClientParameters) + (*in).DeepCopyInto(*out) + } + if in.Mfa != nil { + in, out := &in.Mfa, &out.Mfa + *out = new(MfaParameters) + (*in).DeepCopyInto(*out) + } + if in.Monitoring != nil { + in, out := &in.Monitoring, &out.Monitoring + *out = new(MonitoringParameters) + (*in).DeepCopyInto(*out) + } + if in.MultiTenant != nil { + in, out := &in.MultiTenant, &out.MultiTenant + *out = new(MultiTenantParameters) + (*in).DeepCopyInto(*out) + } + if in.Project != nil { + in, out := &in.Project, &out.Project + *out = new(string) + **out = **in + } + if in.ProjectRef != nil { + in, out := &in.ProjectRef, &out.ProjectRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ProjectSelector != nil { + in, out := &in.ProjectSelector, &out.ProjectSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Quota != nil { + in, out := &in.Quota, &out.Quota + *out = new(QuotaParameters) + (*in).DeepCopyInto(*out) + } + if in.SMSRegionConfig != nil { + in, 
out := &in.SMSRegionConfig, &out.SMSRegionConfig + *out = new(SMSRegionConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.SignIn != nil { + in, out := &in.SignIn, &out.SignIn + *out = new(SignInParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigParameters. +func (in *ConfigParameters) DeepCopy() *ConfigParameters { + if in == nil { + return nil + } + out := new(ConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigSpec) DeepCopyInto(out *ConfigSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigSpec. +func (in *ConfigSpec) DeepCopy() *ConfigSpec { + if in == nil { + return nil + } + out := new(ConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigStatus) DeepCopyInto(out *ConfigStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigStatus. +func (in *ConfigStatus) DeepCopy() *ConfigStatus { + if in == nil { + return nil + } + out := new(ConfigStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *DefaultSupportedIdPConfig) DeepCopyInto(out *DefaultSupportedIdPConfig) { *out = *in @@ -291,57 +945,147 @@ func (in *EmailInitParameters) DeepCopy() *EmailInitParameters { if in == nil { return nil } - out := new(EmailInitParameters) + out := new(EmailInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EmailObservation) DeepCopyInto(out *EmailObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.PasswordRequired != nil { + in, out := &in.PasswordRequired, &out.PasswordRequired + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmailObservation. +func (in *EmailObservation) DeepCopy() *EmailObservation { + if in == nil { + return nil + } + out := new(EmailObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EmailParameters) DeepCopyInto(out *EmailParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.PasswordRequired != nil { + in, out := &in.PasswordRequired, &out.PasswordRequired + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmailParameters. +func (in *EmailParameters) DeepCopy() *EmailParameters { + if in == nil { + return nil + } + out := new(EmailParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ForwardInboundCredentialsInitParameters) DeepCopyInto(out *ForwardInboundCredentialsInitParameters) { + *out = *in + if in.AccessToken != nil { + in, out := &in.AccessToken, &out.AccessToken + *out = new(bool) + **out = **in + } + if in.IDToken != nil { + in, out := &in.IDToken, &out.IDToken + *out = new(bool) + **out = **in + } + if in.RefreshToken != nil { + in, out := &in.RefreshToken, &out.RefreshToken + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForwardInboundCredentialsInitParameters. +func (in *ForwardInboundCredentialsInitParameters) DeepCopy() *ForwardInboundCredentialsInitParameters { + if in == nil { + return nil + } + out := new(ForwardInboundCredentialsInitParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EmailObservation) DeepCopyInto(out *EmailObservation) { +func (in *ForwardInboundCredentialsObservation) DeepCopyInto(out *ForwardInboundCredentialsObservation) { *out = *in - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled + if in.AccessToken != nil { + in, out := &in.AccessToken, &out.AccessToken *out = new(bool) **out = **in } - if in.PasswordRequired != nil { - in, out := &in.PasswordRequired, &out.PasswordRequired + if in.IDToken != nil { + in, out := &in.IDToken, &out.IDToken + *out = new(bool) + **out = **in + } + if in.RefreshToken != nil { + in, out := &in.RefreshToken, &out.RefreshToken *out = new(bool) **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmailObservation. -func (in *EmailObservation) DeepCopy() *EmailObservation { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForwardInboundCredentialsObservation. 
+func (in *ForwardInboundCredentialsObservation) DeepCopy() *ForwardInboundCredentialsObservation { if in == nil { return nil } - out := new(EmailObservation) + out := new(ForwardInboundCredentialsObservation) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EmailParameters) DeepCopyInto(out *EmailParameters) { +func (in *ForwardInboundCredentialsParameters) DeepCopyInto(out *ForwardInboundCredentialsParameters) { *out = *in - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled + if in.AccessToken != nil { + in, out := &in.AccessToken, &out.AccessToken *out = new(bool) **out = **in } - if in.PasswordRequired != nil { - in, out := &in.PasswordRequired, &out.PasswordRequired + if in.IDToken != nil { + in, out := &in.IDToken, &out.IDToken + *out = new(bool) + **out = **in + } + if in.RefreshToken != nil { + in, out := &in.RefreshToken, &out.RefreshToken *out = new(bool) **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmailParameters. -func (in *EmailParameters) DeepCopy() *EmailParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForwardInboundCredentialsParameters. +func (in *ForwardInboundCredentialsParameters) DeepCopy() *ForwardInboundCredentialsParameters { if in == nil { return nil } - out := new(EmailParameters) + out := new(ForwardInboundCredentialsParameters) in.DeepCopyInto(out) return out } @@ -732,153 +1476,402 @@ func (in *InboundSAMLConfigList) DeepCopy() *InboundSAMLConfigList { if in == nil { return nil } - out := new(InboundSAMLConfigList) + out := new(InboundSAMLConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *InboundSAMLConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InboundSAMLConfigObservation) DeepCopyInto(out *InboundSAMLConfigObservation) { + *out = *in + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IdPConfig != nil { + in, out := &in.IdPConfig, &out.IdPConfig + *out = make([]IdPConfigObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Project != nil { + in, out := &in.Project, &out.Project + *out = new(string) + **out = **in + } + if in.SpConfig != nil { + in, out := &in.SpConfig, &out.SpConfig + *out = make([]SpConfigObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InboundSAMLConfigObservation. +func (in *InboundSAMLConfigObservation) DeepCopy() *InboundSAMLConfigObservation { + if in == nil { + return nil + } + out := new(InboundSAMLConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InboundSAMLConfigParameters) DeepCopyInto(out *InboundSAMLConfigParameters) { + *out = *in + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.IdPConfig != nil { + in, out := &in.IdPConfig, &out.IdPConfig + *out = make([]IdPConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Project != nil { + in, out := &in.Project, &out.Project + *out = new(string) + **out = **in + } + if in.SpConfig != nil { + in, out := &in.SpConfig, &out.SpConfig + *out = make([]SpConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InboundSAMLConfigParameters. +func (in *InboundSAMLConfigParameters) DeepCopy() *InboundSAMLConfigParameters { + if in == nil { + return nil + } + out := new(InboundSAMLConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InboundSAMLConfigSpec) DeepCopyInto(out *InboundSAMLConfigSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InboundSAMLConfigSpec. +func (in *InboundSAMLConfigSpec) DeepCopy() *InboundSAMLConfigSpec { + if in == nil { + return nil + } + out := new(InboundSAMLConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InboundSAMLConfigStatus) DeepCopyInto(out *InboundSAMLConfigStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InboundSAMLConfigStatus. +func (in *InboundSAMLConfigStatus) DeepCopy() *InboundSAMLConfigStatus { + if in == nil { + return nil + } + out := new(InboundSAMLConfigStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MfaInitParameters) DeepCopyInto(out *MfaInitParameters) { + *out = *in + if in.EnabledProviders != nil { + in, out := &in.EnabledProviders, &out.EnabledProviders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ProviderConfigs != nil { + in, out := &in.ProviderConfigs, &out.ProviderConfigs + *out = make([]ProviderConfigsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MfaInitParameters. +func (in *MfaInitParameters) DeepCopy() *MfaInitParameters { + if in == nil { + return nil + } + out := new(MfaInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MfaObservation) DeepCopyInto(out *MfaObservation) { + *out = *in + if in.EnabledProviders != nil { + in, out := &in.EnabledProviders, &out.EnabledProviders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ProviderConfigs != nil { + in, out := &in.ProviderConfigs, &out.ProviderConfigs + *out = make([]ProviderConfigsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MfaObservation. +func (in *MfaObservation) DeepCopy() *MfaObservation { + if in == nil { + return nil + } + out := new(MfaObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MfaParameters) DeepCopyInto(out *MfaParameters) { + *out = *in + if in.EnabledProviders != nil { + in, out := &in.EnabledProviders, &out.EnabledProviders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ProviderConfigs != nil { + in, out := &in.ProviderConfigs, &out.ProviderConfigs + *out = make([]ProviderConfigsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MfaParameters. 
+func (in *MfaParameters) DeepCopy() *MfaParameters { + if in == nil { + return nil + } + out := new(MfaParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitoringInitParameters) DeepCopyInto(out *MonitoringInitParameters) { + *out = *in + if in.RequestLogging != nil { + in, out := &in.RequestLogging, &out.RequestLogging + *out = new(RequestLoggingInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringInitParameters. +func (in *MonitoringInitParameters) DeepCopy() *MonitoringInitParameters { + if in == nil { + return nil + } + out := new(MonitoringInitParameters) in.DeepCopyInto(out) return out } -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *InboundSAMLConfigList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitoringObservation) DeepCopyInto(out *MonitoringObservation) { + *out = *in + if in.RequestLogging != nil { + in, out := &in.RequestLogging, &out.RequestLogging + *out = new(RequestLoggingObservation) + (*in).DeepCopyInto(*out) } - return nil +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringObservation. +func (in *MonitoringObservation) DeepCopy() *MonitoringObservation { + if in == nil { + return nil + } + out := new(MonitoringObservation) + in.DeepCopyInto(out) + return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *InboundSAMLConfigObservation) DeepCopyInto(out *InboundSAMLConfigObservation) { +func (in *MonitoringParameters) DeepCopyInto(out *MonitoringParameters) { *out = *in - if in.DisplayName != nil { - in, out := &in.DisplayName, &out.DisplayName - *out = new(string) - **out = **in - } - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = new(bool) - **out = **in - } - if in.ID != nil { - in, out := &in.ID, &out.ID - *out = new(string) - **out = **in - } - if in.IdPConfig != nil { - in, out := &in.IdPConfig, &out.IdPConfig - *out = make([]IdPConfigObservation, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Name != nil { - in, out := &in.Name, &out.Name - *out = new(string) - **out = **in - } - if in.Project != nil { - in, out := &in.Project, &out.Project - *out = new(string) - **out = **in - } - if in.SpConfig != nil { - in, out := &in.SpConfig, &out.SpConfig - *out = make([]SpConfigObservation, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + if in.RequestLogging != nil { + in, out := &in.RequestLogging, &out.RequestLogging + *out = new(RequestLoggingParameters) + (*in).DeepCopyInto(*out) } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InboundSAMLConfigObservation. -func (in *InboundSAMLConfigObservation) DeepCopy() *InboundSAMLConfigObservation { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringParameters. +func (in *MonitoringParameters) DeepCopy() *MonitoringParameters { if in == nil { return nil } - out := new(InboundSAMLConfigObservation) + out := new(MonitoringParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *InboundSAMLConfigParameters) DeepCopyInto(out *InboundSAMLConfigParameters) { +func (in *MultiTenantInitParameters) DeepCopyInto(out *MultiTenantInitParameters) { *out = *in - if in.DisplayName != nil { - in, out := &in.DisplayName, &out.DisplayName - *out = new(string) - **out = **in - } - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled + if in.AllowTenants != nil { + in, out := &in.AllowTenants, &out.AllowTenants *out = new(bool) **out = **in } - if in.IdPConfig != nil { - in, out := &in.IdPConfig, &out.IdPConfig - *out = make([]IdPConfigParameters, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Name != nil { - in, out := &in.Name, &out.Name - *out = new(string) - **out = **in - } - if in.Project != nil { - in, out := &in.Project, &out.Project + if in.DefaultTenantLocation != nil { + in, out := &in.DefaultTenantLocation, &out.DefaultTenantLocation *out = new(string) **out = **in } - if in.SpConfig != nil { - in, out := &in.SpConfig, &out.SpConfig - *out = make([]SpConfigParameters, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InboundSAMLConfigParameters. -func (in *InboundSAMLConfigParameters) DeepCopy() *InboundSAMLConfigParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiTenantInitParameters. +func (in *MultiTenantInitParameters) DeepCopy() *MultiTenantInitParameters { if in == nil { return nil } - out := new(InboundSAMLConfigParameters) + out := new(MultiTenantInitParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *InboundSAMLConfigSpec) DeepCopyInto(out *InboundSAMLConfigSpec) { +func (in *MultiTenantObservation) DeepCopyInto(out *MultiTenantObservation) { *out = *in - in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) - in.ForProvider.DeepCopyInto(&out.ForProvider) - in.InitProvider.DeepCopyInto(&out.InitProvider) + if in.AllowTenants != nil { + in, out := &in.AllowTenants, &out.AllowTenants + *out = new(bool) + **out = **in + } + if in.DefaultTenantLocation != nil { + in, out := &in.DefaultTenantLocation, &out.DefaultTenantLocation + *out = new(string) + **out = **in + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InboundSAMLConfigSpec. -func (in *InboundSAMLConfigSpec) DeepCopy() *InboundSAMLConfigSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiTenantObservation. +func (in *MultiTenantObservation) DeepCopy() *MultiTenantObservation { if in == nil { return nil } - out := new(InboundSAMLConfigSpec) + out := new(MultiTenantObservation) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *InboundSAMLConfigStatus) DeepCopyInto(out *InboundSAMLConfigStatus) { +func (in *MultiTenantParameters) DeepCopyInto(out *MultiTenantParameters) { *out = *in - in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) - in.AtProvider.DeepCopyInto(&out.AtProvider) + if in.AllowTenants != nil { + in, out := &in.AllowTenants, &out.AllowTenants + *out = new(bool) + **out = **in + } + if in.DefaultTenantLocation != nil { + in, out := &in.DefaultTenantLocation, &out.DefaultTenantLocation + *out = new(string) + **out = **in + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InboundSAMLConfigStatus. 
-func (in *InboundSAMLConfigStatus) DeepCopy() *InboundSAMLConfigStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiTenantParameters. +func (in *MultiTenantParameters) DeepCopy() *MultiTenantParameters { if in == nil { return nil } - out := new(InboundSAMLConfigStatus) + out := new(MultiTenantParameters) in.DeepCopyInto(out) return out } @@ -1114,6 +2107,81 @@ func (in *OAuthIdPConfigStatus) DeepCopy() *OAuthIdPConfigStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PermissionsInitParameters) DeepCopyInto(out *PermissionsInitParameters) { + *out = *in + if in.DisabledUserDeletion != nil { + in, out := &in.DisabledUserDeletion, &out.DisabledUserDeletion + *out = new(bool) + **out = **in + } + if in.DisabledUserSignup != nil { + in, out := &in.DisabledUserSignup, &out.DisabledUserSignup + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PermissionsInitParameters. +func (in *PermissionsInitParameters) DeepCopy() *PermissionsInitParameters { + if in == nil { + return nil + } + out := new(PermissionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PermissionsObservation) DeepCopyInto(out *PermissionsObservation) { + *out = *in + if in.DisabledUserDeletion != nil { + in, out := &in.DisabledUserDeletion, &out.DisabledUserDeletion + *out = new(bool) + **out = **in + } + if in.DisabledUserSignup != nil { + in, out := &in.DisabledUserSignup, &out.DisabledUserSignup + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PermissionsObservation. 
+func (in *PermissionsObservation) DeepCopy() *PermissionsObservation { + if in == nil { + return nil + } + out := new(PermissionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PermissionsParameters) DeepCopyInto(out *PermissionsParameters) { + *out = *in + if in.DisabledUserDeletion != nil { + in, out := &in.DisabledUserDeletion, &out.DisabledUserDeletion + *out = new(bool) + **out = **in + } + if in.DisabledUserSignup != nil { + in, out := &in.DisabledUserSignup, &out.DisabledUserSignup + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PermissionsParameters. +func (in *PermissionsParameters) DeepCopy() *PermissionsParameters { + if in == nil { + return nil + } + out := new(PermissionsParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PhoneNumberInitParameters) DeepCopyInto(out *PhoneNumberInitParameters) { *out = *in @@ -1223,186 +2291,271 @@ func (in *PhoneNumberParameters) DeepCopy() *PhoneNumberParameters { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ProjectDefaultConfig) DeepCopyInto(out *ProjectDefaultConfig) { +func (in *ProviderConfigsInitParameters) DeepCopyInto(out *ProviderConfigsInitParameters) { *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } + if in.TotpProviderConfig != nil { + in, out := &in.TotpProviderConfig, &out.TotpProviderConfig + *out = new(TotpProviderConfigInitParameters) + (*in).DeepCopyInto(*out) + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDefaultConfig. -func (in *ProjectDefaultConfig) DeepCopy() *ProjectDefaultConfig { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderConfigsInitParameters. +func (in *ProviderConfigsInitParameters) DeepCopy() *ProviderConfigsInitParameters { if in == nil { return nil } - out := new(ProjectDefaultConfig) + out := new(ProviderConfigsInitParameters) in.DeepCopyInto(out) return out } -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ProjectDefaultConfig) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProviderConfigsObservation) DeepCopyInto(out *ProviderConfigsObservation) { + *out = *in + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in } - return nil + if in.TotpProviderConfig != nil { + in, out := &in.TotpProviderConfig, &out.TotpProviderConfig + *out = new(TotpProviderConfigObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderConfigsObservation. 
+func (in *ProviderConfigsObservation) DeepCopy() *ProviderConfigsObservation { + if in == nil { + return nil + } + out := new(ProviderConfigsObservation) + in.DeepCopyInto(out) + return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ProjectDefaultConfigInitParameters) DeepCopyInto(out *ProjectDefaultConfigInitParameters) { +func (in *ProviderConfigsParameters) DeepCopyInto(out *ProviderConfigsParameters) { *out = *in - if in.Project != nil { - in, out := &in.Project, &out.Project + if in.State != nil { + in, out := &in.State, &out.State *out = new(string) **out = **in } - if in.SignIn != nil { - in, out := &in.SignIn, &out.SignIn - *out = make([]SignInInitParameters, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + if in.TotpProviderConfig != nil { + in, out := &in.TotpProviderConfig, &out.TotpProviderConfig + *out = new(TotpProviderConfigParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderConfigsParameters. +func (in *ProviderConfigsParameters) DeepCopy() *ProviderConfigsParameters { + if in == nil { + return nil + } + out := new(ProviderConfigsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QuotaInitParameters) DeepCopyInto(out *QuotaInitParameters) { + *out = *in + if in.SignUpQuotaConfig != nil { + in, out := &in.SignUpQuotaConfig, &out.SignUpQuotaConfig + *out = new(SignUpQuotaConfigInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuotaInitParameters. 
+func (in *QuotaInitParameters) DeepCopy() *QuotaInitParameters { + if in == nil { + return nil + } + out := new(QuotaInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QuotaObservation) DeepCopyInto(out *QuotaObservation) { + *out = *in + if in.SignUpQuotaConfig != nil { + in, out := &in.SignUpQuotaConfig, &out.SignUpQuotaConfig + *out = new(SignUpQuotaConfigObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuotaObservation. +func (in *QuotaObservation) DeepCopy() *QuotaObservation { + if in == nil { + return nil + } + out := new(QuotaObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QuotaParameters) DeepCopyInto(out *QuotaParameters) { + *out = *in + if in.SignUpQuotaConfig != nil { + in, out := &in.SignUpQuotaConfig, &out.SignUpQuotaConfig + *out = new(SignUpQuotaConfigParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuotaParameters. +func (in *QuotaParameters) DeepCopy() *QuotaParameters { + if in == nil { + return nil + } + out := new(QuotaParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequestLoggingInitParameters) DeepCopyInto(out *RequestLoggingInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDefaultConfigInitParameters. 
-func (in *ProjectDefaultConfigInitParameters) DeepCopy() *ProjectDefaultConfigInitParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestLoggingInitParameters. +func (in *RequestLoggingInitParameters) DeepCopy() *RequestLoggingInitParameters { if in == nil { return nil } - out := new(ProjectDefaultConfigInitParameters) + out := new(RequestLoggingInitParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ProjectDefaultConfigList) DeepCopyInto(out *ProjectDefaultConfigList) { +func (in *RequestLoggingObservation) DeepCopyInto(out *RequestLoggingObservation) { *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ProjectDefaultConfig, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDefaultConfigList. -func (in *ProjectDefaultConfigList) DeepCopy() *ProjectDefaultConfigList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestLoggingObservation. +func (in *RequestLoggingObservation) DeepCopy() *RequestLoggingObservation { if in == nil { return nil } - out := new(ProjectDefaultConfigList) + out := new(RequestLoggingObservation) in.DeepCopyInto(out) return out } -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ProjectDefaultConfigList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ProjectDefaultConfigObservation) DeepCopyInto(out *ProjectDefaultConfigObservation) { +func (in *RequestLoggingParameters) DeepCopyInto(out *RequestLoggingParameters) { *out = *in - if in.ID != nil { - in, out := &in.ID, &out.ID - *out = new(string) - **out = **in - } - if in.Name != nil { - in, out := &in.Name, &out.Name - *out = new(string) - **out = **in - } - if in.Project != nil { - in, out := &in.Project, &out.Project - *out = new(string) + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) **out = **in } - if in.SignIn != nil { - in, out := &in.SignIn, &out.SignIn - *out = make([]SignInObservation, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDefaultConfigObservation. -func (in *ProjectDefaultConfigObservation) DeepCopy() *ProjectDefaultConfigObservation { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestLoggingParameters. +func (in *RequestLoggingParameters) DeepCopy() *RequestLoggingParameters { if in == nil { return nil } - out := new(ProjectDefaultConfigObservation) + out := new(RequestLoggingParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ProjectDefaultConfigParameters) DeepCopyInto(out *ProjectDefaultConfigParameters) { +func (in *SMSRegionConfigInitParameters) DeepCopyInto(out *SMSRegionConfigInitParameters) { *out = *in - if in.Project != nil { - in, out := &in.Project, &out.Project - *out = new(string) - **out = **in + if in.AllowByDefault != nil { + in, out := &in.AllowByDefault, &out.AllowByDefault + *out = new(AllowByDefaultInitParameters) + (*in).DeepCopyInto(*out) } - if in.SignIn != nil { - in, out := &in.SignIn, &out.SignIn - *out = make([]SignInParameters, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + if in.AllowlistOnly != nil { + in, out := &in.AllowlistOnly, &out.AllowlistOnly + *out = new(AllowlistOnlyInitParameters) + (*in).DeepCopyInto(*out) } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDefaultConfigParameters. -func (in *ProjectDefaultConfigParameters) DeepCopy() *ProjectDefaultConfigParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SMSRegionConfigInitParameters. +func (in *SMSRegionConfigInitParameters) DeepCopy() *SMSRegionConfigInitParameters { if in == nil { return nil } - out := new(ProjectDefaultConfigParameters) + out := new(SMSRegionConfigInitParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ProjectDefaultConfigSpec) DeepCopyInto(out *ProjectDefaultConfigSpec) { +func (in *SMSRegionConfigObservation) DeepCopyInto(out *SMSRegionConfigObservation) { *out = *in - in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) - in.ForProvider.DeepCopyInto(&out.ForProvider) - in.InitProvider.DeepCopyInto(&out.InitProvider) + if in.AllowByDefault != nil { + in, out := &in.AllowByDefault, &out.AllowByDefault + *out = new(AllowByDefaultObservation) + (*in).DeepCopyInto(*out) + } + if in.AllowlistOnly != nil { + in, out := &in.AllowlistOnly, &out.AllowlistOnly + *out = new(AllowlistOnlyObservation) + (*in).DeepCopyInto(*out) + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDefaultConfigSpec. -func (in *ProjectDefaultConfigSpec) DeepCopy() *ProjectDefaultConfigSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SMSRegionConfigObservation. +func (in *SMSRegionConfigObservation) DeepCopy() *SMSRegionConfigObservation { if in == nil { return nil } - out := new(ProjectDefaultConfigSpec) + out := new(SMSRegionConfigObservation) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ProjectDefaultConfigStatus) DeepCopyInto(out *ProjectDefaultConfigStatus) { +func (in *SMSRegionConfigParameters) DeepCopyInto(out *SMSRegionConfigParameters) { *out = *in - in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) - in.AtProvider.DeepCopyInto(&out.AtProvider) + if in.AllowByDefault != nil { + in, out := &in.AllowByDefault, &out.AllowByDefault + *out = new(AllowByDefaultParameters) + (*in).DeepCopyInto(*out) + } + if in.AllowlistOnly != nil { + in, out := &in.AllowlistOnly, &out.AllowlistOnly + *out = new(AllowlistOnlyParameters) + (*in).DeepCopyInto(*out) + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDefaultConfigStatus. 
-func (in *ProjectDefaultConfigStatus) DeepCopy() *ProjectDefaultConfigStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SMSRegionConfigParameters. +func (in *SMSRegionConfigParameters) DeepCopy() *SMSRegionConfigParameters { if in == nil { return nil } - out := new(ProjectDefaultConfigStatus) + out := new(SMSRegionConfigParameters) in.DeepCopyInto(out) return out } @@ -1417,24 +2570,18 @@ func (in *SignInInitParameters) DeepCopyInto(out *SignInInitParameters) { } if in.Anonymous != nil { in, out := &in.Anonymous, &out.Anonymous - *out = make([]AnonymousInitParameters, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + *out = new(AnonymousInitParameters) + (*in).DeepCopyInto(*out) } if in.Email != nil { in, out := &in.Email, &out.Email - *out = make([]EmailInitParameters, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + *out = new(EmailInitParameters) + (*in).DeepCopyInto(*out) } if in.PhoneNumber != nil { in, out := &in.PhoneNumber, &out.PhoneNumber - *out = make([]PhoneNumberInitParameters, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + *out = new(PhoneNumberInitParameters) + (*in).DeepCopyInto(*out) } } @@ -1458,17 +2605,13 @@ func (in *SignInObservation) DeepCopyInto(out *SignInObservation) { } if in.Anonymous != nil { in, out := &in.Anonymous, &out.Anonymous - *out = make([]AnonymousObservation, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + *out = new(AnonymousObservation) + (*in).DeepCopyInto(*out) } if in.Email != nil { in, out := &in.Email, &out.Email - *out = make([]EmailObservation, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + *out = new(EmailObservation) + (*in).DeepCopyInto(*out) } if in.HashConfig != nil { in, out := &in.HashConfig, &out.HashConfig @@ -1479,10 +2622,8 @@ func (in *SignInObservation) DeepCopyInto(out *SignInObservation) { } if in.PhoneNumber != nil { in, 
out := &in.PhoneNumber, &out.PhoneNumber - *out = make([]PhoneNumberObservation, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + *out = new(PhoneNumberObservation) + (*in).DeepCopyInto(*out) } } @@ -1506,24 +2647,18 @@ func (in *SignInParameters) DeepCopyInto(out *SignInParameters) { } if in.Anonymous != nil { in, out := &in.Anonymous, &out.Anonymous - *out = make([]AnonymousParameters, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + *out = new(AnonymousParameters) + (*in).DeepCopyInto(*out) } if in.Email != nil { in, out := &in.Email, &out.Email - *out = make([]EmailParameters, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + *out = new(EmailParameters) + (*in).DeepCopyInto(*out) } if in.PhoneNumber != nil { in, out := &in.PhoneNumber, &out.PhoneNumber - *out = make([]PhoneNumberParameters, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + *out = new(PhoneNumberParameters) + (*in).DeepCopyInto(*out) } } @@ -1537,6 +2672,96 @@ func (in *SignInParameters) DeepCopy() *SignInParameters { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SignUpQuotaConfigInitParameters) DeepCopyInto(out *SignUpQuotaConfigInitParameters) { + *out = *in + if in.Quota != nil { + in, out := &in.Quota, &out.Quota + *out = new(float64) + **out = **in + } + if in.QuotaDuration != nil { + in, out := &in.QuotaDuration, &out.QuotaDuration + *out = new(string) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignUpQuotaConfigInitParameters. 
+func (in *SignUpQuotaConfigInitParameters) DeepCopy() *SignUpQuotaConfigInitParameters { + if in == nil { + return nil + } + out := new(SignUpQuotaConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SignUpQuotaConfigObservation) DeepCopyInto(out *SignUpQuotaConfigObservation) { + *out = *in + if in.Quota != nil { + in, out := &in.Quota, &out.Quota + *out = new(float64) + **out = **in + } + if in.QuotaDuration != nil { + in, out := &in.QuotaDuration, &out.QuotaDuration + *out = new(string) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignUpQuotaConfigObservation. +func (in *SignUpQuotaConfigObservation) DeepCopy() *SignUpQuotaConfigObservation { + if in == nil { + return nil + } + out := new(SignUpQuotaConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SignUpQuotaConfigParameters) DeepCopyInto(out *SignUpQuotaConfigParameters) { + *out = *in + if in.Quota != nil { + in, out := &in.Quota, &out.Quota + *out = new(float64) + **out = **in + } + if in.QuotaDuration != nil { + in, out := &in.QuotaDuration, &out.QuotaDuration + *out = new(string) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignUpQuotaConfigParameters. 
+func (in *SignUpQuotaConfigParameters) DeepCopy() *SignUpQuotaConfigParameters { + if in == nil { + return nil + } + out := new(SignUpQuotaConfigParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SpCertificatesInitParameters) DeepCopyInto(out *SpCertificatesInitParameters) { *out = *in @@ -2915,3 +4140,143 @@ func (in *TenantStatus) DeepCopy() *TenantStatus { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TotpProviderConfigInitParameters) DeepCopyInto(out *TotpProviderConfigInitParameters) { + *out = *in + if in.AdjacentIntervals != nil { + in, out := &in.AdjacentIntervals, &out.AdjacentIntervals + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TotpProviderConfigInitParameters. +func (in *TotpProviderConfigInitParameters) DeepCopy() *TotpProviderConfigInitParameters { + if in == nil { + return nil + } + out := new(TotpProviderConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TotpProviderConfigObservation) DeepCopyInto(out *TotpProviderConfigObservation) { + *out = *in + if in.AdjacentIntervals != nil { + in, out := &in.AdjacentIntervals, &out.AdjacentIntervals + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TotpProviderConfigObservation. 
+func (in *TotpProviderConfigObservation) DeepCopy() *TotpProviderConfigObservation { + if in == nil { + return nil + } + out := new(TotpProviderConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TotpProviderConfigParameters) DeepCopyInto(out *TotpProviderConfigParameters) { + *out = *in + if in.AdjacentIntervals != nil { + in, out := &in.AdjacentIntervals, &out.AdjacentIntervals + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TotpProviderConfigParameters. +func (in *TotpProviderConfigParameters) DeepCopy() *TotpProviderConfigParameters { + if in == nil { + return nil + } + out := new(TotpProviderConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TriggersInitParameters) DeepCopyInto(out *TriggersInitParameters) { + *out = *in + if in.EventType != nil { + in, out := &in.EventType, &out.EventType + *out = new(string) + **out = **in + } + if in.FunctionURI != nil { + in, out := &in.FunctionURI, &out.FunctionURI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggersInitParameters. +func (in *TriggersInitParameters) DeepCopy() *TriggersInitParameters { + if in == nil { + return nil + } + out := new(TriggersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TriggersObservation) DeepCopyInto(out *TriggersObservation) { + *out = *in + if in.EventType != nil { + in, out := &in.EventType, &out.EventType + *out = new(string) + **out = **in + } + if in.FunctionURI != nil { + in, out := &in.FunctionURI, &out.FunctionURI + *out = new(string) + **out = **in + } + if in.UpdateTime != nil { + in, out := &in.UpdateTime, &out.UpdateTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggersObservation. +func (in *TriggersObservation) DeepCopy() *TriggersObservation { + if in == nil { + return nil + } + out := new(TriggersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TriggersParameters) DeepCopyInto(out *TriggersParameters) { + *out = *in + if in.EventType != nil { + in, out := &in.EventType, &out.EventType + *out = new(string) + **out = **in + } + if in.FunctionURI != nil { + in, out := &in.FunctionURI, &out.FunctionURI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggersParameters. +func (in *TriggersParameters) DeepCopy() *TriggersParameters { + if in == nil { + return nil + } + out := new(TriggersParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/identityplatform/v1beta1/zz_generated.managed.go b/apis/identityplatform/v1beta1/zz_generated.managed.go index e3296ca7f..1bf0f3462 100644 --- a/apis/identityplatform/v1beta1/zz_generated.managed.go +++ b/apis/identityplatform/v1beta1/zz_generated.managed.go @@ -7,6 +7,66 @@ package v1beta1 import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +// GetCondition of this Config. +func (mg *Config) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Config. 
+func (mg *Config) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Config. +func (mg *Config) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Config. +func (mg *Config) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Config. +func (mg *Config) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Config. +func (mg *Config) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Config. +func (mg *Config) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Config. +func (mg *Config) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Config. +func (mg *Config) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Config. +func (mg *Config) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Config. +func (mg *Config) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Config. +func (mg *Config) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + // GetCondition of this DefaultSupportedIdPConfig. 
func (mg *DefaultSupportedIdPConfig) GetCondition(ct xpv1.ConditionType) xpv1.Condition { return mg.Status.GetCondition(ct) @@ -187,66 +247,6 @@ func (mg *OAuthIdPConfig) SetWriteConnectionSecretToReference(r *xpv1.SecretRefe mg.Spec.WriteConnectionSecretToReference = r } -// GetCondition of this ProjectDefaultConfig. -func (mg *ProjectDefaultConfig) GetCondition(ct xpv1.ConditionType) xpv1.Condition { - return mg.Status.GetCondition(ct) -} - -// GetDeletionPolicy of this ProjectDefaultConfig. -func (mg *ProjectDefaultConfig) GetDeletionPolicy() xpv1.DeletionPolicy { - return mg.Spec.DeletionPolicy -} - -// GetManagementPolicies of this ProjectDefaultConfig. -func (mg *ProjectDefaultConfig) GetManagementPolicies() xpv1.ManagementPolicies { - return mg.Spec.ManagementPolicies -} - -// GetProviderConfigReference of this ProjectDefaultConfig. -func (mg *ProjectDefaultConfig) GetProviderConfigReference() *xpv1.Reference { - return mg.Spec.ProviderConfigReference -} - -// GetPublishConnectionDetailsTo of this ProjectDefaultConfig. -func (mg *ProjectDefaultConfig) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { - return mg.Spec.PublishConnectionDetailsTo -} - -// GetWriteConnectionSecretToReference of this ProjectDefaultConfig. -func (mg *ProjectDefaultConfig) GetWriteConnectionSecretToReference() *xpv1.SecretReference { - return mg.Spec.WriteConnectionSecretToReference -} - -// SetConditions of this ProjectDefaultConfig. -func (mg *ProjectDefaultConfig) SetConditions(c ...xpv1.Condition) { - mg.Status.SetConditions(c...) -} - -// SetDeletionPolicy of this ProjectDefaultConfig. -func (mg *ProjectDefaultConfig) SetDeletionPolicy(r xpv1.DeletionPolicy) { - mg.Spec.DeletionPolicy = r -} - -// SetManagementPolicies of this ProjectDefaultConfig. -func (mg *ProjectDefaultConfig) SetManagementPolicies(r xpv1.ManagementPolicies) { - mg.Spec.ManagementPolicies = r -} - -// SetProviderConfigReference of this ProjectDefaultConfig. 
-func (mg *ProjectDefaultConfig) SetProviderConfigReference(r *xpv1.Reference) { - mg.Spec.ProviderConfigReference = r -} - -// SetPublishConnectionDetailsTo of this ProjectDefaultConfig. -func (mg *ProjectDefaultConfig) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { - mg.Spec.PublishConnectionDetailsTo = r -} - -// SetWriteConnectionSecretToReference of this ProjectDefaultConfig. -func (mg *ProjectDefaultConfig) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { - mg.Spec.WriteConnectionSecretToReference = r -} - // GetCondition of this Tenant. func (mg *Tenant) GetCondition(ct xpv1.ConditionType) xpv1.Condition { return mg.Status.GetCondition(ct) diff --git a/apis/identityplatform/v1beta1/zz_generated.managedlist.go b/apis/identityplatform/v1beta1/zz_generated.managedlist.go index e65371738..7e876a8d2 100644 --- a/apis/identityplatform/v1beta1/zz_generated.managedlist.go +++ b/apis/identityplatform/v1beta1/zz_generated.managedlist.go @@ -7,8 +7,8 @@ package v1beta1 import resource "github.com/crossplane/crossplane-runtime/pkg/resource" -// GetItems of this DefaultSupportedIdPConfigList. -func (l *DefaultSupportedIdPConfigList) GetItems() []resource.Managed { +// GetItems of this ConfigList. +func (l *ConfigList) GetItems() []resource.Managed { items := make([]resource.Managed, len(l.Items)) for i := range l.Items { items[i] = &l.Items[i] @@ -16,8 +16,8 @@ func (l *DefaultSupportedIdPConfigList) GetItems() []resource.Managed { return items } -// GetItems of this InboundSAMLConfigList. -func (l *InboundSAMLConfigList) GetItems() []resource.Managed { +// GetItems of this DefaultSupportedIdPConfigList. +func (l *DefaultSupportedIdPConfigList) GetItems() []resource.Managed { items := make([]resource.Managed, len(l.Items)) for i := range l.Items { items[i] = &l.Items[i] @@ -25,8 +25,8 @@ func (l *InboundSAMLConfigList) GetItems() []resource.Managed { return items } -// GetItems of this OAuthIdPConfigList. 
-func (l *OAuthIdPConfigList) GetItems() []resource.Managed { +// GetItems of this InboundSAMLConfigList. +func (l *InboundSAMLConfigList) GetItems() []resource.Managed { items := make([]resource.Managed, len(l.Items)) for i := range l.Items { items[i] = &l.Items[i] @@ -34,8 +34,8 @@ func (l *OAuthIdPConfigList) GetItems() []resource.Managed { return items } -// GetItems of this ProjectDefaultConfigList. -func (l *ProjectDefaultConfigList) GetItems() []resource.Managed { +// GetItems of this OAuthIdPConfigList. +func (l *OAuthIdPConfigList) GetItems() []resource.Managed { items := make([]resource.Managed, len(l.Items)) for i := range l.Items { items[i] = &l.Items[i] diff --git a/apis/identityplatform/v1beta1/zz_generated.resolvers.go b/apis/identityplatform/v1beta1/zz_generated.resolvers.go index d3a485892..f2fb6850d 100644 --- a/apis/identityplatform/v1beta1/zz_generated.resolvers.go +++ b/apis/identityplatform/v1beta1/zz_generated.resolvers.go @@ -9,15 +9,67 @@ package v1beta1 import ( "context" reference "github.com/crossplane/crossplane-runtime/pkg/reference" - xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" resource "github.com/crossplane/upjet/pkg/resource" errors "github.com/pkg/errors" - apisresolver "github.com/upbound/provider-gcp/internal/apis" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this Config. + apisresolver "github.com/upbound/provider-gcp/internal/apis" ) -func (mg *TenantDefaultSupportedIdPConfig) ResolveReferences( // ResolveReferences of this TenantDefaultSupportedIdPConfig. 
- ctx context.Context, c client.Reader) error { +func (mg *Config) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("cloudplatform.gcp.upbound.io", "v1beta1", "Project", "ProjectList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Project), + Extract: resource.ExtractParamPath("project_id", false), + Reference: mg.Spec.ForProvider.ProjectRef, + Selector: mg.Spec.ForProvider.ProjectSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Project") + } + mg.Spec.ForProvider.Project = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ProjectRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("cloudplatform.gcp.upbound.io", "v1beta1", "Project", "ProjectList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Project), + Extract: resource.ExtractParamPath("project_id", false), + Reference: mg.Spec.InitProvider.ProjectRef, + Selector: mg.Spec.InitProvider.ProjectSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Project") + } + mg.Spec.InitProvider.Project = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ProjectRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this TenantDefaultSupportedIdPConfig. 
+func (mg *TenantDefaultSupportedIdPConfig) ResolveReferences(ctx context.Context, c client.Reader) error { var m xpresource.Managed var l xpresource.ManagedList r := reference.NewAPIResolver(c, mg) diff --git a/apis/identityplatform/v1beta1/zz_projectdefaultconfig_terraformed.go b/apis/identityplatform/v1beta1/zz_projectdefaultconfig_terraformed.go deleted file mode 100755 index 13ffdefa1..000000000 --- a/apis/identityplatform/v1beta1/zz_projectdefaultconfig_terraformed.go +++ /dev/null @@ -1,129 +0,0 @@ -// SPDX-FileCopyrightText: 2024 The Crossplane Authors -// -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by upjet. DO NOT EDIT. - -package v1beta1 - -import ( - "dario.cat/mergo" - "github.com/pkg/errors" - - "github.com/crossplane/upjet/pkg/resource" - "github.com/crossplane/upjet/pkg/resource/json" -) - -// GetTerraformResourceType returns Terraform resource type for this ProjectDefaultConfig -func (mg *ProjectDefaultConfig) GetTerraformResourceType() string { - return "google_identity_platform_project_default_config" -} - -// GetConnectionDetailsMapping for this ProjectDefaultConfig -func (tr *ProjectDefaultConfig) GetConnectionDetailsMapping() map[string]string { - return nil -} - -// GetObservation of this ProjectDefaultConfig -func (tr *ProjectDefaultConfig) GetObservation() (map[string]any, error) { - o, err := json.TFParser.Marshal(tr.Status.AtProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(o, &base) -} - -// SetObservation for this ProjectDefaultConfig -func (tr *ProjectDefaultConfig) SetObservation(obs map[string]any) error { - p, err := json.TFParser.Marshal(obs) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) -} - -// GetID returns ID of underlying Terraform resource of this ProjectDefaultConfig -func (tr *ProjectDefaultConfig) GetID() string { - if tr.Status.AtProvider.ID == nil { - return "" - } - return 
*tr.Status.AtProvider.ID -} - -// GetParameters of this ProjectDefaultConfig -func (tr *ProjectDefaultConfig) GetParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.ForProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) -} - -// SetParameters for this ProjectDefaultConfig -func (tr *ProjectDefaultConfig) SetParameters(params map[string]any) error { - p, err := json.TFParser.Marshal(params) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) -} - -// GetInitParameters of this ProjectDefaultConfig -func (tr *ProjectDefaultConfig) GetInitParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.InitProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) -} - -// GetInitParameters of this ProjectDefaultConfig -func (tr *ProjectDefaultConfig) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { - params, err := tr.GetParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) - } - if !shouldMergeInitProvider { - return params, nil - } - - initParams, err := tr.GetInitParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) - } - - // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the - // slices from the initProvider to forProvider. As it also sets - // overwrite to true, we need to set it back to false, we don't - // want to overwrite the forProvider fields with the initProvider - // fields. 
- err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { - c.Overwrite = false - }) - if err != nil { - return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) - } - - return params, nil -} - -// LateInitialize this ProjectDefaultConfig using its observed tfState. -// returns True if there are any spec changes for the resource. -func (tr *ProjectDefaultConfig) LateInitialize(attrs []byte) (bool, error) { - params := &ProjectDefaultConfigParameters{} - if err := json.TFParser.Unmarshal(attrs, params); err != nil { - return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") - } - opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} - - li := resource.NewGenericLateInitializer(opts...) - return li.LateInitialize(&tr.Spec.ForProvider, params) -} - -// GetTerraformSchemaVersion returns the associated Terraform schema version -func (tr *ProjectDefaultConfig) GetTerraformSchemaVersion() int { - return 0 -} diff --git a/apis/identityplatform/v1beta1/zz_projectdefaultconfig_types.go b/apis/identityplatform/v1beta1/zz_projectdefaultconfig_types.go deleted file mode 100755 index 4dc60830f..000000000 --- a/apis/identityplatform/v1beta1/zz_projectdefaultconfig_types.go +++ /dev/null @@ -1,294 +0,0 @@ -// SPDX-FileCopyrightText: 2024 The Crossplane Authors -// -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by upjet. DO NOT EDIT. - -package v1beta1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - - v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" -) - -type AnonymousInitParameters struct { - - // Whether phone number auth is enabled for the project or not. 
- Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` -} - -type AnonymousObservation struct { - - // Whether phone number auth is enabled for the project or not. - Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` -} - -type AnonymousParameters struct { - - // Whether phone number auth is enabled for the project or not. - // +kubebuilder:validation:Optional - Enabled *bool `json:"enabled" tf:"enabled,omitempty"` -} - -type EmailInitParameters struct { - - // Whether phone number auth is enabled for the project or not. - Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` - - // Whether a password is required for email auth or not. If true, both an email and - // password must be provided to sign in. If false, a user may sign in via either - // email/password or email link. - PasswordRequired *bool `json:"passwordRequired,omitempty" tf:"password_required,omitempty"` -} - -type EmailObservation struct { - - // Whether phone number auth is enabled for the project or not. - Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` - - // Whether a password is required for email auth or not. If true, both an email and - // password must be provided to sign in. If false, a user may sign in via either - // email/password or email link. - PasswordRequired *bool `json:"passwordRequired,omitempty" tf:"password_required,omitempty"` -} - -type EmailParameters struct { - - // Whether phone number auth is enabled for the project or not. - // +kubebuilder:validation:Optional - Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` - - // Whether a password is required for email auth or not. If true, both an email and - // password must be provided to sign in. If false, a user may sign in via either - // email/password or email link. 
- // +kubebuilder:validation:Optional - PasswordRequired *bool `json:"passwordRequired,omitempty" tf:"password_required,omitempty"` -} - -type HashConfigInitParameters struct { -} - -type HashConfigObservation struct { - - // (Output) - // Different password hash algorithms used in Identity Toolkit. - Algorithm *string `json:"algorithm,omitempty" tf:"algorithm,omitempty"` - - // (Output) - // Memory cost for hash calculation. Used by scrypt and other similar password derivation algorithms. See https://tools.ietf.org/html/rfc7914 for explanation of field. - MemoryCost *float64 `json:"memoryCost,omitempty" tf:"memory_cost,omitempty"` - - // (Output) - // How many rounds for hash calculation. Used by scrypt and other similar password derivation algorithms. - Rounds *float64 `json:"rounds,omitempty" tf:"rounds,omitempty"` - - // (Output) - // Non-printable character to be inserted between the salt and plain text password in base64. - SaltSeparator *string `json:"saltSeparator,omitempty" tf:"salt_separator,omitempty"` - - // (Output) - // Signer key in base64. - SignerKey *string `json:"signerKey,omitempty" tf:"signer_key,omitempty"` -} - -type HashConfigParameters struct { -} - -type PhoneNumberInitParameters struct { - - // Whether phone number auth is enabled for the project or not. - Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` - - // A map of that can be used for phone auth testing. - // +mapType=granular - TestPhoneNumbers map[string]*string `json:"testPhoneNumbers,omitempty" tf:"test_phone_numbers,omitempty"` -} - -type PhoneNumberObservation struct { - - // Whether phone number auth is enabled for the project or not. - Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` - - // A map of that can be used for phone auth testing. 
- // +mapType=granular - TestPhoneNumbers map[string]*string `json:"testPhoneNumbers,omitempty" tf:"test_phone_numbers,omitempty"` -} - -type PhoneNumberParameters struct { - - // Whether phone number auth is enabled for the project or not. - // +kubebuilder:validation:Optional - Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` - - // A map of that can be used for phone auth testing. - // +kubebuilder:validation:Optional - // +mapType=granular - TestPhoneNumbers map[string]*string `json:"testPhoneNumbers,omitempty" tf:"test_phone_numbers,omitempty"` -} - -type ProjectDefaultConfigInitParameters struct { - - // The ID of the project in which the resource belongs. - // If it is not provided, the provider project is used. - Project *string `json:"project,omitempty" tf:"project,omitempty"` - - // Configuration related to local sign in methods. - // Structure is documented below. - SignIn []SignInInitParameters `json:"signIn,omitempty" tf:"sign_in,omitempty"` -} - -type ProjectDefaultConfigObservation struct { - - // an identifier for the resource with format {{project}} - ID *string `json:"id,omitempty" tf:"id,omitempty"` - - // The name of the Config resource. Example: "projects/my-awesome-project/config" - Name *string `json:"name,omitempty" tf:"name,omitempty"` - - // The ID of the project in which the resource belongs. - // If it is not provided, the provider project is used. - Project *string `json:"project,omitempty" tf:"project,omitempty"` - - // Configuration related to local sign in methods. - // Structure is documented below. - SignIn []SignInObservation `json:"signIn,omitempty" tf:"sign_in,omitempty"` -} - -type ProjectDefaultConfigParameters struct { - - // The ID of the project in which the resource belongs. - // If it is not provided, the provider project is used. - // +kubebuilder:validation:Optional - Project *string `json:"project,omitempty" tf:"project,omitempty"` - - // Configuration related to local sign in methods. 
- // Structure is documented below. - // +kubebuilder:validation:Optional - SignIn []SignInParameters `json:"signIn,omitempty" tf:"sign_in,omitempty"` -} - -type SignInInitParameters struct { - - // Whether to allow more than one account to have the same email. - AllowDuplicateEmails *bool `json:"allowDuplicateEmails,omitempty" tf:"allow_duplicate_emails,omitempty"` - - // Configuration options related to authenticating an anonymous user. - // Structure is documented below. - Anonymous []AnonymousInitParameters `json:"anonymous,omitempty" tf:"anonymous,omitempty"` - - // Configuration options related to authenticating a user by their email address. - // Structure is documented below. - Email []EmailInitParameters `json:"email,omitempty" tf:"email,omitempty"` - - // Configuration options related to authenticated a user by their phone number. - // Structure is documented below. - PhoneNumber []PhoneNumberInitParameters `json:"phoneNumber,omitempty" tf:"phone_number,omitempty"` -} - -type SignInObservation struct { - - // Whether to allow more than one account to have the same email. - AllowDuplicateEmails *bool `json:"allowDuplicateEmails,omitempty" tf:"allow_duplicate_emails,omitempty"` - - // Configuration options related to authenticating an anonymous user. - // Structure is documented below. - Anonymous []AnonymousObservation `json:"anonymous,omitempty" tf:"anonymous,omitempty"` - - // Configuration options related to authenticating a user by their email address. - // Structure is documented below. - Email []EmailObservation `json:"email,omitempty" tf:"email,omitempty"` - - // (Output) - // Output only. Hash config information. - // Structure is documented below. - HashConfig []HashConfigObservation `json:"hashConfig,omitempty" tf:"hash_config,omitempty"` - - // Configuration options related to authenticated a user by their phone number. - // Structure is documented below. 
- PhoneNumber []PhoneNumberObservation `json:"phoneNumber,omitempty" tf:"phone_number,omitempty"` -} - -type SignInParameters struct { - - // Whether to allow more than one account to have the same email. - // +kubebuilder:validation:Optional - AllowDuplicateEmails *bool `json:"allowDuplicateEmails,omitempty" tf:"allow_duplicate_emails,omitempty"` - - // Configuration options related to authenticating an anonymous user. - // Structure is documented below. - // +kubebuilder:validation:Optional - Anonymous []AnonymousParameters `json:"anonymous,omitempty" tf:"anonymous,omitempty"` - - // Configuration options related to authenticating a user by their email address. - // Structure is documented below. - // +kubebuilder:validation:Optional - Email []EmailParameters `json:"email,omitempty" tf:"email,omitempty"` - - // Configuration options related to authenticated a user by their phone number. - // Structure is documented below. - // +kubebuilder:validation:Optional - PhoneNumber []PhoneNumberParameters `json:"phoneNumber,omitempty" tf:"phone_number,omitempty"` -} - -// ProjectDefaultConfigSpec defines the desired state of ProjectDefaultConfig -type ProjectDefaultConfigSpec struct { - v1.ResourceSpec `json:",inline"` - ForProvider ProjectDefaultConfigParameters `json:"forProvider"` - // THIS IS A BETA FIELD. It will be honored - // unless the Management Policies feature flag is disabled. - // InitProvider holds the same fields as ForProvider, with the exception - // of Identifier and other resource reference fields. The fields that are - // in InitProvider are merged into ForProvider when the resource is created. - // The same fields are also added to the terraform ignore_changes hook, to - // avoid updating them after creation. This is useful for fields that are - // required on creation, but we do not desire to update them after creation, - // for example because of an external controller is managing them, like an - // autoscaler. 
- InitProvider ProjectDefaultConfigInitParameters `json:"initProvider,omitempty"` -} - -// ProjectDefaultConfigStatus defines the observed state of ProjectDefaultConfig. -type ProjectDefaultConfigStatus struct { - v1.ResourceStatus `json:",inline"` - AtProvider ProjectDefaultConfigObservation `json:"atProvider,omitempty"` -} - -// +kubebuilder:object:root=true -// +kubebuilder:subresource:status -// +kubebuilder:storageversion - -// ProjectDefaultConfig is the Schema for the ProjectDefaultConfigs API. There is no persistent data associated with this resource. -// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" -// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" -// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" -// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" -// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,gcp} -type ProjectDefaultConfig struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - Spec ProjectDefaultConfigSpec `json:"spec"` - Status ProjectDefaultConfigStatus `json:"status,omitempty"` -} - -// +kubebuilder:object:root=true - -// ProjectDefaultConfigList contains a list of ProjectDefaultConfigs -type ProjectDefaultConfigList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []ProjectDefaultConfig `json:"items"` -} - -// Repository type metadata. -var ( - ProjectDefaultConfig_Kind = "ProjectDefaultConfig" - ProjectDefaultConfig_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ProjectDefaultConfig_Kind}.String() - ProjectDefaultConfig_KindAPIVersion = ProjectDefaultConfig_Kind + "." 
+ CRDGroupVersion.String() - ProjectDefaultConfig_GroupVersionKind = CRDGroupVersion.WithKind(ProjectDefaultConfig_Kind) -) - -func init() { - SchemeBuilder.Register(&ProjectDefaultConfig{}, &ProjectDefaultConfigList{}) -} diff --git a/apis/identityplatform/v1beta2/zz_generated.conversion_hubs.go b/apis/identityplatform/v1beta2/zz_generated.conversion_hubs.go index f39f927ab..e26309bb1 100755 --- a/apis/identityplatform/v1beta2/zz_generated.conversion_hubs.go +++ b/apis/identityplatform/v1beta2/zz_generated.conversion_hubs.go @@ -9,8 +9,5 @@ package v1beta2 // Hub marks this type as a conversion hub. func (tr *InboundSAMLConfig) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *ProjectDefaultConfig) Hub() {} - // Hub marks this type as a conversion hub. func (tr *TenantInboundSAMLConfig) Hub() {} diff --git a/apis/identityplatform/v1beta2/zz_generated.deepcopy.go b/apis/identityplatform/v1beta2/zz_generated.deepcopy.go index e14aac6c6..c9c33fa73 100644 --- a/apis/identityplatform/v1beta2/zz_generated.deepcopy.go +++ b/apis/identityplatform/v1beta2/zz_generated.deepcopy.go @@ -13,211 +13,6 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AnonymousInitParameters) DeepCopyInto(out *AnonymousInitParameters) { - *out = *in - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = new(bool) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnonymousInitParameters. -func (in *AnonymousInitParameters) DeepCopy() *AnonymousInitParameters { - if in == nil { - return nil - } - out := new(AnonymousInitParameters) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *AnonymousObservation) DeepCopyInto(out *AnonymousObservation) { - *out = *in - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = new(bool) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnonymousObservation. -func (in *AnonymousObservation) DeepCopy() *AnonymousObservation { - if in == nil { - return nil - } - out := new(AnonymousObservation) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AnonymousParameters) DeepCopyInto(out *AnonymousParameters) { - *out = *in - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = new(bool) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnonymousParameters. -func (in *AnonymousParameters) DeepCopy() *AnonymousParameters { - if in == nil { - return nil - } - out := new(AnonymousParameters) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EmailInitParameters) DeepCopyInto(out *EmailInitParameters) { - *out = *in - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = new(bool) - **out = **in - } - if in.PasswordRequired != nil { - in, out := &in.PasswordRequired, &out.PasswordRequired - *out = new(bool) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmailInitParameters. -func (in *EmailInitParameters) DeepCopy() *EmailInitParameters { - if in == nil { - return nil - } - out := new(EmailInitParameters) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *EmailObservation) DeepCopyInto(out *EmailObservation) { - *out = *in - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = new(bool) - **out = **in - } - if in.PasswordRequired != nil { - in, out := &in.PasswordRequired, &out.PasswordRequired - *out = new(bool) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmailObservation. -func (in *EmailObservation) DeepCopy() *EmailObservation { - if in == nil { - return nil - } - out := new(EmailObservation) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EmailParameters) DeepCopyInto(out *EmailParameters) { - *out = *in - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = new(bool) - **out = **in - } - if in.PasswordRequired != nil { - in, out := &in.PasswordRequired, &out.PasswordRequired - *out = new(bool) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmailParameters. -func (in *EmailParameters) DeepCopy() *EmailParameters { - if in == nil { - return nil - } - out := new(EmailParameters) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HashConfigInitParameters) DeepCopyInto(out *HashConfigInitParameters) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HashConfigInitParameters. -func (in *HashConfigInitParameters) DeepCopy() *HashConfigInitParameters { - if in == nil { - return nil - } - out := new(HashConfigInitParameters) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *HashConfigObservation) DeepCopyInto(out *HashConfigObservation) { - *out = *in - if in.Algorithm != nil { - in, out := &in.Algorithm, &out.Algorithm - *out = new(string) - **out = **in - } - if in.MemoryCost != nil { - in, out := &in.MemoryCost, &out.MemoryCost - *out = new(float64) - **out = **in - } - if in.Rounds != nil { - in, out := &in.Rounds, &out.Rounds - *out = new(float64) - **out = **in - } - if in.SaltSeparator != nil { - in, out := &in.SaltSeparator, &out.SaltSeparator - *out = new(string) - **out = **in - } - if in.SignerKey != nil { - in, out := &in.SignerKey, &out.SignerKey - *out = new(string) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HashConfigObservation. -func (in *HashConfigObservation) DeepCopy() *HashConfigObservation { - if in == nil { - return nil - } - out := new(HashConfigObservation) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HashConfigParameters) DeepCopyInto(out *HashConfigParameters) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HashConfigParameters. -func (in *HashConfigParameters) DeepCopy() *HashConfigParameters { - if in == nil { - return nil - } - out := new(HashConfigParameters) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IdPCertificatesInitParameters) DeepCopyInto(out *IdPCertificatesInitParameters) { *out = *in @@ -673,405 +468,6 @@ func (in *InboundSAMLConfigStatus) DeepCopy() *InboundSAMLConfigStatus { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PhoneNumberInitParameters) DeepCopyInto(out *PhoneNumberInitParameters) { - *out = *in - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = new(bool) - **out = **in - } - if in.TestPhoneNumbers != nil { - in, out := &in.TestPhoneNumbers, &out.TestPhoneNumbers - *out = make(map[string]*string, len(*in)) - for key, val := range *in { - var outVal *string - if val == nil { - (*out)[key] = nil - } else { - inVal := (*in)[key] - in, out := &inVal, &outVal - *out = new(string) - **out = **in - } - (*out)[key] = outVal - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PhoneNumberInitParameters. -func (in *PhoneNumberInitParameters) DeepCopy() *PhoneNumberInitParameters { - if in == nil { - return nil - } - out := new(PhoneNumberInitParameters) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PhoneNumberObservation) DeepCopyInto(out *PhoneNumberObservation) { - *out = *in - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = new(bool) - **out = **in - } - if in.TestPhoneNumbers != nil { - in, out := &in.TestPhoneNumbers, &out.TestPhoneNumbers - *out = make(map[string]*string, len(*in)) - for key, val := range *in { - var outVal *string - if val == nil { - (*out)[key] = nil - } else { - inVal := (*in)[key] - in, out := &inVal, &outVal - *out = new(string) - **out = **in - } - (*out)[key] = outVal - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PhoneNumberObservation. -func (in *PhoneNumberObservation) DeepCopy() *PhoneNumberObservation { - if in == nil { - return nil - } - out := new(PhoneNumberObservation) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PhoneNumberParameters) DeepCopyInto(out *PhoneNumberParameters) { - *out = *in - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = new(bool) - **out = **in - } - if in.TestPhoneNumbers != nil { - in, out := &in.TestPhoneNumbers, &out.TestPhoneNumbers - *out = make(map[string]*string, len(*in)) - for key, val := range *in { - var outVal *string - if val == nil { - (*out)[key] = nil - } else { - inVal := (*in)[key] - in, out := &inVal, &outVal - *out = new(string) - **out = **in - } - (*out)[key] = outVal - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PhoneNumberParameters. -func (in *PhoneNumberParameters) DeepCopy() *PhoneNumberParameters { - if in == nil { - return nil - } - out := new(PhoneNumberParameters) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ProjectDefaultConfig) DeepCopyInto(out *ProjectDefaultConfig) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDefaultConfig. -func (in *ProjectDefaultConfig) DeepCopy() *ProjectDefaultConfig { - if in == nil { - return nil - } - out := new(ProjectDefaultConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ProjectDefaultConfig) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ProjectDefaultConfigInitParameters) DeepCopyInto(out *ProjectDefaultConfigInitParameters) { - *out = *in - if in.Project != nil { - in, out := &in.Project, &out.Project - *out = new(string) - **out = **in - } - if in.SignIn != nil { - in, out := &in.SignIn, &out.SignIn - *out = new(SignInInitParameters) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDefaultConfigInitParameters. -func (in *ProjectDefaultConfigInitParameters) DeepCopy() *ProjectDefaultConfigInitParameters { - if in == nil { - return nil - } - out := new(ProjectDefaultConfigInitParameters) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ProjectDefaultConfigList) DeepCopyInto(out *ProjectDefaultConfigList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ProjectDefaultConfig, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDefaultConfigList. -func (in *ProjectDefaultConfigList) DeepCopy() *ProjectDefaultConfigList { - if in == nil { - return nil - } - out := new(ProjectDefaultConfigList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ProjectDefaultConfigList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ProjectDefaultConfigObservation) DeepCopyInto(out *ProjectDefaultConfigObservation) { - *out = *in - if in.ID != nil { - in, out := &in.ID, &out.ID - *out = new(string) - **out = **in - } - if in.Name != nil { - in, out := &in.Name, &out.Name - *out = new(string) - **out = **in - } - if in.Project != nil { - in, out := &in.Project, &out.Project - *out = new(string) - **out = **in - } - if in.SignIn != nil { - in, out := &in.SignIn, &out.SignIn - *out = new(SignInObservation) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDefaultConfigObservation. -func (in *ProjectDefaultConfigObservation) DeepCopy() *ProjectDefaultConfigObservation { - if in == nil { - return nil - } - out := new(ProjectDefaultConfigObservation) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ProjectDefaultConfigParameters) DeepCopyInto(out *ProjectDefaultConfigParameters) { - *out = *in - if in.Project != nil { - in, out := &in.Project, &out.Project - *out = new(string) - **out = **in - } - if in.SignIn != nil { - in, out := &in.SignIn, &out.SignIn - *out = new(SignInParameters) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDefaultConfigParameters. -func (in *ProjectDefaultConfigParameters) DeepCopy() *ProjectDefaultConfigParameters { - if in == nil { - return nil - } - out := new(ProjectDefaultConfigParameters) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ProjectDefaultConfigSpec) DeepCopyInto(out *ProjectDefaultConfigSpec) { - *out = *in - in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) - in.ForProvider.DeepCopyInto(&out.ForProvider) - in.InitProvider.DeepCopyInto(&out.InitProvider) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDefaultConfigSpec. -func (in *ProjectDefaultConfigSpec) DeepCopy() *ProjectDefaultConfigSpec { - if in == nil { - return nil - } - out := new(ProjectDefaultConfigSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ProjectDefaultConfigStatus) DeepCopyInto(out *ProjectDefaultConfigStatus) { - *out = *in - in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) - in.AtProvider.DeepCopyInto(&out.AtProvider) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDefaultConfigStatus. -func (in *ProjectDefaultConfigStatus) DeepCopy() *ProjectDefaultConfigStatus { - if in == nil { - return nil - } - out := new(ProjectDefaultConfigStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *SignInInitParameters) DeepCopyInto(out *SignInInitParameters) { - *out = *in - if in.AllowDuplicateEmails != nil { - in, out := &in.AllowDuplicateEmails, &out.AllowDuplicateEmails - *out = new(bool) - **out = **in - } - if in.Anonymous != nil { - in, out := &in.Anonymous, &out.Anonymous - *out = new(AnonymousInitParameters) - (*in).DeepCopyInto(*out) - } - if in.Email != nil { - in, out := &in.Email, &out.Email - *out = new(EmailInitParameters) - (*in).DeepCopyInto(*out) - } - if in.PhoneNumber != nil { - in, out := &in.PhoneNumber, &out.PhoneNumber - *out = new(PhoneNumberInitParameters) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignInInitParameters. -func (in *SignInInitParameters) DeepCopy() *SignInInitParameters { - if in == nil { - return nil - } - out := new(SignInInitParameters) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SignInObservation) DeepCopyInto(out *SignInObservation) { - *out = *in - if in.AllowDuplicateEmails != nil { - in, out := &in.AllowDuplicateEmails, &out.AllowDuplicateEmails - *out = new(bool) - **out = **in - } - if in.Anonymous != nil { - in, out := &in.Anonymous, &out.Anonymous - *out = new(AnonymousObservation) - (*in).DeepCopyInto(*out) - } - if in.Email != nil { - in, out := &in.Email, &out.Email - *out = new(EmailObservation) - (*in).DeepCopyInto(*out) - } - if in.HashConfig != nil { - in, out := &in.HashConfig, &out.HashConfig - *out = make([]HashConfigObservation, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.PhoneNumber != nil { - in, out := &in.PhoneNumber, &out.PhoneNumber - *out = new(PhoneNumberObservation) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignInObservation. 
-func (in *SignInObservation) DeepCopy() *SignInObservation { - if in == nil { - return nil - } - out := new(SignInObservation) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SignInParameters) DeepCopyInto(out *SignInParameters) { - *out = *in - if in.AllowDuplicateEmails != nil { - in, out := &in.AllowDuplicateEmails, &out.AllowDuplicateEmails - *out = new(bool) - **out = **in - } - if in.Anonymous != nil { - in, out := &in.Anonymous, &out.Anonymous - *out = new(AnonymousParameters) - (*in).DeepCopyInto(*out) - } - if in.Email != nil { - in, out := &in.Email, &out.Email - *out = new(EmailParameters) - (*in).DeepCopyInto(*out) - } - if in.PhoneNumber != nil { - in, out := &in.PhoneNumber, &out.PhoneNumber - *out = new(PhoneNumberParameters) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignInParameters. -func (in *SignInParameters) DeepCopy() *SignInParameters { - if in == nil { - return nil - } - out := new(SignInParameters) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SpCertificatesInitParameters) DeepCopyInto(out *SpCertificatesInitParameters) { *out = *in diff --git a/apis/identityplatform/v1beta2/zz_generated.managed.go b/apis/identityplatform/v1beta2/zz_generated.managed.go index 45608be77..41c6ccf41 100644 --- a/apis/identityplatform/v1beta2/zz_generated.managed.go +++ b/apis/identityplatform/v1beta2/zz_generated.managed.go @@ -67,66 +67,6 @@ func (mg *InboundSAMLConfig) SetWriteConnectionSecretToReference(r *xpv1.SecretR mg.Spec.WriteConnectionSecretToReference = r } -// GetCondition of this ProjectDefaultConfig. 
-func (mg *ProjectDefaultConfig) GetCondition(ct xpv1.ConditionType) xpv1.Condition { - return mg.Status.GetCondition(ct) -} - -// GetDeletionPolicy of this ProjectDefaultConfig. -func (mg *ProjectDefaultConfig) GetDeletionPolicy() xpv1.DeletionPolicy { - return mg.Spec.DeletionPolicy -} - -// GetManagementPolicies of this ProjectDefaultConfig. -func (mg *ProjectDefaultConfig) GetManagementPolicies() xpv1.ManagementPolicies { - return mg.Spec.ManagementPolicies -} - -// GetProviderConfigReference of this ProjectDefaultConfig. -func (mg *ProjectDefaultConfig) GetProviderConfigReference() *xpv1.Reference { - return mg.Spec.ProviderConfigReference -} - -// GetPublishConnectionDetailsTo of this ProjectDefaultConfig. -func (mg *ProjectDefaultConfig) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { - return mg.Spec.PublishConnectionDetailsTo -} - -// GetWriteConnectionSecretToReference of this ProjectDefaultConfig. -func (mg *ProjectDefaultConfig) GetWriteConnectionSecretToReference() *xpv1.SecretReference { - return mg.Spec.WriteConnectionSecretToReference -} - -// SetConditions of this ProjectDefaultConfig. -func (mg *ProjectDefaultConfig) SetConditions(c ...xpv1.Condition) { - mg.Status.SetConditions(c...) -} - -// SetDeletionPolicy of this ProjectDefaultConfig. -func (mg *ProjectDefaultConfig) SetDeletionPolicy(r xpv1.DeletionPolicy) { - mg.Spec.DeletionPolicy = r -} - -// SetManagementPolicies of this ProjectDefaultConfig. -func (mg *ProjectDefaultConfig) SetManagementPolicies(r xpv1.ManagementPolicies) { - mg.Spec.ManagementPolicies = r -} - -// SetProviderConfigReference of this ProjectDefaultConfig. -func (mg *ProjectDefaultConfig) SetProviderConfigReference(r *xpv1.Reference) { - mg.Spec.ProviderConfigReference = r -} - -// SetPublishConnectionDetailsTo of this ProjectDefaultConfig. 
-func (mg *ProjectDefaultConfig) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { - mg.Spec.PublishConnectionDetailsTo = r -} - -// SetWriteConnectionSecretToReference of this ProjectDefaultConfig. -func (mg *ProjectDefaultConfig) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { - mg.Spec.WriteConnectionSecretToReference = r -} - // GetCondition of this TenantInboundSAMLConfig. func (mg *TenantInboundSAMLConfig) GetCondition(ct xpv1.ConditionType) xpv1.Condition { return mg.Status.GetCondition(ct) diff --git a/apis/identityplatform/v1beta2/zz_generated.managedlist.go b/apis/identityplatform/v1beta2/zz_generated.managedlist.go index bd527f912..ffc290db6 100644 --- a/apis/identityplatform/v1beta2/zz_generated.managedlist.go +++ b/apis/identityplatform/v1beta2/zz_generated.managedlist.go @@ -16,15 +16,6 @@ func (l *InboundSAMLConfigList) GetItems() []resource.Managed { return items } -// GetItems of this ProjectDefaultConfigList. -func (l *ProjectDefaultConfigList) GetItems() []resource.Managed { - items := make([]resource.Managed, len(l.Items)) - for i := range l.Items { - items[i] = &l.Items[i] - } - return items -} - // GetItems of this TenantInboundSAMLConfigList. func (l *TenantInboundSAMLConfigList) GetItems() []resource.Managed { items := make([]resource.Managed, len(l.Items)) diff --git a/apis/identityplatform/v1beta2/zz_projectdefaultconfig_terraformed.go b/apis/identityplatform/v1beta2/zz_projectdefaultconfig_terraformed.go deleted file mode 100755 index 0a5d8fd19..000000000 --- a/apis/identityplatform/v1beta2/zz_projectdefaultconfig_terraformed.go +++ /dev/null @@ -1,129 +0,0 @@ -// SPDX-FileCopyrightText: 2024 The Crossplane Authors -// -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by upjet. DO NOT EDIT. 
- -package v1beta2 - -import ( - "dario.cat/mergo" - "github.com/pkg/errors" - - "github.com/crossplane/upjet/pkg/resource" - "github.com/crossplane/upjet/pkg/resource/json" -) - -// GetTerraformResourceType returns Terraform resource type for this ProjectDefaultConfig -func (mg *ProjectDefaultConfig) GetTerraformResourceType() string { - return "google_identity_platform_project_default_config" -} - -// GetConnectionDetailsMapping for this ProjectDefaultConfig -func (tr *ProjectDefaultConfig) GetConnectionDetailsMapping() map[string]string { - return nil -} - -// GetObservation of this ProjectDefaultConfig -func (tr *ProjectDefaultConfig) GetObservation() (map[string]any, error) { - o, err := json.TFParser.Marshal(tr.Status.AtProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(o, &base) -} - -// SetObservation for this ProjectDefaultConfig -func (tr *ProjectDefaultConfig) SetObservation(obs map[string]any) error { - p, err := json.TFParser.Marshal(obs) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) -} - -// GetID returns ID of underlying Terraform resource of this ProjectDefaultConfig -func (tr *ProjectDefaultConfig) GetID() string { - if tr.Status.AtProvider.ID == nil { - return "" - } - return *tr.Status.AtProvider.ID -} - -// GetParameters of this ProjectDefaultConfig -func (tr *ProjectDefaultConfig) GetParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.ForProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) -} - -// SetParameters for this ProjectDefaultConfig -func (tr *ProjectDefaultConfig) SetParameters(params map[string]any) error { - p, err := json.TFParser.Marshal(params) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) -} - -// GetInitParameters of this ProjectDefaultConfig -func (tr 
*ProjectDefaultConfig) GetInitParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.InitProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) -} - -// GetInitParameters of this ProjectDefaultConfig -func (tr *ProjectDefaultConfig) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { - params, err := tr.GetParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) - } - if !shouldMergeInitProvider { - return params, nil - } - - initParams, err := tr.GetInitParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) - } - - // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the - // slices from the initProvider to forProvider. As it also sets - // overwrite to true, we need to set it back to false, we don't - // want to overwrite the forProvider fields with the initProvider - // fields. - err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { - c.Overwrite = false - }) - if err != nil { - return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) - } - - return params, nil -} - -// LateInitialize this ProjectDefaultConfig using its observed tfState. -// returns True if there are any spec changes for the resource. -func (tr *ProjectDefaultConfig) LateInitialize(attrs []byte) (bool, error) { - params := &ProjectDefaultConfigParameters{} - if err := json.TFParser.Unmarshal(attrs, params); err != nil { - return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") - } - opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} - - li := resource.NewGenericLateInitializer(opts...) 
- return li.LateInitialize(&tr.Spec.ForProvider, params) -} - -// GetTerraformSchemaVersion returns the associated Terraform schema version -func (tr *ProjectDefaultConfig) GetTerraformSchemaVersion() int { - return 0 -} diff --git a/apis/identityplatform/v1beta2/zz_projectdefaultconfig_types.go b/apis/identityplatform/v1beta2/zz_projectdefaultconfig_types.go deleted file mode 100755 index 79b7f79d0..000000000 --- a/apis/identityplatform/v1beta2/zz_projectdefaultconfig_types.go +++ /dev/null @@ -1,293 +0,0 @@ -// SPDX-FileCopyrightText: 2024 The Crossplane Authors -// -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by upjet. DO NOT EDIT. - -package v1beta2 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - - v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" -) - -type AnonymousInitParameters struct { - - // Whether phone number auth is enabled for the project or not. - Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` -} - -type AnonymousObservation struct { - - // Whether phone number auth is enabled for the project or not. - Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` -} - -type AnonymousParameters struct { - - // Whether phone number auth is enabled for the project or not. - // +kubebuilder:validation:Optional - Enabled *bool `json:"enabled" tf:"enabled,omitempty"` -} - -type EmailInitParameters struct { - - // Whether phone number auth is enabled for the project or not. - Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` - - // Whether a password is required for email auth or not. If true, both an email and - // password must be provided to sign in. If false, a user may sign in via either - // email/password or email link. - PasswordRequired *bool `json:"passwordRequired,omitempty" tf:"password_required,omitempty"` -} - -type EmailObservation struct { - - // Whether phone number auth is enabled for the project or not. 
- Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` - - // Whether a password is required for email auth or not. If true, both an email and - // password must be provided to sign in. If false, a user may sign in via either - // email/password or email link. - PasswordRequired *bool `json:"passwordRequired,omitempty" tf:"password_required,omitempty"` -} - -type EmailParameters struct { - - // Whether phone number auth is enabled for the project or not. - // +kubebuilder:validation:Optional - Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` - - // Whether a password is required for email auth or not. If true, both an email and - // password must be provided to sign in. If false, a user may sign in via either - // email/password or email link. - // +kubebuilder:validation:Optional - PasswordRequired *bool `json:"passwordRequired,omitempty" tf:"password_required,omitempty"` -} - -type HashConfigInitParameters struct { -} - -type HashConfigObservation struct { - - // (Output) - // Different password hash algorithms used in Identity Toolkit. - Algorithm *string `json:"algorithm,omitempty" tf:"algorithm,omitempty"` - - // (Output) - // Memory cost for hash calculation. Used by scrypt and other similar password derivation algorithms. See https://tools.ietf.org/html/rfc7914 for explanation of field. - MemoryCost *float64 `json:"memoryCost,omitempty" tf:"memory_cost,omitempty"` - - // (Output) - // How many rounds for hash calculation. Used by scrypt and other similar password derivation algorithms. - Rounds *float64 `json:"rounds,omitempty" tf:"rounds,omitempty"` - - // (Output) - // Non-printable character to be inserted between the salt and plain text password in base64. - SaltSeparator *string `json:"saltSeparator,omitempty" tf:"salt_separator,omitempty"` - - // (Output) - // Signer key in base64. 
- SignerKey *string `json:"signerKey,omitempty" tf:"signer_key,omitempty"` -} - -type HashConfigParameters struct { -} - -type PhoneNumberInitParameters struct { - - // Whether phone number auth is enabled for the project or not. - Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` - - // A map of that can be used for phone auth testing. - // +mapType=granular - TestPhoneNumbers map[string]*string `json:"testPhoneNumbers,omitempty" tf:"test_phone_numbers,omitempty"` -} - -type PhoneNumberObservation struct { - - // Whether phone number auth is enabled for the project or not. - Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` - - // A map of that can be used for phone auth testing. - // +mapType=granular - TestPhoneNumbers map[string]*string `json:"testPhoneNumbers,omitempty" tf:"test_phone_numbers,omitempty"` -} - -type PhoneNumberParameters struct { - - // Whether phone number auth is enabled for the project or not. - // +kubebuilder:validation:Optional - Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` - - // A map of that can be used for phone auth testing. - // +kubebuilder:validation:Optional - // +mapType=granular - TestPhoneNumbers map[string]*string `json:"testPhoneNumbers,omitempty" tf:"test_phone_numbers,omitempty"` -} - -type ProjectDefaultConfigInitParameters struct { - - // The ID of the project in which the resource belongs. - // If it is not provided, the provider project is used. - Project *string `json:"project,omitempty" tf:"project,omitempty"` - - // Configuration related to local sign in methods. - // Structure is documented below. - SignIn *SignInInitParameters `json:"signIn,omitempty" tf:"sign_in,omitempty"` -} - -type ProjectDefaultConfigObservation struct { - - // an identifier for the resource with format {{project}} - ID *string `json:"id,omitempty" tf:"id,omitempty"` - - // The name of the Config resource. 
Example: "projects/my-awesome-project/config" - Name *string `json:"name,omitempty" tf:"name,omitempty"` - - // The ID of the project in which the resource belongs. - // If it is not provided, the provider project is used. - Project *string `json:"project,omitempty" tf:"project,omitempty"` - - // Configuration related to local sign in methods. - // Structure is documented below. - SignIn *SignInObservation `json:"signIn,omitempty" tf:"sign_in,omitempty"` -} - -type ProjectDefaultConfigParameters struct { - - // The ID of the project in which the resource belongs. - // If it is not provided, the provider project is used. - // +kubebuilder:validation:Optional - Project *string `json:"project,omitempty" tf:"project,omitempty"` - - // Configuration related to local sign in methods. - // Structure is documented below. - // +kubebuilder:validation:Optional - SignIn *SignInParameters `json:"signIn,omitempty" tf:"sign_in,omitempty"` -} - -type SignInInitParameters struct { - - // Whether to allow more than one account to have the same email. - AllowDuplicateEmails *bool `json:"allowDuplicateEmails,omitempty" tf:"allow_duplicate_emails,omitempty"` - - // Configuration options related to authenticating an anonymous user. - // Structure is documented below. - Anonymous *AnonymousInitParameters `json:"anonymous,omitempty" tf:"anonymous,omitempty"` - - // Configuration options related to authenticating a user by their email address. - // Structure is documented below. - Email *EmailInitParameters `json:"email,omitempty" tf:"email,omitempty"` - - // Configuration options related to authenticated a user by their phone number. - // Structure is documented below. - PhoneNumber *PhoneNumberInitParameters `json:"phoneNumber,omitempty" tf:"phone_number,omitempty"` -} - -type SignInObservation struct { - - // Whether to allow more than one account to have the same email. 
- AllowDuplicateEmails *bool `json:"allowDuplicateEmails,omitempty" tf:"allow_duplicate_emails,omitempty"` - - // Configuration options related to authenticating an anonymous user. - // Structure is documented below. - Anonymous *AnonymousObservation `json:"anonymous,omitempty" tf:"anonymous,omitempty"` - - // Configuration options related to authenticating a user by their email address. - // Structure is documented below. - Email *EmailObservation `json:"email,omitempty" tf:"email,omitempty"` - - // (Output) - // Output only. Hash config information. - // Structure is documented below. - HashConfig []HashConfigObservation `json:"hashConfig,omitempty" tf:"hash_config,omitempty"` - - // Configuration options related to authenticated a user by their phone number. - // Structure is documented below. - PhoneNumber *PhoneNumberObservation `json:"phoneNumber,omitempty" tf:"phone_number,omitempty"` -} - -type SignInParameters struct { - - // Whether to allow more than one account to have the same email. - // +kubebuilder:validation:Optional - AllowDuplicateEmails *bool `json:"allowDuplicateEmails,omitempty" tf:"allow_duplicate_emails,omitempty"` - - // Configuration options related to authenticating an anonymous user. - // Structure is documented below. - // +kubebuilder:validation:Optional - Anonymous *AnonymousParameters `json:"anonymous,omitempty" tf:"anonymous,omitempty"` - - // Configuration options related to authenticating a user by their email address. - // Structure is documented below. - // +kubebuilder:validation:Optional - Email *EmailParameters `json:"email,omitempty" tf:"email,omitempty"` - - // Configuration options related to authenticated a user by their phone number. - // Structure is documented below. 
- // +kubebuilder:validation:Optional - PhoneNumber *PhoneNumberParameters `json:"phoneNumber,omitempty" tf:"phone_number,omitempty"` -} - -// ProjectDefaultConfigSpec defines the desired state of ProjectDefaultConfig -type ProjectDefaultConfigSpec struct { - v1.ResourceSpec `json:",inline"` - ForProvider ProjectDefaultConfigParameters `json:"forProvider"` - // THIS IS A BETA FIELD. It will be honored - // unless the Management Policies feature flag is disabled. - // InitProvider holds the same fields as ForProvider, with the exception - // of Identifier and other resource reference fields. The fields that are - // in InitProvider are merged into ForProvider when the resource is created. - // The same fields are also added to the terraform ignore_changes hook, to - // avoid updating them after creation. This is useful for fields that are - // required on creation, but we do not desire to update them after creation, - // for example because of an external controller is managing them, like an - // autoscaler. - InitProvider ProjectDefaultConfigInitParameters `json:"initProvider,omitempty"` -} - -// ProjectDefaultConfigStatus defines the observed state of ProjectDefaultConfig. -type ProjectDefaultConfigStatus struct { - v1.ResourceStatus `json:",inline"` - AtProvider ProjectDefaultConfigObservation `json:"atProvider,omitempty"` -} - -// +kubebuilder:object:root=true -// +kubebuilder:subresource:status - -// ProjectDefaultConfig is the Schema for the ProjectDefaultConfigs API. There is no persistent data associated with this resource. 
-// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" -// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" -// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" -// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" -// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,gcp} -type ProjectDefaultConfig struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - Spec ProjectDefaultConfigSpec `json:"spec"` - Status ProjectDefaultConfigStatus `json:"status,omitempty"` -} - -// +kubebuilder:object:root=true - -// ProjectDefaultConfigList contains a list of ProjectDefaultConfigs -type ProjectDefaultConfigList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []ProjectDefaultConfig `json:"items"` -} - -// Repository type metadata. -var ( - ProjectDefaultConfig_Kind = "ProjectDefaultConfig" - ProjectDefaultConfig_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ProjectDefaultConfig_Kind}.String() - ProjectDefaultConfig_KindAPIVersion = ProjectDefaultConfig_Kind + "." + CRDGroupVersion.String() - ProjectDefaultConfig_GroupVersionKind = CRDGroupVersion.WithKind(ProjectDefaultConfig_Kind) -) - -func init() { - SchemeBuilder.Register(&ProjectDefaultConfig{}, &ProjectDefaultConfigList{}) -} diff --git a/apis/monitoring/v1beta2/zz_alertpolicy_types.go b/apis/monitoring/v1beta2/zz_alertpolicy_types.go index e1062480e..c1dbbd5a4 100755 --- a/apis/monitoring/v1beta2/zz_alertpolicy_types.go +++ b/apis/monitoring/v1beta2/zz_alertpolicy_types.go @@ -497,6 +497,10 @@ type AlertStrategyInitParameters struct { // Structure is documented below. 
NotificationChannelStrategy []NotificationChannelStrategyInitParameters `json:"notificationChannelStrategy,omitempty" tf:"notification_channel_strategy,omitempty"` + // Control when notifications will be sent out. + // Each value may be one of: NOTIFICATION_PROMPT_UNSPECIFIED, OPENED, CLOSED. + NotificationPrompts []*string `json:"notificationPrompts,omitempty" tf:"notification_prompts,omitempty"` + // Required for alert policies with a LogMatch condition. // This limit is not implemented for alert policies that are not log-based. // Structure is documented below. @@ -513,6 +517,10 @@ type AlertStrategyObservation struct { // Structure is documented below. NotificationChannelStrategy []NotificationChannelStrategyObservation `json:"notificationChannelStrategy,omitempty" tf:"notification_channel_strategy,omitempty"` + // Control when notifications will be sent out. + // Each value may be one of: NOTIFICATION_PROMPT_UNSPECIFIED, OPENED, CLOSED. + NotificationPrompts []*string `json:"notificationPrompts,omitempty" tf:"notification_prompts,omitempty"` + // Required for alert policies with a LogMatch condition. // This limit is not implemented for alert policies that are not log-based. // Structure is documented below. @@ -531,6 +539,11 @@ type AlertStrategyParameters struct { // +kubebuilder:validation:Optional NotificationChannelStrategy []NotificationChannelStrategyParameters `json:"notificationChannelStrategy,omitempty" tf:"notification_channel_strategy,omitempty"` + // Control when notifications will be sent out. + // Each value may be one of: NOTIFICATION_PROMPT_UNSPECIFIED, OPENED, CLOSED. + // +kubebuilder:validation:Optional + NotificationPrompts []*string `json:"notificationPrompts,omitempty" tf:"notification_prompts,omitempty"` + // Required for alert policies with a LogMatch condition. // This limit is not implemented for alert policies that are not log-based. // Structure is documented below. 
@@ -992,6 +1005,11 @@ type ConditionPrometheusQueryLanguageInitParameters struct { // valid Prometheus label name. AlertRule *string `json:"alertRule,omitempty" tf:"alert_rule,omitempty"` + // Whether to disable metric existence validation for this condition. + // Users with the monitoring.alertPolicyViewer role are able to see the + // name of the non-existent metric in the alerting policy condition. + DisableMetricValidation *bool `json:"disableMetricValidation,omitempty" tf:"disable_metric_validation,omitempty"` + // The amount of time that a time series must // violate the threshold to be considered // failing. Currently, only values that are a @@ -1053,6 +1071,11 @@ type ConditionPrometheusQueryLanguageObservation struct { // valid Prometheus label name. AlertRule *string `json:"alertRule,omitempty" tf:"alert_rule,omitempty"` + // Whether to disable metric existence validation for this condition. + // Users with the monitoring.alertPolicyViewer role are able to see the + // name of the non-existent metric in the alerting policy condition. + DisableMetricValidation *bool `json:"disableMetricValidation,omitempty" tf:"disable_metric_validation,omitempty"` + // The amount of time that a time series must // violate the threshold to be considered // failing. Currently, only values that are a @@ -1115,6 +1138,12 @@ type ConditionPrometheusQueryLanguageParameters struct { // +kubebuilder:validation:Optional AlertRule *string `json:"alertRule,omitempty" tf:"alert_rule,omitempty"` + // Whether to disable metric existence validation for this condition. + // Users with the monitoring.alertPolicyViewer role are able to see the + // name of the non-existent metric in the alerting policy condition. + // +kubebuilder:validation:Optional + DisableMetricValidation *bool `json:"disableMetricValidation,omitempty" tf:"disable_metric_validation,omitempty"` + // The amount of time that a time series must // violate the threshold to be considered // failing. 
Currently, only values that are a diff --git a/apis/monitoring/v1beta2/zz_generated.deepcopy.go b/apis/monitoring/v1beta2/zz_generated.deepcopy.go index 985cbddef..2c9f9dd7c 100644 --- a/apis/monitoring/v1beta2/zz_generated.deepcopy.go +++ b/apis/monitoring/v1beta2/zz_generated.deepcopy.go @@ -589,6 +589,17 @@ func (in *AlertStrategyInitParameters) DeepCopyInto(out *AlertStrategyInitParame (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.NotificationPrompts != nil { + in, out := &in.NotificationPrompts, &out.NotificationPrompts + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.NotificationRateLimit != nil { in, out := &in.NotificationRateLimit, &out.NotificationRateLimit *out = new(NotificationRateLimitInitParameters) @@ -621,6 +632,17 @@ func (in *AlertStrategyObservation) DeepCopyInto(out *AlertStrategyObservation) (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.NotificationPrompts != nil { + in, out := &in.NotificationPrompts, &out.NotificationPrompts + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.NotificationRateLimit != nil { in, out := &in.NotificationRateLimit, &out.NotificationRateLimit *out = new(NotificationRateLimitObservation) @@ -653,6 +675,17 @@ func (in *AlertStrategyParameters) DeepCopyInto(out *AlertStrategyParameters) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.NotificationPrompts != nil { + in, out := &in.NotificationPrompts, &out.NotificationPrompts + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.NotificationRateLimit != nil { in, out := &in.NotificationRateLimit, &out.NotificationRateLimit *out = new(NotificationRateLimitParameters) @@ -1855,6 +1888,11 @@ func (in 
*ConditionPrometheusQueryLanguageInitParameters) DeepCopyInto(out *Cond *out = new(string) **out = **in } + if in.DisableMetricValidation != nil { + in, out := &in.DisableMetricValidation, &out.DisableMetricValidation + *out = new(bool) + **out = **in + } if in.Duration != nil { in, out := &in.Duration, &out.Duration *out = new(string) @@ -1911,6 +1949,11 @@ func (in *ConditionPrometheusQueryLanguageObservation) DeepCopyInto(out *Conditi *out = new(string) **out = **in } + if in.DisableMetricValidation != nil { + in, out := &in.DisableMetricValidation, &out.DisableMetricValidation + *out = new(bool) + **out = **in + } if in.Duration != nil { in, out := &in.Duration, &out.Duration *out = new(string) @@ -1967,6 +2010,11 @@ func (in *ConditionPrometheusQueryLanguageParameters) DeepCopyInto(out *Conditio *out = new(string) **out = **in } + if in.DisableMetricValidation != nil { + in, out := &in.DisableMetricValidation, &out.DisableMetricValidation + *out = new(bool) + **out = **in + } if in.Duration != nil { in, out := &in.Duration, &out.Duration *out = new(string) diff --git a/apis/networkconnectivity/v1beta1/zz_generated.deepcopy.go b/apis/networkconnectivity/v1beta1/zz_generated.deepcopy.go index 9a6cca98b..9ae0d9969 100644 --- a/apis/networkconnectivity/v1beta1/zz_generated.deepcopy.go +++ b/apis/networkconnectivity/v1beta1/zz_generated.deepcopy.go @@ -222,6 +222,11 @@ func (in *HubInitParameters) DeepCopyInto(out *HubInitParameters) { *out = new(string) **out = **in } + if in.PresetTopology != nil { + in, out := &in.PresetTopology, &out.PresetTopology + *out = new(string) + **out = **in + } if in.Project != nil { in, out := &in.Project, &out.Project *out = new(string) @@ -331,6 +336,11 @@ func (in *HubObservation) DeepCopyInto(out *HubObservation) { *out = new(string) **out = **in } + if in.PresetTopology != nil { + in, out := &in.PresetTopology, &out.PresetTopology + *out = new(string) + **out = **in + } if in.Project != nil { in, out := &in.Project, &out.Project 
*out = new(string) @@ -420,6 +430,11 @@ func (in *HubParameters) DeepCopyInto(out *HubParameters) { *out = new(string) **out = **in } + if in.PresetTopology != nil { + in, out := &in.PresetTopology, &out.PresetTopology + *out = new(string) + **out = **in + } if in.Project != nil { in, out := &in.Project, &out.Project *out = new(string) diff --git a/apis/networkconnectivity/v1beta1/zz_hub_types.go b/apis/networkconnectivity/v1beta1/zz_hub_types.go index 62fe1ac99..eb31482cf 100755 --- a/apis/networkconnectivity/v1beta1/zz_hub_types.go +++ b/apis/networkconnectivity/v1beta1/zz_hub_types.go @@ -30,6 +30,10 @@ type HubInitParameters struct { // Immutable. The name of the hub. Hub names must be unique. They use the following form: projects/{project_number}/locations/global/hubs/{hub_id} Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Optional. The topology implemented in this hub. Currently, this field is only used when policyMode = PRESET. The available preset topologies are MESH and STAR. If presetTopology is unspecified and policyMode = PRESET, the presetTopology defaults to MESH. When policyMode = CUSTOM, the presetTopology is set to PRESET_TOPOLOGY_UNSPECIFIED. + // Possible values are: MESH, STAR. + PresetTopology *string `json:"presetTopology,omitempty" tf:"preset_topology,omitempty"` + // The ID of the project in which the resource belongs. // If it is not provided, the provider project is used. Project *string `json:"project,omitempty" tf:"project,omitempty"` @@ -61,6 +65,10 @@ type HubObservation struct { // Immutable. The name of the hub. Hub names must be unique. They use the following form: projects/{project_number}/locations/global/hubs/{hub_id} Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Optional. The topology implemented in this hub. Currently, this field is only used when policyMode = PRESET. The available preset topologies are MESH and STAR. 
If presetTopology is unspecified and policyMode = PRESET, the presetTopology defaults to MESH. When policyMode = CUSTOM, the presetTopology is set to PRESET_TOPOLOGY_UNSPECIFIED. + // Possible values are: MESH, STAR. + PresetTopology *string `json:"presetTopology,omitempty" tf:"preset_topology,omitempty"` + // The ID of the project in which the resource belongs. // If it is not provided, the provider project is used. Project *string `json:"project,omitempty" tf:"project,omitempty"` @@ -105,6 +113,11 @@ type HubParameters struct { // +kubebuilder:validation:Optional Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Optional. The topology implemented in this hub. Currently, this field is only used when policyMode = PRESET. The available preset topologies are MESH and STAR. If presetTopology is unspecified and policyMode = PRESET, the presetTopology defaults to MESH. When policyMode = CUSTOM, the presetTopology is set to PRESET_TOPOLOGY_UNSPECIFIED. + // Possible values are: MESH, STAR. + // +kubebuilder:validation:Optional + PresetTopology *string `json:"presetTopology,omitempty" tf:"preset_topology,omitempty"` + // The ID of the project in which the resource belongs. // If it is not provided, the provider project is used. // +kubebuilder:validation:Optional diff --git a/apis/networkconnectivity/v1beta2/zz_generated.deepcopy.go b/apis/networkconnectivity/v1beta2/zz_generated.deepcopy.go index 202c5c271..deab06acf 100644 --- a/apis/networkconnectivity/v1beta2/zz_generated.deepcopy.go +++ b/apis/networkconnectivity/v1beta2/zz_generated.deepcopy.go @@ -111,6 +111,17 @@ func (in *InstancesParameters) DeepCopy() *InstancesParameters { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *LinkedInterconnectAttachmentsInitParameters) DeepCopyInto(out *LinkedInterconnectAttachmentsInitParameters) { *out = *in + if in.IncludeImportRanges != nil { + in, out := &in.IncludeImportRanges, &out.IncludeImportRanges + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.SiteToSiteDataTransfer != nil { in, out := &in.SiteToSiteDataTransfer, &out.SiteToSiteDataTransfer *out = new(bool) @@ -127,6 +138,18 @@ func (in *LinkedInterconnectAttachmentsInitParameters) DeepCopyInto(out *LinkedI } } } + if in.UrisRefs != nil { + in, out := &in.UrisRefs, &out.UrisRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.UrisSelector != nil { + in, out := &in.UrisSelector, &out.UrisSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedInterconnectAttachmentsInitParameters. @@ -142,6 +165,17 @@ func (in *LinkedInterconnectAttachmentsInitParameters) DeepCopy() *LinkedInterco // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LinkedInterconnectAttachmentsObservation) DeepCopyInto(out *LinkedInterconnectAttachmentsObservation) { *out = *in + if in.IncludeImportRanges != nil { + in, out := &in.IncludeImportRanges, &out.IncludeImportRanges + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.SiteToSiteDataTransfer != nil { in, out := &in.SiteToSiteDataTransfer, &out.SiteToSiteDataTransfer *out = new(bool) @@ -173,6 +207,17 @@ func (in *LinkedInterconnectAttachmentsObservation) DeepCopy() *LinkedInterconne // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. func (in *LinkedInterconnectAttachmentsParameters) DeepCopyInto(out *LinkedInterconnectAttachmentsParameters) { *out = *in + if in.IncludeImportRanges != nil { + in, out := &in.IncludeImportRanges, &out.IncludeImportRanges + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.SiteToSiteDataTransfer != nil { in, out := &in.SiteToSiteDataTransfer, &out.SiteToSiteDataTransfer *out = new(bool) @@ -189,6 +234,18 @@ func (in *LinkedInterconnectAttachmentsParameters) DeepCopyInto(out *LinkedInter } } } + if in.UrisRefs != nil { + in, out := &in.UrisRefs, &out.UrisRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.UrisSelector != nil { + in, out := &in.UrisSelector, &out.UrisSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedInterconnectAttachmentsParameters. @@ -201,9 +258,206 @@ func (in *LinkedInterconnectAttachmentsParameters) DeepCopy() *LinkedInterconnec return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinkedProducerVPCNetworkInitParameters) DeepCopyInto(out *LinkedProducerVPCNetworkInitParameters) { + *out = *in + if in.ExcludeExportRanges != nil { + in, out := &in.ExcludeExportRanges, &out.ExcludeExportRanges + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IncludeExportRanges != nil { + in, out := &in.IncludeExportRanges, &out.IncludeExportRanges + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Network != nil { + in, out := &in.Network, &out.Network + *out = new(string) + **out = **in + } + if in.NetworkRef != nil { + in, out := &in.NetworkRef, &out.NetworkRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NetworkSelector != nil { + in, out := &in.NetworkSelector, &out.NetworkSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Peering != nil { + in, out := &in.Peering, &out.Peering + *out = new(string) + **out = **in + } + if in.PeeringRef != nil { + in, out := &in.PeeringRef, &out.PeeringRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PeeringSelector != nil { + in, out := &in.PeeringSelector, &out.PeeringSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedProducerVPCNetworkInitParameters. +func (in *LinkedProducerVPCNetworkInitParameters) DeepCopy() *LinkedProducerVPCNetworkInitParameters { + if in == nil { + return nil + } + out := new(LinkedProducerVPCNetworkInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinkedProducerVPCNetworkObservation) DeepCopyInto(out *LinkedProducerVPCNetworkObservation) { + *out = *in + if in.ExcludeExportRanges != nil { + in, out := &in.ExcludeExportRanges, &out.ExcludeExportRanges + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IncludeExportRanges != nil { + in, out := &in.IncludeExportRanges, &out.IncludeExportRanges + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Network != nil { + in, out := &in.Network, &out.Network + *out = new(string) + **out = **in + } + if in.Peering != nil { + in, out := &in.Peering, &out.Peering + *out = new(string) + **out = **in + } + if in.ProducerNetwork != nil { + in, out := &in.ProducerNetwork, &out.ProducerNetwork + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedProducerVPCNetworkObservation. +func (in *LinkedProducerVPCNetworkObservation) DeepCopy() *LinkedProducerVPCNetworkObservation { + if in == nil { + return nil + } + out := new(LinkedProducerVPCNetworkObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinkedProducerVPCNetworkParameters) DeepCopyInto(out *LinkedProducerVPCNetworkParameters) { + *out = *in + if in.ExcludeExportRanges != nil { + in, out := &in.ExcludeExportRanges, &out.ExcludeExportRanges + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IncludeExportRanges != nil { + in, out := &in.IncludeExportRanges, &out.IncludeExportRanges + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Network != nil { + in, out := &in.Network, &out.Network + *out = new(string) + **out = **in + } + if in.NetworkRef != nil { + in, out := &in.NetworkRef, &out.NetworkRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NetworkSelector != nil { + in, out := &in.NetworkSelector, &out.NetworkSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Peering != nil { + in, out := &in.Peering, &out.Peering + *out = new(string) + **out = **in + } + if in.PeeringRef != nil { + in, out := &in.PeeringRef, &out.PeeringRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PeeringSelector != nil { + in, out := &in.PeeringSelector, &out.PeeringSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedProducerVPCNetworkParameters. +func (in *LinkedProducerVPCNetworkParameters) DeepCopy() *LinkedProducerVPCNetworkParameters { + if in == nil { + return nil + } + out := new(LinkedProducerVPCNetworkParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *LinkedRouterApplianceInstancesInitParameters) DeepCopyInto(out *LinkedRouterApplianceInstancesInitParameters) { *out = *in + if in.IncludeImportRanges != nil { + in, out := &in.IncludeImportRanges, &out.IncludeImportRanges + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.Instances != nil { in, out := &in.Instances, &out.Instances *out = make([]InstancesInitParameters, len(*in)) @@ -231,6 +485,17 @@ func (in *LinkedRouterApplianceInstancesInitParameters) DeepCopy() *LinkedRouter // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LinkedRouterApplianceInstancesObservation) DeepCopyInto(out *LinkedRouterApplianceInstancesObservation) { *out = *in + if in.IncludeImportRanges != nil { + in, out := &in.IncludeImportRanges, &out.IncludeImportRanges + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.Instances != nil { in, out := &in.Instances, &out.Instances *out = make([]InstancesObservation, len(*in)) @@ -258,6 +523,17 @@ func (in *LinkedRouterApplianceInstancesObservation) DeepCopy() *LinkedRouterApp // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *LinkedRouterApplianceInstancesParameters) DeepCopyInto(out *LinkedRouterApplianceInstancesParameters) { *out = *in + if in.IncludeImportRanges != nil { + in, out := &in.IncludeImportRanges, &out.IncludeImportRanges + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.Instances != nil { in, out := &in.Instances, &out.Instances *out = make([]InstancesParameters, len(*in)) @@ -296,6 +572,17 @@ func (in *LinkedVPCNetworkInitParameters) DeepCopyInto(out *LinkedVPCNetworkInit } } } + if in.IncludeExportRanges != nil { + in, out := &in.IncludeExportRanges, &out.IncludeExportRanges + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.URI != nil { in, out := &in.URI, &out.URI *out = new(string) @@ -337,6 +624,17 @@ func (in *LinkedVPCNetworkObservation) DeepCopyInto(out *LinkedVPCNetworkObserva } } } + if in.IncludeExportRanges != nil { + in, out := &in.IncludeExportRanges, &out.IncludeExportRanges + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.URI != nil { in, out := &in.URI, &out.URI *out = new(string) @@ -368,6 +666,17 @@ func (in *LinkedVPCNetworkParameters) DeepCopyInto(out *LinkedVPCNetworkParamete } } } + if in.IncludeExportRanges != nil { + in, out := &in.IncludeExportRanges, &out.IncludeExportRanges + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.URI != nil { in, out := &in.URI, &out.URI *out = new(string) @@ -398,6 +707,17 @@ func (in *LinkedVPCNetworkParameters) DeepCopy() *LinkedVPCNetworkParameters { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, 
writing into out. in must be non-nil. func (in *LinkedVPNTunnelsInitParameters) DeepCopyInto(out *LinkedVPNTunnelsInitParameters) { *out = *in + if in.IncludeImportRanges != nil { + in, out := &in.IncludeImportRanges, &out.IncludeImportRanges + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.SiteToSiteDataTransfer != nil { in, out := &in.SiteToSiteDataTransfer, &out.SiteToSiteDataTransfer *out = new(bool) @@ -414,6 +734,18 @@ func (in *LinkedVPNTunnelsInitParameters) DeepCopyInto(out *LinkedVPNTunnelsInit } } } + if in.UrisRefs != nil { + in, out := &in.UrisRefs, &out.UrisRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.UrisSelector != nil { + in, out := &in.UrisSelector, &out.UrisSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedVPNTunnelsInitParameters. @@ -429,6 +761,17 @@ func (in *LinkedVPNTunnelsInitParameters) DeepCopy() *LinkedVPNTunnelsInitParame // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LinkedVPNTunnelsObservation) DeepCopyInto(out *LinkedVPNTunnelsObservation) { *out = *in + if in.IncludeImportRanges != nil { + in, out := &in.IncludeImportRanges, &out.IncludeImportRanges + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.SiteToSiteDataTransfer != nil { in, out := &in.SiteToSiteDataTransfer, &out.SiteToSiteDataTransfer *out = new(bool) @@ -460,6 +803,17 @@ func (in *LinkedVPNTunnelsObservation) DeepCopy() *LinkedVPNTunnelsObservation { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *LinkedVPNTunnelsParameters) DeepCopyInto(out *LinkedVPNTunnelsParameters) { *out = *in + if in.IncludeImportRanges != nil { + in, out := &in.IncludeImportRanges, &out.IncludeImportRanges + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.SiteToSiteDataTransfer != nil { in, out := &in.SiteToSiteDataTransfer, &out.SiteToSiteDataTransfer *out = new(bool) @@ -476,6 +830,18 @@ func (in *LinkedVPNTunnelsParameters) DeepCopyInto(out *LinkedVPNTunnelsParamete } } } + if in.UrisRefs != nil { + in, out := &in.UrisRefs, &out.UrisRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.UrisSelector != nil { + in, out := &in.UrisSelector, &out.UrisSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedVPNTunnelsParameters. 
@@ -523,6 +889,11 @@ func (in *SpokeInitParameters) DeepCopyInto(out *SpokeInitParameters) { *out = new(string) **out = **in } + if in.Group != nil { + in, out := &in.Group, &out.Group + *out = new(string) + **out = **in + } if in.Hub != nil { in, out := &in.Hub, &out.Hub *out = new(string) @@ -559,6 +930,11 @@ func (in *SpokeInitParameters) DeepCopyInto(out *SpokeInitParameters) { *out = new(LinkedInterconnectAttachmentsInitParameters) (*in).DeepCopyInto(*out) } + if in.LinkedProducerVPCNetwork != nil { + in, out := &in.LinkedProducerVPCNetwork, &out.LinkedProducerVPCNetwork + *out = new(LinkedProducerVPCNetworkInitParameters) + (*in).DeepCopyInto(*out) + } if in.LinkedRouterApplianceInstances != nil { in, out := &in.LinkedRouterApplianceInstances, &out.LinkedRouterApplianceInstances *out = new(LinkedRouterApplianceInstancesInitParameters) @@ -662,6 +1038,11 @@ func (in *SpokeObservation) DeepCopyInto(out *SpokeObservation) { (*out)[key] = outVal } } + if in.Group != nil { + in, out := &in.Group, &out.Group + *out = new(string) + **out = **in + } if in.Hub != nil { in, out := &in.Hub, &out.Hub *out = new(string) @@ -693,6 +1074,11 @@ func (in *SpokeObservation) DeepCopyInto(out *SpokeObservation) { *out = new(LinkedInterconnectAttachmentsObservation) (*in).DeepCopyInto(*out) } + if in.LinkedProducerVPCNetwork != nil { + in, out := &in.LinkedProducerVPCNetwork, &out.LinkedProducerVPCNetwork + *out = new(LinkedProducerVPCNetworkObservation) + (*in).DeepCopyInto(*out) + } if in.LinkedRouterApplianceInstances != nil { in, out := &in.LinkedRouterApplianceInstances, &out.LinkedRouterApplianceInstances *out = new(LinkedRouterApplianceInstancesObservation) @@ -774,6 +1160,11 @@ func (in *SpokeParameters) DeepCopyInto(out *SpokeParameters) { *out = new(string) **out = **in } + if in.Group != nil { + in, out := &in.Group, &out.Group + *out = new(string) + **out = **in + } if in.Hub != nil { in, out := &in.Hub, &out.Hub *out = new(string) @@ -810,6 +1201,11 @@ func (in 
*SpokeParameters) DeepCopyInto(out *SpokeParameters) { *out = new(LinkedInterconnectAttachmentsParameters) (*in).DeepCopyInto(*out) } + if in.LinkedProducerVPCNetwork != nil { + in, out := &in.LinkedProducerVPCNetwork, &out.LinkedProducerVPCNetwork + *out = new(LinkedProducerVPCNetworkParameters) + (*in).DeepCopyInto(*out) + } if in.LinkedRouterApplianceInstances != nil { in, out := &in.LinkedRouterApplianceInstances, &out.LinkedRouterApplianceInstances *out = new(LinkedRouterApplianceInstancesParameters) diff --git a/apis/networkconnectivity/v1beta2/zz_generated.resolvers.go b/apis/networkconnectivity/v1beta2/zz_generated.resolvers.go index db8c0607b..f81345d2f 100644 --- a/apis/networkconnectivity/v1beta2/zz_generated.resolvers.go +++ b/apis/networkconnectivity/v1beta2/zz_generated.resolvers.go @@ -25,6 +25,7 @@ func (mg *Spoke) ResolveReferences(ctx context.Context, c client.Reader) error { r := reference.NewAPIResolver(c, mg) var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse var err error { m, l, err = apisresolver.GetManagedResource("networkconnectivity.gcp.upbound.io", "v1beta1", "Hub", "HubList") @@ -46,6 +47,69 @@ func (mg *Spoke) ResolveReferences(ctx context.Context, c client.Reader) error { mg.Spec.ForProvider.Hub = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.HubRef = rsp.ResolvedReference + if mg.Spec.ForProvider.LinkedInterconnectAttachments != nil { + { + m, l, err = apisresolver.GetManagedResource("compute.gcp.upbound.io", "v1beta1", "InterconnectAttachment", "InterconnectAttachmentList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.LinkedInterconnectAttachments.Uris), + Extract: resource.ExtractParamPath("self_link", true), + References: 
mg.Spec.ForProvider.LinkedInterconnectAttachments.UrisRefs, + Selector: mg.Spec.ForProvider.LinkedInterconnectAttachments.UrisSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LinkedInterconnectAttachments.Uris") + } + mg.Spec.ForProvider.LinkedInterconnectAttachments.Uris = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.LinkedInterconnectAttachments.UrisRefs = mrsp.ResolvedReferences + + } + if mg.Spec.ForProvider.LinkedProducerVPCNetwork != nil { + { + m, l, err = apisresolver.GetManagedResource("compute.gcp.upbound.io", "v1beta1", "Network", "NetworkList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.LinkedProducerVPCNetwork.Network), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.LinkedProducerVPCNetwork.NetworkRef, + Selector: mg.Spec.ForProvider.LinkedProducerVPCNetwork.NetworkSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LinkedProducerVPCNetwork.Network") + } + mg.Spec.ForProvider.LinkedProducerVPCNetwork.Network = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.LinkedProducerVPCNetwork.NetworkRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.LinkedProducerVPCNetwork != nil { + { + m, l, err = apisresolver.GetManagedResource("servicenetworking.gcp.upbound.io", "v1beta1", "Connection", "ConnectionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.LinkedProducerVPCNetwork.Peering), + Extract: resource.ExtractParamPath("peering", 
true), + Reference: mg.Spec.ForProvider.LinkedProducerVPCNetwork.PeeringRef, + Selector: mg.Spec.ForProvider.LinkedProducerVPCNetwork.PeeringSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LinkedProducerVPCNetwork.Peering") + } + mg.Spec.ForProvider.LinkedProducerVPCNetwork.Peering = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.LinkedProducerVPCNetwork.PeeringRef = rsp.ResolvedReference + + } if mg.Spec.ForProvider.LinkedRouterApplianceInstances != nil { for i4 := 0; i4 < len(mg.Spec.ForProvider.LinkedRouterApplianceInstances.Instances); i4++ { { @@ -89,6 +153,27 @@ func (mg *Spoke) ResolveReferences(ctx context.Context, c client.Reader) error { mg.Spec.ForProvider.LinkedVPCNetwork.URI = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.LinkedVPCNetwork.URIRef = rsp.ResolvedReference + } + if mg.Spec.ForProvider.LinkedVPNTunnels != nil { + { + m, l, err = apisresolver.GetManagedResource("compute.gcp.upbound.io", "v1beta1", "VPNTunnel", "VPNTunnelList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.LinkedVPNTunnels.Uris), + Extract: resource.ExtractParamPath("self_link", true), + References: mg.Spec.ForProvider.LinkedVPNTunnels.UrisRefs, + Selector: mg.Spec.ForProvider.LinkedVPNTunnels.UrisSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LinkedVPNTunnels.Uris") + } + mg.Spec.ForProvider.LinkedVPNTunnels.Uris = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.LinkedVPNTunnels.UrisRefs = mrsp.ResolvedReferences + } { m, l, err = apisresolver.GetManagedResource("networkconnectivity.gcp.upbound.io", "v1beta1", "Hub", "HubList") @@ -109,6 +194,69 @@ func 
(mg *Spoke) ResolveReferences(ctx context.Context, c client.Reader) error { mg.Spec.InitProvider.Hub = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.HubRef = rsp.ResolvedReference + if mg.Spec.InitProvider.LinkedInterconnectAttachments != nil { + { + m, l, err = apisresolver.GetManagedResource("compute.gcp.upbound.io", "v1beta1", "InterconnectAttachment", "InterconnectAttachmentList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.LinkedInterconnectAttachments.Uris), + Extract: resource.ExtractParamPath("self_link", true), + References: mg.Spec.InitProvider.LinkedInterconnectAttachments.UrisRefs, + Selector: mg.Spec.InitProvider.LinkedInterconnectAttachments.UrisSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.LinkedInterconnectAttachments.Uris") + } + mg.Spec.InitProvider.LinkedInterconnectAttachments.Uris = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.LinkedInterconnectAttachments.UrisRefs = mrsp.ResolvedReferences + + } + if mg.Spec.InitProvider.LinkedProducerVPCNetwork != nil { + { + m, l, err = apisresolver.GetManagedResource("compute.gcp.upbound.io", "v1beta1", "Network", "NetworkList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.LinkedProducerVPCNetwork.Network), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.LinkedProducerVPCNetwork.NetworkRef, + Selector: mg.Spec.InitProvider.LinkedProducerVPCNetwork.NetworkSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + 
return errors.Wrap(err, "mg.Spec.InitProvider.LinkedProducerVPCNetwork.Network") + } + mg.Spec.InitProvider.LinkedProducerVPCNetwork.Network = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.LinkedProducerVPCNetwork.NetworkRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.LinkedProducerVPCNetwork != nil { + { + m, l, err = apisresolver.GetManagedResource("servicenetworking.gcp.upbound.io", "v1beta1", "Connection", "ConnectionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.LinkedProducerVPCNetwork.Peering), + Extract: resource.ExtractParamPath("peering", true), + Reference: mg.Spec.InitProvider.LinkedProducerVPCNetwork.PeeringRef, + Selector: mg.Spec.InitProvider.LinkedProducerVPCNetwork.PeeringSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.LinkedProducerVPCNetwork.Peering") + } + mg.Spec.InitProvider.LinkedProducerVPCNetwork.Peering = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.LinkedProducerVPCNetwork.PeeringRef = rsp.ResolvedReference + + } if mg.Spec.InitProvider.LinkedRouterApplianceInstances != nil { for i4 := 0; i4 < len(mg.Spec.InitProvider.LinkedRouterApplianceInstances.Instances); i4++ { { @@ -152,6 +300,27 @@ func (mg *Spoke) ResolveReferences(ctx context.Context, c client.Reader) error { mg.Spec.InitProvider.LinkedVPCNetwork.URI = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.LinkedVPCNetwork.URIRef = rsp.ResolvedReference + } + if mg.Spec.InitProvider.LinkedVPNTunnels != nil { + { + m, l, err = apisresolver.GetManagedResource("compute.gcp.upbound.io", "v1beta1", "VPNTunnel", "VPNTunnelList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for 
reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.LinkedVPNTunnels.Uris), + Extract: resource.ExtractParamPath("self_link", true), + References: mg.Spec.InitProvider.LinkedVPNTunnels.UrisRefs, + Selector: mg.Spec.InitProvider.LinkedVPNTunnels.UrisSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.LinkedVPNTunnels.Uris") + } + mg.Spec.InitProvider.LinkedVPNTunnels.Uris = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.LinkedVPNTunnels.UrisRefs = mrsp.ResolvedReferences + } return nil diff --git a/apis/networkconnectivity/v1beta2/zz_spoke_types.go b/apis/networkconnectivity/v1beta2/zz_spoke_types.go index 72b352a83..74ca66975 100755 --- a/apis/networkconnectivity/v1beta2/zz_spoke_types.go +++ b/apis/networkconnectivity/v1beta2/zz_spoke_types.go @@ -45,7 +45,7 @@ type InstancesParameters struct { // The IP address on the VM to use for peering. // +kubebuilder:validation:Optional - IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + IPAddress *string `json:"ipAddress" tf:"ip_address,omitempty"` // The URI of the virtual machine resource // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/compute/v1beta2.Instance @@ -64,15 +64,33 @@ type InstancesParameters struct { type LinkedInterconnectAttachmentsInitParameters struct { + // IP ranges allowed to be included during import from hub (does not control transit connectivity). + // The only allowed value for now is "ALL_IPV4_RANGES". + IncludeImportRanges []*string `json:"includeImportRanges,omitempty" tf:"include_import_ranges,omitempty"` + // A value that controls whether site-to-site data transfer is enabled for these resources. Note that data transfer is available only in supported locations. 
SiteToSiteDataTransfer *bool `json:"siteToSiteDataTransfer,omitempty" tf:"site_to_site_data_transfer,omitempty"` // The URIs of linked interconnect attachment resources + // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/compute/v1beta1.InterconnectAttachment + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("self_link",true) Uris []*string `json:"uris,omitempty" tf:"uris,omitempty"` + + // References to InterconnectAttachment in compute to populate uris. + // +kubebuilder:validation:Optional + UrisRefs []v1.Reference `json:"urisRefs,omitempty" tf:"-"` + + // Selector for a list of InterconnectAttachment in compute to populate uris. + // +kubebuilder:validation:Optional + UrisSelector *v1.Selector `json:"urisSelector,omitempty" tf:"-"` } type LinkedInterconnectAttachmentsObservation struct { + // IP ranges allowed to be included during import from hub (does not control transit connectivity). + // The only allowed value for now is "ALL_IPV4_RANGES". + IncludeImportRanges []*string `json:"includeImportRanges,omitempty" tf:"include_import_ranges,omitempty"` + // A value that controls whether site-to-site data transfer is enabled for these resources. Note that data transfer is available only in supported locations. SiteToSiteDataTransfer *bool `json:"siteToSiteDataTransfer,omitempty" tf:"site_to_site_data_transfer,omitempty"` @@ -82,17 +100,127 @@ type LinkedInterconnectAttachmentsObservation struct { type LinkedInterconnectAttachmentsParameters struct { + // IP ranges allowed to be included during import from hub (does not control transit connectivity). + // The only allowed value for now is "ALL_IPV4_RANGES". + // +kubebuilder:validation:Optional + IncludeImportRanges []*string `json:"includeImportRanges,omitempty" tf:"include_import_ranges,omitempty"` + // A value that controls whether site-to-site data transfer is enabled for these resources. 
Note that data transfer is available only in supported locations. // +kubebuilder:validation:Optional SiteToSiteDataTransfer *bool `json:"siteToSiteDataTransfer" tf:"site_to_site_data_transfer,omitempty"` // The URIs of linked interconnect attachment resources + // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/compute/v1beta1.InterconnectAttachment + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("self_link",true) // +kubebuilder:validation:Optional - Uris []*string `json:"uris" tf:"uris,omitempty"` + Uris []*string `json:"uris,omitempty" tf:"uris,omitempty"` + + // References to InterconnectAttachment in compute to populate uris. + // +kubebuilder:validation:Optional + UrisRefs []v1.Reference `json:"urisRefs,omitempty" tf:"-"` + + // Selector for a list of InterconnectAttachment in compute to populate uris. + // +kubebuilder:validation:Optional + UrisSelector *v1.Selector `json:"urisSelector,omitempty" tf:"-"` +} + +type LinkedProducerVPCNetworkInitParameters struct { + + // IP ranges encompassing the subnets to be excluded from peering. + ExcludeExportRanges []*string `json:"excludeExportRanges,omitempty" tf:"exclude_export_ranges,omitempty"` + + // IP ranges allowed to be included from peering. + IncludeExportRanges []*string `json:"includeExportRanges,omitempty" tf:"include_export_ranges,omitempty"` + + // The URI of the Service Consumer VPC that the Producer VPC is peered with. + // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/compute/v1beta1.Network + Network *string `json:"network,omitempty" tf:"network,omitempty"` + + // Reference to a Network in compute to populate network. + // +kubebuilder:validation:Optional + NetworkRef *v1.Reference `json:"networkRef,omitempty" tf:"-"` + + // Selector for a Network in compute to populate network. 
+ // +kubebuilder:validation:Optional + NetworkSelector *v1.Selector `json:"networkSelector,omitempty" tf:"-"` + + // The name of the VPC peering between the Service Consumer VPC and the Producer VPC (defined in the Tenant project) which is added to the NCC hub. This peering must be in ACTIVE state. + // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/servicenetworking/v1beta1.Connection + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("peering",true) + Peering *string `json:"peering,omitempty" tf:"peering,omitempty"` + + // Reference to a Connection in servicenetworking to populate peering. + // +kubebuilder:validation:Optional + PeeringRef *v1.Reference `json:"peeringRef,omitempty" tf:"-"` + + // Selector for a Connection in servicenetworking to populate peering. + // +kubebuilder:validation:Optional + PeeringSelector *v1.Selector `json:"peeringSelector,omitempty" tf:"-"` +} + +type LinkedProducerVPCNetworkObservation struct { + + // IP ranges encompassing the subnets to be excluded from peering. + ExcludeExportRanges []*string `json:"excludeExportRanges,omitempty" tf:"exclude_export_ranges,omitempty"` + + // IP ranges allowed to be included from peering. + IncludeExportRanges []*string `json:"includeExportRanges,omitempty" tf:"include_export_ranges,omitempty"` + + // The URI of the Service Consumer VPC that the Producer VPC is peered with. + Network *string `json:"network,omitempty" tf:"network,omitempty"` + + // The name of the VPC peering between the Service Consumer VPC and the Producer VPC (defined in the Tenant project) which is added to the NCC hub. This peering must be in ACTIVE state. + Peering *string `json:"peering,omitempty" tf:"peering,omitempty"` + + // (Output) + // The URI of the Producer VPC. 
+ ProducerNetwork *string `json:"producerNetwork,omitempty" tf:"producer_network,omitempty"` +} + +type LinkedProducerVPCNetworkParameters struct { + + // IP ranges encompassing the subnets to be excluded from peering. + // +kubebuilder:validation:Optional + ExcludeExportRanges []*string `json:"excludeExportRanges,omitempty" tf:"exclude_export_ranges,omitempty"` + + // IP ranges allowed to be included from peering. + // +kubebuilder:validation:Optional + IncludeExportRanges []*string `json:"includeExportRanges,omitempty" tf:"include_export_ranges,omitempty"` + + // The URI of the Service Consumer VPC that the Producer VPC is peered with. + // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/compute/v1beta1.Network + // +kubebuilder:validation:Optional + Network *string `json:"network,omitempty" tf:"network,omitempty"` + + // Reference to a Network in compute to populate network. + // +kubebuilder:validation:Optional + NetworkRef *v1.Reference `json:"networkRef,omitempty" tf:"-"` + + // Selector for a Network in compute to populate network. + // +kubebuilder:validation:Optional + NetworkSelector *v1.Selector `json:"networkSelector,omitempty" tf:"-"` + + // The name of the VPC peering between the Service Consumer VPC and the Producer VPC (defined in the Tenant project) which is added to the NCC hub. This peering must be in ACTIVE state. + // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/servicenetworking/v1beta1.Connection + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("peering",true) + // +kubebuilder:validation:Optional + Peering *string `json:"peering,omitempty" tf:"peering,omitempty"` + + // Reference to a Connection in servicenetworking to populate peering. + // +kubebuilder:validation:Optional + PeeringRef *v1.Reference `json:"peeringRef,omitempty" tf:"-"` + + // Selector for a Connection in servicenetworking to populate peering. 
+ // +kubebuilder:validation:Optional + PeeringSelector *v1.Selector `json:"peeringSelector,omitempty" tf:"-"` } type LinkedRouterApplianceInstancesInitParameters struct { + // IP ranges allowed to be included during import from hub (does not control transit connectivity). + // The only allowed value for now is "ALL_IPV4_RANGES". + IncludeImportRanges []*string `json:"includeImportRanges,omitempty" tf:"include_import_ranges,omitempty"` + // The list of router appliance instances // Structure is documented below. Instances []InstancesInitParameters `json:"instances,omitempty" tf:"instances,omitempty"` @@ -103,6 +231,10 @@ type LinkedRouterApplianceInstancesInitParameters struct { type LinkedRouterApplianceInstancesObservation struct { + // IP ranges allowed to be included during import from hub (does not control transit connectivity). + // The only allowed value for now is "ALL_IPV4_RANGES". + IncludeImportRanges []*string `json:"includeImportRanges,omitempty" tf:"include_import_ranges,omitempty"` + // The list of router appliance instances // Structure is documented below. Instances []InstancesObservation `json:"instances,omitempty" tf:"instances,omitempty"` @@ -113,6 +245,11 @@ type LinkedRouterApplianceInstancesObservation struct { type LinkedRouterApplianceInstancesParameters struct { + // IP ranges allowed to be included during import from hub (does not control transit connectivity). + // The only allowed value for now is "ALL_IPV4_RANGES". + // +kubebuilder:validation:Optional + IncludeImportRanges []*string `json:"includeImportRanges,omitempty" tf:"include_import_ranges,omitempty"` + // The list of router appliance instances // Structure is documented below. // +kubebuilder:validation:Optional @@ -128,6 +265,9 @@ type LinkedVPCNetworkInitParameters struct { // IP ranges encompassing the subnets to be excluded from peering. 
ExcludeExportRanges []*string `json:"excludeExportRanges,omitempty" tf:"exclude_export_ranges,omitempty"` + // IP ranges allowed to be included from peering. + IncludeExportRanges []*string `json:"includeExportRanges,omitempty" tf:"include_export_ranges,omitempty"` + // The URI of the VPC network resource. // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/compute/v1beta1.Network // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("self_link",true) @@ -147,6 +287,9 @@ type LinkedVPCNetworkObservation struct { // IP ranges encompassing the subnets to be excluded from peering. ExcludeExportRanges []*string `json:"excludeExportRanges,omitempty" tf:"exclude_export_ranges,omitempty"` + // IP ranges allowed to be included from peering. + IncludeExportRanges []*string `json:"includeExportRanges,omitempty" tf:"include_export_ranges,omitempty"` + // The URI of the VPC network resource. URI *string `json:"uri,omitempty" tf:"uri,omitempty"` } @@ -157,6 +300,10 @@ type LinkedVPCNetworkParameters struct { // +kubebuilder:validation:Optional ExcludeExportRanges []*string `json:"excludeExportRanges,omitempty" tf:"exclude_export_ranges,omitempty"` + // IP ranges allowed to be included from peering. + // +kubebuilder:validation:Optional + IncludeExportRanges []*string `json:"includeExportRanges,omitempty" tf:"include_export_ranges,omitempty"` + // The URI of the VPC network resource. // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/compute/v1beta1.Network // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("self_link",true) @@ -174,15 +321,33 @@ type LinkedVPCNetworkParameters struct { type LinkedVPNTunnelsInitParameters struct { + // IP ranges allowed to be included during import from hub (does not control transit connectivity). + // The only allowed value for now is "ALL_IPV4_RANGES". 
+ IncludeImportRanges []*string `json:"includeImportRanges,omitempty" tf:"include_import_ranges,omitempty"` + // A value that controls whether site-to-site data transfer is enabled for these resources. Note that data transfer is available only in supported locations. SiteToSiteDataTransfer *bool `json:"siteToSiteDataTransfer,omitempty" tf:"site_to_site_data_transfer,omitempty"` // The URIs of linked VPN tunnel resources. + // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/compute/v1beta1.VPNTunnel + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("self_link",true) Uris []*string `json:"uris,omitempty" tf:"uris,omitempty"` + + // References to VPNTunnel in compute to populate uris. + // +kubebuilder:validation:Optional + UrisRefs []v1.Reference `json:"urisRefs,omitempty" tf:"-"` + + // Selector for a list of VPNTunnel in compute to populate uris. + // +kubebuilder:validation:Optional + UrisSelector *v1.Selector `json:"urisSelector,omitempty" tf:"-"` } type LinkedVPNTunnelsObservation struct { + // IP ranges allowed to be included during import from hub (does not control transit connectivity). + // The only allowed value for now is "ALL_IPV4_RANGES". + IncludeImportRanges []*string `json:"includeImportRanges,omitempty" tf:"include_import_ranges,omitempty"` + // A value that controls whether site-to-site data transfer is enabled for these resources. Note that data transfer is available only in supported locations. SiteToSiteDataTransfer *bool `json:"siteToSiteDataTransfer,omitempty" tf:"site_to_site_data_transfer,omitempty"` @@ -192,13 +357,28 @@ type LinkedVPNTunnelsObservation struct { type LinkedVPNTunnelsParameters struct { + // IP ranges allowed to be included during import from hub (does not control transit connectivity). + // The only allowed value for now is "ALL_IPV4_RANGES". 
+ // +kubebuilder:validation:Optional + IncludeImportRanges []*string `json:"includeImportRanges,omitempty" tf:"include_import_ranges,omitempty"` + // A value that controls whether site-to-site data transfer is enabled for these resources. Note that data transfer is available only in supported locations. // +kubebuilder:validation:Optional SiteToSiteDataTransfer *bool `json:"siteToSiteDataTransfer" tf:"site_to_site_data_transfer,omitempty"` // The URIs of linked VPN tunnel resources. + // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/compute/v1beta1.VPNTunnel + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("self_link",true) + // +kubebuilder:validation:Optional + Uris []*string `json:"uris,omitempty" tf:"uris,omitempty"` + + // References to VPNTunnel in compute to populate uris. + // +kubebuilder:validation:Optional + UrisRefs []v1.Reference `json:"urisRefs,omitempty" tf:"-"` + + // Selector for a list of VPNTunnel in compute to populate uris. // +kubebuilder:validation:Optional - Uris []*string `json:"uris" tf:"uris,omitempty"` + UrisSelector *v1.Selector `json:"urisSelector,omitempty" tf:"-"` } type SpokeInitParameters struct { @@ -206,6 +386,9 @@ type SpokeInitParameters struct { // An optional description of the spoke. Description *string `json:"description,omitempty" tf:"description,omitempty"` + // The name of the group that this spoke is associated with. + Group *string `json:"group,omitempty" tf:"group,omitempty"` + // Immutable. The URI of the hub that this spoke is attached to. // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/networkconnectivity/v1beta1.Hub // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() @@ -229,6 +412,10 @@ type SpokeInitParameters struct { // Structure is documented below. 
LinkedInterconnectAttachments *LinkedInterconnectAttachmentsInitParameters `json:"linkedInterconnectAttachments,omitempty" tf:"linked_interconnect_attachments,omitempty"` + // Producer VPC network that is associated with the spoke. + // Structure is documented below. + LinkedProducerVPCNetwork *LinkedProducerVPCNetworkInitParameters `json:"linkedProducerVpcNetwork,omitempty" tf:"linked_producer_vpc_network,omitempty"` + // The URIs of linked Router appliance resources // Structure is documented below. LinkedRouterApplianceInstances *LinkedRouterApplianceInstancesInitParameters `json:"linkedRouterApplianceInstances,omitempty" tf:"linked_router_appliance_instances,omitempty"` @@ -263,6 +450,9 @@ type SpokeObservation struct { // +mapType=granular EffectiveLabels map[string]*string `json:"effectiveLabels,omitempty" tf:"effective_labels,omitempty"` + // The name of the group that this spoke is associated with. + Group *string `json:"group,omitempty" tf:"group,omitempty"` + // Immutable. The URI of the hub that this spoke is attached to. Hub *string `json:"hub,omitempty" tf:"hub,omitempty"` @@ -279,6 +469,10 @@ type SpokeObservation struct { // Structure is documented below. LinkedInterconnectAttachments *LinkedInterconnectAttachmentsObservation `json:"linkedInterconnectAttachments,omitempty" tf:"linked_interconnect_attachments,omitempty"` + // Producer VPC network that is associated with the spoke. + // Structure is documented below. + LinkedProducerVPCNetwork *LinkedProducerVPCNetworkObservation `json:"linkedProducerVpcNetwork,omitempty" tf:"linked_producer_vpc_network,omitempty"` + // The URIs of linked Router appliance resources // Structure is documented below. 
LinkedRouterApplianceInstances *LinkedRouterApplianceInstancesObservation `json:"linkedRouterApplianceInstances,omitempty" tf:"linked_router_appliance_instances,omitempty"` @@ -322,6 +516,10 @@ type SpokeParameters struct { // +kubebuilder:validation:Optional Description *string `json:"description,omitempty" tf:"description,omitempty"` + // The name of the group that this spoke is associated with. + // +kubebuilder:validation:Optional + Group *string `json:"group,omitempty" tf:"group,omitempty"` + // Immutable. The URI of the hub that this spoke is attached to. // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/networkconnectivity/v1beta1.Hub // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() @@ -348,6 +546,11 @@ type SpokeParameters struct { // +kubebuilder:validation:Optional LinkedInterconnectAttachments *LinkedInterconnectAttachmentsParameters `json:"linkedInterconnectAttachments,omitempty" tf:"linked_interconnect_attachments,omitempty"` + // Producer VPC network that is associated with the spoke. + // Structure is documented below. + // +kubebuilder:validation:Optional + LinkedProducerVPCNetwork *LinkedProducerVPCNetworkParameters `json:"linkedProducerVpcNetwork,omitempty" tf:"linked_producer_vpc_network,omitempty"` + // The URIs of linked Router appliance resources // Structure is documented below. 
// +kubebuilder:validation:Optional diff --git a/apis/orgpolicy/v1beta1/zz_generated.deepcopy.go b/apis/orgpolicy/v1beta1/zz_generated.deepcopy.go index c99538e13..0c7dfd9fd 100644 --- a/apis/orgpolicy/v1beta1/zz_generated.deepcopy.go +++ b/apis/orgpolicy/v1beta1/zz_generated.deepcopy.go @@ -551,6 +551,11 @@ func (in *RulesInitParameters) DeepCopyInto(out *RulesInitParameters) { *out = new(string) **out = **in } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } if in.Values != nil { in, out := &in.Values, &out.Values *out = new(ValuesInitParameters) @@ -591,6 +596,11 @@ func (in *RulesObservation) DeepCopyInto(out *RulesObservation) { *out = new(string) **out = **in } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } if in.Values != nil { in, out := &in.Values, &out.Values *out = new(ValuesObservation) @@ -631,6 +641,11 @@ func (in *RulesParameters) DeepCopyInto(out *RulesParameters) { *out = new(string) **out = **in } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } if in.Values != nil { in, out := &in.Values, &out.Values *out = new(ValuesParameters) @@ -888,6 +903,11 @@ func (in *SpecRulesInitParameters) DeepCopyInto(out *SpecRulesInitParameters) { *out = new(string) **out = **in } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } if in.Values != nil { in, out := &in.Values, &out.Values *out = new(RulesValuesInitParameters) @@ -928,6 +948,11 @@ func (in *SpecRulesObservation) DeepCopyInto(out *SpecRulesObservation) { *out = new(string) **out = **in } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } if in.Values != nil { in, out := &in.Values, &out.Values *out = new(RulesValuesObservation) @@ -968,6 +993,11 @@ func (in *SpecRulesParameters) 
DeepCopyInto(out *SpecRulesParameters) { *out = new(string) **out = **in } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } if in.Values != nil { in, out := &in.Values, &out.Values *out = new(RulesValuesParameters) diff --git a/apis/orgpolicy/v1beta1/zz_policy_types.go b/apis/orgpolicy/v1beta1/zz_policy_types.go index 8b3d34fdb..5fd2eb286 100755 --- a/apis/orgpolicy/v1beta1/zz_policy_types.go +++ b/apis/orgpolicy/v1beta1/zz_policy_types.go @@ -234,6 +234,9 @@ type RulesInitParameters struct { // If "TRUE", then the Policy is enforced. If "FALSE", then any configuration is acceptable. This field can be set only in Policies for boolean constraints. Enforce *string `json:"enforce,omitempty" tf:"enforce,omitempty"` + // Optional. Required for Managed Constraints if parameters defined in constraints. Pass parameter values when policy enforcement is enabled. Ensure that parameter value types match those defined in the constraint definition. For example: { "allowedLocations" : ["us-east1", "us-west1"], "allowAll" : true } + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + // List of values to be used for this PolicyRule. This field can be set only in Policies for list constraints. // Structure is documented below. Values *ValuesInitParameters `json:"values,omitempty" tf:"values,omitempty"` @@ -254,6 +257,9 @@ type RulesObservation struct { // If "TRUE", then the Policy is enforced. If "FALSE", then any configuration is acceptable. This field can be set only in Policies for boolean constraints. Enforce *string `json:"enforce,omitempty" tf:"enforce,omitempty"` + // Optional. Required for Managed Constraints if parameters defined in constraints. Pass parameter values when policy enforcement is enabled. Ensure that parameter value types match those defined in the constraint definition. 
For example: { "allowedLocations" : ["us-east1", "us-west1"], "allowAll" : true } + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + // List of values to be used for this PolicyRule. This field can be set only in Policies for list constraints. // Structure is documented below. Values *ValuesObservation `json:"values,omitempty" tf:"values,omitempty"` @@ -278,6 +284,10 @@ type RulesParameters struct { // +kubebuilder:validation:Optional Enforce *string `json:"enforce,omitempty" tf:"enforce,omitempty"` + // Optional. Required for Managed Constraints if parameters defined in constraints. Pass parameter values when policy enforcement is enabled. Ensure that parameter value types match those defined in the constraint definition. For example: { "allowedLocations" : ["us-east1", "us-west1"], "allowAll" : true } + // +kubebuilder:validation:Optional + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + // List of values to be used for this PolicyRule. This field can be set only in Policies for list constraints. // Structure is documented below. // +kubebuilder:validation:Optional @@ -321,7 +331,7 @@ type SpecInitParameters struct { // Ignores policies set above this resource and restores the constraint_default enforcement behavior of the specific Constraint at this resource. This field can be set in policies for either list or boolean constraints. If set, rules must be empty and inherit_from_parent must be set to false. Reset *bool `json:"reset,omitempty" tf:"reset,omitempty"` - // Up to 10 PolicyRules are allowed. In Policies for boolean constraints, the following requirements apply: - There must be one and only one PolicyRule where condition is unset. - BooleanPolicyRules with conditions must set enforced to the opposite of the PolicyRule without a condition. - During policy evaluation, PolicyRules with conditions that are true for a target resource take precedence. 
+ // In Policies for boolean constraints, the following requirements apply: - There must be one and only one PolicyRule where condition is unset. - BooleanPolicyRules with conditions must set enforced to the opposite of the PolicyRule without a condition. - During policy evaluation, PolicyRules with conditions that are true for a target resource take precedence. // Structure is documented below. Rules []SpecRulesInitParameters `json:"rules,omitempty" tf:"rules,omitempty"` } @@ -338,7 +348,7 @@ type SpecObservation struct { // Ignores policies set above this resource and restores the constraint_default enforcement behavior of the specific Constraint at this resource. This field can be set in policies for either list or boolean constraints. If set, rules must be empty and inherit_from_parent must be set to false. Reset *bool `json:"reset,omitempty" tf:"reset,omitempty"` - // Up to 10 PolicyRules are allowed. In Policies for boolean constraints, the following requirements apply: - There must be one and only one PolicyRule where condition is unset. - BooleanPolicyRules with conditions must set enforced to the opposite of the PolicyRule without a condition. - During policy evaluation, PolicyRules with conditions that are true for a target resource take precedence. + // In Policies for boolean constraints, the following requirements apply: - There must be one and only one PolicyRule where condition is unset. - BooleanPolicyRules with conditions must set enforced to the opposite of the PolicyRule without a condition. - During policy evaluation, PolicyRules with conditions that are true for a target resource take precedence. // Structure is documented below. Rules []SpecRulesObservation `json:"rules,omitempty" tf:"rules,omitempty"` @@ -357,7 +367,7 @@ type SpecParameters struct { // +kubebuilder:validation:Optional Reset *bool `json:"reset,omitempty" tf:"reset,omitempty"` - // Up to 10 PolicyRules are allowed. 
In Policies for boolean constraints, the following requirements apply: - There must be one and only one PolicyRule where condition is unset. - BooleanPolicyRules with conditions must set enforced to the opposite of the PolicyRule without a condition. - During policy evaluation, PolicyRules with conditions that are true for a target resource take precedence. + // In Policies for boolean constraints, the following requirements apply: - There must be one and only one PolicyRule where condition is unset. - BooleanPolicyRules with conditions must set enforced to the opposite of the PolicyRule without a condition. - During policy evaluation, PolicyRules with conditions that are true for a target resource take precedence. // Structure is documented below. // +kubebuilder:validation:Optional Rules []SpecRulesParameters `json:"rules,omitempty" tf:"rules,omitempty"` @@ -378,6 +388,9 @@ type SpecRulesInitParameters struct { // If "TRUE", then the Policy is enforced. If "FALSE", then any configuration is acceptable. This field can be set only in Policies for boolean constraints. Enforce *string `json:"enforce,omitempty" tf:"enforce,omitempty"` + // Optional. Required for Managed Constraints if parameters defined in constraints. Pass parameter values when policy enforcement is enabled. Ensure that parameter value types match those defined in the constraint definition. For example: { "allowedLocations" : ["us-east1", "us-west1"], "allowAll" : true } + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + // List of values to be used for this PolicyRule. This field can be set only in Policies for list constraints. // Structure is documented below. Values *RulesValuesInitParameters `json:"values,omitempty" tf:"values,omitempty"` @@ -398,6 +411,9 @@ type SpecRulesObservation struct { // If "TRUE", then the Policy is enforced. If "FALSE", then any configuration is acceptable. This field can be set only in Policies for boolean constraints. 
Enforce *string `json:"enforce,omitempty" tf:"enforce,omitempty"` + // Optional. Required for Managed Constraints if parameters defined in constraints. Pass parameter values when policy enforcement is enabled. Ensure that parameter value types match those defined in the constraint definition. For example: { "allowedLocations" : ["us-east1", "us-west1"], "allowAll" : true } + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + // List of values to be used for this PolicyRule. This field can be set only in Policies for list constraints. // Structure is documented below. Values *RulesValuesObservation `json:"values,omitempty" tf:"values,omitempty"` @@ -422,6 +438,10 @@ type SpecRulesParameters struct { // +kubebuilder:validation:Optional Enforce *string `json:"enforce,omitempty" tf:"enforce,omitempty"` + // Optional. Required for Managed Constraints if parameters defined in constraints. Pass parameter values when policy enforcement is enabled. Ensure that parameter value types match those defined in the constraint definition. For example: { "allowedLocations" : ["us-east1", "us-west1"], "allowAll" : true } + // +kubebuilder:validation:Optional + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + // List of values to be used for this PolicyRule. This field can be set only in Policies for list constraints. // Structure is documented below. // +kubebuilder:validation:Optional diff --git a/apis/privateca/v1beta2/zz_certificate_types.go b/apis/privateca/v1beta2/zz_certificate_types.go index b10df16a8..54fee9d09 100755 --- a/apis/privateca/v1beta2/zz_certificate_types.go +++ b/apis/privateca/v1beta2/zz_certificate_types.go @@ -30,7 +30,7 @@ type AuthorityKeyIDInitParameters struct { type AuthorityKeyIDObservation struct { - // The value of the KeyId in lowercase hexidecimal. + // The value of the KeyId in lowercase hexadecimal. 
KeyID *string `json:"keyId,omitempty" tf:"key_id,omitempty"` } @@ -405,19 +405,19 @@ type ConfigPublicKeyParameters struct { type ConfigSubjectKeyIDInitParameters struct { - // The value of the KeyId in lowercase hexidecimal. + // The value of the KeyId in lowercase hexadecimal. KeyID *string `json:"keyId,omitempty" tf:"key_id,omitempty"` } type ConfigSubjectKeyIDObservation struct { - // The value of the KeyId in lowercase hexidecimal. + // The value of the KeyId in lowercase hexadecimal. KeyID *string `json:"keyId,omitempty" tf:"key_id,omitempty"` } type ConfigSubjectKeyIDParameters struct { - // The value of the KeyId in lowercase hexidecimal. + // The value of the KeyId in lowercase hexadecimal. // +kubebuilder:validation:Optional KeyID *string `json:"keyId,omitempty" tf:"key_id,omitempty"` } @@ -811,7 +811,7 @@ type SubjectKeyIDInitParameters struct { type SubjectKeyIDObservation struct { - // The value of the KeyId in lowercase hexidecimal. + // The value of the KeyId in lowercase hexadecimal. KeyID *string `json:"keyId,omitempty" tf:"key_id,omitempty"` } diff --git a/apis/privateca/v1beta2/zz_certificateauthority_types.go b/apis/privateca/v1beta2/zz_certificateauthority_types.go index 7f33266e8..b9b479448 100755 --- a/apis/privateca/v1beta2/zz_certificateauthority_types.go +++ b/apis/privateca/v1beta2/zz_certificateauthority_types.go @@ -82,19 +82,19 @@ type CertificateAuthorityConfigParameters struct { type CertificateAuthorityConfigSubjectKeyIDInitParameters struct { - // The value of the KeyId in lowercase hexidecimal. + // The value of the KeyId in lowercase hexadecimal. KeyID *string `json:"keyId,omitempty" tf:"key_id,omitempty"` } type CertificateAuthorityConfigSubjectKeyIDObservation struct { - // The value of the KeyId in lowercase hexidecimal. + // The value of the KeyId in lowercase hexadecimal. 
KeyID *string `json:"keyId,omitempty" tf:"key_id,omitempty"` } type CertificateAuthorityConfigSubjectKeyIDParameters struct { - // The value of the KeyId in lowercase hexidecimal. + // The value of the KeyId in lowercase hexadecimal. // +kubebuilder:validation:Optional KeyID *string `json:"keyId,omitempty" tf:"key_id,omitempty"` } @@ -105,10 +105,8 @@ type CertificateAuthorityInitParameters struct { // Structure is documented below. Config *CertificateAuthorityConfigInitParameters `json:"config,omitempty" tf:"config,omitempty"` - // When the field is set to false, deleting the CertificateAuthority is allowed. - DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` - // Desired state of the CertificateAuthority. Set this field to STAGED to create a STAGED root CA. + // Possible values: ENABLED, DISABLED, STAGED. DesiredState *string `json:"desiredState,omitempty" tf:"desired_state,omitempty"` // The name of a Cloud Storage bucket where this CertificateAuthority will publish content, @@ -184,6 +182,7 @@ type CertificateAuthorityObservation struct { DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` // Desired state of the CertificateAuthority. Set this field to STAGED to create a STAGED root CA. + // Possible values: ENABLED, DISABLED, STAGED. DesiredState *string `json:"desiredState,omitempty" tf:"desired_state,omitempty"` // +mapType=granular @@ -283,11 +282,8 @@ type CertificateAuthorityParameters struct { // +kubebuilder:validation:Optional Config *CertificateAuthorityConfigParameters `json:"config,omitempty" tf:"config,omitempty"` - // When the field is set to false, deleting the CertificateAuthority is allowed. - // +kubebuilder:validation:Optional - DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` - // Desired state of the CertificateAuthority. Set this field to STAGED to create a STAGED root CA. 
+ // Possible values: ENABLED, DISABLED, STAGED. // +kubebuilder:validation:Optional DesiredState *string `json:"desiredState,omitempty" tf:"desired_state,omitempty"` diff --git a/apis/privateca/v1beta2/zz_generated.deepcopy.go b/apis/privateca/v1beta2/zz_generated.deepcopy.go index 69b73385a..fa815b25c 100644 --- a/apis/privateca/v1beta2/zz_generated.deepcopy.go +++ b/apis/privateca/v1beta2/zz_generated.deepcopy.go @@ -1785,11 +1785,6 @@ func (in *CertificateAuthorityInitParameters) DeepCopyInto(out *CertificateAutho *out = new(CertificateAuthorityConfigInitParameters) (*in).DeepCopyInto(*out) } - if in.DeletionProtection != nil { - in, out := &in.DeletionProtection, &out.DeletionProtection - *out = new(bool) - **out = **in - } if in.DesiredState != nil { in, out := &in.DesiredState, &out.DesiredState *out = new(string) @@ -2084,11 +2079,6 @@ func (in *CertificateAuthorityParameters) DeepCopyInto(out *CertificateAuthority *out = new(CertificateAuthorityConfigParameters) (*in).DeepCopyInto(*out) } - if in.DeletionProtection != nil { - in, out := &in.DeletionProtection, &out.DeletionProtection - *out = new(bool) - **out = **in - } if in.DesiredState != nil { in, out := &in.DesiredState, &out.DesiredState *out = new(string) diff --git a/apis/pubsub/v1beta2/zz_generated.deepcopy.go b/apis/pubsub/v1beta2/zz_generated.deepcopy.go index fc931901d..436794009 100644 --- a/apis/pubsub/v1beta2/zz_generated.deepcopy.go +++ b/apis/pubsub/v1beta2/zz_generated.deepcopy.go @@ -16,6 +16,11 @@ import ( // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *AvroConfigInitParameters) DeepCopyInto(out *AvroConfigInitParameters) { *out = *in + if in.UseTopicSchema != nil { + in, out := &in.UseTopicSchema, &out.UseTopicSchema + *out = new(bool) + **out = **in + } if in.WriteMetadata != nil { in, out := &in.WriteMetadata, &out.WriteMetadata *out = new(bool) @@ -36,6 +41,11 @@ func (in *AvroConfigInitParameters) DeepCopy() *AvroConfigInitParameters { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AvroConfigObservation) DeepCopyInto(out *AvroConfigObservation) { *out = *in + if in.UseTopicSchema != nil { + in, out := &in.UseTopicSchema, &out.UseTopicSchema + *out = new(bool) + **out = **in + } if in.WriteMetadata != nil { in, out := &in.WriteMetadata, &out.WriteMetadata *out = new(bool) @@ -56,6 +66,11 @@ func (in *AvroConfigObservation) DeepCopy() *AvroConfigObservation { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AvroConfigParameters) DeepCopyInto(out *AvroConfigParameters) { *out = *in + if in.UseTopicSchema != nil { + in, out := &in.UseTopicSchema, &out.UseTopicSchema + *out = new(bool) + **out = **in + } if in.WriteMetadata != nil { in, out := &in.WriteMetadata, &out.WriteMetadata *out = new(bool) @@ -73,6 +88,51 @@ func (in *AvroConfigParameters) DeepCopy() *AvroConfigParameters { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AvroFormatInitParameters) DeepCopyInto(out *AvroFormatInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AvroFormatInitParameters. 
+func (in *AvroFormatInitParameters) DeepCopy() *AvroFormatInitParameters { + if in == nil { + return nil + } + out := new(AvroFormatInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AvroFormatObservation) DeepCopyInto(out *AvroFormatObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AvroFormatObservation. +func (in *AvroFormatObservation) DeepCopy() *AvroFormatObservation { + if in == nil { + return nil + } + out := new(AvroFormatObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AvroFormatParameters) DeepCopyInto(out *AvroFormatParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AvroFormatParameters. +func (in *AvroFormatParameters) DeepCopy() *AvroFormatParameters { + if in == nil { + return nil + } + out := new(AvroFormatParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *AwsKinesisInitParameters) DeepCopyInto(out *AwsKinesisInitParameters) { *out = *in @@ -446,6 +506,11 @@ func (in *CloudStorageConfigInitParameters) DeepCopyInto(out *CloudStorageConfig *out = new(string) **out = **in } + if in.MaxMessages != nil { + in, out := &in.MaxMessages, &out.MaxMessages + *out = new(float64) + **out = **in + } if in.ServiceAccountEmail != nil { in, out := &in.ServiceAccountEmail, &out.ServiceAccountEmail *out = new(string) @@ -511,6 +576,11 @@ func (in *CloudStorageConfigObservation) DeepCopyInto(out *CloudStorageConfigObs *out = new(string) **out = **in } + if in.MaxMessages != nil { + in, out := &in.MaxMessages, &out.MaxMessages + *out = new(float64) + **out = **in + } if in.ServiceAccountEmail != nil { in, out := &in.ServiceAccountEmail, &out.ServiceAccountEmail *out = new(string) @@ -571,6 +641,11 @@ func (in *CloudStorageConfigParameters) DeepCopyInto(out *CloudStorageConfigPara *out = new(string) **out = **in } + if in.MaxMessages != nil { + in, out := &in.MaxMessages, &out.MaxMessages + *out = new(float64) + **out = **in + } if in.ServiceAccountEmail != nil { in, out := &in.ServiceAccountEmail, &out.ServiceAccountEmail *out = new(string) @@ -598,6 +673,141 @@ func (in *CloudStorageConfigParameters) DeepCopy() *CloudStorageConfigParameters return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CloudStorageInitParameters) DeepCopyInto(out *CloudStorageInitParameters) { + *out = *in + if in.AvroFormat != nil { + in, out := &in.AvroFormat, &out.AvroFormat + *out = new(AvroFormatInitParameters) + **out = **in + } + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.MatchGlob != nil { + in, out := &in.MatchGlob, &out.MatchGlob + *out = new(string) + **out = **in + } + if in.MinimumObjectCreateTime != nil { + in, out := &in.MinimumObjectCreateTime, &out.MinimumObjectCreateTime + *out = new(string) + **out = **in + } + if in.PubsubAvroFormat != nil { + in, out := &in.PubsubAvroFormat, &out.PubsubAvroFormat + *out = new(PubsubAvroFormatInitParameters) + **out = **in + } + if in.TextFormat != nil { + in, out := &in.TextFormat, &out.TextFormat + *out = new(TextFormatInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudStorageInitParameters. +func (in *CloudStorageInitParameters) DeepCopy() *CloudStorageInitParameters { + if in == nil { + return nil + } + out := new(CloudStorageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CloudStorageObservation) DeepCopyInto(out *CloudStorageObservation) { + *out = *in + if in.AvroFormat != nil { + in, out := &in.AvroFormat, &out.AvroFormat + *out = new(AvroFormatParameters) + **out = **in + } + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.MatchGlob != nil { + in, out := &in.MatchGlob, &out.MatchGlob + *out = new(string) + **out = **in + } + if in.MinimumObjectCreateTime != nil { + in, out := &in.MinimumObjectCreateTime, &out.MinimumObjectCreateTime + *out = new(string) + **out = **in + } + if in.PubsubAvroFormat != nil { + in, out := &in.PubsubAvroFormat, &out.PubsubAvroFormat + *out = new(PubsubAvroFormatParameters) + **out = **in + } + if in.TextFormat != nil { + in, out := &in.TextFormat, &out.TextFormat + *out = new(TextFormatObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudStorageObservation. +func (in *CloudStorageObservation) DeepCopy() *CloudStorageObservation { + if in == nil { + return nil + } + out := new(CloudStorageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CloudStorageParameters) DeepCopyInto(out *CloudStorageParameters) { + *out = *in + if in.AvroFormat != nil { + in, out := &in.AvroFormat, &out.AvroFormat + *out = new(AvroFormatParameters) + **out = **in + } + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.MatchGlob != nil { + in, out := &in.MatchGlob, &out.MatchGlob + *out = new(string) + **out = **in + } + if in.MinimumObjectCreateTime != nil { + in, out := &in.MinimumObjectCreateTime, &out.MinimumObjectCreateTime + *out = new(string) + **out = **in + } + if in.PubsubAvroFormat != nil { + in, out := &in.PubsubAvroFormat, &out.PubsubAvroFormat + *out = new(PubsubAvroFormatParameters) + **out = **in + } + if in.TextFormat != nil { + in, out := &in.TextFormat, &out.TextFormat + *out = new(TextFormatParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudStorageParameters. +func (in *CloudStorageParameters) DeepCopy() *CloudStorageParameters { + if in == nil { + return nil + } + out := new(CloudStorageParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ConditionInitParameters) DeepCopyInto(out *ConditionInitParameters) { *out = *in @@ -911,6 +1121,16 @@ func (in *IngestionDataSourceSettingsInitParameters) DeepCopyInto(out *Ingestion *out = new(AwsKinesisInitParameters) (*in).DeepCopyInto(*out) } + if in.CloudStorage != nil { + in, out := &in.CloudStorage, &out.CloudStorage + *out = new(CloudStorageInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PlatformLogsSettings != nil { + in, out := &in.PlatformLogsSettings, &out.PlatformLogsSettings + *out = new(PlatformLogsSettingsInitParameters) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngestionDataSourceSettingsInitParameters. @@ -931,6 +1151,16 @@ func (in *IngestionDataSourceSettingsObservation) DeepCopyInto(out *IngestionDat *out = new(AwsKinesisObservation) (*in).DeepCopyInto(*out) } + if in.CloudStorage != nil { + in, out := &in.CloudStorage, &out.CloudStorage + *out = new(CloudStorageObservation) + (*in).DeepCopyInto(*out) + } + if in.PlatformLogsSettings != nil { + in, out := &in.PlatformLogsSettings, &out.PlatformLogsSettings + *out = new(PlatformLogsSettingsObservation) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngestionDataSourceSettingsObservation. @@ -951,6 +1181,16 @@ func (in *IngestionDataSourceSettingsParameters) DeepCopyInto(out *IngestionData *out = new(AwsKinesisParameters) (*in).DeepCopyInto(*out) } + if in.CloudStorage != nil { + in, out := &in.CloudStorage, &out.CloudStorage + *out = new(CloudStorageParameters) + (*in).DeepCopyInto(*out) + } + if in.PlatformLogsSettings != nil { + in, out := &in.PlatformLogsSettings, &out.PlatformLogsSettings + *out = new(PlatformLogsSettingsParameters) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngestionDataSourceSettingsParameters. 
@@ -1714,6 +1954,111 @@ func (in *PartitionConfigParameters) DeepCopy() *PartitionConfigParameters { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlatformLogsSettingsInitParameters) DeepCopyInto(out *PlatformLogsSettingsInitParameters) { + *out = *in + if in.Severity != nil { + in, out := &in.Severity, &out.Severity + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlatformLogsSettingsInitParameters. +func (in *PlatformLogsSettingsInitParameters) DeepCopy() *PlatformLogsSettingsInitParameters { + if in == nil { + return nil + } + out := new(PlatformLogsSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlatformLogsSettingsObservation) DeepCopyInto(out *PlatformLogsSettingsObservation) { + *out = *in + if in.Severity != nil { + in, out := &in.Severity, &out.Severity + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlatformLogsSettingsObservation. +func (in *PlatformLogsSettingsObservation) DeepCopy() *PlatformLogsSettingsObservation { + if in == nil { + return nil + } + out := new(PlatformLogsSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlatformLogsSettingsParameters) DeepCopyInto(out *PlatformLogsSettingsParameters) { + *out = *in + if in.Severity != nil { + in, out := &in.Severity, &out.Severity + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlatformLogsSettingsParameters. 
+func (in *PlatformLogsSettingsParameters) DeepCopy() *PlatformLogsSettingsParameters { + if in == nil { + return nil + } + out := new(PlatformLogsSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PubsubAvroFormatInitParameters) DeepCopyInto(out *PubsubAvroFormatInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PubsubAvroFormatInitParameters. +func (in *PubsubAvroFormatInitParameters) DeepCopy() *PubsubAvroFormatInitParameters { + if in == nil { + return nil + } + out := new(PubsubAvroFormatInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PubsubAvroFormatObservation) DeepCopyInto(out *PubsubAvroFormatObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PubsubAvroFormatObservation. +func (in *PubsubAvroFormatObservation) DeepCopy() *PubsubAvroFormatObservation { + if in == nil { + return nil + } + out := new(PubsubAvroFormatObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PubsubAvroFormatParameters) DeepCopyInto(out *PubsubAvroFormatParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PubsubAvroFormatParameters. +func (in *PubsubAvroFormatParameters) DeepCopy() *PubsubAvroFormatParameters { + if in == nil { + return nil + } + out := new(PubsubAvroFormatParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PushConfigInitParameters) DeepCopyInto(out *PushConfigInitParameters) { *out = *in @@ -2855,6 +3200,66 @@ func (in *SubscriptionStatus) DeepCopy() *SubscriptionStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TextFormatInitParameters) DeepCopyInto(out *TextFormatInitParameters) { + *out = *in + if in.Delimiter != nil { + in, out := &in.Delimiter, &out.Delimiter + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TextFormatInitParameters. +func (in *TextFormatInitParameters) DeepCopy() *TextFormatInitParameters { + if in == nil { + return nil + } + out := new(TextFormatInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TextFormatObservation) DeepCopyInto(out *TextFormatObservation) { + *out = *in + if in.Delimiter != nil { + in, out := &in.Delimiter, &out.Delimiter + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TextFormatObservation. +func (in *TextFormatObservation) DeepCopy() *TextFormatObservation { + if in == nil { + return nil + } + out := new(TextFormatObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TextFormatParameters) DeepCopyInto(out *TextFormatParameters) { + *out = *in + if in.Delimiter != nil { + in, out := &in.Delimiter, &out.Delimiter + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TextFormatParameters. 
+func (in *TextFormatParameters) DeepCopy() *TextFormatParameters { + if in == nil { + return nil + } + out := new(TextFormatParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Topic) DeepCopyInto(out *Topic) { *out = *in diff --git a/apis/pubsub/v1beta2/zz_subscription_types.go b/apis/pubsub/v1beta2/zz_subscription_types.go index c8d750903..e2f7226b2 100755 --- a/apis/pubsub/v1beta2/zz_subscription_types.go +++ b/apis/pubsub/v1beta2/zz_subscription_types.go @@ -15,6 +15,10 @@ import ( type AvroConfigInitParameters struct { + // When true, use the topic's schema as the columns to write to in BigQuery, if it exists. + // Only one of use_topic_schema and use_table_schema can be set. + UseTopicSchema *bool `json:"useTopicSchema,omitempty" tf:"use_topic_schema,omitempty"` + // When true, writes the Pub/Sub message metadata to // x-goog-pubsub-: headers of the HTTP request. Writes the // Pub/Sub message attributes to : headers of the HTTP request. @@ -23,6 +27,10 @@ type AvroConfigInitParameters struct { type AvroConfigObservation struct { + // When true, use the topic's schema as the columns to write to in BigQuery, if it exists. + // Only one of use_topic_schema and use_table_schema can be set. + UseTopicSchema *bool `json:"useTopicSchema,omitempty" tf:"use_topic_schema,omitempty"` + // When true, writes the Pub/Sub message metadata to // x-goog-pubsub-: headers of the HTTP request. Writes the // Pub/Sub message attributes to : headers of the HTTP request. @@ -31,6 +39,11 @@ type AvroConfigObservation struct { type AvroConfigParameters struct { + // When true, use the topic's schema as the columns to write to in BigQuery, if it exists. + // Only one of use_topic_schema and use_table_schema can be set. 
+ // +kubebuilder:validation:Optional + UseTopicSchema *bool `json:"useTopicSchema,omitempty" tf:"use_topic_schema,omitempty"` + // When true, writes the Pub/Sub message metadata to // x-goog-pubsub-: headers of the HTTP request. Writes the // Pub/Sub message attributes to : headers of the HTTP request. @@ -175,6 +188,9 @@ type CloudStorageConfigInitParameters struct { // A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". MaxDuration *string `json:"maxDuration,omitempty" tf:"max_duration,omitempty"` + // The maximum messages that can be written to a Cloud Storage file before a new file is created. Min 1000 messages. + MaxMessages *float64 `json:"maxMessages,omitempty" tf:"max_messages,omitempty"` + // The service account to use to write to Cloud Storage. If not specified, the Pub/Sub // service agent, // service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com, is used. @@ -218,6 +234,9 @@ type CloudStorageConfigObservation struct { // A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". MaxDuration *string `json:"maxDuration,omitempty" tf:"max_duration,omitempty"` + // The maximum messages that can be written to a Cloud Storage file before a new file is created. Min 1000 messages. + MaxMessages *float64 `json:"maxMessages,omitempty" tf:"max_messages,omitempty"` + // The service account to use to write to Cloud Storage. If not specified, the Pub/Sub // service agent, // service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com, is used. @@ -262,6 +281,10 @@ type CloudStorageConfigParameters struct { // +kubebuilder:validation:Optional MaxDuration *string `json:"maxDuration,omitempty" tf:"max_duration,omitempty"` + // The maximum messages that can be written to a Cloud Storage file before a new file is created. Min 1000 messages. 
+ // +kubebuilder:validation:Optional + MaxMessages *float64 `json:"maxMessages,omitempty" tf:"max_messages,omitempty"` + // The service account to use to write to Cloud Storage. If not specified, the Pub/Sub // service agent, // service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com, is used. @@ -707,7 +730,7 @@ type SubscriptionInitParameters struct { // retain_acked_messages is true, then this also configures the retention // of acknowledged messages, and thus configures how far back in time a // subscriptions.seek can be done. Defaults to 7 days. Cannot be more - // than 7 days ("604800s") or less than 10 minutes ("600s"). + // than 31 days ("2678400s") or less than 10 minutes ("600s"). // A duration in seconds with up to nine fractional digits, terminated // by 's'. Example: "600.5s". MessageRetentionDuration *string `json:"messageRetentionDuration,omitempty" tf:"message_retention_duration,omitempty"` @@ -831,7 +854,7 @@ type SubscriptionObservation struct { // retain_acked_messages is true, then this also configures the retention // of acknowledged messages, and thus configures how far back in time a // subscriptions.seek can be done. Defaults to 7 days. Cannot be more - // than 7 days ("604800s") or less than 10 minutes ("600s"). + // than 31 days ("2678400s") or less than 10 minutes ("600s"). // A duration in seconds with up to nine fractional digits, terminated // by 's'. Example: "600.5s". MessageRetentionDuration *string `json:"messageRetentionDuration,omitempty" tf:"message_retention_duration,omitempty"` @@ -953,7 +976,7 @@ type SubscriptionParameters struct { // retain_acked_messages is true, then this also configures the retention // of acknowledged messages, and thus configures how far back in time a // subscriptions.seek can be done. Defaults to 7 days. Cannot be more - // than 7 days ("604800s") or less than 10 minutes ("600s"). + // than 31 days ("2678400s") or less than 10 minutes ("600s"). 
// A duration in seconds with up to nine fractional digits, terminated // by 's'. Example: "600.5s". // +kubebuilder:validation:Optional diff --git a/apis/pubsub/v1beta2/zz_topic_types.go b/apis/pubsub/v1beta2/zz_topic_types.go index b51f5f472..b386e5cb2 100755 --- a/apis/pubsub/v1beta2/zz_topic_types.go +++ b/apis/pubsub/v1beta2/zz_topic_types.go @@ -13,6 +13,15 @@ import ( v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" ) +type AvroFormatInitParameters struct { +} + +type AvroFormatObservation struct { +} + +type AvroFormatParameters struct { +} + type AwsKinesisInitParameters struct { // AWS role ARN to be used for Federated Identity authentication with @@ -83,11 +92,128 @@ type AwsKinesisParameters struct { StreamArn *string `json:"streamArn" tf:"stream_arn,omitempty"` } +type CloudStorageInitParameters struct { + + // Configuration for reading Cloud Storage data in Avro binary format. The + // bytes of each object will be set to the data field of a Pub/Sub message. + AvroFormat *AvroFormatInitParameters `json:"avroFormat,omitempty" tf:"avro_format,omitempty"` + + // Cloud Storage bucket. The bucket name must be without any + // prefix like "gs://". See the bucket naming requirements: + // https://cloud.google.com/storage/docs/buckets#naming. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Glob pattern used to match objects that will be ingested. If unset, all + // objects will be ingested. See the supported patterns: + // https://cloud.google.com/storage/docs/json_api/v1/objects/list#list-objects-and-prefixes-using-glob + MatchGlob *string `json:"matchGlob,omitempty" tf:"match_glob,omitempty"` + + // The timestamp set in RFC3339 text format. If set, only objects with a + // larger or equal timestamp will be ingested. Unset by default, meaning + // all objects will be ingested. 
+ MinimumObjectCreateTime *string `json:"minimumObjectCreateTime,omitempty" tf:"minimum_object_create_time,omitempty"` + + // Configuration for reading Cloud Storage data written via Cloud Storage + // subscriptions(See https://cloud.google.com/pubsub/docs/cloudstorage). The + // data and attributes fields of the originally exported Pub/Sub message + // will be restored when publishing. + PubsubAvroFormat *PubsubAvroFormatInitParameters `json:"pubsubAvroFormat,omitempty" tf:"pubsub_avro_format,omitempty"` + + // Configuration for reading Cloud Storage data in text format. Each line of + // text as specified by the delimiter will be set to the data field of a + // Pub/Sub message. + // Structure is documented below. + TextFormat *TextFormatInitParameters `json:"textFormat,omitempty" tf:"text_format,omitempty"` +} + +type CloudStorageObservation struct { + + // Configuration for reading Cloud Storage data in Avro binary format. The + // bytes of each object will be set to the data field of a Pub/Sub message. + AvroFormat *AvroFormatParameters `json:"avroFormat,omitempty" tf:"avro_format,omitempty"` + + // Cloud Storage bucket. The bucket name must be without any + // prefix like "gs://". See the bucket naming requirements: + // https://cloud.google.com/storage/docs/buckets#naming. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Glob pattern used to match objects that will be ingested. If unset, all + // objects will be ingested. See the supported patterns: + // https://cloud.google.com/storage/docs/json_api/v1/objects/list#list-objects-and-prefixes-using-glob + MatchGlob *string `json:"matchGlob,omitempty" tf:"match_glob,omitempty"` + + // The timestamp set in RFC3339 text format. If set, only objects with a + // larger or equal timestamp will be ingested. Unset by default, meaning + // all objects will be ingested. 
+ MinimumObjectCreateTime *string `json:"minimumObjectCreateTime,omitempty" tf:"minimum_object_create_time,omitempty"` + + // Configuration for reading Cloud Storage data written via Cloud Storage + // subscriptions(See https://cloud.google.com/pubsub/docs/cloudstorage). The + // data and attributes fields of the originally exported Pub/Sub message + // will be restored when publishing. + PubsubAvroFormat *PubsubAvroFormatParameters `json:"pubsubAvroFormat,omitempty" tf:"pubsub_avro_format,omitempty"` + + // Configuration for reading Cloud Storage data in text format. Each line of + // text as specified by the delimiter will be set to the data field of a + // Pub/Sub message. + // Structure is documented below. + TextFormat *TextFormatObservation `json:"textFormat,omitempty" tf:"text_format,omitempty"` +} + +type CloudStorageParameters struct { + + // Configuration for reading Cloud Storage data in Avro binary format. The + // bytes of each object will be set to the data field of a Pub/Sub message. + // +kubebuilder:validation:Optional + AvroFormat *AvroFormatParameters `json:"avroFormat,omitempty" tf:"avro_format,omitempty"` + + // Cloud Storage bucket. The bucket name must be without any + // prefix like "gs://". See the bucket naming requirements: + // https://cloud.google.com/storage/docs/buckets#naming. + // +kubebuilder:validation:Optional + Bucket *string `json:"bucket" tf:"bucket,omitempty"` + + // Glob pattern used to match objects that will be ingested. If unset, all + // objects will be ingested. See the supported patterns: + // https://cloud.google.com/storage/docs/json_api/v1/objects/list#list-objects-and-prefixes-using-glob + // +kubebuilder:validation:Optional + MatchGlob *string `json:"matchGlob,omitempty" tf:"match_glob,omitempty"` + + // The timestamp set in RFC3339 text format. If set, only objects with a + // larger or equal timestamp will be ingested. Unset by default, meaning + // all objects will be ingested. 
+ // +kubebuilder:validation:Optional + MinimumObjectCreateTime *string `json:"minimumObjectCreateTime,omitempty" tf:"minimum_object_create_time,omitempty"` + + // Configuration for reading Cloud Storage data written via Cloud Storage + // subscriptions(See https://cloud.google.com/pubsub/docs/cloudstorage). The + // data and attributes fields of the originally exported Pub/Sub message + // will be restored when publishing. + // +kubebuilder:validation:Optional + PubsubAvroFormat *PubsubAvroFormatParameters `json:"pubsubAvroFormat,omitempty" tf:"pubsub_avro_format,omitempty"` + + // Configuration for reading Cloud Storage data in text format. Each line of + // text as specified by the delimiter will be set to the data field of a + // Pub/Sub message. + // Structure is documented below. + // +kubebuilder:validation:Optional + TextFormat *TextFormatParameters `json:"textFormat,omitempty" tf:"text_format,omitempty"` +} + type IngestionDataSourceSettingsInitParameters struct { // Settings for ingestion from Amazon Kinesis Data Streams. // Structure is documented below. AwsKinesis *AwsKinesisInitParameters `json:"awsKinesis,omitempty" tf:"aws_kinesis,omitempty"` + + // Settings for ingestion from Cloud Storage. + // Structure is documented below. + CloudStorage *CloudStorageInitParameters `json:"cloudStorage,omitempty" tf:"cloud_storage,omitempty"` + + // Settings for Platform Logs regarding ingestion to Pub/Sub. If unset, + // no Platform Logs will be generated. + // Structure is documented below. + PlatformLogsSettings *PlatformLogsSettingsInitParameters `json:"platformLogsSettings,omitempty" tf:"platform_logs_settings,omitempty"` } type IngestionDataSourceSettingsObservation struct { @@ -95,6 +221,15 @@ type IngestionDataSourceSettingsObservation struct { // Settings for ingestion from Amazon Kinesis Data Streams. // Structure is documented below. 
AwsKinesis *AwsKinesisObservation `json:"awsKinesis,omitempty" tf:"aws_kinesis,omitempty"` + + // Settings for ingestion from Cloud Storage. + // Structure is documented below. + CloudStorage *CloudStorageObservation `json:"cloudStorage,omitempty" tf:"cloud_storage,omitempty"` + + // Settings for Platform Logs regarding ingestion to Pub/Sub. If unset, + // no Platform Logs will be generated. + // Structure is documented below. + PlatformLogsSettings *PlatformLogsSettingsObservation `json:"platformLogsSettings,omitempty" tf:"platform_logs_settings,omitempty"` } type IngestionDataSourceSettingsParameters struct { @@ -103,6 +238,17 @@ type IngestionDataSourceSettingsParameters struct { // Structure is documented below. // +kubebuilder:validation:Optional AwsKinesis *AwsKinesisParameters `json:"awsKinesis,omitempty" tf:"aws_kinesis,omitempty"` + + // Settings for ingestion from Cloud Storage. + // Structure is documented below. + // +kubebuilder:validation:Optional + CloudStorage *CloudStorageParameters `json:"cloudStorage,omitempty" tf:"cloud_storage,omitempty"` + + // Settings for Platform Logs regarding ingestion to Pub/Sub. If unset, + // no Platform Logs will be generated. + // Structure is documented below. + // +kubebuilder:validation:Optional + PlatformLogsSettings *PlatformLogsSettingsParameters `json:"platformLogsSettings,omitempty" tf:"platform_logs_settings,omitempty"` } type MessageStoragePolicyInitParameters struct { @@ -139,6 +285,43 @@ type MessageStoragePolicyParameters struct { AllowedPersistenceRegions []*string `json:"allowedPersistenceRegions" tf:"allowed_persistence_regions,omitempty"` } +type PlatformLogsSettingsInitParameters struct { + + // The minimum severity level of Platform Logs that will be written. If unspecified, + // no Platform Logs will be written. + // Default value is SEVERITY_UNSPECIFIED. + // Possible values are: SEVERITY_UNSPECIFIED, DISABLED, DEBUG, INFO, WARNING, ERROR. 
+ Severity *string `json:"severity,omitempty" tf:"severity,omitempty"` +} + +type PlatformLogsSettingsObservation struct { + + // The minimum severity level of Platform Logs that will be written. If unspecified, + // no Platform Logs will be written. + // Default value is SEVERITY_UNSPECIFIED. + // Possible values are: SEVERITY_UNSPECIFIED, DISABLED, DEBUG, INFO, WARNING, ERROR. + Severity *string `json:"severity,omitempty" tf:"severity,omitempty"` +} + +type PlatformLogsSettingsParameters struct { + + // The minimum severity level of Platform Logs that will be written. If unspecified, + // no Platform Logs will be written. + // Default value is SEVERITY_UNSPECIFIED. + // Possible values are: SEVERITY_UNSPECIFIED, DISABLED, DEBUG, INFO, WARNING, ERROR. + // +kubebuilder:validation:Optional + Severity *string `json:"severity,omitempty" tf:"severity,omitempty"` +} + +type PubsubAvroFormatInitParameters struct { +} + +type PubsubAvroFormatObservation struct { +} + +type PubsubAvroFormatParameters struct { +} + type SchemaSettingsInitParameters struct { // The encoding of messages validated against schema. @@ -183,6 +366,31 @@ type SchemaSettingsParameters struct { Schema *string `json:"schema" tf:"schema,omitempty"` } +type TextFormatInitParameters struct { + + // The delimiter to use when using the 'text' format. Each line of text as + // specified by the delimiter will be set to the 'data' field of a Pub/Sub + // message. When unset, '\n' is used. + Delimiter *string `json:"delimiter,omitempty" tf:"delimiter,omitempty"` +} + +type TextFormatObservation struct { + + // The delimiter to use when using the 'text' format. Each line of text as + // specified by the delimiter will be set to the 'data' field of a Pub/Sub + // message. When unset, '\n' is used. + Delimiter *string `json:"delimiter,omitempty" tf:"delimiter,omitempty"` +} + +type TextFormatParameters struct { + + // The delimiter to use when using the 'text' format. 
Each line of text as + // specified by the delimiter will be set to the 'data' field of a Pub/Sub + // message. When unset, '\n' is used. + // +kubebuilder:validation:Optional + Delimiter *string `json:"delimiter,omitempty" tf:"delimiter,omitempty"` +} + type TopicInitParameters struct { // Settings for ingestion from a data source into this topic. diff --git a/apis/redis/v1beta1/zz_cluster_types.go b/apis/redis/v1beta1/zz_cluster_types.go index f0e66a5fb..b3a75448b 100755 --- a/apis/redis/v1beta1/zz_cluster_types.go +++ b/apis/redis/v1beta1/zz_cluster_types.go @@ -13,6 +13,25 @@ import ( v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" ) +type AofConfigInitParameters struct { + + // Optional. Available fsync modes. + AppendFsync *string `json:"appendFsync,omitempty" tf:"append_fsync,omitempty"` +} + +type AofConfigObservation struct { + + // Optional. Available fsync modes. + AppendFsync *string `json:"appendFsync,omitempty" tf:"append_fsync,omitempty"` +} + +type AofConfigParameters struct { + + // Optional. Available fsync modes. + // +kubebuilder:validation:Optional + AppendFsync *string `json:"appendFsync,omitempty" tf:"append_fsync,omitempty"` +} + type ClusterInitParameters struct { // Optional. The authorization mode of the Redis cluster. If not provided, auth feature is disabled for the cluster. @@ -20,11 +39,27 @@ type ClusterInitParameters struct { // Possible values are: AUTH_MODE_UNSPECIFIED, AUTH_MODE_IAM_AUTH, AUTH_MODE_DISABLED. AuthorizationMode *string `json:"authorizationMode,omitempty" tf:"authorization_mode,omitempty"` + // field to the configuration file to match the latest value in the state. + CrossClusterReplicationConfig *CrossClusterReplicationConfigInitParameters `json:"crossClusterReplicationConfig,omitempty" tf:"cross_cluster_replication_config,omitempty"` + + // Optional. Indicates if the cluster is deletion protected or not. + // If the value is set to true, any delete cluster operation will fail. 
+ // Default value is true. + DeletionProtectionEnabled *bool `json:"deletionProtectionEnabled,omitempty" tf:"deletion_protection_enabled,omitempty"` + + // Maintenance policy for a cluster + // Structure is documented below. + MaintenancePolicy *ClusterMaintenancePolicyInitParameters `json:"maintenancePolicy,omitempty" tf:"maintenance_policy,omitempty"` + // The nodeType for the Redis cluster. // If not provided, REDIS_HIGHMEM_MEDIUM will be used as default // Possible values are: REDIS_SHARED_CORE_NANO, REDIS_HIGHMEM_MEDIUM, REDIS_HIGHMEM_XLARGE, REDIS_STANDARD_SMALL. NodeType *string `json:"nodeType,omitempty" tf:"node_type,omitempty"` + // Persistence config (RDB, AOF) for the cluster. + // Structure is documented below. + PersistenceConfig *ClusterPersistenceConfigInitParameters `json:"persistenceConfig,omitempty" tf:"persistence_config,omitempty"` + // The ID of the project in which the resource belongs. // If it is not provided, the provider project is used. Project *string `json:"project,omitempty" tf:"project,omitempty"` @@ -58,6 +93,74 @@ type ClusterInitParameters struct { ZoneDistributionConfig *ZoneDistributionConfigInitParameters `json:"zoneDistributionConfig,omitempty" tf:"zone_distribution_config,omitempty"` } +type ClusterMaintenancePolicyInitParameters struct { + + // Optional. Maintenance window that is applied to resources covered by this policy. + // Minimum 1. For the current version, the maximum number + // of weekly_window is expected to be one. + // Structure is documented below. + WeeklyMaintenanceWindow []MaintenancePolicyWeeklyMaintenanceWindowInitParameters `json:"weeklyMaintenanceWindow,omitempty" tf:"weekly_maintenance_window,omitempty"` +} + +type ClusterMaintenancePolicyObservation struct { + + // (Output) + // Output only. The time when the policy was created. + // A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + // resolution and up to nine fractional digits. 
+ CreateTime *string `json:"createTime,omitempty" tf:"create_time,omitempty"` + + // (Output) + // Output only. The time when the policy was last updated. + // A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + // resolution and up to nine fractional digits. + UpdateTime *string `json:"updateTime,omitempty" tf:"update_time,omitempty"` + + // Optional. Maintenance window that is applied to resources covered by this policy. + // Minimum 1. For the current version, the maximum number + // of weekly_window is expected to be one. + // Structure is documented below. + WeeklyMaintenanceWindow []MaintenancePolicyWeeklyMaintenanceWindowObservation `json:"weeklyMaintenanceWindow,omitempty" tf:"weekly_maintenance_window,omitempty"` +} + +type ClusterMaintenancePolicyParameters struct { + + // Optional. Maintenance window that is applied to resources covered by this policy. + // Minimum 1. For the current version, the maximum number + // of weekly_window is expected to be one. + // Structure is documented below. + // +kubebuilder:validation:Optional + WeeklyMaintenanceWindow []MaintenancePolicyWeeklyMaintenanceWindowParameters `json:"weeklyMaintenanceWindow,omitempty" tf:"weekly_maintenance_window,omitempty"` +} + +type ClusterMaintenanceScheduleInitParameters struct { +} + +type ClusterMaintenanceScheduleObservation struct { + + // (Output) + // Output only. The end time of any upcoming scheduled maintenance for this cluster. + // A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + // resolution and up to nine fractional digits. + EndTime *string `json:"endTime,omitempty" tf:"end_time,omitempty"` + + // (Output) + // Output only. The deadline that the maintenance schedule start time + // can not go beyond, including reschedule. + // A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + // resolution and up to nine fractional digits. 
+ ScheduleDeadlineTime *string `json:"scheduleDeadlineTime,omitempty" tf:"schedule_deadline_time,omitempty"` + + // (Output) + // Output only. The start time of any upcoming scheduled maintenance for this cluster. + // A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + // resolution and up to nine fractional digits. + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` +} + +type ClusterMaintenanceScheduleParameters struct { +} + type ClusterObservation struct { // Optional. The authorization mode of the Redis cluster. If not provided, auth feature is disabled for the cluster. @@ -70,6 +173,14 @@ type ClusterObservation struct { // digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". CreateTime *string `json:"createTime,omitempty" tf:"create_time,omitempty"` + // field to the configuration file to match the latest value in the state. + CrossClusterReplicationConfig *CrossClusterReplicationConfigObservation `json:"crossClusterReplicationConfig,omitempty" tf:"cross_cluster_replication_config,omitempty"` + + // Optional. Indicates if the cluster is deletion protected or not. + // If the value is set to true, any delete cluster operation will fail. + // Default value is true. + DeletionProtectionEnabled *bool `json:"deletionProtectionEnabled,omitempty" tf:"deletion_protection_enabled,omitempty"` + // Output only. Endpoints created on each given network, // for Redis clients to connect to the cluster. // Currently only one endpoint is supported. @@ -79,11 +190,23 @@ type ClusterObservation struct { // an identifier for the resource with format projects/{{project}}/locations/{{region}}/clusters/{{name}} ID *string `json:"id,omitempty" tf:"id,omitempty"` + // Maintenance policy for a cluster + // Structure is documented below. + MaintenancePolicy *ClusterMaintenancePolicyObservation `json:"maintenancePolicy,omitempty" tf:"maintenance_policy,omitempty"` + + // Upcoming maintenance schedule. 
+ // Structure is documented below. + MaintenanceSchedule []ClusterMaintenanceScheduleObservation `json:"maintenanceSchedule,omitempty" tf:"maintenance_schedule,omitempty"` + // The nodeType for the Redis cluster. // If not provided, REDIS_HIGHMEM_MEDIUM will be used as default // Possible values are: REDIS_SHARED_CORE_NANO, REDIS_HIGHMEM_MEDIUM, REDIS_HIGHMEM_XLARGE, REDIS_STANDARD_SMALL. NodeType *string `json:"nodeType,omitempty" tf:"node_type,omitempty"` + // Persistence config (RDB, AOF) for the cluster. + // Structure is documented below. + PersistenceConfig *ClusterPersistenceConfigObservation `json:"persistenceConfig,omitempty" tf:"persistence_config,omitempty"` + // Output only. Redis memory precise size in GB for the entire cluster. PreciseSizeGb *float64 `json:"preciseSizeGb,omitempty" tf:"precise_size_gb,omitempty"` @@ -148,12 +271,32 @@ type ClusterParameters struct { // +kubebuilder:validation:Optional AuthorizationMode *string `json:"authorizationMode,omitempty" tf:"authorization_mode,omitempty"` + // field to the configuration file to match the latest value in the state. + // +kubebuilder:validation:Optional + CrossClusterReplicationConfig *CrossClusterReplicationConfigParameters `json:"crossClusterReplicationConfig,omitempty" tf:"cross_cluster_replication_config,omitempty"` + + // Optional. Indicates if the cluster is deletion protected or not. + // If the value is set to true, any delete cluster operation will fail. + // Default value is true. + // +kubebuilder:validation:Optional + DeletionProtectionEnabled *bool `json:"deletionProtectionEnabled,omitempty" tf:"deletion_protection_enabled,omitempty"` + + // Maintenance policy for a cluster + // Structure is documented below. + // +kubebuilder:validation:Optional + MaintenancePolicy *ClusterMaintenancePolicyParameters `json:"maintenancePolicy,omitempty" tf:"maintenance_policy,omitempty"` + // The nodeType for the Redis cluster. 
// If not provided, REDIS_HIGHMEM_MEDIUM will be used as default // Possible values are: REDIS_SHARED_CORE_NANO, REDIS_HIGHMEM_MEDIUM, REDIS_HIGHMEM_XLARGE, REDIS_STANDARD_SMALL. // +kubebuilder:validation:Optional NodeType *string `json:"nodeType,omitempty" tf:"node_type,omitempty"` + // Persistence config (RDB, AOF) for the cluster. + // Structure is documented below. + // +kubebuilder:validation:Optional + PersistenceConfig *ClusterPersistenceConfigParameters `json:"persistenceConfig,omitempty" tf:"persistence_config,omitempty"` + // The ID of the project in which the resource belongs. // If it is not provided, the provider project is used. // +kubebuilder:validation:Optional @@ -198,6 +341,163 @@ type ClusterParameters struct { ZoneDistributionConfig *ZoneDistributionConfigParameters `json:"zoneDistributionConfig,omitempty" tf:"zone_distribution_config,omitempty"` } +type ClusterPersistenceConfigInitParameters struct { + + // AOF configuration. This field will be ignored if mode is not AOF. + // Structure is documented below. + AofConfig *AofConfigInitParameters `json:"aofConfig,omitempty" tf:"aof_config,omitempty"` + + // Optional. Controls whether Persistence features are enabled. If not provided, the existing value will be used. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // RDB configuration. This field will be ignored if mode is not RDB. + // Structure is documented below. + RdbConfig *RdbConfigInitParameters `json:"rdbConfig,omitempty" tf:"rdb_config,omitempty"` +} + +type ClusterPersistenceConfigObservation struct { + + // AOF configuration. This field will be ignored if mode is not AOF. + // Structure is documented below. + AofConfig *AofConfigObservation `json:"aofConfig,omitempty" tf:"aof_config,omitempty"` + + // Optional. Controls whether Persistence features are enabled. If not provided, the existing value will be used. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // RDB configuration. 
This field will be ignored if mode is not RDB. + // Structure is documented below. + RdbConfig *RdbConfigObservation `json:"rdbConfig,omitempty" tf:"rdb_config,omitempty"` +} + +type ClusterPersistenceConfigParameters struct { + + // AOF configuration. This field will be ignored if mode is not AOF. + // Structure is documented below. + // +kubebuilder:validation:Optional + AofConfig *AofConfigParameters `json:"aofConfig,omitempty" tf:"aof_config,omitempty"` + + // Optional. Controls whether Persistence features are enabled. If not provided, the existing value will be used. + // +kubebuilder:validation:Optional + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // RDB configuration. This field will be ignored if mode is not RDB. + // Structure is documented below. + // +kubebuilder:validation:Optional + RdbConfig *RdbConfigParameters `json:"rdbConfig,omitempty" tf:"rdb_config,omitempty"` +} + +type CrossClusterReplicationConfigInitParameters struct { + + // from SECONDARY to PRIMARY. + ClusterRole *string `json:"clusterRole,omitempty" tf:"cluster_role,omitempty"` + + // field. + PrimaryCluster *CrossClusterReplicationConfigPrimaryClusterInitParameters `json:"primaryCluster,omitempty" tf:"primary_cluster,omitempty"` + + // list with the new secondaries. The new secondaries are the current primary and other secondary clusters(if any). + SecondaryClusters []CrossClusterReplicationConfigSecondaryClustersInitParameters `json:"secondaryClusters,omitempty" tf:"secondary_clusters,omitempty"` +} + +type CrossClusterReplicationConfigObservation struct { + + // from SECONDARY to PRIMARY. + ClusterRole *string `json:"clusterRole,omitempty" tf:"cluster_role,omitempty"` + + // (Output) + // An output only view of all the member clusters participating in cross cluster replication. This field is populated for all the member clusters irrespective of their cluster role. + // Structure is documented below. 
+ Membership []MembershipObservation `json:"membership,omitempty" tf:"membership,omitempty"` + + // field. + PrimaryCluster *CrossClusterReplicationConfigPrimaryClusterObservation `json:"primaryCluster,omitempty" tf:"primary_cluster,omitempty"` + + // list with the new secondaries. The new secondaries are the current primary and other secondary clusters(if any). + SecondaryClusters []CrossClusterReplicationConfigSecondaryClustersObservation `json:"secondaryClusters,omitempty" tf:"secondary_clusters,omitempty"` + + // (Output) + // The last time cross cluster replication config was updated. + UpdateTime *string `json:"updateTime,omitempty" tf:"update_time,omitempty"` +} + +type CrossClusterReplicationConfigParameters struct { + + // from SECONDARY to PRIMARY. + // +kubebuilder:validation:Optional + ClusterRole *string `json:"clusterRole,omitempty" tf:"cluster_role,omitempty"` + + // field. + // +kubebuilder:validation:Optional + PrimaryCluster *CrossClusterReplicationConfigPrimaryClusterParameters `json:"primaryCluster,omitempty" tf:"primary_cluster,omitempty"` + + // list with the new secondaries. The new secondaries are the current primary and other secondary clusters(if any). + // +kubebuilder:validation:Optional + SecondaryClusters []CrossClusterReplicationConfigSecondaryClustersParameters `json:"secondaryClusters,omitempty" tf:"secondary_clusters,omitempty"` +} + +type CrossClusterReplicationConfigPrimaryClusterInitParameters struct { + + // The full resource path of the secondary cluster in the format: projects/{project}/locations/{region}/clusters/{cluster-id} + // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/redis/v1beta1.Cluster + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + Cluster *string `json:"cluster,omitempty" tf:"cluster,omitempty"` + + // Reference to a Cluster in redis to populate cluster. 
+ // +kubebuilder:validation:Optional + ClusterRef *v1.Reference `json:"clusterRef,omitempty" tf:"-"` + + // Selector for a Cluster in redis to populate cluster. + // +kubebuilder:validation:Optional + ClusterSelector *v1.Selector `json:"clusterSelector,omitempty" tf:"-"` +} + +type CrossClusterReplicationConfigPrimaryClusterObservation struct { + + // The full resource path of the secondary cluster in the format: projects/{project}/locations/{region}/clusters/{cluster-id} + Cluster *string `json:"cluster,omitempty" tf:"cluster,omitempty"` + + // System assigned, unique identifier for the cluster. + UID *string `json:"uid,omitempty" tf:"uid,omitempty"` +} + +type CrossClusterReplicationConfigPrimaryClusterParameters struct { + + // The full resource path of the secondary cluster in the format: projects/{project}/locations/{region}/clusters/{cluster-id} + // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/redis/v1beta1.Cluster + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + Cluster *string `json:"cluster,omitempty" tf:"cluster,omitempty"` + + // Reference to a Cluster in redis to populate cluster. + // +kubebuilder:validation:Optional + ClusterRef *v1.Reference `json:"clusterRef,omitempty" tf:"-"` + + // Selector for a Cluster in redis to populate cluster. 
+ // +kubebuilder:validation:Optional + ClusterSelector *v1.Selector `json:"clusterSelector,omitempty" tf:"-"` +} + +type CrossClusterReplicationConfigSecondaryClustersInitParameters struct { + + // The full resource path of the secondary cluster in the format: projects/{project}/locations/{region}/clusters/{cluster-id} + Cluster *string `json:"cluster,omitempty" tf:"cluster,omitempty"` +} + +type CrossClusterReplicationConfigSecondaryClustersObservation struct { + + // The full resource path of the secondary cluster in the format: projects/{project}/locations/{region}/clusters/{cluster-id} + Cluster *string `json:"cluster,omitempty" tf:"cluster,omitempty"` + + // System assigned, unique identifier for the cluster. + UID *string `json:"uid,omitempty" tf:"uid,omitempty"` +} + +type CrossClusterReplicationConfigSecondaryClustersParameters struct { + + // The full resource path of the secondary cluster in the format: projects/{project}/locations/{region}/clusters/{cluster-id} + // +kubebuilder:validation:Optional + Cluster *string `json:"cluster,omitempty" tf:"cluster,omitempty"` +} + type DiscoveryEndpointsInitParameters struct { } @@ -218,6 +518,79 @@ type DiscoveryEndpointsObservation struct { type DiscoveryEndpointsParameters struct { } +type MaintenancePolicyWeeklyMaintenanceWindowInitParameters struct { + + // Required. The day of week that maintenance updates occur. + Day *string `json:"day,omitempty" tf:"day,omitempty"` + + // Required. Start time of the window in UTC time. + // Structure is documented below. + StartTime *WeeklyMaintenanceWindowStartTimeInitParameters `json:"startTime,omitempty" tf:"start_time,omitempty"` +} + +type MaintenancePolicyWeeklyMaintenanceWindowObservation struct { + + // Required. The day of week that maintenance updates occur. + Day *string `json:"day,omitempty" tf:"day,omitempty"` + + // (Output) + // Output only. Duration of the maintenance window. + // The current window is fixed at 1 hour. 
+ // A duration in seconds with up to nine fractional digits, + // terminated by 's'. Example: "3.5s". + Duration *string `json:"duration,omitempty" tf:"duration,omitempty"` + + // Required. Start time of the window in UTC time. + // Structure is documented below. + StartTime *WeeklyMaintenanceWindowStartTimeObservation `json:"startTime,omitempty" tf:"start_time,omitempty"` +} + +type MaintenancePolicyWeeklyMaintenanceWindowParameters struct { + + // Required. The day of week that maintenance updates occur. + // +kubebuilder:validation:Optional + Day *string `json:"day" tf:"day,omitempty"` + + // Required. Start time of the window in UTC time. + // Structure is documented below. + // +kubebuilder:validation:Optional + StartTime *WeeklyMaintenanceWindowStartTimeParameters `json:"startTime" tf:"start_time,omitempty"` +} + +type MembershipInitParameters struct { +} + +type MembershipObservation struct { + + // (Output) + // Details of the primary cluster that is used as the replication source for all the secondary clusters. + // Structure is documented below. + PrimaryCluster []PrimaryClusterObservation `json:"primaryCluster,omitempty" tf:"primary_cluster,omitempty"` + + // (Output) + // List of secondary clusters that are replicating from the primary cluster. + // Structure is documented below. + SecondaryClusters []SecondaryClustersObservation `json:"secondaryClusters,omitempty" tf:"secondary_clusters,omitempty"` +} + +type MembershipParameters struct { +} + +type PrimaryClusterInitParameters struct { +} + +type PrimaryClusterObservation struct { + + // The full resource path of the secondary cluster in the format: projects/{project}/locations/{region}/clusters/{cluster-id} + Cluster *string `json:"cluster,omitempty" tf:"cluster,omitempty"` + + // System assigned, unique identifier for the cluster. 
+ UID *string `json:"uid,omitempty" tf:"uid,omitempty"` +} + +type PrimaryClusterParameters struct { +} + type PscConfigInitParameters struct { } @@ -299,12 +672,62 @@ type PscConnectionsObservation struct { type PscConnectionsParameters struct { } +type RdbConfigInitParameters struct { + + // Optional. Available snapshot periods for scheduling. + RdbSnapshotPeriod *string `json:"rdbSnapshotPeriod,omitempty" tf:"rdb_snapshot_period,omitempty"` + + // The time that the first snapshot was/will be attempted, and to which + // future snapshots will be aligned. + // If not provided, the current time will be used. + RdbSnapshotStartTime *string `json:"rdbSnapshotStartTime,omitempty" tf:"rdb_snapshot_start_time,omitempty"` +} + +type RdbConfigObservation struct { + + // Optional. Available snapshot periods for scheduling. + RdbSnapshotPeriod *string `json:"rdbSnapshotPeriod,omitempty" tf:"rdb_snapshot_period,omitempty"` + + // The time that the first snapshot was/will be attempted, and to which + // future snapshots will be aligned. + // If not provided, the current time will be used. + RdbSnapshotStartTime *string `json:"rdbSnapshotStartTime,omitempty" tf:"rdb_snapshot_start_time,omitempty"` +} + +type RdbConfigParameters struct { + + // Optional. Available snapshot periods for scheduling. + // +kubebuilder:validation:Optional + RdbSnapshotPeriod *string `json:"rdbSnapshotPeriod,omitempty" tf:"rdb_snapshot_period,omitempty"` + + // The time that the first snapshot was/will be attempted, and to which + // future snapshots will be aligned. + // If not provided, the current time will be used. 
+ // +kubebuilder:validation:Optional + RdbSnapshotStartTime *string `json:"rdbSnapshotStartTime,omitempty" tf:"rdb_snapshot_start_time,omitempty"` +} + +type SecondaryClustersInitParameters struct { +} + +type SecondaryClustersObservation struct { + + // The full resource path of the secondary cluster in the format: projects/{project}/locations/{region}/clusters/{cluster-id} + Cluster *string `json:"cluster,omitempty" tf:"cluster,omitempty"` + + // System assigned, unique identifier for the cluster. + UID *string `json:"uid,omitempty" tf:"uid,omitempty"` +} + +type SecondaryClustersParameters struct { +} + type StateInfoInitParameters struct { } type StateInfoObservation struct { - // A nested object resource + // A nested object resource. // Structure is documented below. UpdateInfo *UpdateInfoObservation `json:"updateInfo,omitempty" tf:"update_info,omitempty"` } @@ -327,6 +750,61 @@ type UpdateInfoObservation struct { type UpdateInfoParameters struct { } +type WeeklyMaintenanceWindowStartTimeInitParameters struct { + + // Hours of day in 24 hour format. Should be from 0 to 23. + // An API may choose to allow the value "24:00:00" for scenarios like business closing time. + Hours *float64 `json:"hours,omitempty" tf:"hours,omitempty"` + + // Minutes of hour of day. Must be from 0 to 59. + Minutes *float64 `json:"minutes,omitempty" tf:"minutes,omitempty"` + + // Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. + Nanos *float64 `json:"nanos,omitempty" tf:"nanos,omitempty"` + + // Seconds of minutes of the time. Must normally be from 0 to 59. + // An API may allow the value 60 if it allows leap-seconds. + Seconds *float64 `json:"seconds,omitempty" tf:"seconds,omitempty"` +} + +type WeeklyMaintenanceWindowStartTimeObservation struct { + + // Hours of day in 24 hour format. Should be from 0 to 23. + // An API may choose to allow the value "24:00:00" for scenarios like business closing time. 
+ Hours *float64 `json:"hours,omitempty" tf:"hours,omitempty"` + + // Minutes of hour of day. Must be from 0 to 59. + Minutes *float64 `json:"minutes,omitempty" tf:"minutes,omitempty"` + + // Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. + Nanos *float64 `json:"nanos,omitempty" tf:"nanos,omitempty"` + + // Seconds of minutes of the time. Must normally be from 0 to 59. + // An API may allow the value 60 if it allows leap-seconds. + Seconds *float64 `json:"seconds,omitempty" tf:"seconds,omitempty"` +} + +type WeeklyMaintenanceWindowStartTimeParameters struct { + + // Hours of day in 24 hour format. Should be from 0 to 23. + // An API may choose to allow the value "24:00:00" for scenarios like business closing time. + // +kubebuilder:validation:Optional + Hours *float64 `json:"hours,omitempty" tf:"hours,omitempty"` + + // Minutes of hour of day. Must be from 0 to 59. + // +kubebuilder:validation:Optional + Minutes *float64 `json:"minutes,omitempty" tf:"minutes,omitempty"` + + // Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. + // +kubebuilder:validation:Optional + Nanos *float64 `json:"nanos,omitempty" tf:"nanos,omitempty"` + + // Seconds of minutes of the time. Must normally be from 0 to 59. + // An API may allow the value 60 if it allows leap-seconds. + // +kubebuilder:validation:Optional + Seconds *float64 `json:"seconds,omitempty" tf:"seconds,omitempty"` +} + type ZoneDistributionConfigInitParameters struct { // Immutable. The mode for zone distribution for Memorystore Redis cluster. diff --git a/apis/redis/v1beta1/zz_generated.deepcopy.go b/apis/redis/v1beta1/zz_generated.deepcopy.go index fec5ad9f7..0e8eeedbf 100644 --- a/apis/redis/v1beta1/zz_generated.deepcopy.go +++ b/apis/redis/v1beta1/zz_generated.deepcopy.go @@ -13,6 +13,66 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AofConfigInitParameters) DeepCopyInto(out *AofConfigInitParameters) { + *out = *in + if in.AppendFsync != nil { + in, out := &in.AppendFsync, &out.AppendFsync + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AofConfigInitParameters. +func (in *AofConfigInitParameters) DeepCopy() *AofConfigInitParameters { + if in == nil { + return nil + } + out := new(AofConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AofConfigObservation) DeepCopyInto(out *AofConfigObservation) { + *out = *in + if in.AppendFsync != nil { + in, out := &in.AppendFsync, &out.AppendFsync + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AofConfigObservation. +func (in *AofConfigObservation) DeepCopy() *AofConfigObservation { + if in == nil { + return nil + } + out := new(AofConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AofConfigParameters) DeepCopyInto(out *AofConfigParameters) { + *out = *in + if in.AppendFsync != nil { + in, out := &in.AppendFsync, &out.AppendFsync + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AofConfigParameters. +func (in *AofConfigParameters) DeepCopy() *AofConfigParameters { + if in == nil { + return nil + } + out := new(AofConfigParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Cluster) DeepCopyInto(out *Cluster) { *out = *in @@ -48,11 +108,31 @@ func (in *ClusterInitParameters) DeepCopyInto(out *ClusterInitParameters) { *out = new(string) **out = **in } + if in.CrossClusterReplicationConfig != nil { + in, out := &in.CrossClusterReplicationConfig, &out.CrossClusterReplicationConfig + *out = new(CrossClusterReplicationConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DeletionProtectionEnabled != nil { + in, out := &in.DeletionProtectionEnabled, &out.DeletionProtectionEnabled + *out = new(bool) + **out = **in + } + if in.MaintenancePolicy != nil { + in, out := &in.MaintenancePolicy, &out.MaintenancePolicy + *out = new(ClusterMaintenancePolicyInitParameters) + (*in).DeepCopyInto(*out) + } if in.NodeType != nil { in, out := &in.NodeType, &out.NodeType *out = new(string) **out = **in } + if in.PersistenceConfig != nil { + in, out := &in.PersistenceConfig, &out.PersistenceConfig + *out = new(ClusterPersistenceConfigInitParameters) + (*in).DeepCopyInto(*out) + } if in.Project != nil { in, out := &in.Project, &out.Project *out = new(string) @@ -145,6 +225,142 @@ func (in *ClusterList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterMaintenancePolicyInitParameters) DeepCopyInto(out *ClusterMaintenancePolicyInitParameters) { + *out = *in + if in.WeeklyMaintenanceWindow != nil { + in, out := &in.WeeklyMaintenanceWindow, &out.WeeklyMaintenanceWindow + *out = make([]MaintenancePolicyWeeklyMaintenanceWindowInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterMaintenancePolicyInitParameters. 
+func (in *ClusterMaintenancePolicyInitParameters) DeepCopy() *ClusterMaintenancePolicyInitParameters { + if in == nil { + return nil + } + out := new(ClusterMaintenancePolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterMaintenancePolicyObservation) DeepCopyInto(out *ClusterMaintenancePolicyObservation) { + *out = *in + if in.CreateTime != nil { + in, out := &in.CreateTime, &out.CreateTime + *out = new(string) + **out = **in + } + if in.UpdateTime != nil { + in, out := &in.UpdateTime, &out.UpdateTime + *out = new(string) + **out = **in + } + if in.WeeklyMaintenanceWindow != nil { + in, out := &in.WeeklyMaintenanceWindow, &out.WeeklyMaintenanceWindow + *out = make([]MaintenancePolicyWeeklyMaintenanceWindowObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterMaintenancePolicyObservation. +func (in *ClusterMaintenancePolicyObservation) DeepCopy() *ClusterMaintenancePolicyObservation { + if in == nil { + return nil + } + out := new(ClusterMaintenancePolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterMaintenancePolicyParameters) DeepCopyInto(out *ClusterMaintenancePolicyParameters) { + *out = *in + if in.WeeklyMaintenanceWindow != nil { + in, out := &in.WeeklyMaintenanceWindow, &out.WeeklyMaintenanceWindow + *out = make([]MaintenancePolicyWeeklyMaintenanceWindowParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterMaintenancePolicyParameters. 
+func (in *ClusterMaintenancePolicyParameters) DeepCopy() *ClusterMaintenancePolicyParameters { + if in == nil { + return nil + } + out := new(ClusterMaintenancePolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterMaintenanceScheduleInitParameters) DeepCopyInto(out *ClusterMaintenanceScheduleInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterMaintenanceScheduleInitParameters. +func (in *ClusterMaintenanceScheduleInitParameters) DeepCopy() *ClusterMaintenanceScheduleInitParameters { + if in == nil { + return nil + } + out := new(ClusterMaintenanceScheduleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterMaintenanceScheduleObservation) DeepCopyInto(out *ClusterMaintenanceScheduleObservation) { + *out = *in + if in.EndTime != nil { + in, out := &in.EndTime, &out.EndTime + *out = new(string) + **out = **in + } + if in.ScheduleDeadlineTime != nil { + in, out := &in.ScheduleDeadlineTime, &out.ScheduleDeadlineTime + *out = new(string) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterMaintenanceScheduleObservation. +func (in *ClusterMaintenanceScheduleObservation) DeepCopy() *ClusterMaintenanceScheduleObservation { + if in == nil { + return nil + } + out := new(ClusterMaintenanceScheduleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterMaintenanceScheduleParameters) DeepCopyInto(out *ClusterMaintenanceScheduleParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterMaintenanceScheduleParameters. +func (in *ClusterMaintenanceScheduleParameters) DeepCopy() *ClusterMaintenanceScheduleParameters { + if in == nil { + return nil + } + out := new(ClusterMaintenanceScheduleParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterObservation) DeepCopyInto(out *ClusterObservation) { *out = *in @@ -158,6 +374,16 @@ func (in *ClusterObservation) DeepCopyInto(out *ClusterObservation) { *out = new(string) **out = **in } + if in.CrossClusterReplicationConfig != nil { + in, out := &in.CrossClusterReplicationConfig, &out.CrossClusterReplicationConfig + *out = new(CrossClusterReplicationConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.DeletionProtectionEnabled != nil { + in, out := &in.DeletionProtectionEnabled, &out.DeletionProtectionEnabled + *out = new(bool) + **out = **in + } if in.DiscoveryEndpoints != nil { in, out := &in.DiscoveryEndpoints, &out.DiscoveryEndpoints *out = make([]DiscoveryEndpointsObservation, len(*in)) @@ -170,11 +396,28 @@ func (in *ClusterObservation) DeepCopyInto(out *ClusterObservation) { *out = new(string) **out = **in } + if in.MaintenancePolicy != nil { + in, out := &in.MaintenancePolicy, &out.MaintenancePolicy + *out = new(ClusterMaintenancePolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.MaintenanceSchedule != nil { + in, out := &in.MaintenanceSchedule, &out.MaintenanceSchedule + *out = make([]ClusterMaintenanceScheduleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.NodeType != nil { in, out := &in.NodeType, &out.NodeType *out = new(string) **out = **in } + if in.PersistenceConfig != nil { + in, out := 
&in.PersistenceConfig, &out.PersistenceConfig + *out = new(ClusterPersistenceConfigObservation) + (*in).DeepCopyInto(*out) + } if in.PreciseSizeGb != nil { in, out := &in.PreciseSizeGb, &out.PreciseSizeGb *out = new(float64) @@ -247,142 +490,510 @@ func (in *ClusterObservation) DeepCopyInto(out *ClusterObservation) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.TransitEncryptionMode != nil { - in, out := &in.TransitEncryptionMode, &out.TransitEncryptionMode + if in.TransitEncryptionMode != nil { + in, out := &in.TransitEncryptionMode, &out.TransitEncryptionMode + *out = new(string) + **out = **in + } + if in.UID != nil { + in, out := &in.UID, &out.UID + *out = new(string) + **out = **in + } + if in.ZoneDistributionConfig != nil { + in, out := &in.ZoneDistributionConfig, &out.ZoneDistributionConfig + *out = new(ZoneDistributionConfigObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterObservation. +func (in *ClusterObservation) DeepCopy() *ClusterObservation { + if in == nil { + return nil + } + out := new(ClusterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterParameters) DeepCopyInto(out *ClusterParameters) { + *out = *in + if in.AuthorizationMode != nil { + in, out := &in.AuthorizationMode, &out.AuthorizationMode + *out = new(string) + **out = **in + } + if in.CrossClusterReplicationConfig != nil { + in, out := &in.CrossClusterReplicationConfig, &out.CrossClusterReplicationConfig + *out = new(CrossClusterReplicationConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.DeletionProtectionEnabled != nil { + in, out := &in.DeletionProtectionEnabled, &out.DeletionProtectionEnabled + *out = new(bool) + **out = **in + } + if in.MaintenancePolicy != nil { + in, out := &in.MaintenancePolicy, &out.MaintenancePolicy + *out = new(ClusterMaintenancePolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.NodeType != nil { + in, out := &in.NodeType, &out.NodeType + *out = new(string) + **out = **in + } + if in.PersistenceConfig != nil { + in, out := &in.PersistenceConfig, &out.PersistenceConfig + *out = new(ClusterPersistenceConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.Project != nil { + in, out := &in.Project, &out.Project + *out = new(string) + **out = **in + } + if in.PscConfigs != nil { + in, out := &in.PscConfigs, &out.PscConfigs + *out = make([]PscConfigsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RedisConfigs != nil { + in, out := &in.RedisConfigs, &out.RedisConfigs + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.ReplicaCount != nil { + in, out := &in.ReplicaCount, &out.ReplicaCount + *out = new(float64) + **out = **in + } + if in.ShardCount != nil { + in, out := &in.ShardCount, &out.ShardCount + *out = new(float64) + 
**out = **in + } + if in.TransitEncryptionMode != nil { + in, out := &in.TransitEncryptionMode, &out.TransitEncryptionMode + *out = new(string) + **out = **in + } + if in.ZoneDistributionConfig != nil { + in, out := &in.ZoneDistributionConfig, &out.ZoneDistributionConfig + *out = new(ZoneDistributionConfigParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterParameters. +func (in *ClusterParameters) DeepCopy() *ClusterParameters { + if in == nil { + return nil + } + out := new(ClusterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterPersistenceConfigInitParameters) DeepCopyInto(out *ClusterPersistenceConfigInitParameters) { + *out = *in + if in.AofConfig != nil { + in, out := &in.AofConfig, &out.AofConfig + *out = new(AofConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.RdbConfig != nil { + in, out := &in.RdbConfig, &out.RdbConfig + *out = new(RdbConfigInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterPersistenceConfigInitParameters. +func (in *ClusterPersistenceConfigInitParameters) DeepCopy() *ClusterPersistenceConfigInitParameters { + if in == nil { + return nil + } + out := new(ClusterPersistenceConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterPersistenceConfigObservation) DeepCopyInto(out *ClusterPersistenceConfigObservation) { + *out = *in + if in.AofConfig != nil { + in, out := &in.AofConfig, &out.AofConfig + *out = new(AofConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.RdbConfig != nil { + in, out := &in.RdbConfig, &out.RdbConfig + *out = new(RdbConfigObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterPersistenceConfigObservation. +func (in *ClusterPersistenceConfigObservation) DeepCopy() *ClusterPersistenceConfigObservation { + if in == nil { + return nil + } + out := new(ClusterPersistenceConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterPersistenceConfigParameters) DeepCopyInto(out *ClusterPersistenceConfigParameters) { + *out = *in + if in.AofConfig != nil { + in, out := &in.AofConfig, &out.AofConfig + *out = new(AofConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.RdbConfig != nil { + in, out := &in.RdbConfig, &out.RdbConfig + *out = new(RdbConfigParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterPersistenceConfigParameters. +func (in *ClusterPersistenceConfigParameters) DeepCopy() *ClusterPersistenceConfigParameters { + if in == nil { + return nil + } + out := new(ClusterPersistenceConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec. +func (in *ClusterSpec) DeepCopy() *ClusterSpec { + if in == nil { + return nil + } + out := new(ClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatus. +func (in *ClusterStatus) DeepCopy() *ClusterStatus { + if in == nil { + return nil + } + out := new(ClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CrossClusterReplicationConfigInitParameters) DeepCopyInto(out *CrossClusterReplicationConfigInitParameters) { + *out = *in + if in.ClusterRole != nil { + in, out := &in.ClusterRole, &out.ClusterRole + *out = new(string) + **out = **in + } + if in.PrimaryCluster != nil { + in, out := &in.PrimaryCluster, &out.PrimaryCluster + *out = new(CrossClusterReplicationConfigPrimaryClusterInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SecondaryClusters != nil { + in, out := &in.SecondaryClusters, &out.SecondaryClusters + *out = make([]CrossClusterReplicationConfigSecondaryClustersInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrossClusterReplicationConfigInitParameters. 
+func (in *CrossClusterReplicationConfigInitParameters) DeepCopy() *CrossClusterReplicationConfigInitParameters { + if in == nil { + return nil + } + out := new(CrossClusterReplicationConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CrossClusterReplicationConfigObservation) DeepCopyInto(out *CrossClusterReplicationConfigObservation) { + *out = *in + if in.ClusterRole != nil { + in, out := &in.ClusterRole, &out.ClusterRole + *out = new(string) + **out = **in + } + if in.Membership != nil { + in, out := &in.Membership, &out.Membership + *out = make([]MembershipObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PrimaryCluster != nil { + in, out := &in.PrimaryCluster, &out.PrimaryCluster + *out = new(CrossClusterReplicationConfigPrimaryClusterObservation) + (*in).DeepCopyInto(*out) + } + if in.SecondaryClusters != nil { + in, out := &in.SecondaryClusters, &out.SecondaryClusters + *out = make([]CrossClusterReplicationConfigSecondaryClustersObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.UpdateTime != nil { + in, out := &in.UpdateTime, &out.UpdateTime *out = new(string) **out = **in } - if in.UID != nil { - in, out := &in.UID, &out.UID +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrossClusterReplicationConfigObservation. +func (in *CrossClusterReplicationConfigObservation) DeepCopy() *CrossClusterReplicationConfigObservation { + if in == nil { + return nil + } + out := new(CrossClusterReplicationConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CrossClusterReplicationConfigParameters) DeepCopyInto(out *CrossClusterReplicationConfigParameters) { + *out = *in + if in.ClusterRole != nil { + in, out := &in.ClusterRole, &out.ClusterRole *out = new(string) **out = **in } - if in.ZoneDistributionConfig != nil { - in, out := &in.ZoneDistributionConfig, &out.ZoneDistributionConfig - *out = new(ZoneDistributionConfigObservation) + if in.PrimaryCluster != nil { + in, out := &in.PrimaryCluster, &out.PrimaryCluster + *out = new(CrossClusterReplicationConfigPrimaryClusterParameters) (*in).DeepCopyInto(*out) } + if in.SecondaryClusters != nil { + in, out := &in.SecondaryClusters, &out.SecondaryClusters + *out = make([]CrossClusterReplicationConfigSecondaryClustersParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterObservation. -func (in *ClusterObservation) DeepCopy() *ClusterObservation { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrossClusterReplicationConfigParameters. +func (in *CrossClusterReplicationConfigParameters) DeepCopy() *CrossClusterReplicationConfigParameters { if in == nil { return nil } - out := new(ClusterObservation) + out := new(CrossClusterReplicationConfigParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ClusterParameters) DeepCopyInto(out *ClusterParameters) { +func (in *CrossClusterReplicationConfigPrimaryClusterInitParameters) DeepCopyInto(out *CrossClusterReplicationConfigPrimaryClusterInitParameters) { *out = *in - if in.AuthorizationMode != nil { - in, out := &in.AuthorizationMode, &out.AuthorizationMode + if in.Cluster != nil { + in, out := &in.Cluster, &out.Cluster *out = new(string) **out = **in } - if in.NodeType != nil { - in, out := &in.NodeType, &out.NodeType + if in.ClusterRef != nil { + in, out := &in.ClusterRef, &out.ClusterRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ClusterSelector != nil { + in, out := &in.ClusterSelector, &out.ClusterSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrossClusterReplicationConfigPrimaryClusterInitParameters. +func (in *CrossClusterReplicationConfigPrimaryClusterInitParameters) DeepCopy() *CrossClusterReplicationConfigPrimaryClusterInitParameters { + if in == nil { + return nil + } + out := new(CrossClusterReplicationConfigPrimaryClusterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CrossClusterReplicationConfigPrimaryClusterObservation) DeepCopyInto(out *CrossClusterReplicationConfigPrimaryClusterObservation) { + *out = *in + if in.Cluster != nil { + in, out := &in.Cluster, &out.Cluster *out = new(string) **out = **in } - if in.Project != nil { - in, out := &in.Project, &out.Project + if in.UID != nil { + in, out := &in.UID, &out.UID *out = new(string) **out = **in } - if in.PscConfigs != nil { - in, out := &in.PscConfigs, &out.PscConfigs - *out = make([]PscConfigsParameters, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.RedisConfigs != nil { - in, out := &in.RedisConfigs, &out.RedisConfigs - *out = make(map[string]*string, len(*in)) - for key, val := range *in { - var outVal *string - if val == nil { - (*out)[key] = nil - } else { - inVal := (*in)[key] - in, out := &inVal, &outVal - *out = new(string) - **out = **in - } - (*out)[key] = outVal - } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrossClusterReplicationConfigPrimaryClusterObservation. +func (in *CrossClusterReplicationConfigPrimaryClusterObservation) DeepCopy() *CrossClusterReplicationConfigPrimaryClusterObservation { + if in == nil { + return nil } - if in.Region != nil { - in, out := &in.Region, &out.Region + out := new(CrossClusterReplicationConfigPrimaryClusterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CrossClusterReplicationConfigPrimaryClusterParameters) DeepCopyInto(out *CrossClusterReplicationConfigPrimaryClusterParameters) { + *out = *in + if in.Cluster != nil { + in, out := &in.Cluster, &out.Cluster *out = new(string) **out = **in } - if in.ReplicaCount != nil { - in, out := &in.ReplicaCount, &out.ReplicaCount - *out = new(float64) - **out = **in + if in.ClusterRef != nil { + in, out := &in.ClusterRef, &out.ClusterRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) } - if in.ShardCount != nil { - in, out := &in.ShardCount, &out.ShardCount - *out = new(float64) - **out = **in + if in.ClusterSelector != nil { + in, out := &in.ClusterSelector, &out.ClusterSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) } - if in.TransitEncryptionMode != nil { - in, out := &in.TransitEncryptionMode, &out.TransitEncryptionMode +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrossClusterReplicationConfigPrimaryClusterParameters. +func (in *CrossClusterReplicationConfigPrimaryClusterParameters) DeepCopy() *CrossClusterReplicationConfigPrimaryClusterParameters { + if in == nil { + return nil + } + out := new(CrossClusterReplicationConfigPrimaryClusterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CrossClusterReplicationConfigSecondaryClustersInitParameters) DeepCopyInto(out *CrossClusterReplicationConfigSecondaryClustersInitParameters) { + *out = *in + if in.Cluster != nil { + in, out := &in.Cluster, &out.Cluster *out = new(string) **out = **in } - if in.ZoneDistributionConfig != nil { - in, out := &in.ZoneDistributionConfig, &out.ZoneDistributionConfig - *out = new(ZoneDistributionConfigParameters) - (*in).DeepCopyInto(*out) - } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterParameters. 
-func (in *ClusterParameters) DeepCopy() *ClusterParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrossClusterReplicationConfigSecondaryClustersInitParameters. +func (in *CrossClusterReplicationConfigSecondaryClustersInitParameters) DeepCopy() *CrossClusterReplicationConfigSecondaryClustersInitParameters { if in == nil { return nil } - out := new(ClusterParameters) + out := new(CrossClusterReplicationConfigSecondaryClustersInitParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { +func (in *CrossClusterReplicationConfigSecondaryClustersObservation) DeepCopyInto(out *CrossClusterReplicationConfigSecondaryClustersObservation) { *out = *in - in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) - in.ForProvider.DeepCopyInto(&out.ForProvider) - in.InitProvider.DeepCopyInto(&out.InitProvider) + if in.Cluster != nil { + in, out := &in.Cluster, &out.Cluster + *out = new(string) + **out = **in + } + if in.UID != nil { + in, out := &in.UID, &out.UID + *out = new(string) + **out = **in + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec. -func (in *ClusterSpec) DeepCopy() *ClusterSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrossClusterReplicationConfigSecondaryClustersObservation. +func (in *CrossClusterReplicationConfigSecondaryClustersObservation) DeepCopy() *CrossClusterReplicationConfigSecondaryClustersObservation { if in == nil { return nil } - out := new(ClusterSpec) + out := new(CrossClusterReplicationConfigSecondaryClustersObservation) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) { +func (in *CrossClusterReplicationConfigSecondaryClustersParameters) DeepCopyInto(out *CrossClusterReplicationConfigSecondaryClustersParameters) { *out = *in - in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) - in.AtProvider.DeepCopyInto(&out.AtProvider) + if in.Cluster != nil { + in, out := &in.Cluster, &out.Cluster + *out = new(string) + **out = **in + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatus. -func (in *ClusterStatus) DeepCopy() *ClusterStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrossClusterReplicationConfigSecondaryClustersParameters. +func (in *CrossClusterReplicationConfigSecondaryClustersParameters) DeepCopy() *CrossClusterReplicationConfigSecondaryClustersParameters { if in == nil { return nil } - out := new(ClusterStatus) + out := new(CrossClusterReplicationConfigSecondaryClustersParameters) in.DeepCopyInto(out) return out } @@ -1213,6 +1824,86 @@ func (in *MaintenancePolicyParameters) DeepCopy() *MaintenancePolicyParameters { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenancePolicyWeeklyMaintenanceWindowInitParameters) DeepCopyInto(out *MaintenancePolicyWeeklyMaintenanceWindowInitParameters) { + *out = *in + if in.Day != nil { + in, out := &in.Day, &out.Day + *out = new(string) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(WeeklyMaintenanceWindowStartTimeInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenancePolicyWeeklyMaintenanceWindowInitParameters. 
+func (in *MaintenancePolicyWeeklyMaintenanceWindowInitParameters) DeepCopy() *MaintenancePolicyWeeklyMaintenanceWindowInitParameters { + if in == nil { + return nil + } + out := new(MaintenancePolicyWeeklyMaintenanceWindowInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenancePolicyWeeklyMaintenanceWindowObservation) DeepCopyInto(out *MaintenancePolicyWeeklyMaintenanceWindowObservation) { + *out = *in + if in.Day != nil { + in, out := &in.Day, &out.Day + *out = new(string) + **out = **in + } + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(string) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(WeeklyMaintenanceWindowStartTimeObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenancePolicyWeeklyMaintenanceWindowObservation. +func (in *MaintenancePolicyWeeklyMaintenanceWindowObservation) DeepCopy() *MaintenancePolicyWeeklyMaintenanceWindowObservation { + if in == nil { + return nil + } + out := new(MaintenancePolicyWeeklyMaintenanceWindowObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenancePolicyWeeklyMaintenanceWindowParameters) DeepCopyInto(out *MaintenancePolicyWeeklyMaintenanceWindowParameters) { + *out = *in + if in.Day != nil { + in, out := &in.Day, &out.Day + *out = new(string) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(WeeklyMaintenanceWindowStartTimeParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenancePolicyWeeklyMaintenanceWindowParameters. 
+func (in *MaintenancePolicyWeeklyMaintenanceWindowParameters) DeepCopy() *MaintenancePolicyWeeklyMaintenanceWindowParameters { + if in == nil { + return nil + } + out := new(MaintenancePolicyWeeklyMaintenanceWindowParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MaintenanceScheduleInitParameters) DeepCopyInto(out *MaintenanceScheduleInitParameters) { *out = *in @@ -1241,34 +1932,93 @@ func (in *MaintenanceScheduleObservation) DeepCopyInto(out *MaintenanceScheduleO *out = new(string) **out = **in } - if in.StartTime != nil { - in, out := &in.StartTime, &out.StartTime - *out = new(string) - **out = **in + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceScheduleObservation. +func (in *MaintenanceScheduleObservation) DeepCopy() *MaintenanceScheduleObservation { + if in == nil { + return nil + } + out := new(MaintenanceScheduleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenanceScheduleParameters) DeepCopyInto(out *MaintenanceScheduleParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceScheduleParameters. +func (in *MaintenanceScheduleParameters) DeepCopy() *MaintenanceScheduleParameters { + if in == nil { + return nil + } + out := new(MaintenanceScheduleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MembershipInitParameters) DeepCopyInto(out *MembershipInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MembershipInitParameters. +func (in *MembershipInitParameters) DeepCopy() *MembershipInitParameters { + if in == nil { + return nil + } + out := new(MembershipInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MembershipObservation) DeepCopyInto(out *MembershipObservation) { + *out = *in + if in.PrimaryCluster != nil { + in, out := &in.PrimaryCluster, &out.PrimaryCluster + *out = make([]PrimaryClusterObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecondaryClusters != nil { + in, out := &in.SecondaryClusters, &out.SecondaryClusters + *out = make([]SecondaryClustersObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceScheduleObservation. -func (in *MaintenanceScheduleObservation) DeepCopy() *MaintenanceScheduleObservation { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MembershipObservation. +func (in *MembershipObservation) DeepCopy() *MembershipObservation { if in == nil { return nil } - out := new(MaintenanceScheduleObservation) + out := new(MembershipObservation) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MaintenanceScheduleParameters) DeepCopyInto(out *MaintenanceScheduleParameters) { +func (in *MembershipParameters) DeepCopyInto(out *MembershipParameters) { *out = *in } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceScheduleParameters. 
-func (in *MaintenanceScheduleParameters) DeepCopy() *MaintenanceScheduleParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MembershipParameters. +func (in *MembershipParameters) DeepCopy() *MembershipParameters { if in == nil { return nil } - out := new(MaintenanceScheduleParameters) + out := new(MembershipParameters) in.DeepCopyInto(out) return out } @@ -1423,6 +2173,61 @@ func (in *PersistenceConfigParameters) DeepCopy() *PersistenceConfigParameters { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrimaryClusterInitParameters) DeepCopyInto(out *PrimaryClusterInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrimaryClusterInitParameters. +func (in *PrimaryClusterInitParameters) DeepCopy() *PrimaryClusterInitParameters { + if in == nil { + return nil + } + out := new(PrimaryClusterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrimaryClusterObservation) DeepCopyInto(out *PrimaryClusterObservation) { + *out = *in + if in.Cluster != nil { + in, out := &in.Cluster, &out.Cluster + *out = new(string) + **out = **in + } + if in.UID != nil { + in, out := &in.UID, &out.UID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrimaryClusterObservation. +func (in *PrimaryClusterObservation) DeepCopy() *PrimaryClusterObservation { + if in == nil { + return nil + } + out := new(PrimaryClusterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PrimaryClusterParameters) DeepCopyInto(out *PrimaryClusterParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrimaryClusterParameters. +func (in *PrimaryClusterParameters) DeepCopy() *PrimaryClusterParameters { + if in == nil { + return nil + } + out := new(PrimaryClusterParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PscConfigInitParameters) DeepCopyInto(out *PscConfigInitParameters) { *out = *in @@ -1623,6 +2428,136 @@ func (in *PscConnectionsParameters) DeepCopy() *PscConnectionsParameters { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RdbConfigInitParameters) DeepCopyInto(out *RdbConfigInitParameters) { + *out = *in + if in.RdbSnapshotPeriod != nil { + in, out := &in.RdbSnapshotPeriod, &out.RdbSnapshotPeriod + *out = new(string) + **out = **in + } + if in.RdbSnapshotStartTime != nil { + in, out := &in.RdbSnapshotStartTime, &out.RdbSnapshotStartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RdbConfigInitParameters. +func (in *RdbConfigInitParameters) DeepCopy() *RdbConfigInitParameters { + if in == nil { + return nil + } + out := new(RdbConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RdbConfigObservation) DeepCopyInto(out *RdbConfigObservation) { + *out = *in + if in.RdbSnapshotPeriod != nil { + in, out := &in.RdbSnapshotPeriod, &out.RdbSnapshotPeriod + *out = new(string) + **out = **in + } + if in.RdbSnapshotStartTime != nil { + in, out := &in.RdbSnapshotStartTime, &out.RdbSnapshotStartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RdbConfigObservation. +func (in *RdbConfigObservation) DeepCopy() *RdbConfigObservation { + if in == nil { + return nil + } + out := new(RdbConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RdbConfigParameters) DeepCopyInto(out *RdbConfigParameters) { + *out = *in + if in.RdbSnapshotPeriod != nil { + in, out := &in.RdbSnapshotPeriod, &out.RdbSnapshotPeriod + *out = new(string) + **out = **in + } + if in.RdbSnapshotStartTime != nil { + in, out := &in.RdbSnapshotStartTime, &out.RdbSnapshotStartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RdbConfigParameters. +func (in *RdbConfigParameters) DeepCopy() *RdbConfigParameters { + if in == nil { + return nil + } + out := new(RdbConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecondaryClustersInitParameters) DeepCopyInto(out *SecondaryClustersInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecondaryClustersInitParameters. 
+func (in *SecondaryClustersInitParameters) DeepCopy() *SecondaryClustersInitParameters { + if in == nil { + return nil + } + out := new(SecondaryClustersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecondaryClustersObservation) DeepCopyInto(out *SecondaryClustersObservation) { + *out = *in + if in.Cluster != nil { + in, out := &in.Cluster, &out.Cluster + *out = new(string) + **out = **in + } + if in.UID != nil { + in, out := &in.UID, &out.UID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecondaryClustersObservation. +func (in *SecondaryClustersObservation) DeepCopy() *SecondaryClustersObservation { + if in == nil { + return nil + } + out := new(SecondaryClustersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecondaryClustersParameters) DeepCopyInto(out *SecondaryClustersParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecondaryClustersParameters. +func (in *SecondaryClustersParameters) DeepCopy() *SecondaryClustersParameters { + if in == nil { + return nil + } + out := new(SecondaryClustersParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ServerCACertsInitParameters) DeepCopyInto(out *ServerCACertsInitParameters) { *out = *in @@ -1989,6 +2924,111 @@ func (in *WeeklyMaintenanceWindowParameters) DeepCopy() *WeeklyMaintenanceWindow return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WeeklyMaintenanceWindowStartTimeInitParameters) DeepCopyInto(out *WeeklyMaintenanceWindowStartTimeInitParameters) { + *out = *in + if in.Hours != nil { + in, out := &in.Hours, &out.Hours + *out = new(float64) + **out = **in + } + if in.Minutes != nil { + in, out := &in.Minutes, &out.Minutes + *out = new(float64) + **out = **in + } + if in.Nanos != nil { + in, out := &in.Nanos, &out.Nanos + *out = new(float64) + **out = **in + } + if in.Seconds != nil { + in, out := &in.Seconds, &out.Seconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WeeklyMaintenanceWindowStartTimeInitParameters. +func (in *WeeklyMaintenanceWindowStartTimeInitParameters) DeepCopy() *WeeklyMaintenanceWindowStartTimeInitParameters { + if in == nil { + return nil + } + out := new(WeeklyMaintenanceWindowStartTimeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WeeklyMaintenanceWindowStartTimeObservation) DeepCopyInto(out *WeeklyMaintenanceWindowStartTimeObservation) { + *out = *in + if in.Hours != nil { + in, out := &in.Hours, &out.Hours + *out = new(float64) + **out = **in + } + if in.Minutes != nil { + in, out := &in.Minutes, &out.Minutes + *out = new(float64) + **out = **in + } + if in.Nanos != nil { + in, out := &in.Nanos, &out.Nanos + *out = new(float64) + **out = **in + } + if in.Seconds != nil { + in, out := &in.Seconds, &out.Seconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WeeklyMaintenanceWindowStartTimeObservation. 
+func (in *WeeklyMaintenanceWindowStartTimeObservation) DeepCopy() *WeeklyMaintenanceWindowStartTimeObservation { + if in == nil { + return nil + } + out := new(WeeklyMaintenanceWindowStartTimeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WeeklyMaintenanceWindowStartTimeParameters) DeepCopyInto(out *WeeklyMaintenanceWindowStartTimeParameters) { + *out = *in + if in.Hours != nil { + in, out := &in.Hours, &out.Hours + *out = new(float64) + **out = **in + } + if in.Minutes != nil { + in, out := &in.Minutes, &out.Minutes + *out = new(float64) + **out = **in + } + if in.Nanos != nil { + in, out := &in.Nanos, &out.Nanos + *out = new(float64) + **out = **in + } + if in.Seconds != nil { + in, out := &in.Seconds, &out.Seconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WeeklyMaintenanceWindowStartTimeParameters. +func (in *WeeklyMaintenanceWindowStartTimeParameters) DeepCopy() *WeeklyMaintenanceWindowStartTimeParameters { + if in == nil { + return nil + } + out := new(WeeklyMaintenanceWindowStartTimeParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ZoneDistributionConfigInitParameters) DeepCopyInto(out *ZoneDistributionConfigInitParameters) { *out = *in diff --git a/apis/redis/v1beta1/zz_generated.resolvers.go b/apis/redis/v1beta1/zz_generated.resolvers.go index 580b88472..0dcd0838e 100644 --- a/apis/redis/v1beta1/zz_generated.resolvers.go +++ b/apis/redis/v1beta1/zz_generated.resolvers.go @@ -27,6 +27,29 @@ func (mg *Cluster) ResolveReferences(ctx context.Context, c client.Reader) error var rsp reference.ResolutionResponse var err error + if mg.Spec.ForProvider.CrossClusterReplicationConfig != nil { + if mg.Spec.ForProvider.CrossClusterReplicationConfig.PrimaryCluster != nil { + { + m, l, err = apisresolver.GetManagedResource("redis.gcp.upbound.io", "v1beta1", "Cluster", "ClusterList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.CrossClusterReplicationConfig.PrimaryCluster.Cluster), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.CrossClusterReplicationConfig.PrimaryCluster.ClusterRef, + Selector: mg.Spec.ForProvider.CrossClusterReplicationConfig.PrimaryCluster.ClusterSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.CrossClusterReplicationConfig.PrimaryCluster.Cluster") + } + mg.Spec.ForProvider.CrossClusterReplicationConfig.PrimaryCluster.Cluster = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.CrossClusterReplicationConfig.PrimaryCluster.ClusterRef = rsp.ResolvedReference + + } + } for i3 := 0; i3 < len(mg.Spec.ForProvider.PscConfigs); i3++ { { m, l, err = apisresolver.GetManagedResource("compute.gcp.upbound.io", "v1beta1", "Network", "NetworkList") @@ -48,6 +71,29 @@ func (mg *Cluster) ResolveReferences(ctx context.Context, c client.Reader) error 
mg.Spec.ForProvider.PscConfigs[i3].NetworkRef = rsp.ResolvedReference } + if mg.Spec.InitProvider.CrossClusterReplicationConfig != nil { + if mg.Spec.InitProvider.CrossClusterReplicationConfig.PrimaryCluster != nil { + { + m, l, err = apisresolver.GetManagedResource("redis.gcp.upbound.io", "v1beta1", "Cluster", "ClusterList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.CrossClusterReplicationConfig.PrimaryCluster.Cluster), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.CrossClusterReplicationConfig.PrimaryCluster.ClusterRef, + Selector: mg.Spec.InitProvider.CrossClusterReplicationConfig.PrimaryCluster.ClusterSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.CrossClusterReplicationConfig.PrimaryCluster.Cluster") + } + mg.Spec.InitProvider.CrossClusterReplicationConfig.PrimaryCluster.Cluster = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.CrossClusterReplicationConfig.PrimaryCluster.ClusterRef = rsp.ResolvedReference + + } + } for i3 := 0; i3 < len(mg.Spec.InitProvider.PscConfigs); i3++ { { m, l, err = apisresolver.GetManagedResource("compute.gcp.upbound.io", "v1beta1", "Network", "NetworkList") diff --git a/apis/sourcerepo/v1beta1/zz_generated.deepcopy.go b/apis/sourcerepo/v1beta1/zz_generated.deepcopy.go index 14cbe8d80..4d41adc7f 100644 --- a/apis/sourcerepo/v1beta1/zz_generated.deepcopy.go +++ b/apis/sourcerepo/v1beta1/zz_generated.deepcopy.go @@ -513,6 +513,11 @@ func (in *RepositoryIAMMemberStatus) DeepCopy() *RepositoryIAMMemberStatus { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *RepositoryInitParameters) DeepCopyInto(out *RepositoryInitParameters) { *out = *in + if in.CreateIgnoreAlreadyExists != nil { + in, out := &in.CreateIgnoreAlreadyExists, &out.CreateIgnoreAlreadyExists + *out = new(bool) + **out = **in + } if in.Project != nil { in, out := &in.Project, &out.Project *out = new(string) @@ -572,6 +577,11 @@ func (in *RepositoryList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RepositoryObservation) DeepCopyInto(out *RepositoryObservation) { *out = *in + if in.CreateIgnoreAlreadyExists != nil { + in, out := &in.CreateIgnoreAlreadyExists, &out.CreateIgnoreAlreadyExists + *out = new(bool) + **out = **in + } if in.ID != nil { in, out := &in.ID, &out.ID *out = new(string) @@ -614,6 +624,11 @@ func (in *RepositoryObservation) DeepCopy() *RepositoryObservation { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RepositoryParameters) DeepCopyInto(out *RepositoryParameters) { *out = *in + if in.CreateIgnoreAlreadyExists != nil { + in, out := &in.CreateIgnoreAlreadyExists, &out.CreateIgnoreAlreadyExists + *out = new(bool) + **out = **in + } if in.Project != nil { in, out := &in.Project, &out.Project *out = new(string) diff --git a/apis/sourcerepo/v1beta1/zz_repository_types.go b/apis/sourcerepo/v1beta1/zz_repository_types.go index f46b08cca..42b895ede 100755 --- a/apis/sourcerepo/v1beta1/zz_repository_types.go +++ b/apis/sourcerepo/v1beta1/zz_repository_types.go @@ -103,6 +103,9 @@ type PubsubConfigsParameters struct { type RepositoryInitParameters struct { + // If set to true, skip repository creation if a repository with the same name already exists. + CreateIgnoreAlreadyExists *bool `json:"createIgnoreAlreadyExists,omitempty" tf:"create_ignore_already_exists,omitempty"` + // The ID of the project in which the resource belongs. 
// If it is not provided, the provider project is used. Project *string `json:"project,omitempty" tf:"project,omitempty"` @@ -115,6 +118,9 @@ type RepositoryInitParameters struct { type RepositoryObservation struct { + // If set to true, skip repository creation if a repository with the same name already exists. + CreateIgnoreAlreadyExists *bool `json:"createIgnoreAlreadyExists,omitempty" tf:"create_ignore_already_exists,omitempty"` + // an identifier for the resource with format projects/{{project}}/repos/{{name}} ID *string `json:"id,omitempty" tf:"id,omitempty"` @@ -136,6 +142,10 @@ type RepositoryObservation struct { type RepositoryParameters struct { + // If set to true, skip repository creation if a repository with the same name already exists. + // +kubebuilder:validation:Optional + CreateIgnoreAlreadyExists *bool `json:"createIgnoreAlreadyExists,omitempty" tf:"create_ignore_already_exists,omitempty"` + // The ID of the project in which the resource belongs. // If it is not provided, the provider project is used. // +kubebuilder:validation:Optional diff --git a/apis/spanner/v1beta2/zz_database_types.go b/apis/spanner/v1beta2/zz_database_types.go index b5ee77815..5b219b973 100755 --- a/apis/spanner/v1beta2/zz_database_types.go +++ b/apis/spanner/v1beta2/zz_database_types.go @@ -26,10 +26,6 @@ type DatabaseInitParameters struct { // error in any statement, the database is not created. Ddl []*string `json:"ddl,omitempty" tf:"ddl,omitempty"` - // Defaults to true. - // When the field is set to false, deleting the database is allowed. - DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` - // Whether drop protection is enabled for this database. Defaults to false. // whereas setting “enableDropProtection” to true protects the database from deletions in all interfaces. // (2) Setting "enableDropProtection" to true also prevents the deletion of the parent instance containing the database. 
@@ -115,11 +111,6 @@ type DatabaseParameters struct { // +kubebuilder:validation:Optional Ddl []*string `json:"ddl,omitempty" tf:"ddl,omitempty"` - // Defaults to true. - // When the field is set to false, deleting the database is allowed. - // +kubebuilder:validation:Optional - DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` - // Whether drop protection is enabled for this database. Defaults to false. // whereas setting “enableDropProtection” to true protects the database from deletions in all interfaces. // (2) Setting "enableDropProtection" to true also prevents the deletion of the parent instance containing the database. @@ -164,6 +155,10 @@ type EncryptionConfigInitParameters struct { // Fully qualified name of the KMS key to use to encrypt this database. This key must exist // in the same location as the Spanner Database. KMSKeyName *string `json:"kmsKeyName,omitempty" tf:"kms_key_name,omitempty"` + + // Fully qualified name of the KMS keys to use to encrypt this database. The keys must exist + // in the same locations as the Spanner Database. + KMSKeyNames []*string `json:"kmsKeyNames,omitempty" tf:"kms_key_names,omitempty"` } type EncryptionConfigObservation struct { @@ -171,6 +166,10 @@ type EncryptionConfigObservation struct { // Fully qualified name of the KMS key to use to encrypt this database. This key must exist // in the same location as the Spanner Database. KMSKeyName *string `json:"kmsKeyName,omitempty" tf:"kms_key_name,omitempty"` + + // Fully qualified name of the KMS keys to use to encrypt this database. The keys must exist + // in the same locations as the Spanner Database. + KMSKeyNames []*string `json:"kmsKeyNames,omitempty" tf:"kms_key_names,omitempty"` } type EncryptionConfigParameters struct { @@ -178,7 +177,12 @@ type EncryptionConfigParameters struct { // Fully qualified name of the KMS key to use to encrypt this database. 
This key must exist // in the same location as the Spanner Database. // +kubebuilder:validation:Optional - KMSKeyName *string `json:"kmsKeyName" tf:"kms_key_name,omitempty"` + KMSKeyName *string `json:"kmsKeyName,omitempty" tf:"kms_key_name,omitempty"` + + // Fully qualified name of the KMS keys to use to encrypt this database. The keys must exist + // in the same locations as the Spanner Database. + // +kubebuilder:validation:Optional + KMSKeyNames []*string `json:"kmsKeyNames,omitempty" tf:"kms_key_names,omitempty"` } // DatabaseSpec defines the desired state of Database diff --git a/apis/spanner/v1beta2/zz_generated.deepcopy.go b/apis/spanner/v1beta2/zz_generated.deepcopy.go index decdb1b40..4be0d52e0 100644 --- a/apis/spanner/v1beta2/zz_generated.deepcopy.go +++ b/apis/spanner/v1beta2/zz_generated.deepcopy.go @@ -13,12 +13,199 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AsymmetricAutoscalingOptionsInitParameters) DeepCopyInto(out *AsymmetricAutoscalingOptionsInitParameters) { + *out = *in + if in.Overrides != nil { + in, out := &in.Overrides, &out.Overrides + *out = new(OverridesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ReplicaSelection != nil { + in, out := &in.ReplicaSelection, &out.ReplicaSelection + *out = new(ReplicaSelectionInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AsymmetricAutoscalingOptionsInitParameters. +func (in *AsymmetricAutoscalingOptionsInitParameters) DeepCopy() *AsymmetricAutoscalingOptionsInitParameters { + if in == nil { + return nil + } + out := new(AsymmetricAutoscalingOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AsymmetricAutoscalingOptionsObservation) DeepCopyInto(out *AsymmetricAutoscalingOptionsObservation) { + *out = *in + if in.Overrides != nil { + in, out := &in.Overrides, &out.Overrides + *out = new(OverridesObservation) + (*in).DeepCopyInto(*out) + } + if in.ReplicaSelection != nil { + in, out := &in.ReplicaSelection, &out.ReplicaSelection + *out = new(ReplicaSelectionObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AsymmetricAutoscalingOptionsObservation. +func (in *AsymmetricAutoscalingOptionsObservation) DeepCopy() *AsymmetricAutoscalingOptionsObservation { + if in == nil { + return nil + } + out := new(AsymmetricAutoscalingOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AsymmetricAutoscalingOptionsParameters) DeepCopyInto(out *AsymmetricAutoscalingOptionsParameters) { + *out = *in + if in.Overrides != nil { + in, out := &in.Overrides, &out.Overrides + *out = new(OverridesParameters) + (*in).DeepCopyInto(*out) + } + if in.ReplicaSelection != nil { + in, out := &in.ReplicaSelection, &out.ReplicaSelection + *out = new(ReplicaSelectionParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AsymmetricAutoscalingOptionsParameters. +func (in *AsymmetricAutoscalingOptionsParameters) DeepCopy() *AsymmetricAutoscalingOptionsParameters { + if in == nil { + return nil + } + out := new(AsymmetricAutoscalingOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutoscalingConfigAutoscalingLimitsInitParameters) DeepCopyInto(out *AutoscalingConfigAutoscalingLimitsInitParameters) { + *out = *in + if in.MaxNodes != nil { + in, out := &in.MaxNodes, &out.MaxNodes + *out = new(float64) + **out = **in + } + if in.MaxProcessingUnits != nil { + in, out := &in.MaxProcessingUnits, &out.MaxProcessingUnits + *out = new(float64) + **out = **in + } + if in.MinNodes != nil { + in, out := &in.MinNodes, &out.MinNodes + *out = new(float64) + **out = **in + } + if in.MinProcessingUnits != nil { + in, out := &in.MinProcessingUnits, &out.MinProcessingUnits + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingConfigAutoscalingLimitsInitParameters. +func (in *AutoscalingConfigAutoscalingLimitsInitParameters) DeepCopy() *AutoscalingConfigAutoscalingLimitsInitParameters { + if in == nil { + return nil + } + out := new(AutoscalingConfigAutoscalingLimitsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoscalingConfigAutoscalingLimitsObservation) DeepCopyInto(out *AutoscalingConfigAutoscalingLimitsObservation) { + *out = *in + if in.MaxNodes != nil { + in, out := &in.MaxNodes, &out.MaxNodes + *out = new(float64) + **out = **in + } + if in.MaxProcessingUnits != nil { + in, out := &in.MaxProcessingUnits, &out.MaxProcessingUnits + *out = new(float64) + **out = **in + } + if in.MinNodes != nil { + in, out := &in.MinNodes, &out.MinNodes + *out = new(float64) + **out = **in + } + if in.MinProcessingUnits != nil { + in, out := &in.MinProcessingUnits, &out.MinProcessingUnits + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingConfigAutoscalingLimitsObservation. 
+func (in *AutoscalingConfigAutoscalingLimitsObservation) DeepCopy() *AutoscalingConfigAutoscalingLimitsObservation { + if in == nil { + return nil + } + out := new(AutoscalingConfigAutoscalingLimitsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoscalingConfigAutoscalingLimitsParameters) DeepCopyInto(out *AutoscalingConfigAutoscalingLimitsParameters) { + *out = *in + if in.MaxNodes != nil { + in, out := &in.MaxNodes, &out.MaxNodes + *out = new(float64) + **out = **in + } + if in.MaxProcessingUnits != nil { + in, out := &in.MaxProcessingUnits, &out.MaxProcessingUnits + *out = new(float64) + **out = **in + } + if in.MinNodes != nil { + in, out := &in.MinNodes, &out.MinNodes + *out = new(float64) + **out = **in + } + if in.MinProcessingUnits != nil { + in, out := &in.MinProcessingUnits, &out.MinProcessingUnits + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingConfigAutoscalingLimitsParameters. +func (in *AutoscalingConfigAutoscalingLimitsParameters) DeepCopy() *AutoscalingConfigAutoscalingLimitsParameters { + if in == nil { + return nil + } + out := new(AutoscalingConfigAutoscalingLimitsParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *AutoscalingConfigInitParameters) DeepCopyInto(out *AutoscalingConfigInitParameters) { *out = *in + if in.AsymmetricAutoscalingOptions != nil { + in, out := &in.AsymmetricAutoscalingOptions, &out.AsymmetricAutoscalingOptions + *out = make([]AsymmetricAutoscalingOptionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.AutoscalingLimits != nil { in, out := &in.AutoscalingLimits, &out.AutoscalingLimits - *out = new(AutoscalingLimitsInitParameters) + *out = new(AutoscalingConfigAutoscalingLimitsInitParameters) (*in).DeepCopyInto(*out) } if in.AutoscalingTargets != nil { @@ -41,9 +228,16 @@ func (in *AutoscalingConfigInitParameters) DeepCopy() *AutoscalingConfigInitPara // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AutoscalingConfigObservation) DeepCopyInto(out *AutoscalingConfigObservation) { *out = *in + if in.AsymmetricAutoscalingOptions != nil { + in, out := &in.AsymmetricAutoscalingOptions, &out.AsymmetricAutoscalingOptions + *out = make([]AsymmetricAutoscalingOptionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.AutoscalingLimits != nil { in, out := &in.AutoscalingLimits, &out.AutoscalingLimits - *out = new(AutoscalingLimitsObservation) + *out = new(AutoscalingConfigAutoscalingLimitsObservation) (*in).DeepCopyInto(*out) } if in.AutoscalingTargets != nil { @@ -66,9 +260,16 @@ func (in *AutoscalingConfigObservation) DeepCopy() *AutoscalingConfigObservation // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *AutoscalingConfigParameters) DeepCopyInto(out *AutoscalingConfigParameters) { *out = *in + if in.AsymmetricAutoscalingOptions != nil { + in, out := &in.AsymmetricAutoscalingOptions, &out.AsymmetricAutoscalingOptions + *out = make([]AsymmetricAutoscalingOptionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.AutoscalingLimits != nil { in, out := &in.AutoscalingLimits, &out.AutoscalingLimits - *out = new(AutoscalingLimitsParameters) + *out = new(AutoscalingConfigAutoscalingLimitsParameters) (*in).DeepCopyInto(*out) } if in.AutoscalingTargets != nil { @@ -96,21 +297,11 @@ func (in *AutoscalingLimitsInitParameters) DeepCopyInto(out *AutoscalingLimitsIn *out = new(float64) **out = **in } - if in.MaxProcessingUnits != nil { - in, out := &in.MaxProcessingUnits, &out.MaxProcessingUnits - *out = new(float64) - **out = **in - } if in.MinNodes != nil { in, out := &in.MinNodes, &out.MinNodes *out = new(float64) **out = **in } - if in.MinProcessingUnits != nil { - in, out := &in.MinProcessingUnits, &out.MinProcessingUnits - *out = new(float64) - **out = **in - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingLimitsInitParameters. @@ -131,21 +322,11 @@ func (in *AutoscalingLimitsObservation) DeepCopyInto(out *AutoscalingLimitsObser *out = new(float64) **out = **in } - if in.MaxProcessingUnits != nil { - in, out := &in.MaxProcessingUnits, &out.MaxProcessingUnits - *out = new(float64) - **out = **in - } if in.MinNodes != nil { in, out := &in.MinNodes, &out.MinNodes *out = new(float64) **out = **in } - if in.MinProcessingUnits != nil { - in, out := &in.MinProcessingUnits, &out.MinProcessingUnits - *out = new(float64) - **out = **in - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingLimitsObservation. 
@@ -166,21 +347,11 @@ func (in *AutoscalingLimitsParameters) DeepCopyInto(out *AutoscalingLimitsParame *out = new(float64) **out = **in } - if in.MaxProcessingUnits != nil { - in, out := &in.MaxProcessingUnits, &out.MaxProcessingUnits - *out = new(float64) - **out = **in - } if in.MinNodes != nil { in, out := &in.MinNodes, &out.MinNodes *out = new(float64) **out = **in } - if in.MinProcessingUnits != nil { - in, out := &in.MinProcessingUnits, &out.MinProcessingUnits - *out = new(float64) - **out = **in - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingLimitsParameters. @@ -683,11 +854,6 @@ func (in *DatabaseInitParameters) DeepCopyInto(out *DatabaseInitParameters) { } } } - if in.DeletionProtection != nil { - in, out := &in.DeletionProtection, &out.DeletionProtection - *out = new(bool) - **out = **in - } if in.EnableDropProtection != nil { in, out := &in.EnableDropProtection, &out.EnableDropProtection *out = new(bool) @@ -842,11 +1008,6 @@ func (in *DatabaseParameters) DeepCopyInto(out *DatabaseParameters) { } } } - if in.DeletionProtection != nil { - in, out := &in.DeletionProtection, &out.DeletionProtection - *out = new(bool) - **out = **in - } if in.EnableDropProtection != nil { in, out := &in.EnableDropProtection, &out.EnableDropProtection *out = new(bool) @@ -937,6 +1098,17 @@ func (in *EncryptionConfigInitParameters) DeepCopyInto(out *EncryptionConfigInit *out = new(string) **out = **in } + if in.KMSKeyNames != nil { + in, out := &in.KMSKeyNames, &out.KMSKeyNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionConfigInitParameters. 
@@ -957,6 +1129,17 @@ func (in *EncryptionConfigObservation) DeepCopyInto(out *EncryptionConfigObserva *out = new(string) **out = **in } + if in.KMSKeyNames != nil { + in, out := &in.KMSKeyNames, &out.KMSKeyNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionConfigObservation. @@ -977,6 +1160,17 @@ func (in *EncryptionConfigParameters) DeepCopyInto(out *EncryptionConfigParamete *out = new(string) **out = **in } + if in.KMSKeyNames != nil { + in, out := &in.KMSKeyNames, &out.KMSKeyNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionConfigParameters. @@ -1373,6 +1567,11 @@ func (in *InstanceInitParameters) DeepCopyInto(out *InstanceInitParameters) { *out = new(string) **out = **in } + if in.Edition != nil { + in, out := &in.Edition, &out.Edition + *out = new(string) + **out = **in + } if in.ForceDestroy != nil { in, out := &in.ForceDestroy, &out.ForceDestroy *out = new(bool) @@ -1476,6 +1675,11 @@ func (in *InstanceObservation) DeepCopyInto(out *InstanceObservation) { *out = new(string) **out = **in } + if in.Edition != nil { + in, out := &in.Edition, &out.Edition + *out = new(string) + **out = **in + } if in.EffectiveLabels != nil { in, out := &in.EffectiveLabels, &out.EffectiveLabels *out = make(map[string]*string, len(*in)) @@ -1589,6 +1793,11 @@ func (in *InstanceParameters) DeepCopyInto(out *InstanceParameters) { *out = new(string) **out = **in } + if in.Edition != nil { + in, out := &in.Edition, &out.Edition + *out = new(string) + **out = **in + } if in.ForceDestroy != nil { in, out := &in.ForceDestroy, &out.ForceDestroy *out = 
new(bool) @@ -1671,3 +1880,123 @@ func (in *InstanceStatus) DeepCopy() *InstanceStatus { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OverridesInitParameters) DeepCopyInto(out *OverridesInitParameters) { + *out = *in + if in.AutoscalingLimits != nil { + in, out := &in.AutoscalingLimits, &out.AutoscalingLimits + *out = new(AutoscalingLimitsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverridesInitParameters. +func (in *OverridesInitParameters) DeepCopy() *OverridesInitParameters { + if in == nil { + return nil + } + out := new(OverridesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OverridesObservation) DeepCopyInto(out *OverridesObservation) { + *out = *in + if in.AutoscalingLimits != nil { + in, out := &in.AutoscalingLimits, &out.AutoscalingLimits + *out = new(AutoscalingLimitsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverridesObservation. +func (in *OverridesObservation) DeepCopy() *OverridesObservation { + if in == nil { + return nil + } + out := new(OverridesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OverridesParameters) DeepCopyInto(out *OverridesParameters) { + *out = *in + if in.AutoscalingLimits != nil { + in, out := &in.AutoscalingLimits, &out.AutoscalingLimits + *out = new(AutoscalingLimitsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverridesParameters. 
+func (in *OverridesParameters) DeepCopy() *OverridesParameters { + if in == nil { + return nil + } + out := new(OverridesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSelectionInitParameters) DeepCopyInto(out *ReplicaSelectionInitParameters) { + *out = *in + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSelectionInitParameters. +func (in *ReplicaSelectionInitParameters) DeepCopy() *ReplicaSelectionInitParameters { + if in == nil { + return nil + } + out := new(ReplicaSelectionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSelectionObservation) DeepCopyInto(out *ReplicaSelectionObservation) { + *out = *in + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSelectionObservation. +func (in *ReplicaSelectionObservation) DeepCopy() *ReplicaSelectionObservation { + if in == nil { + return nil + } + out := new(ReplicaSelectionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSelectionParameters) DeepCopyInto(out *ReplicaSelectionParameters) { + *out = *in + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSelectionParameters. 
+func (in *ReplicaSelectionParameters) DeepCopy() *ReplicaSelectionParameters { + if in == nil { + return nil + } + out := new(ReplicaSelectionParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/spanner/v1beta2/zz_instance_types.go b/apis/spanner/v1beta2/zz_instance_types.go index dda8d3fd3..f862239ec 100755 --- a/apis/spanner/v1beta2/zz_instance_types.go +++ b/apis/spanner/v1beta2/zz_instance_types.go @@ -13,8 +13,105 @@ import ( v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" ) +type AsymmetricAutoscalingOptionsInitParameters struct { + + // A nested object resource. + // Structure is documented below. + Overrides *OverridesInitParameters `json:"overrides,omitempty" tf:"overrides,omitempty"` + + // A nested object resource. + // Structure is documented below. + ReplicaSelection *ReplicaSelectionInitParameters `json:"replicaSelection,omitempty" tf:"replica_selection,omitempty"` +} + +type AsymmetricAutoscalingOptionsObservation struct { + + // A nested object resource. + // Structure is documented below. + Overrides *OverridesObservation `json:"overrides,omitempty" tf:"overrides,omitempty"` + + // A nested object resource. + // Structure is documented below. + ReplicaSelection *ReplicaSelectionObservation `json:"replicaSelection,omitempty" tf:"replica_selection,omitempty"` +} + +type AsymmetricAutoscalingOptionsParameters struct { + + // A nested object resource. + // Structure is documented below. + // +kubebuilder:validation:Optional + Overrides *OverridesParameters `json:"overrides" tf:"overrides,omitempty"` + + // A nested object resource. + // Structure is documented below. + // +kubebuilder:validation:Optional + ReplicaSelection *ReplicaSelectionParameters `json:"replicaSelection" tf:"replica_selection,omitempty"` +} + +type AutoscalingConfigAutoscalingLimitsInitParameters struct { + + // The maximum number of nodes for this specific replica. 
+ MaxNodes *float64 `json:"maxNodes,omitempty" tf:"max_nodes,omitempty"` + + // Specifies maximum number of processing units allocated to the instance. + // If set, this number should be multiples of 1000 and be greater than or equal to + // min_processing_units. + MaxProcessingUnits *float64 `json:"maxProcessingUnits,omitempty" tf:"max_processing_units,omitempty"` + + // The minimum number of nodes for this specific replica. + MinNodes *float64 `json:"minNodes,omitempty" tf:"min_nodes,omitempty"` + + // Specifies minimum number of processing units allocated to the instance. + // If set, this number should be multiples of 1000. + MinProcessingUnits *float64 `json:"minProcessingUnits,omitempty" tf:"min_processing_units,omitempty"` +} + +type AutoscalingConfigAutoscalingLimitsObservation struct { + + // The maximum number of nodes for this specific replica. + MaxNodes *float64 `json:"maxNodes,omitempty" tf:"max_nodes,omitempty"` + + // Specifies maximum number of processing units allocated to the instance. + // If set, this number should be multiples of 1000 and be greater than or equal to + // min_processing_units. + MaxProcessingUnits *float64 `json:"maxProcessingUnits,omitempty" tf:"max_processing_units,omitempty"` + + // The minimum number of nodes for this specific replica. + MinNodes *float64 `json:"minNodes,omitempty" tf:"min_nodes,omitempty"` + + // Specifies minimum number of processing units allocated to the instance. + // If set, this number should be multiples of 1000. + MinProcessingUnits *float64 `json:"minProcessingUnits,omitempty" tf:"min_processing_units,omitempty"` +} + +type AutoscalingConfigAutoscalingLimitsParameters struct { + + // The maximum number of nodes for this specific replica. + // +kubebuilder:validation:Optional + MaxNodes *float64 `json:"maxNodes,omitempty" tf:"max_nodes,omitempty"` + + // Specifies maximum number of processing units allocated to the instance. 
+ // If set, this number should be multiples of 1000 and be greater than or equal to + // min_processing_units. + // +kubebuilder:validation:Optional + MaxProcessingUnits *float64 `json:"maxProcessingUnits,omitempty" tf:"max_processing_units,omitempty"` + + // The minimum number of nodes for this specific replica. + // +kubebuilder:validation:Optional + MinNodes *float64 `json:"minNodes,omitempty" tf:"min_nodes,omitempty"` + + // Specifies minimum number of processing units allocated to the instance. + // If set, this number should be multiples of 1000. + // +kubebuilder:validation:Optional + MinProcessingUnits *float64 `json:"minProcessingUnits,omitempty" tf:"min_processing_units,omitempty"` +} + type AutoscalingConfigInitParameters struct { + // Asymmetric autoscaling options for specific replicas. + // Structure is documented below. + AsymmetricAutoscalingOptions []AsymmetricAutoscalingOptionsInitParameters `json:"asymmetricAutoscalingOptions,omitempty" tf:"asymmetric_autoscaling_options,omitempty"` + // Defines scale in controls to reduce the risk of response latency // and outages due to abrupt scale-in events. Users can define the minimum and // maximum compute capacity allocated to the instance, and the autoscaler will @@ -22,7 +119,7 @@ type AutoscalingConfigInitParameters struct { // units to specify the limits, but should use the same unit to set both the // min_limit and max_limit. // Structure is documented below. - AutoscalingLimits *AutoscalingLimitsInitParameters `json:"autoscalingLimits,omitempty" tf:"autoscaling_limits,omitempty"` + AutoscalingLimits *AutoscalingConfigAutoscalingLimitsInitParameters `json:"autoscalingLimits,omitempty" tf:"autoscaling_limits,omitempty"` // Defines scale in controls to reduce the risk of response latency // and outages due to abrupt scale-in events @@ -32,6 +129,10 @@ type AutoscalingConfigInitParameters struct { type AutoscalingConfigObservation struct { + // Asymmetric autoscaling options for specific replicas. 
+ // Structure is documented below. + AsymmetricAutoscalingOptions []AsymmetricAutoscalingOptionsObservation `json:"asymmetricAutoscalingOptions,omitempty" tf:"asymmetric_autoscaling_options,omitempty"` + // Defines scale in controls to reduce the risk of response latency // and outages due to abrupt scale-in events. Users can define the minimum and // maximum compute capacity allocated to the instance, and the autoscaler will @@ -39,7 +140,7 @@ type AutoscalingConfigObservation struct { // units to specify the limits, but should use the same unit to set both the // min_limit and max_limit. // Structure is documented below. - AutoscalingLimits *AutoscalingLimitsObservation `json:"autoscalingLimits,omitempty" tf:"autoscaling_limits,omitempty"` + AutoscalingLimits *AutoscalingConfigAutoscalingLimitsObservation `json:"autoscalingLimits,omitempty" tf:"autoscaling_limits,omitempty"` // Defines scale in controls to reduce the risk of response latency // and outages due to abrupt scale-in events @@ -49,6 +150,11 @@ type AutoscalingConfigObservation struct { type AutoscalingConfigParameters struct { + // Asymmetric autoscaling options for specific replicas. + // Structure is documented below. + // +kubebuilder:validation:Optional + AsymmetricAutoscalingOptions []AsymmetricAutoscalingOptionsParameters `json:"asymmetricAutoscalingOptions,omitempty" tf:"asymmetric_autoscaling_options,omitempty"` + // Defines scale in controls to reduce the risk of response latency // and outages due to abrupt scale-in events. Users can define the minimum and // maximum compute capacity allocated to the instance, and the autoscaler will @@ -57,7 +163,7 @@ type AutoscalingConfigParameters struct { // min_limit and max_limit. // Structure is documented below. 
// +kubebuilder:validation:Optional - AutoscalingLimits *AutoscalingLimitsParameters `json:"autoscalingLimits,omitempty" tf:"autoscaling_limits,omitempty"` + AutoscalingLimits *AutoscalingConfigAutoscalingLimitsParameters `json:"autoscalingLimits,omitempty" tf:"autoscaling_limits,omitempty"` // Defines scale in controls to reduce the risk of response latency // and outages due to abrupt scale-in events @@ -68,66 +174,31 @@ type AutoscalingConfigParameters struct { type AutoscalingLimitsInitParameters struct { - // Specifies maximum number of nodes allocated to the instance. If set, this number - // should be greater than or equal to min_nodes. + // The maximum number of nodes for this specific replica. MaxNodes *float64 `json:"maxNodes,omitempty" tf:"max_nodes,omitempty"` - // Specifies maximum number of processing units allocated to the instance. - // If set, this number should be multiples of 1000 and be greater than or equal to - // min_processing_units. - MaxProcessingUnits *float64 `json:"maxProcessingUnits,omitempty" tf:"max_processing_units,omitempty"` - - // Specifies number of nodes allocated to the instance. If set, this number - // should be greater than or equal to 1. + // The minimum number of nodes for this specific replica. MinNodes *float64 `json:"minNodes,omitempty" tf:"min_nodes,omitempty"` - - // Specifies minimum number of processing units allocated to the instance. - // If set, this number should be multiples of 1000. - MinProcessingUnits *float64 `json:"minProcessingUnits,omitempty" tf:"min_processing_units,omitempty"` } type AutoscalingLimitsObservation struct { - // Specifies maximum number of nodes allocated to the instance. If set, this number - // should be greater than or equal to min_nodes. + // The maximum number of nodes for this specific replica. MaxNodes *float64 `json:"maxNodes,omitempty" tf:"max_nodes,omitempty"` - // Specifies maximum number of processing units allocated to the instance. 
- // If set, this number should be multiples of 1000 and be greater than or equal to - // min_processing_units. - MaxProcessingUnits *float64 `json:"maxProcessingUnits,omitempty" tf:"max_processing_units,omitempty"` - - // Specifies number of nodes allocated to the instance. If set, this number - // should be greater than or equal to 1. + // The minimum number of nodes for this specific replica. MinNodes *float64 `json:"minNodes,omitempty" tf:"min_nodes,omitempty"` - - // Specifies minimum number of processing units allocated to the instance. - // If set, this number should be multiples of 1000. - MinProcessingUnits *float64 `json:"minProcessingUnits,omitempty" tf:"min_processing_units,omitempty"` } type AutoscalingLimitsParameters struct { - // Specifies maximum number of nodes allocated to the instance. If set, this number - // should be greater than or equal to min_nodes. + // The maximum number of nodes for this specific replica. // +kubebuilder:validation:Optional - MaxNodes *float64 `json:"maxNodes,omitempty" tf:"max_nodes,omitempty"` - - // Specifies maximum number of processing units allocated to the instance. - // If set, this number should be multiples of 1000 and be greater than or equal to - // min_processing_units. - // +kubebuilder:validation:Optional - MaxProcessingUnits *float64 `json:"maxProcessingUnits,omitempty" tf:"max_processing_units,omitempty"` + MaxNodes *float64 `json:"maxNodes" tf:"max_nodes,omitempty"` - // Specifies number of nodes allocated to the instance. If set, this number - // should be greater than or equal to 1. + // The minimum number of nodes for this specific replica. // +kubebuilder:validation:Optional - MinNodes *float64 `json:"minNodes,omitempty" tf:"min_nodes,omitempty"` - - // Specifies minimum number of processing units allocated to the instance. - // If set, this number should be multiples of 1000. 
- // +kubebuilder:validation:Optional - MinProcessingUnits *float64 `json:"minProcessingUnits,omitempty" tf:"min_processing_units,omitempty"` + MinNodes *float64 `json:"minNodes" tf:"min_nodes,omitempty"` } type AutoscalingTargetsInitParameters struct { @@ -198,6 +269,10 @@ type InstanceInitParameters struct { // unique per project and between 4 and 30 characters in length. DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + // The edition selected for this instance. Different editions provide different capabilities at different price points. + // Possible values are: EDITION_UNSPECIFIED, STANDARD, ENTERPRISE, ENTERPRISE_PLUS. + Edition *string `json:"edition,omitempty" tf:"edition,omitempty"` + // When deleting a spanner instance, this boolean option will delete all backups of this instance. // This must be set to true if you created a backup manually in the console. ForceDestroy *bool `json:"forceDestroy,omitempty" tf:"force_destroy,omitempty"` @@ -245,6 +320,10 @@ type InstanceObservation struct { // unique per project and between 4 and 30 characters in length. DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + // The edition selected for this instance. Different editions provide different capabilities at different price points. + // Possible values are: EDITION_UNSPECIFIED, STANDARD, ENTERPRISE, ENTERPRISE_PLUS. + Edition *string `json:"edition,omitempty" tf:"edition,omitempty"` + // for all of the labels present on the resource. // +mapType=granular EffectiveLabels map[string]*string `json:"effectiveLabels,omitempty" tf:"effective_labels,omitempty"` @@ -311,6 +390,11 @@ type InstanceParameters struct { // +kubebuilder:validation:Optional DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + // The edition selected for this instance. Different editions provide different capabilities at different price points. 
+ // Possible values are: EDITION_UNSPECIFIED, STANDARD, ENTERPRISE, ENTERPRISE_PLUS. + // +kubebuilder:validation:Optional + Edition *string `json:"edition,omitempty" tf:"edition,omitempty"` + // When deleting a spanner instance, this boolean option will delete all backups of this instance. // This must be set to true if you created a backup manually in the console. // +kubebuilder:validation:Optional @@ -336,6 +420,47 @@ type InstanceParameters struct { Project *string `json:"project,omitempty" tf:"project,omitempty"` } +type OverridesInitParameters struct { + + // A nested object resource. + // Structure is documented below. + AutoscalingLimits *AutoscalingLimitsInitParameters `json:"autoscalingLimits,omitempty" tf:"autoscaling_limits,omitempty"` +} + +type OverridesObservation struct { + + // A nested object resource. + // Structure is documented below. + AutoscalingLimits *AutoscalingLimitsObservation `json:"autoscalingLimits,omitempty" tf:"autoscaling_limits,omitempty"` +} + +type OverridesParameters struct { + + // A nested object resource. + // Structure is documented below. + // +kubebuilder:validation:Optional + AutoscalingLimits *AutoscalingLimitsParameters `json:"autoscalingLimits" tf:"autoscaling_limits,omitempty"` +} + +type ReplicaSelectionInitParameters struct { + + // The location of the replica to apply asymmetric autoscaling options. + Location *string `json:"location,omitempty" tf:"location,omitempty"` +} + +type ReplicaSelectionObservation struct { + + // The location of the replica to apply asymmetric autoscaling options. + Location *string `json:"location,omitempty" tf:"location,omitempty"` +} + +type ReplicaSelectionParameters struct { + + // The location of the replica to apply asymmetric autoscaling options. 
+ // +kubebuilder:validation:Optional + Location *string `json:"location" tf:"location,omitempty"` +} + // InstanceSpec defines the desired state of Instance type InstanceSpec struct { v1.ResourceSpec `json:",inline"` diff --git a/apis/sql/v1beta2/zz_databaseinstance_types.go b/apis/sql/v1beta2/zz_databaseinstance_types.go index 342aa7916..332ed252a 100755 --- a/apis/sql/v1beta2/zz_databaseinstance_types.go +++ b/apis/sql/v1beta2/zz_databaseinstance_types.go @@ -340,18 +340,15 @@ type DatabaseInstanceInitParameters struct { // The MySQL, PostgreSQL or // SQL Server version to use. Supported values include MYSQL_5_6, - // MYSQL_5_7, MYSQL_8_0, POSTGRES_9_6,POSTGRES_10, POSTGRES_11, - // POSTGRES_12, POSTGRES_13, POSTGRES_14, POSTGRES_15, SQLSERVER_2017_STANDARD, - // SQLSERVER_2017_ENTERPRISE, SQLSERVER_2017_EXPRESS, SQLSERVER_2017_WEB. + // MYSQL_5_7, MYSQL_8_0, MYSQL_8_4, POSTGRES_9_6,POSTGRES_10, POSTGRES_11, + // POSTGRES_12, POSTGRES_13, POSTGRES_14, POSTGRES_15, POSTGRES_16, POSTGRES_17, + // SQLSERVER_2017_STANDARD, SQLSERVER_2017_ENTERPRISE, SQLSERVER_2017_EXPRESS, SQLSERVER_2017_WEB. // SQLSERVER_2019_STANDARD, SQLSERVER_2019_ENTERPRISE, SQLSERVER_2019_EXPRESS, // SQLSERVER_2019_WEB. // Database Version Policies // includes an up-to-date reference of supported versions. DatabaseVersion *string `json:"databaseVersion,omitempty" tf:"database_version,omitempty"` - // When the field is set to false, deleting the instance is allowed. - DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` - // The full path to the encryption key used for the CMEK disk encryption. // The provided key must be in the same region as the SQL instance. In order // to use this feature, a special kind of service account must be created and @@ -378,9 +375,12 @@ type DatabaseInstanceInitParameters struct { Region *string `json:"region,omitempty" tf:"region,omitempty"` // The configuration for replication. 
The - // configuration is detailed below. Valid only for MySQL instances. + // configuration is detailed below. ReplicaConfiguration *ReplicaConfigurationInitParameters `json:"replicaConfiguration,omitempty" tf:"replica_configuration,omitempty"` + // List of replica names. Can be updated. + ReplicaNames []*string `json:"replicaNames,omitempty" tf:"replica_names,omitempty"` + // The context needed to restore the database to a backup run. The configuration is detailed below. Adding or modifying this // block during resource creation/update will trigger the restore action after the resource is created/updated. RestoreBackupContext *RestoreBackupContextInitParameters `json:"restoreBackupContext,omitempty" tf:"restore_backup_context,omitempty"` @@ -406,15 +406,14 @@ type DatabaseInstanceObservation struct { // connection strings. For example, when connecting with Cloud SQL Proxy. ConnectionName *string `json:"connectionName,omitempty" tf:"connection_name,omitempty"` - // The name of the instance. This is done because after a name is used, it cannot be reused for - // up to one week. + // The DNS name of the instance. See Connect to an instance using Private Service Connect for more details. DNSName *string `json:"dnsName,omitempty" tf:"dns_name,omitempty"` // The MySQL, PostgreSQL or // SQL Server version to use. Supported values include MYSQL_5_6, - // MYSQL_5_7, MYSQL_8_0, POSTGRES_9_6,POSTGRES_10, POSTGRES_11, - // POSTGRES_12, POSTGRES_13, POSTGRES_14, POSTGRES_15, SQLSERVER_2017_STANDARD, - // SQLSERVER_2017_ENTERPRISE, SQLSERVER_2017_EXPRESS, SQLSERVER_2017_WEB. + // MYSQL_5_7, MYSQL_8_0, MYSQL_8_4, POSTGRES_9_6,POSTGRES_10, POSTGRES_11, + // POSTGRES_12, POSTGRES_13, POSTGRES_14, POSTGRES_15, POSTGRES_16, POSTGRES_17, + // SQLSERVER_2017_STANDARD, SQLSERVER_2017_ENTERPRISE, SQLSERVER_2017_EXPRESS, SQLSERVER_2017_WEB. // SQLSERVER_2019_STANDARD, SQLSERVER_2019_ENTERPRISE, SQLSERVER_2019_EXPRESS, // SQLSERVER_2019_WEB. 
// Database Version Policies @@ -470,9 +469,12 @@ type DatabaseInstanceObservation struct { Region *string `json:"region,omitempty" tf:"region,omitempty"` // The configuration for replication. The - // configuration is detailed below. Valid only for MySQL instances. + // configuration is detailed below. ReplicaConfiguration *ReplicaConfigurationObservation `json:"replicaConfiguration,omitempty" tf:"replica_configuration,omitempty"` + // List of replica names. Can be updated. + ReplicaNames []*string `json:"replicaNames,omitempty" tf:"replica_names,omitempty"` + // The context needed to restore the database to a backup run. The configuration is detailed below. Adding or modifying this // block during resource creation/update will trigger the restore action after the resource is created/updated. RestoreBackupContext *RestoreBackupContextObservation `json:"restoreBackupContext,omitempty" tf:"restore_backup_context,omitempty"` @@ -498,9 +500,9 @@ type DatabaseInstanceParameters struct { // The MySQL, PostgreSQL or // SQL Server version to use. Supported values include MYSQL_5_6, - // MYSQL_5_7, MYSQL_8_0, POSTGRES_9_6,POSTGRES_10, POSTGRES_11, - // POSTGRES_12, POSTGRES_13, POSTGRES_14, POSTGRES_15, SQLSERVER_2017_STANDARD, - // SQLSERVER_2017_ENTERPRISE, SQLSERVER_2017_EXPRESS, SQLSERVER_2017_WEB. + // MYSQL_5_7, MYSQL_8_0, MYSQL_8_4, POSTGRES_9_6,POSTGRES_10, POSTGRES_11, + // POSTGRES_12, POSTGRES_13, POSTGRES_14, POSTGRES_15, POSTGRES_16, POSTGRES_17, + // SQLSERVER_2017_STANDARD, SQLSERVER_2017_ENTERPRISE, SQLSERVER_2017_EXPRESS, SQLSERVER_2017_WEB. // SQLSERVER_2019_STANDARD, SQLSERVER_2019_ENTERPRISE, SQLSERVER_2019_EXPRESS, // SQLSERVER_2019_WEB. // Database Version Policies @@ -508,10 +510,6 @@ type DatabaseInstanceParameters struct { // +kubebuilder:validation:Optional DatabaseVersion *string `json:"databaseVersion,omitempty" tf:"database_version,omitempty"` - // When the field is set to false, deleting the instance is allowed. 
- // +kubebuilder:validation:Optional - DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` - // The full path to the encryption key used for the CMEK disk encryption. // The provided key must be in the same region as the SQL instance. In order // to use this feature, a special kind of service account must be created and @@ -543,10 +541,14 @@ type DatabaseInstanceParameters struct { Region *string `json:"region,omitempty" tf:"region,omitempty"` // The configuration for replication. The - // configuration is detailed below. Valid only for MySQL instances. + // configuration is detailed below. // +kubebuilder:validation:Optional ReplicaConfiguration *ReplicaConfigurationParameters `json:"replicaConfiguration,omitempty" tf:"replica_configuration,omitempty"` + // List of replica names. Can be updated. + // +kubebuilder:validation:Optional + ReplicaNames []*string `json:"replicaNames,omitempty" tf:"replica_names,omitempty"` + // The context needed to restore the database to a backup run. The configuration is detailed below. Adding or modifying this // block during resource creation/update will trigger the restore action after the resource is created/updated. // +kubebuilder:validation:Optional @@ -654,14 +656,14 @@ type IPConfigurationInitParameters struct { PscConfig []PscConfigInitParameters `json:"pscConfig,omitempty" tf:"psc_config,omitempty"` - // Whether SSL connections over IP are enforced or not. To change this field, also set the corresponding value in ssl_mode. It will be fully deprecated in a future major release. For now, please use ssl_mode with a compatible require_ssl value instead. - RequireSSL *bool `json:"requireSsl,omitempty" tf:"require_ssl,omitempty"` - - // Specify how SSL connection should be enforced in DB connections. This field provides more SSL enforcement options compared to require_ssl. To change this field, also set the correspoding value in require_ssl. 
+ // Specify how SSL connection should be enforced in DB connections. Supported values are ALLOW_UNENCRYPTED_AND_ENCRYPTED, ENCRYPTED_ONLY, and TRUSTED_CLIENT_CERTIFICATE_REQUIRED (not supported for SQL Server). See API reference doc for details. SSLMode *string `json:"sslMode,omitempty" tf:"ssl_mode,omitempty"` - // Specify how the server certificate's Certificate Authority is hosted. Supported value is GOOGLE_MANAGED_INTERNAL_CA. + // Specify how the server certificate's Certificate Authority is hosted. Supported values are GOOGLE_MANAGED_INTERNAL_CA and GOOGLE_MANAGED_CAS_CA. ServerCAMode *string `json:"serverCaMode,omitempty" tf:"server_ca_mode,omitempty"` + + // The resource name of the server CA pool for an instance with CUSTOMER_MANAGED_CAS_CA as the server_ca_mode. + ServerCAPool *string `json:"serverCaPool,omitempty" tf:"server_ca_pool,omitempty"` } type IPConfigurationObservation struct { @@ -688,14 +690,14 @@ type IPConfigurationObservation struct { PscConfig []PscConfigObservation `json:"pscConfig,omitempty" tf:"psc_config,omitempty"` - // Whether SSL connections over IP are enforced or not. To change this field, also set the corresponding value in ssl_mode. It will be fully deprecated in a future major release. For now, please use ssl_mode with a compatible require_ssl value instead. - RequireSSL *bool `json:"requireSsl,omitempty" tf:"require_ssl,omitempty"` - - // Specify how SSL connection should be enforced in DB connections. This field provides more SSL enforcement options compared to require_ssl. To change this field, also set the correspoding value in require_ssl. + // Specify how SSL connection should be enforced in DB connections. Supported values are ALLOW_UNENCRYPTED_AND_ENCRYPTED, ENCRYPTED_ONLY, and TRUSTED_CLIENT_CERTIFICATE_REQUIRED (not supported for SQL Server). See API reference doc for details. SSLMode *string `json:"sslMode,omitempty" tf:"ssl_mode,omitempty"` - // Specify how the server certificate's Certificate Authority is hosted. 
Supported value is GOOGLE_MANAGED_INTERNAL_CA. + // Specify how the server certificate's Certificate Authority is hosted. Supported values are GOOGLE_MANAGED_INTERNAL_CA and GOOGLE_MANAGED_CAS_CA. ServerCAMode *string `json:"serverCaMode,omitempty" tf:"server_ca_mode,omitempty"` + + // The resource name of the server CA pool for an instance with CUSTOMER_MANAGED_CAS_CA as the server_ca_mode. + ServerCAPool *string `json:"serverCaPool,omitempty" tf:"server_ca_pool,omitempty"` } type IPConfigurationParameters struct { @@ -738,17 +740,17 @@ type IPConfigurationParameters struct { // +kubebuilder:validation:Optional PscConfig []PscConfigParameters `json:"pscConfig,omitempty" tf:"psc_config,omitempty"` - // Whether SSL connections over IP are enforced or not. To change this field, also set the corresponding value in ssl_mode. It will be fully deprecated in a future major release. For now, please use ssl_mode with a compatible require_ssl value instead. - // +kubebuilder:validation:Optional - RequireSSL *bool `json:"requireSsl,omitempty" tf:"require_ssl,omitempty"` - - // Specify how SSL connection should be enforced in DB connections. This field provides more SSL enforcement options compared to require_ssl. To change this field, also set the correspoding value in require_ssl. + // Specify how SSL connection should be enforced in DB connections. Supported values are ALLOW_UNENCRYPTED_AND_ENCRYPTED, ENCRYPTED_ONLY, and TRUSTED_CLIENT_CERTIFICATE_REQUIRED (not supported for SQL Server). See API reference doc for details. // +kubebuilder:validation:Optional SSLMode *string `json:"sslMode,omitempty" tf:"ssl_mode,omitempty"` - // Specify how the server certificate's Certificate Authority is hosted. Supported value is GOOGLE_MANAGED_INTERNAL_CA. + // Specify how the server certificate's Certificate Authority is hosted. Supported values are GOOGLE_MANAGED_INTERNAL_CA and GOOGLE_MANAGED_CAS_CA. 
// +kubebuilder:validation:Optional ServerCAMode *string `json:"serverCaMode,omitempty" tf:"server_ca_mode,omitempty"` + + // The resource name of the server CA pool for an instance with CUSTOMER_MANAGED_CAS_CA as the server_ca_mode. + // +kubebuilder:validation:Optional + ServerCAPool *string `json:"serverCaPool,omitempty" tf:"server_ca_pool,omitempty"` } type InsightsConfigInitParameters struct { @@ -963,12 +965,44 @@ type PasswordValidationPolicyParameters struct { ReuseInterval *float64 `json:"reuseInterval,omitempty" tf:"reuse_interval,omitempty"` } +type PscAutoConnectionsInitParameters struct { + + // "The consumer network of this consumer endpoint. This must be a resource path that includes both the host project and the network name. For example, projects/project1/global/networks/network1. The consumer host project of this network might be different from the consumer service project." + ConsumerNetwork *string `json:"consumerNetwork,omitempty" tf:"consumer_network,omitempty"` + + // The project ID of consumer service project of this consumer endpoint. + ConsumerServiceProjectID *string `json:"consumerServiceProjectId,omitempty" tf:"consumer_service_project_id,omitempty"` +} + +type PscAutoConnectionsObservation struct { + + // "The consumer network of this consumer endpoint. This must be a resource path that includes both the host project and the network name. For example, projects/project1/global/networks/network1. The consumer host project of this network might be different from the consumer service project." + ConsumerNetwork *string `json:"consumerNetwork,omitempty" tf:"consumer_network,omitempty"` + + // The project ID of consumer service project of this consumer endpoint. + ConsumerServiceProjectID *string `json:"consumerServiceProjectId,omitempty" tf:"consumer_service_project_id,omitempty"` +} + +type PscAutoConnectionsParameters struct { + + // "The consumer network of this consumer endpoint. 
This must be a resource path that includes both the host project and the network name. For example, projects/project1/global/networks/network1. The consumer host project of this network might be different from the consumer service project." + // +kubebuilder:validation:Optional + ConsumerNetwork *string `json:"consumerNetwork" tf:"consumer_network,omitempty"` + + // The project ID of consumer service project of this consumer endpoint. + // +kubebuilder:validation:Optional + ConsumerServiceProjectID *string `json:"consumerServiceProjectId,omitempty" tf:"consumer_service_project_id,omitempty"` +} + type PscConfigInitParameters struct { // List of consumer projects that are allow-listed for PSC connections to this instance. This instance can be connected to with PSC from any network in these projects. Each consumer project in this list may be represented by a project number (numeric) or by a project id (alphanumeric). // +listType=set AllowedConsumerProjects []*string `json:"allowedConsumerProjects,omitempty" tf:"allowed_consumer_projects,omitempty"` + // A comma-separated list of networks or a comma-separated list of network-project pairs. Each project in this list is represented by a project number (numeric) or by a project ID (alphanumeric). This allows Private Service Connect connections to be created automatically for the specified networks. + PscAutoConnections []PscAutoConnectionsInitParameters `json:"pscAutoConnections,omitempty" tf:"psc_auto_connections,omitempty"` + // Whether PSC connectivity is enabled for this instance. PscEnabled *bool `json:"pscEnabled,omitempty" tf:"psc_enabled,omitempty"` } @@ -979,6 +1013,9 @@ type PscConfigObservation struct { // +listType=set AllowedConsumerProjects []*string `json:"allowedConsumerProjects,omitempty" tf:"allowed_consumer_projects,omitempty"` + // A comma-separated list of networks or a comma-separated list of network-project pairs. 
Each project in this list is represented by a project number (numeric) or by a project ID (alphanumeric). This allows Private Service Connect connections to be created automatically for the specified networks. + PscAutoConnections []PscAutoConnectionsObservation `json:"pscAutoConnections,omitempty" tf:"psc_auto_connections,omitempty"` + // Whether PSC connectivity is enabled for this instance. PscEnabled *bool `json:"pscEnabled,omitempty" tf:"psc_enabled,omitempty"` } @@ -990,6 +1027,10 @@ type PscConfigParameters struct { // +listType=set AllowedConsumerProjects []*string `json:"allowedConsumerProjects,omitempty" tf:"allowed_consumer_projects,omitempty"` + // A comma-separated list of networks or a comma-separated list of network-project pairs. Each project in this list is represented by a project number (numeric) or by a project ID (alphanumeric). This allows Private Service Connect connections to be created automatically for the specified networks. + // +kubebuilder:validation:Optional + PscAutoConnections []PscAutoConnectionsParameters `json:"pscAutoConnections,omitempty" tf:"psc_auto_connections,omitempty"` + // Whether PSC connectivity is enabled for this instance. // +kubebuilder:validation:Optional PscEnabled *bool `json:"pscEnabled,omitempty" tf:"psc_enabled,omitempty"` @@ -1001,6 +1042,9 @@ type ReplicaConfigurationInitParameters struct { // certificate. CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"` + // Specifies if the replica is a cascadable replica. If true, instance must be in different region from primary. + CascadableReplica *bool `json:"cascadableReplica,omitempty" tf:"cascadable_replica,omitempty"` + // PEM representation of the replica's x509 // certificate. 
ClientCertificate *string `json:"clientCertificate,omitempty" tf:"client_certificate,omitempty"` @@ -1014,7 +1058,8 @@ type ReplicaConfigurationInitParameters struct { ConnectRetryInterval *float64 `json:"connectRetryInterval,omitempty" tf:"connect_retry_interval,omitempty"` // Path to a SQL file in GCS from which replica - // instances are created. Format is gs://bucket/filename. + // instances are created. Format is gs://bucket/filename. Note, if the master + // instance is a source representation instance this field must be present. DumpFilePath *string `json:"dumpFilePath,omitempty" tf:"dump_file_path,omitempty"` // Specifies if the replica is the failover target. @@ -1048,6 +1093,9 @@ type ReplicaConfigurationObservation struct { // certificate. CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"` + // Specifies if the replica is a cascadable replica. If true, instance must be in different region from primary. + CascadableReplica *bool `json:"cascadableReplica,omitempty" tf:"cascadable_replica,omitempty"` + // PEM representation of the replica's x509 // certificate. ClientCertificate *string `json:"clientCertificate,omitempty" tf:"client_certificate,omitempty"` @@ -1061,7 +1109,8 @@ type ReplicaConfigurationObservation struct { ConnectRetryInterval *float64 `json:"connectRetryInterval,omitempty" tf:"connect_retry_interval,omitempty"` // Path to a SQL file in GCS from which replica - // instances are created. Format is gs://bucket/filename. + // instances are created. Format is gs://bucket/filename. Note, if the master + // instance is a source representation instance this field must be present. DumpFilePath *string `json:"dumpFilePath,omitempty" tf:"dump_file_path,omitempty"` // Specifies if the replica is the failover target. 
@@ -1093,6 +1142,10 @@ type ReplicaConfigurationParameters struct { // +kubebuilder:validation:Optional CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"` + // Specifies if the replica is a cascadable replica. If true, instance must be in different region from primary. + // +kubebuilder:validation:Optional + CascadableReplica *bool `json:"cascadableReplica,omitempty" tf:"cascadable_replica,omitempty"` + // PEM representation of the replica's x509 // certificate. // +kubebuilder:validation:Optional @@ -1109,7 +1162,8 @@ type ReplicaConfigurationParameters struct { ConnectRetryInterval *float64 `json:"connectRetryInterval,omitempty" tf:"connect_retry_interval,omitempty"` // Path to a SQL file in GCS from which replica - // instances are created. Format is gs://bucket/filename. + // instances are created. Format is gs://bucket/filename. Note, if the master + // instance is a source representation instance this field must be present. // +kubebuilder:validation:Optional DumpFilePath *string `json:"dumpFilePath,omitempty" tf:"dump_file_path,omitempty"` @@ -1273,7 +1327,7 @@ type SettingsInitParameters struct { // The name of server instance collation. Collation *string `json:"collation,omitempty" tf:"collation,omitempty"` - // Enables the enforcement of Cloud SQL Auth Proxy or Cloud SQL connectors for all the connections. If enabled, all the direct connections are rejected. + // Control the enforcement of Cloud SQL Auth Proxy or Cloud SQL connectors for all the connections, can be REQUIRED or NOT_REQUIRED. If enabled, all the direct connections are rejected. ConnectorEnforcement *string `json:"connectorEnforcement,omitempty" tf:"connector_enforcement,omitempty"` DataCacheConfig *DataCacheConfigInitParameters `json:"dataCacheConfig,omitempty" tf:"data_cache_config,omitempty"` @@ -1357,7 +1411,7 @@ type SettingsObservation struct { // The name of server instance collation. 
Collation *string `json:"collation,omitempty" tf:"collation,omitempty"` - // Enables the enforcement of Cloud SQL Auth Proxy or Cloud SQL connectors for all the connections. If enabled, all the direct connections are rejected. + // Control the enforcement of Cloud SQL Auth Proxy or Cloud SQL connectors for all the connections, can be REQUIRED or NOT_REQUIRED. If enabled, all the direct connections are rejected. ConnectorEnforcement *string `json:"connectorEnforcement,omitempty" tf:"connector_enforcement,omitempty"` DataCacheConfig *DataCacheConfigObservation `json:"dataCacheConfig,omitempty" tf:"data_cache_config,omitempty"` @@ -1451,7 +1505,7 @@ type SettingsParameters struct { // +kubebuilder:validation:Optional Collation *string `json:"collation,omitempty" tf:"collation,omitempty"` - // Enables the enforcement of Cloud SQL Auth Proxy or Cloud SQL connectors for all the connections. If enabled, all the direct connections are rejected. + // Control the enforcement of Cloud SQL Auth Proxy or Cloud SQL connectors for all the connections, can be REQUIRED or NOT_REQUIRED. If enabled, all the direct connections are rejected. 
// +kubebuilder:validation:Optional ConnectorEnforcement *string `json:"connectorEnforcement,omitempty" tf:"connector_enforcement,omitempty"` diff --git a/apis/sql/v1beta2/zz_generated.deepcopy.go b/apis/sql/v1beta2/zz_generated.deepcopy.go index 98ef24a13..5e4ff77e3 100644 --- a/apis/sql/v1beta2/zz_generated.deepcopy.go +++ b/apis/sql/v1beta2/zz_generated.deepcopy.go @@ -761,11 +761,6 @@ func (in *DatabaseInstanceInitParameters) DeepCopyInto(out *DatabaseInstanceInit *out = new(string) **out = **in } - if in.DeletionProtection != nil { - in, out := &in.DeletionProtection, &out.DeletionProtection - *out = new(bool) - **out = **in - } if in.EncryptionKeyName != nil { in, out := &in.EncryptionKeyName, &out.EncryptionKeyName *out = new(string) @@ -796,6 +791,17 @@ func (in *DatabaseInstanceInitParameters) DeepCopyInto(out *DatabaseInstanceInit *out = new(ReplicaConfigurationInitParameters) (*in).DeepCopyInto(*out) } + if in.ReplicaNames != nil { + in, out := &in.ReplicaNames, &out.ReplicaNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.RestoreBackupContext != nil { in, out := &in.RestoreBackupContext, &out.RestoreBackupContext *out = new(RestoreBackupContextInitParameters) @@ -961,6 +967,17 @@ func (in *DatabaseInstanceObservation) DeepCopyInto(out *DatabaseInstanceObserva *out = new(ReplicaConfigurationObservation) (*in).DeepCopyInto(*out) } + if in.ReplicaNames != nil { + in, out := &in.ReplicaNames, &out.ReplicaNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.RestoreBackupContext != nil { in, out := &in.RestoreBackupContext, &out.RestoreBackupContext *out = new(RestoreBackupContextObservation) @@ -1006,11 +1023,6 @@ func (in *DatabaseInstanceParameters) DeepCopyInto(out *DatabaseInstanceParamete *out = new(string) 
**out = **in } - if in.DeletionProtection != nil { - in, out := &in.DeletionProtection, &out.DeletionProtection - *out = new(bool) - **out = **in - } if in.EncryptionKeyName != nil { in, out := &in.EncryptionKeyName, &out.EncryptionKeyName *out = new(string) @@ -1041,6 +1053,17 @@ func (in *DatabaseInstanceParameters) DeepCopyInto(out *DatabaseInstanceParamete *out = new(ReplicaConfigurationParameters) (*in).DeepCopyInto(*out) } + if in.ReplicaNames != nil { + in, out := &in.ReplicaNames, &out.ReplicaNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.RestoreBackupContext != nil { in, out := &in.RestoreBackupContext, &out.RestoreBackupContext *out = new(RestoreBackupContextParameters) @@ -1300,11 +1323,6 @@ func (in *IPConfigurationInitParameters) DeepCopyInto(out *IPConfigurationInitPa (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.RequireSSL != nil { - in, out := &in.RequireSSL, &out.RequireSSL - *out = new(bool) - **out = **in - } if in.SSLMode != nil { in, out := &in.SSLMode, &out.SSLMode *out = new(string) @@ -1315,6 +1333,11 @@ func (in *IPConfigurationInitParameters) DeepCopyInto(out *IPConfigurationInitPa *out = new(string) **out = **in } + if in.ServerCAPool != nil { + in, out := &in.ServerCAPool, &out.ServerCAPool + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPConfigurationInitParameters. 
@@ -1364,11 +1387,6 @@ func (in *IPConfigurationObservation) DeepCopyInto(out *IPConfigurationObservati (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.RequireSSL != nil { - in, out := &in.RequireSSL, &out.RequireSSL - *out = new(bool) - **out = **in - } if in.SSLMode != nil { in, out := &in.SSLMode, &out.SSLMode *out = new(string) @@ -1379,6 +1397,11 @@ func (in *IPConfigurationObservation) DeepCopyInto(out *IPConfigurationObservati *out = new(string) **out = **in } + if in.ServerCAPool != nil { + in, out := &in.ServerCAPool, &out.ServerCAPool + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPConfigurationObservation. @@ -1438,11 +1461,6 @@ func (in *IPConfigurationParameters) DeepCopyInto(out *IPConfigurationParameters (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.RequireSSL != nil { - in, out := &in.RequireSSL, &out.RequireSSL - *out = new(bool) - **out = **in - } if in.SSLMode != nil { in, out := &in.SSLMode, &out.SSLMode *out = new(string) @@ -1453,6 +1471,11 @@ func (in *IPConfigurationParameters) DeepCopyInto(out *IPConfigurationParameters *out = new(string) **out = **in } + if in.ServerCAPool != nil { + in, out := &in.ServerCAPool, &out.ServerCAPool + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPConfigurationParameters. @@ -2012,6 +2035,81 @@ func (in *PasswordValidationPolicyParameters) DeepCopy() *PasswordValidationPoli return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PscAutoConnectionsInitParameters) DeepCopyInto(out *PscAutoConnectionsInitParameters) { + *out = *in + if in.ConsumerNetwork != nil { + in, out := &in.ConsumerNetwork, &out.ConsumerNetwork + *out = new(string) + **out = **in + } + if in.ConsumerServiceProjectID != nil { + in, out := &in.ConsumerServiceProjectID, &out.ConsumerServiceProjectID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PscAutoConnectionsInitParameters. +func (in *PscAutoConnectionsInitParameters) DeepCopy() *PscAutoConnectionsInitParameters { + if in == nil { + return nil + } + out := new(PscAutoConnectionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PscAutoConnectionsObservation) DeepCopyInto(out *PscAutoConnectionsObservation) { + *out = *in + if in.ConsumerNetwork != nil { + in, out := &in.ConsumerNetwork, &out.ConsumerNetwork + *out = new(string) + **out = **in + } + if in.ConsumerServiceProjectID != nil { + in, out := &in.ConsumerServiceProjectID, &out.ConsumerServiceProjectID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PscAutoConnectionsObservation. +func (in *PscAutoConnectionsObservation) DeepCopy() *PscAutoConnectionsObservation { + if in == nil { + return nil + } + out := new(PscAutoConnectionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PscAutoConnectionsParameters) DeepCopyInto(out *PscAutoConnectionsParameters) { + *out = *in + if in.ConsumerNetwork != nil { + in, out := &in.ConsumerNetwork, &out.ConsumerNetwork + *out = new(string) + **out = **in + } + if in.ConsumerServiceProjectID != nil { + in, out := &in.ConsumerServiceProjectID, &out.ConsumerServiceProjectID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PscAutoConnectionsParameters. +func (in *PscAutoConnectionsParameters) DeepCopy() *PscAutoConnectionsParameters { + if in == nil { + return nil + } + out := new(PscAutoConnectionsParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PscConfigInitParameters) DeepCopyInto(out *PscConfigInitParameters) { *out = *in @@ -2026,6 +2124,13 @@ func (in *PscConfigInitParameters) DeepCopyInto(out *PscConfigInitParameters) { } } } + if in.PscAutoConnections != nil { + in, out := &in.PscAutoConnections, &out.PscAutoConnections + *out = make([]PscAutoConnectionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.PscEnabled != nil { in, out := &in.PscEnabled, &out.PscEnabled *out = new(bool) @@ -2057,6 +2162,13 @@ func (in *PscConfigObservation) DeepCopyInto(out *PscConfigObservation) { } } } + if in.PscAutoConnections != nil { + in, out := &in.PscAutoConnections, &out.PscAutoConnections + *out = make([]PscAutoConnectionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.PscEnabled != nil { in, out := &in.PscEnabled, &out.PscEnabled *out = new(bool) @@ -2088,6 +2200,13 @@ func (in *PscConfigParameters) DeepCopyInto(out *PscConfigParameters) { } } } + if in.PscAutoConnections != nil { + in, out := &in.PscAutoConnections, &out.PscAutoConnections + *out = make([]PscAutoConnectionsParameters, 
len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.PscEnabled != nil { in, out := &in.PscEnabled, &out.PscEnabled *out = new(bool) @@ -2113,6 +2232,11 @@ func (in *ReplicaConfigurationInitParameters) DeepCopyInto(out *ReplicaConfigura *out = new(string) **out = **in } + if in.CascadableReplica != nil { + in, out := &in.CascadableReplica, &out.CascadableReplica + *out = new(bool) + **out = **in + } if in.ClientCertificate != nil { in, out := &in.ClientCertificate, &out.ClientCertificate *out = new(string) @@ -2183,6 +2307,11 @@ func (in *ReplicaConfigurationObservation) DeepCopyInto(out *ReplicaConfiguratio *out = new(string) **out = **in } + if in.CascadableReplica != nil { + in, out := &in.CascadableReplica, &out.CascadableReplica + *out = new(bool) + **out = **in + } if in.ClientCertificate != nil { in, out := &in.ClientCertificate, &out.ClientCertificate *out = new(string) @@ -2248,6 +2377,11 @@ func (in *ReplicaConfigurationParameters) DeepCopyInto(out *ReplicaConfiguration *out = new(string) **out = **in } + if in.CascadableReplica != nil { + in, out := &in.CascadableReplica, &out.CascadableReplica + *out = new(bool) + **out = **in + } if in.ClientCertificate != nil { in, out := &in.ClientCertificate, &out.ClientCertificate *out = new(string) diff --git a/apis/sql/v1beta2/zz_user_types.go b/apis/sql/v1beta2/zz_user_types.go index 8839d8db2..62f0c4cab 100755 --- a/apis/sql/v1beta2/zz_user_types.go +++ b/apis/sql/v1beta2/zz_user_types.go @@ -130,9 +130,10 @@ type UserInitParameters struct { // The user type. It determines the method to authenticate the // user during login. The default is the database's built-in user type. Flags - // include "BUILT_IN", "CLOUD_IAM_USER", and "CLOUD_IAM_SERVICE_ACCOUNT" for both - // Postgres and MySQL. - // MySQL also includes "CLOUD_IAM_GROUP", "CLOUD_IAM_GROUP_USER" and "CLOUD_IAM_GROUP_SERVICE_ACCOUNT". 
+ // include "BUILT_IN", "CLOUD_IAM_USER", "CLOUD_IAM_SERVICE_ACCOUNT", "CLOUD_IAM_GROUP", + // "CLOUD_IAM_GROUP_USER" and "CLOUD_IAM_GROUP_SERVICE_ACCOUNT" for + // Postgres + // and MySQL. Type *string `json:"type,omitempty" tf:"type,omitempty"` } @@ -164,9 +165,10 @@ type UserObservation struct { // The user type. It determines the method to authenticate the // user during login. The default is the database's built-in user type. Flags - // include "BUILT_IN", "CLOUD_IAM_USER", and "CLOUD_IAM_SERVICE_ACCOUNT" for both - // Postgres and MySQL. - // MySQL also includes "CLOUD_IAM_GROUP", "CLOUD_IAM_GROUP_USER" and "CLOUD_IAM_GROUP_SERVICE_ACCOUNT". + // include "BUILT_IN", "CLOUD_IAM_USER", "CLOUD_IAM_SERVICE_ACCOUNT", "CLOUD_IAM_GROUP", + // "CLOUD_IAM_GROUP_USER" and "CLOUD_IAM_GROUP_SERVICE_ACCOUNT" for + // Postgres + // and MySQL. Type *string `json:"type,omitempty" tf:"type,omitempty"` } @@ -215,9 +217,10 @@ type UserParameters struct { // The user type. It determines the method to authenticate the // user during login. The default is the database's built-in user type. Flags - // include "BUILT_IN", "CLOUD_IAM_USER", and "CLOUD_IAM_SERVICE_ACCOUNT" for both - // Postgres and MySQL. - // MySQL also includes "CLOUD_IAM_GROUP", "CLOUD_IAM_GROUP_USER" and "CLOUD_IAM_GROUP_SERVICE_ACCOUNT". + // include "BUILT_IN", "CLOUD_IAM_USER", "CLOUD_IAM_SERVICE_ACCOUNT", "CLOUD_IAM_GROUP", + // "CLOUD_IAM_GROUP_USER" and "CLOUD_IAM_GROUP_SERVICE_ACCOUNT" for + // Postgres + // and MySQL. 
// +kubebuilder:validation:Optional Type *string `json:"type,omitempty" tf:"type,omitempty"` } diff --git a/apis/storage/v1beta2/zz_bucket_terraformed.go b/apis/storage/v1beta2/zz_bucket_terraformed.go index 9e8b5d9ae..4c7cdd648 100755 --- a/apis/storage/v1beta2/zz_bucket_terraformed.go +++ b/apis/storage/v1beta2/zz_bucket_terraformed.go @@ -125,5 +125,5 @@ func (tr *Bucket) LateInitialize(attrs []byte) (bool, error) { // GetTerraformSchemaVersion returns the associated Terraform schema version func (tr *Bucket) GetTerraformSchemaVersion() int { - return 2 + return 3 } diff --git a/apis/storage/v1beta2/zz_bucket_types.go b/apis/storage/v1beta2/zz_bucket_types.go index 42d6c009a..b906deeeb 100755 --- a/apis/storage/v1beta2/zz_bucket_types.go +++ b/apis/storage/v1beta2/zz_bucket_types.go @@ -95,6 +95,9 @@ type BucketInitParameters struct { // boolean option will delete all contained objects. ForceDestroy *bool `json:"forceDestroy,omitempty" tf:"force_destroy,omitempty"` + // The bucket's hierarchical namespace policy, which defines the bucket capability to handle folders in logical structure. Structure is documented below. To use this configuration, uniform_bucket_level_access must be enabled on bucket. + HierarchicalNamespace *HierarchicalNamespaceInitParameters `json:"hierarchicalNamespace,omitempty" tf:"hierarchical_namespace,omitempty"` + // A map of key/value label pairs to assign to the bucket. // +mapType=granular Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` @@ -168,6 +171,9 @@ type BucketObservation struct { // boolean option will delete all contained objects. ForceDestroy *bool `json:"forceDestroy,omitempty" tf:"force_destroy,omitempty"` + // The bucket's hierarchical namespace policy, which defines the bucket capability to handle folders in logical structure. Structure is documented below. To use this configuration, uniform_bucket_level_access must be enabled on bucket. 
+ HierarchicalNamespace *HierarchicalNamespaceObservation `json:"hierarchicalNamespace,omitempty" tf:"hierarchical_namespace,omitempty"` + ID *string `json:"id,omitempty" tf:"id,omitempty"` // A map of key/value label pairs to assign to the bucket. @@ -258,6 +264,10 @@ type BucketParameters struct { // +kubebuilder:validation:Optional ForceDestroy *bool `json:"forceDestroy,omitempty" tf:"force_destroy,omitempty"` + // The bucket's hierarchical namespace policy, which defines the bucket capability to handle folders in logical structure. Structure is documented below. To use this configuration, uniform_bucket_level_access must be enabled on bucket. + // +kubebuilder:validation:Optional + HierarchicalNamespace *HierarchicalNamespaceParameters `json:"hierarchicalNamespace,omitempty" tf:"hierarchical_namespace,omitempty"` + // A map of key/value label pairs to assign to the bucket. // +kubebuilder:validation:Optional // +mapType=granular @@ -319,7 +329,7 @@ type BucketParameters struct { type ConditionInitParameters struct { - // Minimum age of an object in days to satisfy this condition. If not supplied alongside another condition and without setting no_age to true, a default age of 0 will be set. + // Minimum age of an object in days to satisfy this condition. Note To set 0 value of age, send_age_if_zero should be set true otherwise 0 value of age field will be ignored. Age *float64 `json:"age,omitempty" tf:"age,omitempty"` // A date in the RFC 3339 format YYYY-MM-DD. This condition is satisfied when an object is created before midnight of the specified date in UTC. @@ -343,9 +353,6 @@ type ConditionInitParameters struct { // One or more matching name suffixes to satisfy this condition. MatchesSuffix []*string `json:"matchesSuffix,omitempty" tf:"matches_suffix,omitempty"` - // While set true, age value will be omitted from requests. 
This prevents a default age of 0 from being applied, and if you do not have an age value set, setting this to true is strongly recommended. When unset and other conditions are set to zero values, this can result in a rule that applies your action to all files in the bucket. no_age is deprecated and will be removed in a future major release. Use send_age_if_zero instead. - NoAge *bool `json:"noAge,omitempty" tf:"no_age,omitempty"` - // Relevant only for versioned objects. The date in RFC 3339 (e.g. 2017-06-13) when the object became nonconcurrent. When set to 0 it will be ignored, and your state will treat it as though you supplied no noncurrent_time_before condition. NoncurrentTimeBefore *string `json:"noncurrentTimeBefore,omitempty" tf:"noncurrent_time_before,omitempty"` @@ -370,7 +377,7 @@ type ConditionInitParameters struct { type ConditionObservation struct { - // Minimum age of an object in days to satisfy this condition. If not supplied alongside another condition and without setting no_age to true, a default age of 0 will be set. + // Minimum age of an object in days to satisfy this condition. Note To set 0 value of age, send_age_if_zero should be set true otherwise 0 value of age field will be ignored. Age *float64 `json:"age,omitempty" tf:"age,omitempty"` // A date in the RFC 3339 format YYYY-MM-DD. This condition is satisfied when an object is created before midnight of the specified date in UTC. @@ -394,9 +401,6 @@ type ConditionObservation struct { // One or more matching name suffixes to satisfy this condition. MatchesSuffix []*string `json:"matchesSuffix,omitempty" tf:"matches_suffix,omitempty"` - // While set true, age value will be omitted from requests. This prevents a default age of 0 from being applied, and if you do not have an age value set, setting this to true is strongly recommended. When unset and other conditions are set to zero values, this can result in a rule that applies your action to all files in the bucket. 
no_age is deprecated and will be removed in a future major release. Use send_age_if_zero instead. - NoAge *bool `json:"noAge,omitempty" tf:"no_age,omitempty"` - // Relevant only for versioned objects. The date in RFC 3339 (e.g. 2017-06-13) when the object became nonconcurrent. When set to 0 it will be ignored, and your state will treat it as though you supplied no noncurrent_time_before condition. NoncurrentTimeBefore *string `json:"noncurrentTimeBefore,omitempty" tf:"noncurrent_time_before,omitempty"` @@ -421,7 +425,7 @@ type ConditionObservation struct { type ConditionParameters struct { - // Minimum age of an object in days to satisfy this condition. If not supplied alongside another condition and without setting no_age to true, a default age of 0 will be set. + // Minimum age of an object in days to satisfy this condition. Note To set 0 value of age, send_age_if_zero should be set true otherwise 0 value of age field will be ignored. // +kubebuilder:validation:Optional Age *float64 `json:"age,omitempty" tf:"age,omitempty"` @@ -453,10 +457,6 @@ type ConditionParameters struct { // +kubebuilder:validation:Optional MatchesSuffix []*string `json:"matchesSuffix,omitempty" tf:"matches_suffix,omitempty"` - // While set true, age value will be omitted from requests. This prevents a default age of 0 from being applied, and if you do not have an age value set, setting this to true is strongly recommended. When unset and other conditions are set to zero values, this can result in a rule that applies your action to all files in the bucket. no_age is deprecated and will be removed in a future major release. Use send_age_if_zero instead. - // +kubebuilder:validation:Optional - NoAge *bool `json:"noAge,omitempty" tf:"no_age,omitempty"` - // Relevant only for versioned objects. The date in RFC 3339 (e.g. 2017-06-13) when the object became nonconcurrent. When set to 0 it will be ignored, and your state will treat it as though you supplied no noncurrent_time_before condition. 
// +kubebuilder:validation:Optional NoncurrentTimeBefore *string `json:"noncurrentTimeBefore,omitempty" tf:"noncurrent_time_before,omitempty"` @@ -582,6 +582,25 @@ type EncryptionParameters struct { DefaultKMSKeyName *string `json:"defaultKmsKeyName" tf:"default_kms_key_name,omitempty"` } +type HierarchicalNamespaceInitParameters struct { + + // Enables hierarchical namespace for the bucket. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type HierarchicalNamespaceObservation struct { + + // Enables hierarchical namespace for the bucket. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type HierarchicalNamespaceParameters struct { + + // Enables hierarchical namespace for the bucket. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` +} + type LifecycleRuleInitParameters struct { // The Lifecycle Rule's action configuration. A single block of this type is supported. Structure is documented below. diff --git a/apis/storage/v1beta2/zz_generated.deepcopy.go b/apis/storage/v1beta2/zz_generated.deepcopy.go index eeca91970..c669d7612 100644 --- a/apis/storage/v1beta2/zz_generated.deepcopy.go +++ b/apis/storage/v1beta2/zz_generated.deepcopy.go @@ -549,6 +549,11 @@ func (in *BucketInitParameters) DeepCopyInto(out *BucketInitParameters) { *out = new(bool) **out = **in } + if in.HierarchicalNamespace != nil { + in, out := &in.HierarchicalNamespace, &out.HierarchicalNamespace + *out = new(HierarchicalNamespaceInitParameters) + (*in).DeepCopyInto(*out) + } if in.Labels != nil { in, out := &in.Labels, &out.Labels *out = make(map[string]*string, len(*in)) @@ -1214,6 +1219,11 @@ func (in *BucketObservation) DeepCopyInto(out *BucketObservation) { *out = new(bool) **out = **in } + if in.HierarchicalNamespace != nil { + in, out := &in.HierarchicalNamespace, &out.HierarchicalNamespace + *out = new(HierarchicalNamespaceObservation) + (*in).DeepCopyInto(*out) + } if in.ID != nil { in, out := 
&in.ID, &out.ID *out = new(string) @@ -1385,6 +1395,11 @@ func (in *BucketParameters) DeepCopyInto(out *BucketParameters) { *out = new(bool) **out = **in } + if in.HierarchicalNamespace != nil { + in, out := &in.HierarchicalNamespace, &out.HierarchicalNamespace + *out = new(HierarchicalNamespaceParameters) + (*in).DeepCopyInto(*out) + } if in.Labels != nil { in, out := &in.Labels, &out.Labels *out = make(map[string]*string, len(*in)) @@ -1576,11 +1591,6 @@ func (in *ConditionInitParameters) DeepCopyInto(out *ConditionInitParameters) { } } } - if in.NoAge != nil { - in, out := &in.NoAge, &out.NoAge - *out = new(bool) - **out = **in - } if in.NoncurrentTimeBefore != nil { in, out := &in.NoncurrentTimeBefore, &out.NoncurrentTimeBefore *out = new(string) @@ -1689,11 +1699,6 @@ func (in *ConditionObservation) DeepCopyInto(out *ConditionObservation) { } } } - if in.NoAge != nil { - in, out := &in.NoAge, &out.NoAge - *out = new(bool) - **out = **in - } if in.NoncurrentTimeBefore != nil { in, out := &in.NoncurrentTimeBefore, &out.NoncurrentTimeBefore *out = new(string) @@ -1802,11 +1807,6 @@ func (in *ConditionParameters) DeepCopyInto(out *ConditionParameters) { } } } - if in.NoAge != nil { - in, out := &in.NoAge, &out.NoAge - *out = new(bool) - **out = **in - } if in.NoncurrentTimeBefore != nil { in, out := &in.NoncurrentTimeBefore, &out.NoncurrentTimeBefore *out = new(string) @@ -2213,6 +2213,66 @@ func (in *EncryptionParameters) DeepCopy() *EncryptionParameters { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HierarchicalNamespaceInitParameters) DeepCopyInto(out *HierarchicalNamespaceInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HierarchicalNamespaceInitParameters. 
+func (in *HierarchicalNamespaceInitParameters) DeepCopy() *HierarchicalNamespaceInitParameters { + if in == nil { + return nil + } + out := new(HierarchicalNamespaceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HierarchicalNamespaceObservation) DeepCopyInto(out *HierarchicalNamespaceObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HierarchicalNamespaceObservation. +func (in *HierarchicalNamespaceObservation) DeepCopy() *HierarchicalNamespaceObservation { + if in == nil { + return nil + } + out := new(HierarchicalNamespaceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HierarchicalNamespaceParameters) DeepCopyInto(out *HierarchicalNamespaceParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HierarchicalNamespaceParameters. +func (in *HierarchicalNamespaceParameters) DeepCopy() *HierarchicalNamespaceParameters { + if in == nil { + return nil + } + out := new(HierarchicalNamespaceParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *LifecycleRuleInitParameters) DeepCopyInto(out *LifecycleRuleInitParameters) { *out = *in diff --git a/apis/tags/v1beta1/zz_tagkey_types.go b/apis/tags/v1beta1/zz_tagkey_types.go index 0ea6f8390..3fd21bd83 100755 --- a/apis/tags/v1beta1/zz_tagkey_types.go +++ b/apis/tags/v1beta1/zz_tagkey_types.go @@ -32,7 +32,7 @@ type TagKeyInitParameters struct { PurposeData map[string]*string `json:"purposeData,omitempty" tf:"purpose_data,omitempty"` // Input only. The user friendly name for a TagKey. The short name should be unique for TagKeys within the same tag namespace. - // The short name must be 1-63 characters, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between. + // The short name can have a maximum length of 256 characters. The permitted character set for the shortName includes all UTF-8 encoded Unicode characters except single quotes ('), double quotes ("), backslashes (\), and forward slashes (/). ShortName *string `json:"shortName,omitempty" tf:"short_name,omitempty"` } @@ -68,7 +68,7 @@ type TagKeyObservation struct { PurposeData map[string]*string `json:"purposeData,omitempty" tf:"purpose_data,omitempty"` // Input only. The user friendly name for a TagKey. The short name should be unique for TagKeys within the same tag namespace. - // The short name must be 1-63 characters, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between. + // The short name can have a maximum length of 256 characters. The permitted character set for the shortName includes all UTF-8 encoded Unicode characters except single quotes ('), double quotes ("), backslashes (\), and forward slashes (/). ShortName *string `json:"shortName,omitempty" tf:"short_name,omitempty"` // Output only. Update time. 
@@ -99,7 +99,7 @@ type TagKeyParameters struct { PurposeData map[string]*string `json:"purposeData,omitempty" tf:"purpose_data,omitempty"` // Input only. The user friendly name for a TagKey. The short name should be unique for TagKeys within the same tag namespace. - // The short name must be 1-63 characters, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between. + // The short name can have a maximum length of 256 characters. The permitted character set for the shortName includes all UTF-8 encoded Unicode characters except single quotes ('), double quotes ("), backslashes (\), and forward slashes (/). // +kubebuilder:validation:Optional ShortName *string `json:"shortName,omitempty" tf:"short_name,omitempty"` } diff --git a/apis/tags/v1beta1/zz_tagvalue_types.go b/apis/tags/v1beta1/zz_tagvalue_types.go index 1cfc768e8..832f18ec6 100755 --- a/apis/tags/v1beta1/zz_tagvalue_types.go +++ b/apis/tags/v1beta1/zz_tagvalue_types.go @@ -32,7 +32,7 @@ type TagValueInitParameters struct { ParentSelector *v1.Selector `json:"parentSelector,omitempty" tf:"-"` // Input only. User-assigned short name for TagValue. The short name should be unique for TagValues within the same parent TagKey. - // The short name must be 63 characters or less, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between. + // The short name can have a maximum length of 256 characters. The permitted character set for the shortName includes all UTF-8 encoded Unicode characters except single quotes ('), double quotes ("), backslashes (\), and forward slashes (/). ShortName *string `json:"shortName,omitempty" tf:"short_name,omitempty"` } @@ -58,7 +58,7 @@ type TagValueObservation struct { Parent *string `json:"parent,omitempty" tf:"parent,omitempty"` // Input only. User-assigned short name for TagValue. 
The short name should be unique for TagValues within the same parent TagKey. - // The short name must be 63 characters or less, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between. + // The short name can have a maximum length of 256 characters. The permitted character set for the shortName includes all UTF-8 encoded Unicode characters except single quotes ('), double quotes ("), backslashes (\), and forward slashes (/). ShortName *string `json:"shortName,omitempty" tf:"short_name,omitempty"` // Output only. Update time. @@ -87,7 +87,7 @@ type TagValueParameters struct { ParentSelector *v1.Selector `json:"parentSelector,omitempty" tf:"-"` // Input only. User-assigned short name for TagValue. The short name should be unique for TagValues within the same parent TagKey. - // The short name must be 63 characters or less, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between. + // The short name can have a maximum length of 256 characters. The permitted character set for the shortName includes all UTF-8 encoded Unicode characters except single quotes ('), double quotes ("), backslashes (\), and forward slashes (/). // +kubebuilder:validation:Optional ShortName *string `json:"shortName,omitempty" tf:"short_name,omitempty"` } diff --git a/apis/vpcaccess/v1beta2/zz_connector_types.go b/apis/vpcaccess/v1beta2/zz_connector_types.go index 2466ee1af..0fcd2efe7 100755 --- a/apis/vpcaccess/v1beta2/zz_connector_types.go +++ b/apis/vpcaccess/v1beta2/zz_connector_types.go @@ -27,8 +27,7 @@ type ConnectorInitParameters struct { // Maximum throughput of the connector in Mbps, must be greater than min_throughput. Default is 300. Refers to the expected throughput // when using an e2-micro machine type. Value must be a multiple of 100 from 300 through 1000. Must be higher than the value specified by - // min_throughput. 
If both max_throughput and max_instances are provided, max_instances takes precedence over max_throughput. The use of - // max_throughput is discouraged in favor of max_instances. + // min_throughput. Only one of max_throughput and max_instances can be specified. The use of max_throughput is discouraged in favor of max_instances. MaxThroughput *float64 `json:"maxThroughput,omitempty" tf:"max_throughput,omitempty"` // Minimum value of instances in autoscaling group underlying the connector. Value must be between 2 and 9, inclusive. Must be @@ -36,8 +35,8 @@ type ConnectorInitParameters struct { MinInstances *float64 `json:"minInstances,omitempty" tf:"min_instances,omitempty"` // Minimum throughput of the connector in Mbps. Default and min is 200. Refers to the expected throughput when using an e2-micro machine type. - // Value must be a multiple of 100 from 200 through 900. Must be lower than the value specified by max_throughput. If both min_throughput and - // min_instances are provided, min_instances takes precedence over min_throughput. The use of min_throughput is discouraged in favor of min_instances. + // Value must be a multiple of 100 from 200 through 900. Must be lower than the value specified by max_throughput. + // Only one of min_throughput and min_instances can be specified. The use of min_throughput is discouraged in favor of min_instances. MinThroughput *float64 `json:"minThroughput,omitempty" tf:"min_throughput,omitempty"` // Name or self_link of the VPC network. Required if ip_cidr_range is set. @@ -81,8 +80,7 @@ type ConnectorObservation struct { // Maximum throughput of the connector in Mbps, must be greater than min_throughput. Default is 300. Refers to the expected throughput // when using an e2-micro machine type. Value must be a multiple of 100 from 300 through 1000. Must be higher than the value specified by - // min_throughput. If both max_throughput and max_instances are provided, max_instances takes precedence over max_throughput. 
The use of - // max_throughput is discouraged in favor of max_instances. + // min_throughput. Only one of max_throughput and max_instances can be specified. The use of max_throughput is discouraged in favor of max_instances. MaxThroughput *float64 `json:"maxThroughput,omitempty" tf:"max_throughput,omitempty"` // Minimum value of instances in autoscaling group underlying the connector. Value must be between 2 and 9, inclusive. Must be @@ -90,8 +88,8 @@ type ConnectorObservation struct { MinInstances *float64 `json:"minInstances,omitempty" tf:"min_instances,omitempty"` // Minimum throughput of the connector in Mbps. Default and min is 200. Refers to the expected throughput when using an e2-micro machine type. - // Value must be a multiple of 100 from 200 through 900. Must be lower than the value specified by max_throughput. If both min_throughput and - // min_instances are provided, min_instances takes precedence over min_throughput. The use of min_throughput is discouraged in favor of min_instances. + // Value must be a multiple of 100 from 200 through 900. Must be lower than the value specified by max_throughput. + // Only one of min_throughput and min_instances can be specified. The use of min_throughput is discouraged in favor of min_instances. MinThroughput *float64 `json:"minThroughput,omitempty" tf:"min_throughput,omitempty"` // Name or self_link of the VPC network. Required if ip_cidr_range is set. @@ -132,8 +130,7 @@ type ConnectorParameters struct { // Maximum throughput of the connector in Mbps, must be greater than min_throughput. Default is 300. Refers to the expected throughput // when using an e2-micro machine type. Value must be a multiple of 100 from 300 through 1000. Must be higher than the value specified by - // min_throughput. If both max_throughput and max_instances are provided, max_instances takes precedence over max_throughput. The use of - // max_throughput is discouraged in favor of max_instances. + // min_throughput. 
Only one of max_throughput and max_instances can be specified. The use of max_throughput is discouraged in favor of max_instances. // +kubebuilder:validation:Optional MaxThroughput *float64 `json:"maxThroughput,omitempty" tf:"max_throughput,omitempty"` @@ -143,8 +140,8 @@ type ConnectorParameters struct { MinInstances *float64 `json:"minInstances,omitempty" tf:"min_instances,omitempty"` // Minimum throughput of the connector in Mbps. Default and min is 200. Refers to the expected throughput when using an e2-micro machine type. - // Value must be a multiple of 100 from 200 through 900. Must be lower than the value specified by max_throughput. If both min_throughput and - // min_instances are provided, min_instances takes precedence over min_throughput. The use of min_throughput is discouraged in favor of min_instances. + // Value must be a multiple of 100 from 200 through 900. Must be lower than the value specified by max_throughput. + // Only one of min_throughput and min_instances can be specified. The use of min_throughput is discouraged in favor of min_instances. 
// +kubebuilder:validation:Optional MinThroughput *float64 `json:"minThroughput,omitempty" tf:"min_throughput,omitempty"` diff --git a/apis/workflows/v1beta1/zz_generated.deepcopy.go b/apis/workflows/v1beta1/zz_generated.deepcopy.go index cfc1cf3c2..ce228ef05 100644 --- a/apis/workflows/v1beta1/zz_generated.deepcopy.go +++ b/apis/workflows/v1beta1/zz_generated.deepcopy.go @@ -192,6 +192,11 @@ func (in *WorkflowObservation) DeepCopyInto(out *WorkflowObservation) { *out = new(string) **out = **in } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } if in.Description != nil { in, out := &in.Description, &out.Description *out = new(string) diff --git a/apis/workflows/v1beta1/zz_workflow_types.go b/apis/workflows/v1beta1/zz_workflow_types.go index 8f25cdc77..3c0b03944 100755 --- a/apis/workflows/v1beta1/zz_workflow_types.go +++ b/apis/workflows/v1beta1/zz_workflow_types.go @@ -88,6 +88,10 @@ type WorkflowObservation struct { // Format: projects/{project}/locations/{location}/keyRings/{keyRing}/cryptoKeys/{cryptoKey} CryptoKeyName *string `json:"cryptoKeyName,omitempty" tf:"crypto_key_name,omitempty"` + // Defaults to true. + // When the field is set to false, deleting the workflow is allowed. + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + // Description of the workflow provided by the user. Must be at most 1000 unicode characters long. 
Description *string `json:"description,omitempty" tf:"description,omitempty"` diff --git a/apis/zz_register.go b/apis/zz_register.go index e05735673..cbef4a300 100755 --- a/apis/zz_register.go +++ b/apis/zz_register.go @@ -71,7 +71,6 @@ import ( v1beta2dataplex "github.com/upbound/provider-gcp/apis/dataplex/v1beta2" v1beta1dataproc "github.com/upbound/provider-gcp/apis/dataproc/v1beta1" v1beta2dataproc "github.com/upbound/provider-gcp/apis/dataproc/v1beta2" - v1beta1datastore "github.com/upbound/provider-gcp/apis/datastore/v1beta1" v1beta1datastream "github.com/upbound/provider-gcp/apis/datastream/v1beta1" v1beta2datastream "github.com/upbound/provider-gcp/apis/datastream/v1beta2" v1beta1dialogflowcx "github.com/upbound/provider-gcp/apis/dialogflowcx/v1beta1" @@ -214,7 +213,6 @@ func init() { v1beta2dataplex.SchemeBuilder.AddToScheme, v1beta1dataproc.SchemeBuilder.AddToScheme, v1beta2dataproc.SchemeBuilder.AddToScheme, - v1beta1datastore.SchemeBuilder.AddToScheme, v1beta1datastream.SchemeBuilder.AddToScheme, v1beta2datastream.SchemeBuilder.AddToScheme, v1beta1dialogflowcx.SchemeBuilder.AddToScheme, diff --git a/cmd/provider/datastore/zz_main.go b/cmd/provider/datastore/zz_main.go deleted file mode 100644 index 0c6c43899..000000000 --- a/cmd/provider/datastore/zz_main.go +++ /dev/null @@ -1,221 +0,0 @@ -// SPDX-FileCopyrightText: 2024 The Crossplane Authors -// -// SPDX-License-Identifier: Apache-2.0 - -package main - -import ( - "context" - "fmt" - "io" - "log" - "os" - "path/filepath" - "time" - - xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - "github.com/crossplane/crossplane-runtime/pkg/certificates" - xpcontroller "github.com/crossplane/crossplane-runtime/pkg/controller" - "github.com/crossplane/crossplane-runtime/pkg/feature" - "github.com/crossplane/crossplane-runtime/pkg/logging" - "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" - "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" - 
"github.com/crossplane/crossplane-runtime/pkg/resource" - "github.com/crossplane/crossplane-runtime/pkg/statemetrics" - tjcontroller "github.com/crossplane/upjet/pkg/controller" - "github.com/crossplane/upjet/pkg/controller/conversion" - "gopkg.in/alecthomas/kingpin.v2" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" - "k8s.io/client-go/tools/leaderelection/resourcelock" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/cache" - "sigs.k8s.io/controller-runtime/pkg/log/zap" - "sigs.k8s.io/controller-runtime/pkg/metrics" - "sigs.k8s.io/controller-runtime/pkg/webhook" - - "github.com/upbound/provider-gcp/apis" - "github.com/upbound/provider-gcp/apis/v1alpha1" - "github.com/upbound/provider-gcp/config" - resolverapis "github.com/upbound/provider-gcp/internal/apis" - "github.com/upbound/provider-gcp/internal/clients" - "github.com/upbound/provider-gcp/internal/controller" - "github.com/upbound/provider-gcp/internal/features" -) - -const ( - webhookTLSCertDirEnvVar = "WEBHOOK_TLS_CERT_DIR" - tlsServerCertDirEnvVar = "TLS_SERVER_CERTS_DIR" - certsDirEnvVar = "CERTS_DIR" - tlsServerCertDir = "/tls/server" -) - -func deprecationAction(flagName string) kingpin.Action { - return func(c *kingpin.ParseContext) error { - _, err := fmt.Fprintf(os.Stderr, "warning: Command-line flag %q is deprecated and no longer used. It will be removed in a future release. 
Please remove it from all of your configurations (ControllerConfigs, etc.).\n", flagName) - kingpin.FatalIfError(err, "Failed to print the deprecation notice.") - return nil - } -} - -func main() { - var ( - app = kingpin.New(filepath.Base(os.Args[0]), "Terraform based Crossplane provider for GCP").DefaultEnvars() - debug = app.Flag("debug", "Run with debug logging.").Short('d').Bool() - syncInterval = app.Flag("sync", "Sync interval controls how often all resources will be double checked for drift.").Short('s').Default("1h").Duration() - pollInterval = app.Flag("poll", "Poll interval controls how often an individual resource should be checked for drift.").Default("10m").Duration() - pollStateMetricInterval = app.Flag("poll-state-metric", "State metric recording interval").Default("5s").Duration() - leaderElection = app.Flag("leader-election", "Use leader election for the controller manager.").Short('l').Default("false").OverrideDefaultFromEnvar("LEADER_ELECTION").Bool() - maxReconcileRate = app.Flag("max-reconcile-rate", "The global maximum rate per second at which resources may checked for drift from the desired state.").Default("100").Int() - - namespace = app.Flag("namespace", "Namespace used to set as default scope in default secret store config.").Default("crossplane-system").Envar("POD_NAMESPACE").String() - essTLSCertsPath = app.Flag("ess-tls-cert-dir", "Path of ESS TLS certificates.").Envar("ESS_TLS_CERTS_DIR").String() - enableExternalSecretStores = app.Flag("enable-external-secret-stores", "Enable support for ExternalSecretStores.").Default("false").Envar("ENABLE_EXTERNAL_SECRET_STORES").Bool() - enableManagementPolicies = app.Flag("enable-management-policies", "Enable support for Management Policies.").Default("true").Envar("ENABLE_MANAGEMENT_POLICIES").Bool() - - certsDirSet = false - // we record whether the command-line option "--certs-dir" was supplied - // in the registered PreAction for the flag. 
- certsDir = app.Flag("certs-dir", "The directory that contains the server key and certificate.").Default(tlsServerCertDir).Envar(certsDirEnvVar).PreAction(func(_ *kingpin.ParseContext) error { - certsDirSet = true - return nil - }).String() - - // now deprecated command-line arguments with the Terraform SDK-based upjet architecture - _ = app.Flag("provider-ttl", "[DEPRECATED: This option is no longer used and it will be removed in a future release.] TTL for the native plugin processes before they are replaced. Changing the default may increase memory consumption.").Hidden().Action(deprecationAction("provider-ttl")).Int() - _ = app.Flag("terraform-version", "[DEPRECATED: This option is no longer used and it will be removed in a future release.] Terraform version.").Envar("TERRAFORM_VERSION").Hidden().Action(deprecationAction("terraform-version")).String() - _ = app.Flag("terraform-provider-version", "[DEPRECATED: This option is no longer used and it will be removed in a future release.] Terraform provider version.").Envar("TERRAFORM_PROVIDER_VERSION").Hidden().Action(deprecationAction("terraform-provider-version")).String() - _ = app.Flag("terraform-native-provider-path", "[DEPRECATED: This option is no longer used and it will be removed in a future release.] Terraform native provider path for shared execution.").Envar("TERRAFORM_NATIVE_PROVIDER_PATH").Hidden().Action(deprecationAction("terraform-native-provider-path")).String() - _ = app.Flag("terraform-provider-source", "[DEPRECATED: This option is no longer used and it will be removed in a future release.] 
Terraform provider source.").Envar("TERRAFORM_PROVIDER_SOURCE").Hidden().Action(deprecationAction("terraform-provider-source")).String() - ) - - kingpin.MustParse(app.Parse(os.Args[1:])) - log.Default().SetOutput(io.Discard) - ctrl.SetLogger(zap.New(zap.WriteTo(io.Discard))) - - zl := zap.New(zap.UseDevMode(*debug)) - logr := logging.NewLogrLogger(zl.WithName("provider-gcp")) - if *debug { - // The controller-runtime runs with a no-op logger by default. It is - // *very* verbose even at info level, so we only provide it a real - // logger when we're running in debug mode. - ctrl.SetLogger(zl) - } - - // currently, we configure the jitter to be the 5% of the poll interval - pollJitter := time.Duration(float64(*pollInterval) * 0.05) - logr.Debug("Starting", "sync-interval", syncInterval.String(), - "poll-interval", pollInterval.String(), "poll-jitter", pollJitter, "max-reconcile-rate", *maxReconcileRate) - - cfg, err := ctrl.GetConfig() - kingpin.FatalIfError(err, "Cannot get API server rest config") - - // Get the TLS certs directory from the environment variables set by - // Crossplane if they're available. - // In older XP versions we used WEBHOOK_TLS_CERT_DIR, in newer versions - // we use TLS_SERVER_CERTS_DIR. If an explicit certs dir is not supplied - // via the command-line options, then these environment variables are used - // instead. - if !certsDirSet { - // backwards-compatibility concerns - xpCertsDir := os.Getenv(certsDirEnvVar) - if xpCertsDir == "" { - xpCertsDir = os.Getenv(tlsServerCertDirEnvVar) - } - if xpCertsDir == "" { - xpCertsDir = os.Getenv(webhookTLSCertDirEnvVar) - } - // we probably don't need this condition but just to be on the - // safe side, if we are missing any kingpin machinery details... 
- if xpCertsDir != "" { - *certsDir = xpCertsDir - } - } - - mgr, err := ctrl.NewManager(ratelimiter.LimitRESTConfig(cfg, *maxReconcileRate), ctrl.Options{ - LeaderElection: *leaderElection, - LeaderElectionID: "crossplane-leader-election-provider-gcp-datastore", - Cache: cache.Options{ - SyncPeriod: syncInterval, - }, - WebhookServer: webhook.NewServer( - webhook.Options{ - CertDir: *certsDir, - }), - LeaderElectionResourceLock: resourcelock.LeasesResourceLock, - LeaseDuration: func() *time.Duration { d := 60 * time.Second; return &d }(), - RenewDeadline: func() *time.Duration { d := 50 * time.Second; return &d }(), - }) - kingpin.FatalIfError(err, "Cannot create controller manager") - kingpin.FatalIfError(apis.AddToScheme(mgr.GetScheme()), "Cannot add GCP APIs to scheme") - kingpin.FatalIfError(resolverapis.BuildScheme(apis.AddToSchemes), "Cannot register the GCP APIs with the API resolver's runtime scheme") - - metricRecorder := managed.NewMRMetricRecorder() - stateMetrics := statemetrics.NewMRStateMetrics() - - metrics.Registry.MustRegister(metricRecorder) - metrics.Registry.MustRegister(stateMetrics) - - ctx := context.Background() - provider, err := config.GetProvider(ctx, false) - kingpin.FatalIfError(err, "Cannot initialize the provider configuration") - o := tjcontroller.Options{ - Options: xpcontroller.Options{ - Logger: logr, - GlobalRateLimiter: ratelimiter.NewGlobal(*maxReconcileRate), - PollInterval: *pollInterval, - MaxConcurrentReconciles: *maxReconcileRate, - Features: &feature.Flags{}, - MetricOptions: &xpcontroller.MetricOptions{ - PollStateMetricInterval: *pollStateMetricInterval, - MRMetrics: metricRecorder, - MRStateMetrics: stateMetrics, - }, - }, - Provider: provider, - SetupFn: clients.TerraformSetupBuilder(provider.TerraformProvider), - PollJitter: pollJitter, - OperationTrackerStore: tjcontroller.NewOperationStore(logr), - StartWebhooks: *certsDir != "", - } - - if *enableManagementPolicies { - 
o.Features.Enable(features.EnableBetaManagementPolicies) - logr.Info("Beta feature enabled", "flag", features.EnableBetaManagementPolicies) - } - - if *enableExternalSecretStores { - o.SecretStoreConfigGVK = &v1alpha1.StoreConfigGroupVersionKind - logr.Info("Alpha feature enabled", "flag", features.EnableAlphaExternalSecretStores) - - o.ESSOptions = &tjcontroller.ESSOptions{} - if *essTLSCertsPath != "" { - logr.Info("ESS TLS certificates path is set. Loading mTLS configuration.") - tCfg, err := certificates.LoadMTLSConfig(filepath.Join(*essTLSCertsPath, "ca.crt"), filepath.Join(*essTLSCertsPath, "tls.crt"), filepath.Join(*essTLSCertsPath, "tls.key"), false) - kingpin.FatalIfError(err, "Cannot load ESS TLS config.") - - o.ESSOptions.TLSConfig = tCfg - } - - // Ensure default store config exists. - kingpin.FatalIfError(resource.Ignore(kerrors.IsAlreadyExists, mgr.GetClient().Create(ctx, &v1alpha1.StoreConfig{ - TypeMeta: metav1.TypeMeta{}, - ObjectMeta: metav1.ObjectMeta{ - Name: "default", - }, - Spec: v1alpha1.StoreConfigSpec{ - // NOTE(turkenh): We only set required spec and expect optional - // ones to properly be initialized with CRD level default values. 
- SecretStoreConfig: xpv1.SecretStoreConfig{ - DefaultScope: *namespace, - }, - }, - Status: v1alpha1.StoreConfigStatus{}, - })), "cannot create default store config") - } - - kingpin.FatalIfError(conversion.RegisterConversions(o.Provider, mgr.GetScheme()), "Cannot initialize the webhook conversion registry") - kingpin.FatalIfError(controller.Setup_datastore(mgr, o), "Cannot setup GCP controllers") - kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") -} diff --git a/config/externalname.go b/config/externalname.go index e89a20825..7c43f5848 100644 --- a/config/externalname.go +++ b/config/externalname.go @@ -559,6 +559,8 @@ var terraformPluginSDKExternalNameConfigs = map[string]config.ExternalName{ // identityplatform // + // Imported by using the following projects/{{project}}/config + "google_identity_platform_config": config.TemplatedStringAsIdentifier("", "projects/{{ .setup.configuration.project }}/config"), // Imported by using the following format: projects/{{project}}/defaultSupportedIdpConfigs/{{idp_id}} "google_identity_platform_default_supported_idp_config": config.IdentifierFromProvider, // Imported by using the following format: projects/{{project}}/inboundSamlConfigs/{{name}} @@ -573,8 +575,6 @@ var terraformPluginSDKExternalNameConfigs = map[string]config.ExternalName{ "google_identity_platform_tenant_inbound_saml_config": config.IdentifierFromProvider, // Imported by using the following: projects/{{project}}/tenants/{{tenant}}/oauthIdpConfigs/{{name}} "google_identity_platform_tenant_oauth_idp_config": config.IdentifierFromProvider, - // Imported by using the following projects/{{project}}/config/{{name}} - "google_identity_platform_project_default_config": config.IdentifierFromProvider, // kms // diff --git a/config/externalnamenottested.go b/config/externalnamenottested.go index ea7aa544b..d5070feb4 100644 --- a/config/externalnamenottested.go +++ b/config/externalnamenottested.go @@ -376,10 +376,6 @@ var 
ExternalNameNotTestedConfigs = map[string]config.ExternalName{ // identityplatform // - // Imported by using the following projects/{{project}}/config - "google_identity_platform_config": config.TemplatedStringAsIdentifier("", "projects/{{ .setup.configuration.project }}/config"), - // Imported by using the following projects/{{project}}/config/{{name}} - "google_identity_platform_project_default_config": config.IdentifierFromProvider, // kms // diff --git a/config/generated.lst b/config/generated.lst index 6f5263cf3..0aeb7f243 100644 --- a/config/generated.lst +++ b/config/generated.lst @@ -206,7 +206,6 @@ "google_dataproc_job", "google_dataproc_metastore_service", "google_dataproc_workflow_template", - "google_datastore_index", "google_datastream_connection_profile", "google_datastream_private_connection", "google_dialogflow_cx_agent", @@ -250,10 +249,10 @@ "google_iap_web_iam_member", "google_iap_web_type_app_engine_iam_member", "google_iap_web_type_compute_iam_member", + "google_identity_platform_config", "google_identity_platform_default_supported_idp_config", "google_identity_platform_inbound_saml_config", "google_identity_platform_oauth_idp_config", - "google_identity_platform_project_default_config", "google_identity_platform_tenant", "google_identity_platform_tenant_default_supported_idp_config", "google_identity_platform_tenant_inbound_saml_config", diff --git a/config/old-singleton-list-apis.txt b/config/old-singleton-list-apis.txt index e89ef7bc7..499115b63 100644 --- a/config/old-singleton-list-apis.txt +++ b/config/old-singleton-list-apis.txt @@ -144,7 +144,6 @@ google_iap_web_iam_member google_iap_web_type_app_engine_iam_member google_iap_web_type_compute_iam_member google_identity_platform_inbound_saml_config -google_identity_platform_project_default_config google_identity_platform_tenant_inbound_saml_config google_kms_crypto_key google_kms_crypto_key_iam_member diff --git a/config/overrides.go b/config/overrides.go index fad38758d..3d690697b 100644 
--- a/config/overrides.go +++ b/config/overrides.go @@ -143,3 +143,13 @@ func descriptionOverrides() tjconfig.ResourceOption { }) } } + +func DeletionProtectionRemove() tjconfig.ResourceOption { + return func(r *tjconfig.Resource) { + if t, ok := r.TerraformResource.Schema["deletion_protection"]; ok { + t.Computed = true + t.Optional = false + t.Default = false + } + } +} diff --git a/config/provider-metadata.yaml b/config/provider-metadata.yaml index ebd61c5dd..5cd65765b 100644 --- a/config/provider-metadata.yaml +++ b/config/provider-metadata.yaml @@ -306,7 +306,7 @@ resources: vpc_network_sources.vpc_subnetwork.vpc_ip_subnetworks: |- - (Optional) - CIDR block IP subnetwork specification. Must be IPv4. + A list of CIDR block IP subnetwork specification. Must be IPv4. importStatements: [] google_access_context_manager_access_level_condition: subCategory: Access Context Manager (VPC Service Controls) @@ -797,6 +797,7 @@ resources: dependencies: google_project.project: |- { + "deletion_policy": "DELETE", "name": "my-project-name", "org_id": "123456789", "project_id": "my-project-name" @@ -1282,9 +1283,11 @@ resources: egress_from.identities: |- - (Optional) - A list of identities that are allowed access through this EgressPolicy. - Should be in the format of email address. The email address should - represent individual user or service account only. + Identities can be an individual user, service account, Google group, + or third-party identity. For third-party identity, only single identities + are supported and other identity types are not supported.The v1 identities + that have the prefix user, group and serviceAccount in + https://cloud.google.com/iam/docs/principal-identifiers#v1 are supported. egress_from.identity_type: |- - (Optional) @@ -1337,9 +1340,11 @@ resources: ingress_from.identities: |- - (Optional) - A list of identities that are allowed access through this ingress policy. - Should be in the format of email address. 
The email address should represent - individual user or service account only. + Identities can be an individual user, service account, Google group, + or third-party identity. For third-party identity, only single identities + are supported and other identity types are not supported.The v1 identities + that have the prefix user, group and serviceAccount in + https://cloud.google.com/iam/docs/principal-identifiers#v1 are supported. ingress_from.identity_type: |- - (Optional) @@ -1650,7 +1655,7 @@ resources: "lifecycle": [ { "ignore_changes": [ - "${status[0].resources}" + "${spec[0].egress_policies}" ] } ], @@ -1676,9 +1681,11 @@ resources: egress_from.identities: |- - (Optional) - A list of identities that are allowed access through this EgressPolicy. - Should be in the format of email address. The email address should - represent individual user or service account only. + Identities can be an individual user, service account, Google group, + or third-party identity. For third-party identity, only single identities + are supported and other identity types are not supported.The v1 identities + that have the prefix user, group and serviceAccount in + https://cloud.google.com/iam/docs/principal-identifiers#v1 are supported. egress_from.identity_type: |- - (Optional) @@ -1811,7 +1818,7 @@ resources: "lifecycle": [ { "ignore_changes": [ - "${status[0].resources}" + "${spec[0].ingress_policies}" ] } ], @@ -1839,9 +1846,11 @@ resources: ingress_from.identities: |- - (Optional) - A list of identities that are allowed access through this ingress policy. - Should be in the format of email address. The email address should represent - individual user or service account only. + Identities can be an individual user, service account, Google group, + or third-party identity. 
For third-party identity, only single identities + are supported and other identity types are not supported.The v1 identities + that have the prefix user, group and serviceAccount in + https://cloud.google.com/iam/docs/principal-identifiers#v1 are supported. ingress_from.identity_type: |- - (Optional) @@ -2032,7 +2041,7 @@ resources: "lifecycle": [ { "ignore_changes": [ - "${status[0].resources}" + "${status[0].egress_policies}" ] } ], @@ -2058,9 +2067,11 @@ resources: egress_from.identities: |- - (Optional) - A list of identities that are allowed access through this EgressPolicy. - Should be in the format of email address. The email address should - represent individual user or service account only. + Identities can be an individual user, service account, Google group, + or third-party identity. For third-party identity, only single identities + are supported and other identity types are not supported.The v1 identities + that have the prefix user, group and serviceAccount in + https://cloud.google.com/iam/docs/principal-identifiers#v1 are supported. egress_from.identity_type: |- - (Optional) @@ -2193,7 +2204,7 @@ resources: "lifecycle": [ { "ignore_changes": [ - "${status[0].resources}" + "${status[0].ingress_policies}" ] } ], @@ -2221,9 +2232,11 @@ resources: ingress_from.identities: |- - (Optional) - A list of identities that are allowed access through this ingress policy. - Should be in the format of email address. The email address should represent - individual user or service account only. + Identities can be an individual user, service account, Google group, + or third-party identity. For third-party identity, only single identities + are supported and other identity types are not supported.The v1 identities + that have the prefix user, group and serviceAccount in + https://cloud.google.com/iam/docs/principal-identifiers#v1 are supported. 
ingress_from.identity_type: |- - (Optional) @@ -2783,6 +2796,7 @@ resources: - name: ad-domain manifest: |- { + "deletion_protection": false, "domain_name": "tfgen.org.com", "locations": [ "us-central1" @@ -2802,6 +2816,13 @@ resources: If CIDR subnets overlap between networks, domain creation will fail. create: '- Default is 60 minutes.' delete: '- Default is 60 minutes.' + deletion_protection: |- + - (Optional) Whether Terraform will be prevented from destroying the domain. Defaults to true. + When aterraform destroy or terraform apply would delete the domain, + the command will fail if this field is not set to false in Terraform state. + When the field is set to true or unset in Terraform state, a terraform apply + or terraform destroy that would delete the domain will fail. + When the field is set to false, deleting the domain is allowed. domain_name: |- - (Required) @@ -2852,6 +2873,7 @@ resources: - name: ad-domain-trust manifest: |- { + "deletion_protection": false, "domain": "test-managed-ad.com", "target_dns_ip_addresses": [ "10.1.0.100" @@ -2912,6 +2934,7 @@ resources: manifest: |- { "authorized_network": "${google_compute_network.peered-network.id}", + "deletion_protection": false, "domain_resource": "${google_active_directory_domain.ad-domain.name}", "labels": { "foo": "bar" @@ -2929,6 +2952,7 @@ resources: "authorized_networks": [ "${google_compute_network.source-network.id}" ], + "deletion_protection": false, "domain_name": "ad.test.hashicorptest.com", "locations": [ "us-central1" @@ -2950,6 +2974,7 @@ resources: google_project.peered-project: |- { "billing_account": "000000-0000000-0000000-000000", + "deletion_policy": "DELETE", "name": "my-peered-project", "org_id": "123456789", "project_id": "my-peered-project", @@ -3447,7 +3472,11 @@ resources: { "cluster_id": "alloydb-pitr-restored", "location": "us-central1", - "network": "${data.google_compute_network.default.id}", + "network_config": [ + { + "network": 
"${data.google_compute_network.default.id}" + } + ], "restore_continuous_backup_source": [ { "cluster": "${google_alloydb_cluster.source.name}", @@ -3456,7 +3485,7 @@ resources: ] } references: - network: data.google_compute_network.default.id + network_config.network: data.google_compute_network.default.id restore_continuous_backup_source.cluster: google_alloydb_cluster.source.name dependencies: google_alloydb_backup.source: |- @@ -3503,10 +3532,14 @@ resources: { "cluster_id": "alloydb-primary-cluster", "location": "us-central1", - "network": "${google_compute_network.default.id}" + "network_config": [ + { + "network": "${google_compute_network.default.id}" + } + ] } references: - network: google_compute_network.default.id + network_config.network: google_compute_network.default.id dependencies: google_alloydb_instance.primary: |- { @@ -3556,7 +3589,11 @@ resources: "${google_alloydb_instance.primary}" ], "location": "us-east1", - "network": "${google_compute_network.default.id}", + "network_config": [ + { + "network": "${google_compute_network.default.id}" + } + ], "secondary_config": [ { "primary_cluster_name": "${google_alloydb_cluster.primary.name}" @@ -3564,7 +3601,7 @@ resources: ] } references: - network: google_compute_network.default.id + network_config.network: google_compute_network.default.id secondary_config.primary_cluster_name: google_alloydb_cluster.primary.name dependencies: google_alloydb_instance.primary: |- @@ -3709,16 +3746,17 @@ resources: - (Output) Days of the week on which a continuous backup is taken. Output only field. Ignored if passed into the request. - create: '- Default is 30 minutes.' + create: '- Default is 120 minutes.' database_version: |- - (Optional) The database engine major version. This is an optional field and it's populated at the Cluster creation time. This field cannot be changed after cluster creation. - delete: '- Default is 30 minutes.' + delete: '- Default is 120 minutes.' 
deletion_policy: |- - (Optional) Policy to determine if the cluster should be deleted forcefully. Deleting a cluster forcefully, deletes the cluster and all its associated instances within the cluster. Deleting a Secondary cluster with a secondary instance REQUIRES setting deletion_policy = "FORCE" otherwise an error is returned. This is needed as there is no support to delete just the secondary instance, and the only way to delete secondary instance is to delete the associated secondary cluster forcefully which also deletes the secondary instance. + Possible values: DEFAULT, FORCE display_name: |- - (Optional) @@ -3816,11 +3854,6 @@ resources: name: |- - The name of the cluster resource. - network: |- - - - (Optional, Deprecated) - The relative resource name of the VPC network on which the instance can be accessed. It is specified in the following form: - "projects/{projectNumber}/global/networks/{network_id}". network_config: |- - (Optional) @@ -3924,6 +3957,11 @@ resources: state: |- - Output only. The current serving state of the cluster. + subscription_type: |- + - + (Optional) + The subscrition type of cluster. + Possible values are: TRIAL, STANDARD. terraform_labels: |- - The combination of labels configured directly on the resource @@ -3933,10 +3971,30 @@ resources: (Optional) The retention period. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". + trial_metadata: |- + - + Contains information and all metadata related to TRIAL clusters. + Structure is documented below. + trial_metadata.end_time: |- + - + (Optional) + End time of the trial cluster. + trial_metadata.grace_end_time: |- + - + (Optional) + Grace end time of the trial cluster. + trial_metadata.start_time: |- + - + (Optional) + Start time of the trial cluster. + trial_metadata.upgrade_time: |- + - + (Optional) + Upgrade time of the trial cluster to standard cluster. uid: |- - The system-generated UID of the resource. - update: '- Default is 30 minutes.' 
+ update: '- Default is 120 minutes.' weekly_schedule.days_of_week: |- - (Optional) @@ -4029,7 +4087,11 @@ resources: { "cluster_id": "alloydb-primary-cluster", "location": "us-central1", - "network": "${google_compute_network.default.id}" + "network_config": [ + { + "network": "${google_compute_network.default.id}" + } + ] } google_alloydb_cluster.secondary: |- { @@ -4052,7 +4114,11 @@ resources: } ], "location": "us-east1", - "network": "${google_compute_network.default.id}", + "network_config": [ + { + "network": "${data.google_compute_network.default.id}" + } + ], "secondary_config": [ { "primary_cluster_name": "${google_alloydb_cluster.primary.name}" @@ -4102,7 +4168,11 @@ resources: { "cluster_id": "alloydb-primary-cluster", "location": "us-central1", - "network": "${google_compute_network.default.id}" + "network_config": [ + { + "network": "${google_compute_network.default.id}" + } + ] } google_alloydb_cluster.secondary: |- { @@ -4125,7 +4195,11 @@ resources: } ], "location": "us-east1", - "network": "${google_compute_network.default.id}", + "network_config": [ + { + "network": "${data.google_compute_network.default.id}" + } + ], "secondary_config": [ { "primary_cluster_name": "${google_alloydb_cluster.primary.name}" @@ -4264,6 +4338,10 @@ resources: field is only allowed to be set when enable_public_ip is set to true. Structure is documented below. + network_config.enable_outbound_public_ip: |- + - + (Optional) + Enabling outbound public ip for the instance. network_config.enable_public_ip: |- - (Optional) @@ -4307,6 +4385,11 @@ resources: - (Optional) Record wait events during query execution for an instance. + outbound_public_ip_addresses: |- + - + The outbound public IP addresses for the instance. This is available ONLY when + networkConfig.enableOutboundPublicIp is set to true. These IP addresses are used + for outbound connections. 
psc_instance_config: |- - (Optional) @@ -4418,7 +4501,11 @@ resources: } ], "location": "us-central1", - "network": "${google_compute_network.default.id}" + "network_config": [ + { + "network": "${data.google_compute_network.default.id}" + } + ] } google_alloydb_instance.default: |- { @@ -4474,7 +4561,11 @@ resources: } ], "location": "us-central1", - "network": "${google_compute_network.default.id}" + "network_config": [ + { + "network": "${google_compute_network.default.id}" + } + ] } google_alloydb_instance.default: |- { @@ -5171,23 +5262,147 @@ resources: Name of the Apigee organization. update: '- Default is 20 minutes.' importStatements: [] - google_apigee_endpoint_attachment: + google_apigee_api: subCategory: Apigee - description: Apigee Endpoint Attachment. - name: google_apigee_endpoint_attachment + description: An Apigee API proxy is essentially a layer that sits in front of your backend APIs. It acts as an intermediary between your API consumers (like mobile apps or websites) and your backend services. + name: google_apigee_api + title: google_apigee_api + argumentDocs: + config_bundle: |- + - + (Required) + Path to the config zip bundle. + create: '- Default is 20 minutes.' + delete: '- Default is 20 minutes.' + detect_md5hash: |- + - + (Optional) Detect changes to local config bundle file or changes made outside of Terraform. MD5 hash of the data, encoded using base64. Hash is automatically computed without need for user input. + id: '- an identifier for the resource with format organizations/{{org_id}}/apis/{{name}}' + latest_revision_id: |- + - + The id of the most recently created revision for this API proxy. + md5hash: |- + - + (Computed) Base 64 MD5 hash of the uploaded data. It is speculative as remote does not return hash of the bundle. Remote changes are detected using returned last_modified timestamp. + meta_data: |- + - + Metadata describing the API proxy. + Structure is documented below. 
+ meta_data.created_at: |- + - + (Optional) + Time at which the API proxy was created, in milliseconds since epoch. + meta_data.last_modified_at: |- + - + (Optional) + Time at which the API proxy was most recently modified, in milliseconds since epoch. + meta_data.sub_type: |- + - + (Optional) + The type of entity described + name: |- + - + (Required) + The ID of the API proxy. + org_id: |- + - + (Required) + The Apigee Organization name associated with the Apigee instance. + revision: |- + - + A list of revisions of this API proxy. + importStatements: [] + google_apigee_app_group: + subCategory: Apigee + description: An + name: google_apigee_app_group title: "" examples: - - name: apigee_endpoint_attachment + - name: apigee_app_group manifest: |- { - "endpoint_attachment_id": "test1", - "location": "{google_compute_service_attachment location}", + "channel_id": "storefront", + "channel_uri": "https://my-dev-portal.org/groups/my-group", + "depends_on": [ + "${google_apigee_instance.apigee_instance}" + ], + "display_name": "Test app group", + "name": "my-app-group", "org_id": "${google_apigee_organization.apigee_org.id}", - "service_attachment": "{google_compute_service_attachment id}" + "status": "active" } references: org_id: google_apigee_organization.apigee_org.id dependencies: + google_apigee_instance.apigee_instance: |- + { + "location": "us-central1", + "name": "instance", + "org_id": "${google_apigee_organization.apigee_org.id}", + "peering_cidr_range": "SLASH_22" + } + google_apigee_organization.apigee_org: |- + { + "analytics_region": "us-central1", + "authorized_network": "${google_compute_network.apigee_network.id}", + "depends_on": [ + "${google_service_networking_connection.apigee_vpc_connection}" + ], + "project_id": "${data.google_client_config.current.project}" + } + google_compute_global_address.apigee_range: |- + { + "address_type": "INTERNAL", + "name": "apigee-range", + "network": "${google_compute_network.apigee_network.id}", + "prefix_length": 
16, + "purpose": "VPC_PEERING" + } + google_compute_network.apigee_network: |- + { + "name": "apigee-network" + } + google_service_networking_connection.apigee_vpc_connection: |- + { + "network": "${google_compute_network.apigee_network.id}", + "reserved_peering_ranges": [ + "${google_compute_global_address.apigee_range.name}" + ], + "service": "servicenetworking.googleapis.com" + } + - name: apigee_app_group + manifest: |- + { + "attributes": [ + { + "name": "business_unit", + "value": "HR" + }, + { + "name": "department", + "value": "payroll" + } + ], + "channel_id": "storefront", + "channel_uri": "https://my-dev-portal.org/groups/my-group", + "depends_on": [ + "${google_apigee_instance.apigee_instance}" + ], + "display_name": "Test app group", + "name": "my-app-group", + "org_id": "${google_apigee_organization.apigee_org.id}", + "status": "active" + } + references: + org_id: google_apigee_organization.apigee_org.id + dependencies: + google_apigee_instance.apigee_instance: |- + { + "location": "us-central1", + "name": "instance", + "org_id": "${google_apigee_organization.apigee_org.id}", + "peering_cidr_range": "SLASH_22" + } google_apigee_organization.apigee_org: |- { "analytics_region": "us-central1", @@ -5218,51 +5433,324 @@ resources: "service": "servicenetworking.googleapis.com" } argumentDocs: - connection_state: |- + app_group_id: |- - - State of the endpoint attachment connection to the service attachment. - create: '- Default is 30 minutes.' - delete: '- Default is 30 minutes.' - endpoint_attachment_id: |- + Internal identifier that cannot be edited + attributes: |- - - (Required) - ID of the endpoint attachment. - host: |- + (Optional) + A list of attributes + Structure is documented below. + attributes.name: |- - - Host that can be used in either HTTP Target Endpoint directly, or as the host in Target Server. 
- id: '- an identifier for the resource with format {{org_id}}/endpointAttachments/{{endpoint_attachment_id}}' - location: |- + (Optional) + Key of the attribute + attributes.value: |- - - (Required) - Location of the endpoint attachment. + (Optional) + Value of the attribute + channel_id: |- + - + (Optional) + Channel identifier identifies the owner maintaining this grouping. + channel_uri: |- + - + (Optional) + A reference to the associated storefront/marketplace. + create: '- Default is 20 minutes.' + created_at: |- + - + Created time as milliseconds since epoch. + delete: '- Default is 20 minutes.' + display_name: |- + - + (Optional) + App group name displayed in the UI + id: '- an identifier for the resource with format {{org_id}}/appgroups/{{name}}' + last_modified_at: |- + - + Modified time as milliseconds since epoch. name: |- - - Name of the Endpoint Attachment in the following format: - organizations/{organization}/endpointAttachments/{endpointAttachment}. + (Required) + Name of the AppGroup. Characters you can use in the name are restricted to: A-Z0-9._-$ %. org_id: |- - (Required) - The Apigee Organization associated with the Apigee instance, + The Apigee Organization associated with the Apigee app group, in the format organizations/{{org_name}}. - service_attachment: |- + organization: |- - - (Required) - Format: projects//regions//serviceAttachments/* + App group name displayed in the UI + status: |- + - + (Optional) + Valid values are active or inactive. Note that the status of the AppGroup should be updated via UpdateAppGroupRequest by setting the action as active or inactive. + Possible values are: active, inactive. + update: '- Default is 20 minutes.' 
importStatements: [] - google_apigee_envgroup: + google_apigee_developer: subCategory: Apigee - description: An - name: google_apigee_envgroup + description: A + name: google_apigee_developer title: "" examples: - - name: env_grp + - name: apigee_developer manifest: |- { - "hostnames": [ - "abc.foo.com" + "depends_on": [ + "${google_apigee_instance.apigee_instance}" ], - "name": "my-envgroup", - "org_id": "${google_apigee_organization.apigee_org.id}" + "email": "john.doe@acme.com", + "first_name": "John", + "last_name": "Doe", + "org_id": "${google_apigee_organization.apigee_org.id}", + "user_name": "john.doe" + } + references: + org_id: google_apigee_organization.apigee_org.id + dependencies: + google_apigee_instance.apigee_instance: |- + { + "location": "us-central1", + "name": "my-instance", + "org_id": "${google_apigee_organization.apigee_org.id}", + "peering_cidr_range": "SLASH_22" + } + google_apigee_organization.apigee_org: |- + { + "analytics_region": "us-central1", + "authorized_network": "${google_compute_network.apigee_network.id}", + "depends_on": [ + "${google_service_networking_connection.apigee_vpc_connection}" + ], + "project_id": "${data.google_client_config.current.project}" + } + google_compute_global_address.apigee_range: |- + { + "address_type": "INTERNAL", + "name": "apigee-range", + "network": "${google_compute_network.apigee_network.id}", + "prefix_length": 16, + "purpose": "VPC_PEERING" + } + google_compute_network.apigee_network: |- + { + "name": "apigee-network" + } + google_service_networking_connection.apigee_vpc_connection: |- + { + "network": "${google_compute_network.apigee_network.id}", + "reserved_peering_ranges": [ + "${google_compute_global_address.apigee_range.name}" + ], + "service": "servicenetworking.googleapis.com" + } + - name: apigee_developer + manifest: |- + { + "attributes": [ + { + "name": "business_unit", + "value": "HR" + }, + { + "name": "department", + "value": "payroll" + } + ], + "depends_on": [ + 
"${google_apigee_instance.apigee_instance}" + ], + "email": "john.doe@acme.com", + "first_name": "John", + "last_name": "Doe", + "org_id": "${google_apigee_organization.apigee_org.id}", + "user_name": "john.doe" + } + references: + org_id: google_apigee_organization.apigee_org.id + dependencies: + google_apigee_instance.apigee_instance: |- + { + "location": "us-central1", + "name": "my-instance", + "org_id": "${google_apigee_organization.apigee_org.id}", + "peering_cidr_range": "SLASH_22" + } + google_apigee_organization.apigee_org: |- + { + "analytics_region": "us-central1", + "authorized_network": "${google_compute_network.apigee_network.id}", + "depends_on": [ + "${google_service_networking_connection.apigee_vpc_connection}" + ], + "project_id": "${data.google_client_config.current.project}" + } + google_compute_global_address.apigee_range: |- + { + "address_type": "INTERNAL", + "name": "apigee-range", + "network": "${google_compute_network.apigee_network.id}", + "prefix_length": 16, + "purpose": "VPC_PEERING" + } + google_compute_network.apigee_network: |- + { + "name": "apigee-network" + } + google_service_networking_connection.apigee_vpc_connection: |- + { + "network": "${google_compute_network.apigee_network.id}", + "reserved_peering_ranges": [ + "${google_compute_global_address.apigee_range.name}" + ], + "service": "servicenetworking.googleapis.com" + } + argumentDocs: + attributes: |- + - + (Optional) + Developer attributes (name/value pairs). The custom attribute limit is 18. + Structure is documented below. + attributes.name: |- + - + (Optional) + Key of the attribute + attributes.value: |- + - + (Optional) + Value of the attribute + create: '- Default is 20 minutes.' + created_at: |- + - + Time at which the developer was created in milliseconds since epoch. + delete: '- Default is 20 minutes.' + email: |- + - + (Required) + Email address of the developer. This value is used to uniquely identify the developer in Apigee hybrid. 
Note that the email address has to be in lowercase only. + first_name: |- + - + (Required) + First name of the developer. + id: '- an identifier for the resource with format {{org_id}}/developers/{{email}}' + last_modified_at: |- + - + Time at which the developer was last modified in milliseconds since epoch. + last_name: |- + - + (Required) + Last name of the developer. + org_id: |- + - + (Required) + The Apigee Organization associated with the Apigee instance, + in the format organizations/{{org_name}}. + organizatio_name: |- + - + Name of the Apigee organization in which the developer resides. + status: |- + - + Status of the developer. Valid values are active and inactive. + update: '- Default is 20 minutes.' + user_name: |- + - + (Required) + User name of the developer. Not used by Apigee hybrid. + importStatements: [] + google_apigee_endpoint_attachment: + subCategory: Apigee + description: Apigee Endpoint Attachment. + name: google_apigee_endpoint_attachment + title: "" + examples: + - name: apigee_endpoint_attachment + manifest: |- + { + "endpoint_attachment_id": "test1", + "location": "{google_compute_service_attachment location}", + "org_id": "${google_apigee_organization.apigee_org.id}", + "service_attachment": "{google_compute_service_attachment id}" + } + references: + org_id: google_apigee_organization.apigee_org.id + dependencies: + google_apigee_organization.apigee_org: |- + { + "analytics_region": "us-central1", + "authorized_network": "${google_compute_network.apigee_network.id}", + "depends_on": [ + "${google_service_networking_connection.apigee_vpc_connection}" + ], + "project_id": "${data.google_client_config.current.project}" + } + google_compute_global_address.apigee_range: |- + { + "address_type": "INTERNAL", + "name": "apigee-range", + "network": "${google_compute_network.apigee_network.id}", + "prefix_length": 16, + "purpose": "VPC_PEERING" + } + google_compute_network.apigee_network: |- + { + "name": "apigee-network" + } + 
google_service_networking_connection.apigee_vpc_connection: |- + { + "network": "${google_compute_network.apigee_network.id}", + "reserved_peering_ranges": [ + "${google_compute_global_address.apigee_range.name}" + ], + "service": "servicenetworking.googleapis.com" + } + argumentDocs: + connection_state: |- + - + State of the endpoint attachment connection to the service attachment. + create: '- Default is 30 minutes.' + delete: '- Default is 30 minutes.' + endpoint_attachment_id: |- + - + (Required) + ID of the endpoint attachment. + host: |- + - + Host that can be used in either HTTP Target Endpoint directly, or as the host in Target Server. + id: '- an identifier for the resource with format {{org_id}}/endpointAttachments/{{endpoint_attachment_id}}' + location: |- + - + (Required) + Location of the endpoint attachment. + name: |- + - + Name of the Endpoint Attachment in the following format: + organizations/{organization}/endpointAttachments/{endpointAttachment}. + org_id: |- + - + (Required) + The Apigee Organization associated with the Apigee instance, + in the format organizations/{{org_name}}. + service_attachment: |- + - + (Required) + Format: projects//regions//serviceAttachments/* + importStatements: [] + google_apigee_envgroup: + subCategory: Apigee + description: An + name: google_apigee_envgroup + title: "" + examples: + - name: env_grp + manifest: |- + { + "hostnames": [ + "abc.foo.com" + ], + "name": "my-envgroup", + "org_id": "${google_apigee_organization.apigee_org.id}" } references: org_id: google_apigee_organization.apigee_org.id @@ -5376,6 +5864,7 @@ resources: google_project.project: |- { "billing_account": "", + "deletion_policy": "DELETE", "name": "my-project", "org_id": "", "project_id": "my-project" @@ -6020,6 +6509,7 @@ resources: Output only. 
Resource name of the service attachment created for the instance in the format: projects//regions//serviceAttachments/* Apigee customers can privately forward traffic to this service attachment using the PSC endpoints. + update: '- Default is 20 minutes.' importStatements: [] google_apigee_instance_attachment: subCategory: Apigee @@ -6080,6 +6570,7 @@ resources: google_project.project: |- { "billing_account": "", + "deletion_policy": "DELETE", "name": "my-project", "org_id": "", "project_id": "my-project" @@ -6200,6 +6691,7 @@ resources: google_project.project: |- { "billing_account": "000000-0000000-0000000-000000", + "deletion_policy": "DELETE", "name": "my-project", "org_id": "123456789", "project_id": "my-project" @@ -6454,7 +6946,90 @@ resources: ], "service": "servicenetworking.googleapis.com" } + - name: apigee-nat + manifest: |- + { + "activate": "true", + "instance_id": "${google_apigee_instance.apigee_instance.id}", + "name": "my-nat-address" + } + references: + instance_id: google_apigee_instance.apigee_instance.id + dependencies: + google_apigee_instance.apigee_instance: |- + { + "description": "Terraform-managed Apigee Runtime Instance", + "disk_encryption_key_name": "${google_kms_crypto_key.apigee_key.id}", + "display_name": "apigee-instance", + "location": "us-central1", + "name": "apigee-instance", + "org_id": "${google_apigee_organization.apigee_org.id}" + } + google_apigee_organization.apigee_org: |- + { + "analytics_region": "us-central1", + "authorized_network": "${google_compute_network.apigee_network.id}", + "depends_on": [ + "${google_service_networking_connection.apigee_vpc_connection}", + "${google_kms_crypto_key_iam_member.apigee_sa_keyuser}" + ], + "description": "Terraform-provisioned Apigee Org.", + "display_name": "apigee-org", + "project_id": "${data.google_client_config.current.project}", + "runtime_database_encryption_key_name": "${google_kms_crypto_key.apigee_key.id}" + } + google_compute_global_address.apigee_range: |- + { + 
"address_type": "INTERNAL", + "name": "apigee-range", + "network": "${google_compute_network.apigee_network.id}", + "prefix_length": 21, + "purpose": "VPC_PEERING" + } + google_compute_network.apigee_network: |- + { + "name": "apigee-network" + } + google_kms_crypto_key.apigee_key: |- + { + "key_ring": "${google_kms_key_ring.apigee_keyring.id}", + "lifecycle": [ + { + "prevent_destroy": true + } + ], + "name": "apigee-key" + } + google_kms_crypto_key_iam_member.apigee_sa_keyuser: |- + { + "crypto_key_id": "${google_kms_crypto_key.apigee_key.id}", + "member": "${google_project_service_identity.apigee_sa.member}", + "role": "roles/cloudkms.cryptoKeyEncrypterDecrypter" + } + google_kms_key_ring.apigee_keyring: |- + { + "location": "us-central1", + "name": "apigee-keyring" + } + google_project_service_identity.apigee_sa: |- + { + "project": "${google_project.project.project_id}", + "provider": "${google-beta}", + "service": "${google_project_service.apigee.service}" + } + google_service_networking_connection.apigee_vpc_connection: |- + { + "network": "${google_compute_network.apigee_network.id}", + "reserved_peering_ranges": [ + "${google_compute_global_address.apigee_range.name}" + ], + "service": "servicenetworking.googleapis.com" + } argumentDocs: + activate: |- + - + (Optional) + Flag that specifies whether the reserved NAT address should be activated. create: '- Default is 30 minutes.' delete: '- Default is 30 minutes.' id: '- an identifier for the resource with format {{instance_id}}/natAddresses/{{name}}' @@ -6473,6 +7048,7 @@ resources: state: |- - State of the NAT IP address. + update: '- Default is 30 minutes.' 
importStatements: [] google_apigee_organization: subCategory: Apigee @@ -6825,6 +7401,7 @@ resources: google_project.project: |- { "billing_account": "000000-0000000-0000000-000000", + "deletion_policy": "DELETE", "name": "my-project", "org_id": "123456789", "project_id": "my-project" @@ -6922,6 +7499,7 @@ resources: google_project.project: |- { "billing_account": "000000-0000000-0000000-000000", + "deletion_policy": "DELETE", "name": "my-project", "org_id": "123456789", "project_id": "my-project" @@ -7055,7 +7633,6 @@ resources: { "display_name": "sample-key", "name": "key", - "project": "${google_project.basic.name}", "restrictions": [ { "android_key_restrictions": [ @@ -7079,21 +7656,11 @@ resources: } ] } - references: - project: google_project.basic.name - dependencies: - google_project.basic: |- - { - "name": "app", - "org_id": "123456789", - "project_id": "app" - } - name: primary manifest: |- { "display_name": "sample-key", "name": "key", - "project": "${google_project.basic.name}", "restrictions": [ { "api_targets": [ @@ -7114,21 +7681,11 @@ resources: } ] } - references: - project: google_project.basic.name - dependencies: - google_project.basic: |- - { - "name": "app", - "org_id": "123456789", - "project_id": "app" - } - name: primary manifest: |- { "display_name": "sample-key", "name": "key", - "project": "${google_project.basic.name}", "restrictions": [ { "api_targets": [ @@ -7149,37 +7706,17 @@ resources: } ] } - references: - project: google_project.basic.name - dependencies: - google_project.basic: |- - { - "name": "app", - "org_id": "123456789", - "project_id": "app" - } - name: primary manifest: |- { "display_name": "sample-key", - "name": "key", - "project": "${google_project.basic.name}" + "name": "key" } - references: - project: google_project.basic.name - dependencies: - google_project.basic: |- - { - "name": "app", - "org_id": "123456789", - "project_id": "app" - } - name: primary manifest: |- { "display_name": "sample-key", "name": "key", - 
"project": "${google_project.basic.name}", "restrictions": [ { "api_targets": [ @@ -7200,15 +7737,6 @@ resources: } ] } - references: - project: google_project.basic.name - dependencies: - google_project.basic: |- - { - "name": "app", - "org_id": "123456789", - "project_id": "app" - } argumentDocs: allowed_applications.display_name: |- - @@ -7550,6 +8078,7 @@ resources: google_project.my_project: |- { "billing_account": "000000-0000000-0000000-000000", + "deletion_policy": "DELETE", "name": "tf-test-project", "org_id": "123456789", "project_id": "ae-project" @@ -7669,6 +8198,7 @@ resources: google_project.my_project: |- { "billing_account": "000000-0000000-0000000-000000", + "deletion_policy": "DELETE", "name": "appeng-flex", "org_id": "123456789", "project_id": "appeng-flex" @@ -8912,292 +9442,305 @@ resources: } ] } - - name: example2 + - name: example manifest: |- { "application_id": "example-application", - "attributes": [ - { - "business_owners": [ - { - "display_name": "Alice", - "email": "alice@google.com" - } - ], - "criticality": [ - { - "type": "MISSION_CRITICAL" - } - ], - "developer_owners": [ - { - "display_name": "Bob", - "email": "bob@google.com" - } - ], - "environment": [ - { - "type": "STAGING" - } - ], - "operator_owners": [ - { - "display_name": "Charlie", - "email": "charlie@google.com" - } - ] - } - ], - "description": "Application for testing", - "display_name": "Application Full", - "location": "us-east1", + "location": "global", "scope": [ { - "type": "REGIONAL" + "type": "GLOBAL" } ] } - argumentDocs: - application_id: |- - - - (Required) - Required. The Application identifier. - attributes.business_owners: |- - - - (Optional) - Optional. Business team that ensures user needs are met and value is delivered - Structure is documented below. - attributes.criticality: |- - - - (Optional) - Criticality of the Application, Service, or Workload - Structure is documented below. - attributes.developer_owners: |- - - - (Optional) - Optional. 
Developer team that owns development and coding. - Structure is documented below. - attributes.environment: |- - - - (Optional) - Environment of the Application, Service, or Workload - Structure is documented below. - attributes.operator_owners: |- - - - (Optional) - Optional. Operator team that ensures runtime and operations. - Structure is documented below. - business_owners.display_name: |- - - - (Optional) - Optional. Contact's name. - business_owners.email: |- - - - (Required) - Required. Email address of the contacts. - create: '- Default is 20 minutes.' - create_time: |- - - - Output only. Create time. - criticality.type: |- - - - (Required) - Criticality type. - Possible values are: MISSION_CRITICAL, HIGH, MEDIUM, LOW. - delete: '- Default is 20 minutes.' - developer_owners.display_name: |- - - - (Optional) - Optional. Contact's name. - developer_owners.email: |- - - - (Required) - Required. Email address of the contacts. - environment.type: |- - - - (Required) - Environment type. - Possible values are: PRODUCTION, STAGING, TEST, DEVELOPMENT. - id: '- an identifier for the resource with format projects/{{project}}/locations/{{location}}/applications/{{application_id}}' - location: |- - - - (Required) - Part of parent. See documentation of projectsId. - name: |- - - - Identifier. The resource name of an Application. Format: - "projects/{host-project-id}/locations/{location}/applications/{application-id}" - operator_owners.display_name: |- - - - (Optional) - Optional. Contact's name. - operator_owners.email: |- - - - (Required) - Required. Email address of the contacts. - scope: |- - - - (Required) - Scope of an application. - Structure is documented below. - scope.attributes: |- - - - (Optional) - Consumer provided attributes. - Structure is documented below. - scope.description: |- - - - (Optional) - Optional. User-defined description of an Application. - scope.display_name: |- - - - (Optional) - Optional. User-defined name for the Application. 
- scope.project: |- - - (Optional) The ID of the project in which the resource belongs. - If it is not provided, the provider project is used. - scope.type: |- - - - (Required) - Required. Scope Type. - Possible values: - REGIONAL - Possible values are: REGIONAL. - state: |- - - - Output only. Application state. - Possible values: - STATE_UNSPECIFIED - CREATING - ACTIVE - DELETING - uid: |- - - - Output only. A universally unique identifier (in UUID4 format) for the Application. - update: '- Default is 20 minutes.' - update_time: |- - - - Output only. Update time. - importStatements: [] - google_apphub_service: - subCategory: App Hub - description: Service is a network/api interface that exposes some functionality to clients for consumption over the network. - name: google_apphub_service - title: "" - examples: - - name: example - manifest: |- - { - "application_id": "${google_apphub_application.application.application_id}", - "discovered_service": "${data.google_apphub_discovered_service.catalog-service.name}", - "location": "us-central1", - "service_id": "${google_compute_forwarding_rule.forwarding_rule.name}" - } - references: - application_id: google_apphub_application.application.application_id - discovered_service: data.google_apphub_discovered_service.catalog-service.name - service_id: google_compute_forwarding_rule.forwarding_rule.name - dependencies: - google_apphub_application.application: |- - { - "application_id": "example-application-1", - "location": "us-central1", - "scope": [ - { - "type": "REGIONAL" - } - ] - } - google_apphub_service_project_attachment.service_project_attachment: |- - { - "depends_on": [ - "${time_sleep.wait_120s}" - ], - "service_project_attachment_id": "${google_project.service_project.project_id}" - } - google_compute_forwarding_rule.forwarding_rule: |- - { - "all_ports": true, - "backend_service": "${google_compute_region_backend_service.backend.id}", - "ip_version": "IPV4", - "load_balancing_scheme": "INTERNAL", - "name": 
"l7-ilb-forwarding-rule", - "network": "${google_compute_network.ilb_network.id}", - "project": "${google_project.service_project.project_id}", - "region": "us-central1", - "subnetwork": "${google_compute_subnetwork.ilb_subnet.id}" - } - google_compute_health_check.default: |- - { - "check_interval_sec": 1, - "depends_on": [ - "${time_sleep.wait_120s}" - ], - "name": "l7-ilb-hc", - "project": "${google_project.service_project.project_id}", - "tcp_health_check": [ - { - "port": "80" - } - ], - "timeout_sec": 1 - } - google_compute_network.ilb_network: |- - { - "auto_create_subnetworks": false, - "depends_on": [ - "${time_sleep.wait_120s}" - ], - "name": "l7-ilb-network", - "project": "${google_project.service_project.project_id}" - } - google_compute_region_backend_service.backend: |- - { - "health_checks": [ - "${google_compute_health_check.default.id}" - ], - "name": "l7-ilb-backend-subnet", - "project": "${google_project.service_project.project_id}", - "region": "us-central1" - } - google_compute_subnetwork.ilb_subnet: |- - { - "ip_cidr_range": "10.0.1.0/24", - "name": "l7-ilb-subnet", - "network": "${google_compute_network.ilb_network.id}", - "project": "${google_project.service_project.project_id}", - "region": "us-central1" - } - google_project.service_project: |- - { - "billing_account": "000000-0000000-0000000-000000", - "name": "Service Project", - "org_id": "123456789", - "project_id": "project-1" - } - google_project_service.compute_service_project: |- - { - "project": "${google_project.service_project.project_id}", - "service": "compute.googleapis.com" - } - time_sleep.wait_120s: |- - { - "create_duration": "120s", - "depends_on": [ - "${google_project_service.compute_service_project}" - ] - } - time_sleep.wait_120s_for_resource_ingestion: |- - { - "create_duration": "120s", - "depends_on": [ - "${google_compute_forwarding_rule.forwarding_rule}" - ] - } - - name: example + - name: example2 manifest: |- { - "application_id": 
"${google_apphub_application.application.application_id}", + "application_id": "example-application", + "attributes": [ + { + "business_owners": [ + { + "display_name": "Alice", + "email": "alice@google.com" + } + ], + "criticality": [ + { + "type": "MISSION_CRITICAL" + } + ], + "developer_owners": [ + { + "display_name": "Bob", + "email": "bob@google.com" + } + ], + "environment": [ + { + "type": "STAGING" + } + ], + "operator_owners": [ + { + "display_name": "Charlie", + "email": "charlie@google.com" + } + ] + } + ], + "description": "Application for testing", + "display_name": "Application Full", + "location": "us-east1", + "scope": [ + { + "type": "REGIONAL" + } + ] + } + argumentDocs: + application_id: |- + - + (Required) + Required. The Application identifier. + attributes.business_owners: |- + - + (Optional) + Optional. Business team that ensures user needs are met and value is delivered + Structure is documented below. + attributes.criticality: |- + - + (Optional) + Criticality of the Application, Service, or Workload + Structure is documented below. + attributes.developer_owners: |- + - + (Optional) + Optional. Developer team that owns development and coding. + Structure is documented below. + attributes.environment: |- + - + (Optional) + Environment of the Application, Service, or Workload + Structure is documented below. + attributes.operator_owners: |- + - + (Optional) + Optional. Operator team that ensures runtime and operations. + Structure is documented below. + business_owners.display_name: |- + - + (Optional) + Optional. Contact's name. + business_owners.email: |- + - + (Required) + Required. Email address of the contacts. + create: '- Default is 20 minutes.' + create_time: |- + - + Output only. Create time. + criticality.type: |- + - + (Required) + Criticality type. + Possible values are: MISSION_CRITICAL, HIGH, MEDIUM, LOW. + delete: '- Default is 20 minutes.' + developer_owners.display_name: |- + - + (Optional) + Optional. Contact's name. 
+ developer_owners.email: |- + - + (Required) + Required. Email address of the contacts. + environment.type: |- + - + (Required) + Environment type. + Possible values are: PRODUCTION, STAGING, TEST, DEVELOPMENT. + id: '- an identifier for the resource with format projects/{{project}}/locations/{{location}}/applications/{{application_id}}' + location: |- + - + (Required) + Part of parent. See documentation of projectsId. + name: |- + - + Identifier. The resource name of an Application. Format: + "projects/{host-project-id}/locations/{location}/applications/{application-id}" + operator_owners.display_name: |- + - + (Optional) + Optional. Contact's name. + operator_owners.email: |- + - + (Required) + Required. Email address of the contacts. + scope: |- + - + (Required) + Scope of an application. + Structure is documented below. + scope.attributes: |- + - + (Optional) + Consumer provided attributes. + Structure is documented below. + scope.description: |- + - + (Optional) + Optional. User-defined description of an Application. + scope.display_name: |- + - + (Optional) + Optional. User-defined name for the Application. + scope.project: |- + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + scope.type: |- + - + (Required) + Required. Scope Type. + Possible values: + REGIONAL + GLOBAL + Possible values are: REGIONAL, GLOBAL. + state: |- + - + Output only. Application state. + Possible values: + STATE_UNSPECIFIED + CREATING + ACTIVE + DELETING + uid: |- + - + Output only. A universally unique identifier (in UUID4 format) for the Application. + update: '- Default is 20 minutes.' + update_time: |- + - + Output only. Update time. + importStatements: [] + google_apphub_service: + subCategory: App Hub + description: Service is a network/api interface that exposes some functionality to clients for consumption over the network. 
+ name: google_apphub_service + title: "" + examples: + - name: example + manifest: |- + { + "application_id": "${google_apphub_application.application.application_id}", + "discovered_service": "${data.google_apphub_discovered_service.catalog-service.name}", + "location": "us-central1", + "service_id": "${google_compute_forwarding_rule.forwarding_rule.name}" + } + references: + application_id: google_apphub_application.application.application_id + discovered_service: data.google_apphub_discovered_service.catalog-service.name + service_id: google_compute_forwarding_rule.forwarding_rule.name + dependencies: + google_apphub_application.application: |- + { + "application_id": "example-application-1", + "location": "us-central1", + "scope": [ + { + "type": "REGIONAL" + } + ] + } + google_apphub_service_project_attachment.service_project_attachment: |- + { + "depends_on": [ + "${time_sleep.wait_120s}" + ], + "service_project_attachment_id": "${google_project.service_project.project_id}" + } + google_compute_forwarding_rule.forwarding_rule: |- + { + "all_ports": true, + "backend_service": "${google_compute_region_backend_service.backend.id}", + "ip_version": "IPV4", + "load_balancing_scheme": "INTERNAL", + "name": "l7-ilb-forwarding-rule", + "network": "${google_compute_network.ilb_network.id}", + "project": "${google_project.service_project.project_id}", + "region": "us-central1", + "subnetwork": "${google_compute_subnetwork.ilb_subnet.id}" + } + google_compute_health_check.default: |- + { + "check_interval_sec": 1, + "depends_on": [ + "${time_sleep.wait_120s}" + ], + "name": "l7-ilb-hc", + "project": "${google_project.service_project.project_id}", + "tcp_health_check": [ + { + "port": "80" + } + ], + "timeout_sec": 1 + } + google_compute_network.ilb_network: |- + { + "auto_create_subnetworks": false, + "depends_on": [ + "${time_sleep.wait_120s}" + ], + "name": "l7-ilb-network", + "project": "${google_project.service_project.project_id}" + } + 
google_compute_region_backend_service.backend: |- + { + "health_checks": [ + "${google_compute_health_check.default.id}" + ], + "name": "l7-ilb-backend-subnet", + "project": "${google_project.service_project.project_id}", + "region": "us-central1" + } + google_compute_subnetwork.ilb_subnet: |- + { + "ip_cidr_range": "10.0.1.0/24", + "name": "l7-ilb-subnet", + "network": "${google_compute_network.ilb_network.id}", + "project": "${google_project.service_project.project_id}", + "region": "us-central1" + } + google_project.service_project: |- + { + "billing_account": "000000-0000000-0000000-000000", + "deletion_policy": "DELETE", + "name": "Service Project", + "org_id": "123456789", + "project_id": "project-1" + } + google_project_service.compute_service_project: |- + { + "project": "${google_project.service_project.project_id}", + "service": "compute.googleapis.com" + } + time_sleep.wait_120s: |- + { + "create_duration": "120s", + "depends_on": [ + "${google_project_service.compute_service_project}" + ] + } + time_sleep.wait_120s_for_resource_ingestion: |- + { + "create_duration": "120s", + "depends_on": [ + "${google_compute_forwarding_rule.forwarding_rule}" + ] + } + - name: example + manifest: |- + { + "application_id": "${google_apphub_application.application.application_id}", "attributes": [ { "business_owners": [ @@ -9314,6 +9857,7 @@ resources: google_project.service_project: |- { "billing_account": "000000-0000000-0000000-000000", + "deletion_policy": "DELETE", "name": "Service Project", "org_id": "123456789", "project_id": "project-1" @@ -9495,6 +10039,7 @@ resources: dependencies: google_project.service_project: |- { + "deletion_policy": "DELETE", "name": "Service Project", "org_id": "123456789", "project_id": "project-1" @@ -9521,6 +10066,7 @@ resources: dependencies: google_project.service_project_full: |- { + "deletion_policy": "DELETE", "name": "Service Project Full", "org_id": "123456789", "project_id": "project-1" @@ -9665,6 +10211,7 @@ resources: 
google_project.service_project: |- { "billing_account": "000000-0000000-0000000-000000", + "deletion_policy": "DELETE", "name": "Service Project", "org_id": "123456789", "project_id": "project-1" @@ -9819,6 +10366,7 @@ resources: google_project.service_project: |- { "billing_account": "000000-0000000-0000000-000000", + "deletion_policy": "DELETE", "name": "Service Project", "org_id": "123456789", "project_id": "project-1" @@ -9992,6 +10540,14 @@ resources: "location": "us-central1", "repository_id": "my-repository" } + - name: my-repo + manifest: |- + { + "description": "example docker repository", + "format": "DOCKER", + "location": "us", + "repository_id": "my-repository" + } - name: my-repo manifest: |- { @@ -10481,6 +11037,133 @@ resources: "secret": "${google_secret_manager_secret.example-remote-secret.id}", "secret_data": "remote-password" } + - name: upstream_repo + manifest: |- + { + "description": "example upstream repository", + "format": "DOCKER", + "location": "us-central1", + "repository_id": "example-upstream-repo" + } + - name: my-repo + manifest: |- + { + "description": "example remote common repository with docker upstream", + "format": "DOCKER", + "location": "us-central1", + "mode": "REMOTE_REPOSITORY", + "remote_repository_config": [ + { + "common_repository": [ + { + "uri": "${google_artifact_registry_repository.upstream_repo.id}" + } + ], + "description": "pull-through cache of another Artifact Registry repository" + } + ], + "repository_id": "example-common-remote" + } + references: + remote_repository_config.common_repository.uri: google_artifact_registry_repository.upstream_repo.id + - name: upstream_repo + manifest: |- + { + "description": "example upstream repository", + "format": "DOCKER", + "location": "us-central1", + "repository_id": "example-upstream-repo" + } + - name: my-repo + manifest: |- + { + "depends_on": [ + "${google_artifact_registry_repository.upstream_repo}" + ], + "description": "example remote common repository with 
docker upstream", + "format": "DOCKER", + "location": "us-central1", + "mode": "REMOTE_REPOSITORY", + "remote_repository_config": [ + { + "common_repository": [ + { + "uri": "https://us-central1-docker.pkg.dev/${data.google_project.project.project_id}/example-upstream-repo" + } + ], + "description": "pull-through cache of another Artifact Registry repository by URL" + } + ], + "repository_id": "example-common-remote" + } + - name: my-repo + manifest: |- + { + "description": "example remote custom docker repository with credentials", + "format": "DOCKER", + "location": "us-central1", + "mode": "REMOTE_REPOSITORY", + "remote_repository_config": [ + { + "common_repository": [ + { + "uri": "https://registry-1.docker.io" + } + ], + "description": "custom common docker remote with credentials", + "disable_upstream_validation": true, + "upstream_credentials": [ + { + "username_password_credentials": [ + { + "password_secret_version": "${google_secret_manager_secret_version.example-remote-secret_version.name}", + "username": "remote-username" + } + ] + } + ] + } + ], + "repository_id": "example-docker-custom-remote" + } + references: + remote_repository_config.upstream_credentials.username_password_credentials.password_secret_version: google_secret_manager_secret_version.example-remote-secret_version.name + dependencies: + google_secret_manager_secret.example-remote-secret: |- + { + "replication": [ + { + "auto": [ + {} + ] + } + ], + "secret_id": "example-secret" + } + google_secret_manager_secret_iam_member.secret-access: |- + { + "member": "serviceAccount:service-${data.google_project.project.number}@gcp-sa-artifactregistry.iam.gserviceaccount.com", + "role": "roles/secretmanager.secretAccessor", + "secret_id": "${google_secret_manager_secret.example-remote-secret.id}" + } + google_secret_manager_secret_version.example-remote-secret_version: |- + { + "secret": "${google_secret_manager_secret.example-remote-secret.id}", + "secret_data": "remote-password" + } + - name: 
my-repo + manifest: |- + { + "description": "example docker repository with vulnerability scanning config", + "format": "DOCKER", + "location": "us-central1", + "repository_id": "my-repository", + "vulnerability_scanning_config": [ + { + "enablement_config": "INHERITED" + } + ] + } argumentDocs: apt_repository.public_repository: |- - @@ -10526,6 +11209,13 @@ resources: (Optional) If true, the cleanup pipeline is prevented from deleting versions in this repository. + common_repository.uri: |- + - + (Required) + One of: + a. Artifact Registry Repository resource, e.g. projects/UPSTREAM_PROJECT_ID/locations/REGION/repositories/UPSTREAM_REPOSITORY + b. URI to the registry, e.g. "https://registry-1.docker.io" + c. URI to Artifact Registry Repository, e.g. "https://REGION-docker.pkg.dev/UPSTREAM_PROJECT_ID/UPSTREAM_REPOSITORY" condition.newer_than: |- - (Optional) @@ -10577,7 +11267,7 @@ resources: docker_repository.custom_repository: |- - (Optional) - Settings for a remote repository with a custom uri. + [Deprecated, please use commonRepository instead] Settings for a remote repository with a custom uri. Structure is documented below. docker_repository.public_repository: |- - @@ -10612,7 +11302,12 @@ resources: location: |- - (Optional) - The name of the location this repository is located in. + The name of the repository's location. In addition to specific regions, + special values for multi-region locations are asia, europe, and us. + See here, + or use the + google_artifact_registry_locations + data source for possible values. maven_config: |- - (Optional) @@ -10634,7 +11329,7 @@ resources: maven_repository.custom_repository: |- - (Optional) - Settings for a remote repository with a custom uri. + [Deprecated, please use commonRepository instead] Settings for a remote repository with a custom uri. Structure is documented below. 
maven_repository.public_repository: |- - @@ -10663,7 +11358,7 @@ resources: npm_repository.custom_repository: |- - (Optional) - Settings for a remote repository with a custom uri. + [Deprecated, please use commonRepository instead] Settings for a remote repository with a custom uri. Structure is documented below. npm_repository.public_repository: |- - @@ -10677,7 +11372,7 @@ resources: python_repository.custom_repository: |- - (Optional) - Settings for a remote repository with a custom uri. + [Deprecated, please use commonRepository instead] Settings for a remote repository with a custom uri. Structure is documented below. python_repository.public_repository: |- - @@ -10695,6 +11390,11 @@ resources: (Optional) Specific settings for an Apt remote repository. Structure is documented below. + remote_repository_config.common_repository: |- + - + (Optional) + Specific settings for an Artifact Registory remote repository. + Structure is documented below. remote_repository_config.description: |- - (Optional) @@ -10786,6 +11486,24 @@ resources: (Optional) A reference to the repository resource, for example: "projects/p1/locations/us-central1/repository/repo1". + vulnerability_scanning_config: |- + - + (Optional) + Configuration for vulnerability scanning of artifacts stored in this repository. + Structure is documented below. + vulnerability_scanning_config.enablement_config: |- + - + (Optional) + This configures whether vulnerability scanning is automatically performed for artifacts pushed to this repository. + Possible values are: INHERITED, DISABLED. + vulnerability_scanning_config.enablement_state: |- + - + (Output) + This field returns whether scanning is active for this repository. + vulnerability_scanning_config.enablement_state_reason: |- + - + (Output) + This provides an explanation for the state of scanning on this repository. 
yum_repository.public_repository: |- - (Optional) @@ -10826,8 +11544,12 @@ resources: google_artifact_registry_repository_iam_member: ': Non-authoritative. Updates the IAM policy to grant a role to a new member. Other members for the role for the repository are preserved.' google_artifact_registry_repository_iam_policy: ': Authoritative. Sets the IAM policy for the repository and replaces any existing policy already attached.' location: |- - - (Optional) The name of the location this repository is located in. - Used to find the parent resource to bind the IAM policy to. If not specified, + - (Optional) The name of the repository's location. In addition to specific regions, + special values for multi-region locations are asia, europe, and us. + See here, + or use the + google_artifact_registry_locations + data source for possible values. Used to find the parent resource to bind the IAM policy to. If not specified, the value will be parsed from the identifier of the parent resource. If no location is provided in the parent identifier and no location is specified, it is taken from the provider configuration. 
member/members: |- @@ -10908,7 +11630,7 @@ resources: "provisioned_resources_parent": "folders/519620126891", "resource_settings": [ { - "display_name": "folder-display-name", + "display_name": "{{name}}", "resource_type": "CONSUMER_FOLDER" }, { @@ -10919,7 +11641,12 @@ resources: "resource_type": "KEYRING" } ], - "violation_notifications_enabled": true + "violation_notifications_enabled": true, + "workload_options": [ + { + "kaj_enrollment_type": "KEY_ACCESS_TRANSPARENCY_OFF" + } + ] } - name: primary manifest: |- @@ -10955,6 +11682,43 @@ resources: } references: provider: google-beta + - name: primary + manifest: |- + { + "billing_account": "billingAccounts/000000-0000000-0000000-000000", + "compliance_regime": "ASSURED_WORKLOADS_FOR_PARTNERS", + "display_name": "display", + "labels": { + "label-one": "value-one" + }, + "location": "europe-west8", + "organization": "123456789", + "partner": "SOVEREIGN_CONTROLS_BY_PSN", + "partner_permissions": [ + { + "assured_workloads_monitoring": true, + "data_logs_viewer": true, + "service_access_approver": true + } + ], + "partner_services_billing_account": "billingAccounts/01BF3F-2C6DE5-30C607", + "provider": "${google-beta}", + "resource_settings": [ + { + "resource_type": "CONSUMER_FOLDER" + }, + { + "resource_type": "ENCRYPTION_KEYS_PROJECT" + }, + { + "resource_id": "ring", + "resource_type": "KEYRING" + } + ], + "violation_notifications_enabled": true + } + references: + provider: google-beta argumentDocs: billing_account: |- - @@ -10963,7 +11727,7 @@ resources: compliance_regime: |- - (Required) - Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT + Required. Immutable. 
Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS, HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS, HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS_WITH_US_SUPPORT, IRS_1075 compliance_status: |- - Output only. Count of active Violations in the Workload. @@ -11023,7 +11787,7 @@ resources: partner: |- - (Optional) - Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN + Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN, SOVEREIGN_CONTROLS_BY_CNTXT, SOVEREIGN_CONTROLS_BY_CNTXT_NO_EKM partner_permissions: |- - (Optional) @@ -11040,6 +11804,10 @@ resources: - (Optional) Optional. Allow partner to view access approval logs. + partner_services_billing_account: |- + - + (Optional) + Optional. Input only. Billing account necessary for purchasing services from Sovereign Partners. This field is required for creating SIA/PSN/CNTXT partner workloads. The caller should have 'billing.resourceAssociations.create' IAM permission on this billing-account. The format of this string is billingAccounts/AAAAAA-BBBBBB-CCCCCC. provisioned_resources_parent: |- - (Optional) @@ -11074,9 +11842,492 @@ resources: - (Optional) Optional. Indicates whether the e-mail notification for a violation is enabled for a workload. This value will be by default True, and if not present will be considered as true. 
This should only be updated via updateWorkload call. Any Changes to this field during the createWorkload call will not be honored. This will always be true while creating the workload. + workload_options: |- + - + (Optional) + Optional. Used to specify certain options for a workload during workload creation - currently only supporting KAT Optionality for Regional Controls workloads. + workload_options.kaj_enrollment_type: |- + - + (Optional) + Indicates type of KAJ enrollment for the workload. Currently, only specifiying KEY_ACCESS_TRANSPARENCY_OFF is implemented to not enroll in KAT-level KAJ enrollment for Regional Controls workloads. Possible values: KAJ_ENROLLMENT_TYPE_UNSPECIFIED, FULL_KAJ, EKM_ONLY, KEY_ACCESS_TRANSPARENCY_OFF + importStatements: [] + google_backup_dr_backup_plan: + subCategory: Backup and DR Service + description: A backup plan defines when and how to back up a resource, including the backup's schedule, retention, and location. + name: google_backup_dr_backup_plan + title: "" + examples: + - name: my-backup-plan-1 + manifest: |- + { + "backup_plan_id": "backup-plan-simple-test", + "backup_rules": [ + { + "backup_retention_days": 5, + "rule_id": "rule-1", + "standard_schedule": [ + { + "backup_window": [ + { + "end_hour_of_day": 24, + "start_hour_of_day": 0 + } + ], + "hourly_frequency": 6, + "recurrence_type": "HOURLY", + "time_zone": "UTC" + } + ] + } + ], + "backup_vault": "${google_backup_dr_backup_vault.my_backup_vault.id}", + "location": "us-central1", + "provider": "${google-beta}", + "resource_type": "compute.googleapis.com/Instance" + } + references: + backup_vault: google_backup_dr_backup_vault.my_backup_vault.id + provider: google-beta + dependencies: + google_backup_dr_backup_vault.my_backup_vault: |- + { + "backup_minimum_enforced_retention_duration": "100000s", + "backup_vault_id": "backup-vault-simple-test", + "location": "us-central1", + "provider": "${google-beta}" + } + argumentDocs: + backup_plan_id: |- + - + (Required) + 
The ID of the backup plan + backup_rules: |- + - + (Required) + The backup rules for this BackupPlan. There must be at least one BackupRule message. + Structure is documented below. + backup_rules.backup_retention_days: |- + - + (Required) + Configures the duration for which backup data will be kept. The value should be greater than or equal to minimum enforced retention of the backup vault. + backup_rules.rule_id: |- + - + (Required) + The unique ID of this BackupRule. The rule_id is unique per BackupPlan. + backup_rules.standard_schedule: |- + - + (Required) + StandardSchedule defines a schedule that runs within the confines of a defined window of days. + Structure is documented below. + backup_vault: |- + - + (Required) + Backup vault where the backups gets stored using this Backup plan. + backup_vault_service_account: |- + - + The Google Cloud Platform Service Account to be used by the BackupVault for taking backups. + backup_window.description: |- + - + (Optional) + The description allows for additional details about BackupPlan and its use cases to be provided. + backup_window.end_hour_of_day: |- + - + (Optional) + The hour of the day (1-24) when the window ends, for example, if the value of end hour of the day is 10, that means the backup window end time is 10:00. + The end hour of the day should be greater than the start + backup_window.project: |- + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + backup_window.start_hour_of_day: |- + - + (Required) + The hour of the day (0-23) when the window starts, for example, if the value of the start hour of the day is 6, that means the backup window starts at 6:00. + create: '- Default is 60 minutes.' + create_time: |- + - + When the BackupPlan was created. + delete: '- Default is 60 minutes.' 
+ id: '- an identifier for the resource with format projects/{{project}}/locations/{{location}}/backupPlans/{{backup_plan_id}}' + location: |- + - + (Required) + The location for the backup plan + name: |- + - + The name of backup plan resource created + resource_type: |- + - + (Required) + The resource type to which the BackupPlan will be applied. Examples include, "compute.googleapis.com/Instance" and "storage.googleapis.com/Bucket". + standard_schedule.backup_window: |- + - + (Optional) + A BackupWindow defines the window of the day during which backup jobs will run. Jobs are queued at the beginning of the window and will be marked as + NOT_RUN if they do not start by the end of the window. + Structure is documented below. + standard_schedule.days_of_month: |- + - + (Optional) + Specifies days of months like 1, 5, or 14 on which jobs will run. + standard_schedule.days_of_week: |- + - + (Optional) + Specifies days of week like MONDAY or TUESDAY, on which jobs will run. This is required for recurrence_type, WEEKLY and is not applicable otherwise. + Each value may be one of: DAY_OF_WEEK_UNSPECIFIED, MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY. + standard_schedule.hourly_frequency: |- + - + (Optional) + Specifies frequency for hourly backups. An hourly frequency of 2 means jobs will run every 2 hours from start time till end time defined. + This is required for recurrence_type, HOURLY and is not applicable otherwise. + standard_schedule.months: |- + - + (Optional) + Specifies values of months + Each value may be one of: MONTH_UNSPECIFIED, JANUARY, FEBRUARY, MARCH, APRIL, MAY, JUNE, JULY, AUGUST, SEPTEMBER, OCTOBER, NOVEMBER, DECEMBER. + standard_schedule.recurrence_type: |- + - + (Required) + RecurrenceType enumerates the applicable periodicity for the schedule. + Possible values are: HOURLY, DAILY, WEEKLY, MONTHLY, YEARLY. + standard_schedule.time_zone: |- + - + (Required) + The time zone to be used when interpreting the schedule. 
+ standard_schedule.week_day_of_month: |- + - + (Optional) + Specifies a week day of the month like FIRST SUNDAY or LAST MONDAY, on which jobs will run. + Structure is documented below. + update_time: |- + - + When the BackupPlan was last updated. + week_day_of_month.day_of_week: |- + - + (Required) + Specifies the day of the week. + Possible values are: DAY_OF_WEEK_UNSPECIFIED, MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY. + week_day_of_month.week_of_month: |- + - + (Required) + WeekOfMonth enumerates possible weeks in the month, e.g. the first, third, or last week of the month. + Possible values are: WEEK_OF_MONTH_UNSPECIFIED, FIRST, SECOND, THIRD, FOURTH, LAST. + importStatements: [] + google_backup_dr_backup_plan_association: + subCategory: Backup and DR Service + description: A Backup and DR BackupPlanAssociation. + name: google_backup_dr_backup_plan_association + title: "" + examples: + - name: my-backup-plan-association + manifest: |- + { + "backup_plan": "${google_backup_dr_backup_plan.bp1.name}", + "backup_plan_association_id": "my-bpa", + "location": "us-central1", + "provider": "${google-beta}", + "resource": "${google_compute_instance.myinstance.id}", + "resource_type": "compute.googleapis.com/Instance" + } + references: + backup_plan: google_backup_dr_backup_plan.bp1.name + provider: google-beta + resource: google_compute_instance.myinstance.id + dependencies: + google_backup_dr_backup_plan.bp1: |- + { + "backup_plan_id": "bp-bpa-test", + "backup_rules": [ + { + "backup_retention_days": 2, + "rule_id": "rule-1", + "standard_schedule": [ + { + "backup_window": [ + { + "end_hour_of_day": 18, + "start_hour_of_day": 12 + } + ], + "hourly_frequency": 6, + "recurrence_type": "HOURLY", + "time_zone": "UTC" + } + ] + } + ], + "backup_vault": "${google_backup_dr_backup_vault.bv1.id}", + "location": "us-central1", + "provider": "${google-beta}", + "resource_type": "compute.googleapis.com/Instance" + } + google_backup_dr_backup_vault.bv1: |- + { 
+ "backup_minimum_enforced_retention_duration": "100000s", + "backup_vault_id": "bv-bpa", + "force_delete": "true", + "location": "us-central1", + "provider": "${google-beta}" + } + google_compute_instance.myinstance: |- + { + "boot_disk": [ + { + "initialize_params": [ + { + "image": "debian-cloud/debian-11", + "labels": { + "my_label": "value" + } + } + ] + } + ], + "machine_type": "n2-standard-2", + "name": "test-instance", + "network_interface": [ + { + "access_config": [ + {} + ], + "network": "default" + } + ], + "provider": "${google-beta}", + "scratch_disk": [ + { + "interface": "NVME" + } + ], + "service_account": [ + { + "email": "${google_service_account.mySA.email}", + "scopes": [ + "cloud-platform" + ] + } + ], + "zone": "us-central1-a" + } + google_service_account.mySA: |- + { + "account_id": "my-custom", + "display_name": "Custom SA for VM Instance", + "provider": "${google-beta}" + } + argumentDocs: + backup_plan: |- + - + (Required) + The BP with which resource needs to be created + backup_plan_association_id: |- + - + (Required) + The id of backupplan association + create: '- Default is 60 minutes.' + create_time: |- + - + The time when the instance was created + data_source: |- + - + Resource name of data source which will be used as storage location for backups taken + delete: '- Default is 60 minutes.' + id: '- an identifier for the resource with format projects/{{project}}/locations/{{location}}/backupPlanAssociations/{{backup_plan_association_id}}' + last_backup_error.code: |- + - + (Output) + The status code, which should be an enum value of [google.rpc.Code] + last_backup_error.message: |- + - + (Output) + A developer-facing error message, which should be in English. 
+ last_successful_backup_consistency_time: |- + - + The point in time when the last successful backup was captured from the source + location: |- + - + (Required) + The location for the backupplan association + name: |- + - + The name of backup plan association resource created + project: |- + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + resource: |- + - + (Required) + The resource for which BPA needs to be created + resource_type: |- + - + (Required) + The resource type of workload on which backupplan is applied + rules_config_info: |- + - + Message for rules config info + Structure is documented below. + rules_config_info.last_backup_error: |- + - + (Output) + google.rpc.Status object to store the last backup error + Structure is documented below. + rules_config_info.last_backup_state: |- + - + (Output) + State of last backup taken. + rules_config_info.rule_id: |- + - + (Output) + Backup Rule id fetched from backup plan. + update_time: |- + - + The time when the instance was updated. + importStatements: [] + google_backup_dr_backup_vault: + subCategory: Backup and DR Service + description: Container to store and organize immutable and indelible backups. + name: google_backup_dr_backup_vault + title: "" + examples: + - name: backup-vault-test + manifest: |- + { + "access_restriction": "WITHIN_ORGANIZATION", + "allow_missing": "true", + "annotations": { + "annotations1": "bar1", + "annotations2": "baz1" + }, + "backup_minimum_enforced_retention_duration": "100000s", + "backup_vault_id": "backup-vault-test", + "description": "This is a second backup vault built by Terraform.", + "force_update": "true", + "ignore_backup_plan_references": "true", + "ignore_inactive_datasources": "true", + "labels": { + "bar": "baz1", + "foo": "bar1" + }, + "location": "us-central1" + } + argumentDocs: + access_restriction: |- + - + (Optional) + Access restriction for the backup vault. 
Default value is WITHIN_ORGANIZATION if not provided during creation. + Default value is WITHIN_ORGANIZATION. + Possible values are: ACCESS_RESTRICTION_UNSPECIFIED, WITHIN_PROJECT, WITHIN_ORGANIZATION, UNRESTRICTED, WITHIN_ORG_BUT_UNRESTRICTED_FOR_BA. + allow_missing: |- + - + (Optional) + Allow idempotent deletion of backup vault. The request will still succeed in case the backup vault does not exist. + annotations: |- + - + (Optional) + Optional. User annotations. See https://google.aip.dev/128#annotations + Stores small amounts of arbitrary data. + Note: This field is non-authoritative, and will only manage the annotations present in your configuration. + Please refer to the field effective_annotations for all of the annotations present on the resource. + backup_count: |- + - + Output only. The number of backups in this backup vault. + backup_minimum_enforced_retention_duration: |- + - + (Required) + Required. The default and minimum enforced retention for each backup within the backup vault. The enforced retention for each backup can be extended. + backup_vault_id: |- + - + (Required) + Required. ID of the requesting object. + create: '- Default is 20 minutes.' + create_time: |- + - + Output only. The time when the instance was created. + deletable: |- + - + Output only. Set to true when there are no backups nested under this resource. + delete: '- Default is 20 minutes.' + description: |- + - + (Optional) + Optional. The description of the BackupVault instance (2048 characters or less). + effective_annotations: |- + - + All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services. + effective_labels: |- + - + All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services. + effective_time: |- + - + (Optional) + Optional. Time after which the BackupVault resource is locked. 
+ etag: |- + - + Optional. Server specified ETag for the backup vault resource to prevent simultaneous updates from overwiting each other. + force_delete: |- + - + (Optional, Deprecated) + If set, the following restrictions against deletion of the backup vault instance can be overridden: + force_update: |- + - + (Optional) + If set, allow update to extend the minimum enforced retention for backup vault. This overrides + the restriction against conflicting retention periods. This conflict may occur when the + expiration schedule defined by the associated backup plan is shorter than the minimum + retention set by the backup vault. + id: '- an identifier for the resource with format projects/{{project}}/locations/{{location}}/backupVaults/{{backup_vault_id}}' + ignore_backup_plan_references: |- + - + (Optional) + If set, the following restrictions against deletion of the backup vault instance can be overridden: + ignore_inactive_datasources: |- + - + (Optional) + If set, the following restrictions against deletion of the backup vault instance can be overridden: + labels: |- + - + (Optional) + Optional. Resource labels to represent user provided metadata. + Note: This field is non-authoritative, and will only manage the labels present in your configuration. + Please refer to the field effective_labels for all of the labels present on the resource. + location: |- + - + (Required) + The GCP location for the backup vault. + name: |- + - + Output only. Identifier. The resource name. + project: |- + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + service_account: |- + - + Output only. Service account used by the BackupVault Service for this BackupVault. The user should grant this account permissions in their workload project to enable the service to run backups and restores there. + state: |- + - + Output only. The BackupVault resource instance state. 
+ Possible values: + STATE_UNSPECIFIED + CREATING + ACTIVE + DELETING + ERROR + terraform_labels: |- + - + The combination of labels configured directly on the resource + and default labels configured on the provider. + total_stored_bytes: |- + - + Output only. Total size of the storage used by all backup resources. + uid: |- + - + Output only. Output only Immutable after resource creation until resource deletion. + update: '- Default is 20 minutes.' + update_time: |- + - + Output only. The time when the instance was updated. importStatements: [] google_backup_dr_management_server: - subCategory: Backup and DR + subCategory: Backup and DR Service description: A Backup and DR Management Server (Also referred as Management Console) name: google_backup_dr_management_server title: "" @@ -11089,17 +12340,10 @@ resources: ], "location": "us-central1", "name": "ms-console", - "networks": [ - { - "network": "${google_compute_network.default.id}", - "peering_mode": "PRIVATE_SERVICE_ACCESS" - } - ], "provider": "${google-beta}", "type": "BACKUP_RESTORE" } references: - networks.network: google_compute_network.default.id provider: google-beta dependencies: google_compute_global_address.private_ip_address: |- @@ -11151,7 +12395,7 @@ resources: The name of management server (management console) networks: |- - - (Required) + (Optional) Network details to create management server (management console). Structure is documented below. networks.network: |- @@ -11164,18 +12408,18 @@ resources: Type of Network peeringMode Default value is PRIVATE_SERVICE_ACCESS. Possible values are: PRIVATE_SERVICE_ACCESS. - networks.project: |- + oauth2_client_id: |- + - + The oauth2ClientId of management console. + project: |- - (Optional) The ID of the project in which the resource belongs. If it is not provided, the provider project is used. - networks.type: |- + type: |- - (Optional) The type of management server (management console). Default value is BACKUP_RESTORE. 
Possible values are: BACKUP_RESTORE. - oauth2_client_id: |- - - - The oauth2ClientId of management console. importStatements: [] google_beyondcorp_app_connection: subCategory: BeyondCorp @@ -11544,6 +12788,87 @@ resources: - Server-defined URI for this resource. importStatements: [] + google_beyondcorp_security_gateway: + subCategory: BeyondCorp + description: Deployment of Security Gateway. + name: google_beyondcorp_security_gateway + title: "" + examples: + - name: example + manifest: |- + { + "display_name": "My Security Gateway resource", + "hubs": [ + { + "region": "us-central1" + } + ], + "location": "global", + "security_gateway_id": "default" + } + argumentDocs: + /a-z-/: . + create: '- Default is 20 minutes.' + create_time: |- + - + Output only. Timestamp when the resource was created. + delete: '- Default is 20 minutes.' + display_name: |- + - + (Optional) + Optional. An arbitrary user-provided name for the SecurityGateway. + Cannot exceed 64 characters. + external_ips: |- + - + Output only. IP addresses that will be used for establishing + connection to the endpoints. + hubs: |- + - + (Optional) + Optional. Map of Hubs that represents regional data path deployment with GCP region + as a key. + Structure is documented below. + hubs.internet_gateway: |- + - + (Optional) + Internet Gateway configuration. + Structure is documented below. + hubs.region: '- (Required) The identifier for this object. Format specified above.' + id: '- an identifier for the resource with format projects/{{project}}/locations/{{location}}/securityGateways/{{security_gateway_id}}' + internet_gateway.assigned_ips: |- + - + (Output) + Output only. List of IP addresses assigned to the Cloud NAT. + location: |- + - + (Required) + Resource ID segment making up resource name. It identifies the resource within its parent collection as described in https://google.aip.dev/122. + name: |- + - + Identifier. Name of the resource. 
+ project: |- + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + security_gateway_id: |- + - + (Required) + Optional. User-settable SecurityGateway resource ID. + state: |- + - + Output only. The operational state of the SecurityGateway. + Possible values: + STATE_UNSPECIFIED + CREATING + UPDATING + DELETING + RUNNING + DOWN + ERROR + update: '- Default is 20 minutes.' + update_time: |- + - + Output only. Timestamp when the resource was last modified. + importStatements: [] google_biglake_catalog: subCategory: Biglake description: Catalogs are top-level containers for Databases and Tables. @@ -11865,6 +13190,21 @@ resources: "display_name": "my_data_exchange", "location": "US" } + - name: data_exchange + manifest: |- + { + "data_exchange_id": "dcr_data_exchange", + "description": "example dcr data exchange", + "display_name": "dcr_data_exchange", + "location": "US", + "sharing_environment_config": [ + { + "dcr_exchange_config": [ + {} + ] + } + ] + } argumentDocs: create: '- Default is 20 minutes.' data_exchange_id: |- @@ -11907,6 +13247,20 @@ resources: project: |- - (Optional) The ID of the project in which the resource belongs. If it is not provided, the provider project is used. + sharing_environment_config: |- + - + (Optional) + Configurable data sharing environment option for a data exchange. + This field is required for data clean room exchanges. + Structure is documented below. + sharing_environment_config.dcr_exchange_config: |- + - + (Optional) + Data Clean Room (DCR), used for privacy-safe and secured data sharing. + sharing_environment_config.default_exchange_config: |- + - + (Optional) + Default Analytics Hub data exchange, used for secured data sharing. update: '- Default is 20 minutes.' 
importStatements: [] google_bigquery_analytics_hub_data_exchange_iam_policy: @@ -12029,57 +13383,77 @@ resources: "friendly_name": "my_listing", "location": "US" } + - name: listing + manifest: |- + { + "bigquery_dataset": [ + { + "dataset": "${google_bigquery_dataset.listing.id}", + "selected_resources": [ + { + "table": "${google_bigquery_table.listing.id}" + } + ] + } + ], + "data_exchange_id": "${google_bigquery_analytics_hub_data_exchange.listing.data_exchange_id}", + "description": "example dcr data exchange", + "display_name": "dcr_listing", + "listing_id": "dcr_listing", + "location": "US", + "restricted_export_config": [ + { + "enabled": true + } + ] + } + references: + bigquery_dataset.dataset: google_bigquery_dataset.listing.id + bigquery_dataset.selected_resources.table: google_bigquery_table.listing.id + data_exchange_id: google_bigquery_analytics_hub_data_exchange.listing.data_exchange_id + dependencies: + google_bigquery_analytics_hub_data_exchange.listing: |- + { + "data_exchange_id": "dcr_data_exchange", + "description": "example dcr data exchange", + "display_name": "dcr_data_exchange", + "location": "US", + "sharing_environment_config": [ + { + "dcr_exchange_config": [ + {} + ] + } + ] + } + google_bigquery_dataset.listing: |- + { + "dataset_id": "dcr_listing", + "description": "example dcr data exchange", + "friendly_name": "dcr_listing", + "location": "US" + } + google_bigquery_table.listing: |- + { + "dataset_id": "${google_bigquery_dataset.listing.dataset_id}", + "deletion_protection": false, + "schema": "[\n {\n \"name\": \"name\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"name\": \"post_abbr\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"name\": \"date\",\n \"type\": \"DATE\",\n \"mode\": \"NULLABLE\"\n }\n]\n", + "table_id": "dcr_listing" + } argumentDocs: bigquery_dataset: |- - (Required) Shared dataset i.e. BigQuery dataset source. Structure is documented below. 
- bigquery_dataset.categories: |- - - - (Optional) - Categories of the listing. Up to two categories are allowed. - bigquery_dataset.data_provider: |- - - - (Optional) - Details of the data provider who owns the source data. - Structure is documented below. bigquery_dataset.dataset: |- - (Required) Resource name of the dataset source for this listing. e.g. projects/myproject/datasets/123 - bigquery_dataset.description: |- - - - (Optional) - Short description of the listing. The description must not contain Unicode non-characters and C0 and C1 control codes except tabs (HT), new lines (LF), carriage returns (CR), and page breaks (FF). - bigquery_dataset.documentation: |- - - - (Optional) - Documentation describing the listing. - bigquery_dataset.icon: |- - - - (Optional) - Base64 encoded image representing the listing. - bigquery_dataset.primary_contact: |- + bigquery_dataset.selected_resources: |- - (Optional) - Email or URL of the primary point of contact of the listing. - bigquery_dataset.project: |- - - (Optional) The ID of the project in which the resource belongs. - If it is not provided, the provider project is used. - bigquery_dataset.publisher: |- - - - (Optional) - Details of the publisher who owns the listing and who can share the source data. - Structure is documented below. - bigquery_dataset.request_access: |- - - - (Optional) - Email or URL of the request access of the listing. Subscribers can use this reference to request access. - bigquery_dataset.restricted_export_config: |- - - - (Optional) - If set, restricted export configuration will be propagated and enforced on the linked dataset. + Resource in this dataset that is selectively shared. This field is required for data clean room exchanges. Structure is documented below. create: '- Default is 20 minutes.' data_exchange_id: |- @@ -12123,10 +13497,60 @@ resources: - (Optional) If true, enable restricted export. 
+ restricted_export_config.restrict_direct_table_access: |- + - + (Output) + If true, restrict direct table access(read api/tabledata.list) on linked table. restricted_export_config.restrict_query_result: |- - (Optional) If true, restrict export of query result derived from restricted linked dataset table. + selected_resources.categories: |- + - + (Optional) + Categories of the listing. Up to two categories are allowed. + selected_resources.data_provider: |- + - + (Optional) + Details of the data provider who owns the source data. + Structure is documented below. + selected_resources.description: |- + - + (Optional) + Short description of the listing. The description must not contain Unicode non-characters and C0 and C1 control codes except tabs (HT), new lines (LF), carriage returns (CR), and page breaks (FF). + selected_resources.documentation: |- + - + (Optional) + Documentation describing the listing. + selected_resources.icon: |- + - + (Optional) + Base64 encoded image representing the listing. + selected_resources.primary_contact: |- + - + (Optional) + Email or URL of the primary point of contact of the listing. + selected_resources.project: |- + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + selected_resources.publisher: |- + - + (Optional) + Details of the publisher who owns the listing and who can share the source data. + Structure is documented below. + selected_resources.request_access: |- + - + (Optional) + Email or URL of the request access of the listing. Subscribers can use this reference to request access. + selected_resources.restricted_export_config: |- + - + (Optional) + If set, restricted export configuration will be propagated and enforced on the linked dataset. + Structure is documented below. 
+ selected_resources.table: |- + - + (Optional) + Format: For table: projects/{projectId}/datasets/{datasetId}/tables/{tableId} Example:"projects/test_project/datasets/test_dataset/tables/test_table" update: '- Default is 20 minutes.' importStatements: [] google_bigquery_analytics_hub_listing_iam_policy: @@ -12321,7 +13745,7 @@ resources: google_sql_database_instance.instance: |- { "database_version": "POSTGRES_11", - "deletion_protection": "true", + "deletion_protection": true, "name": "my-database-instance", "region": "us-central1", "settings": [ @@ -12357,7 +13781,7 @@ resources: google_sql_database_instance.instance: |- { "database_version": "POSTGRES_11", - "deletion_protection": "true", + "deletion_protection": true, "name": "my-database-instance", "region": "us-central1", "settings": [ @@ -12418,17 +13842,17 @@ resources: "region": "us-central1" } - name: bq-connection-cmek - manifest: "{\n \"cloud_sql\": [\n {\n \"credential\": [\n {\n \"password\": \"${google_sql_user.user.password}\",\n \"username\": \"${google_sql_user.user.name}\"\n }\n ],\n \"database\": \"${google_sql_database.db.name}\",\n \"instance_id\": \"${google_sql_database_instance.instance.connection_name}\",\n \"type\": \"POSTGRES\"\n }\n ],\n \"description\": \"a riveting description\",\n \"friendly_name\": \"\U0001F44B\",\n \"kms_key_name\": \"projects/project/locations/us-central1/keyRings/us-central1/cryptoKeys/bq-key\",\n \"location\": \"US\"\n}" + manifest: "{\n \"cloud_sql\": [\n {\n \"credential\": [\n {\n \"password\": \"${google_sql_user.user.password}\",\n \"username\": \"${google_sql_user.user.name}\"\n }\n ],\n \"database\": \"${google_sql_database.db.name}\",\n \"instance_id\": \"${google_sql_database_instance.instance.connection_name}\",\n \"type\": \"POSTGRES\"\n }\n ],\n \"depends_on\": [\n \"${google_kms_crypto_key_iam_member.key_sa_user}\"\n ],\n \"description\": \"a riveting description\",\n \"friendly_name\": \"\U0001F44B\",\n \"kms_key_name\": 
\"projects/project/locations/us-central1/keyRings/us-central1/cryptoKeys/bq-key\",\n \"location\": \"US\"\n}" references: cloud_sql.credential.password: google_sql_user.user.password cloud_sql.credential.username: google_sql_user.user.name cloud_sql.database: google_sql_database.db.name cloud_sql.instance_id: google_sql_database_instance.instance.connection_name dependencies: - google_project_iam_member.key_sa_user: |- + google_kms_crypto_key_iam_member.key_sa_user: |- { + "crypto_key_id": "projects/project/locations/us-central1/keyRings/us-central1/cryptoKeys/bq-key", "member": "serviceAccount:${data.google_bigquery_default_service_account.bq_sa.email}", - "project": "${data.google_project.project.project_id}", "role": "roles/cloudkms.cryptoKeyEncrypterDecrypter" } google_sql_database.db: |- @@ -12439,7 +13863,7 @@ resources: google_sql_database_instance.instance: |- { "database_version": "POSTGRES_11", - "deletion_protection": "true", + "deletion_protection": true, "name": "my-database-instance", "region": "us-central1", "settings": [ @@ -12743,6 +14167,58 @@ resources: "project": "${data.google_project.project.project_id}", "role": "roles/iam.serviceAccountTokenCreator" } + - name: query_config_cmek + manifest: |- + { + "data_source_id": "scheduled_query", + "depends_on": [ + "${google_project_iam_member.permissions}" + ], + "destination_dataset_id": "${google_bigquery_dataset.my_dataset.dataset_id}", + "display_name": "", + "encryption_configuration": [ + { + "kms_key_name": "${google_kms_crypto_key.crypto_key.id}" + } + ], + "location": "asia-northeast1", + "params": { + "destination_table_name_template": "my_table", + "query": "SELECT name FROM tabl WHERE x = 'y'", + "write_disposition": "WRITE_APPEND" + }, + "schedule": "first sunday of quarter 00:00" + } + references: + destination_dataset_id: google_bigquery_dataset.my_dataset.dataset_id + encryption_configuration.kms_key_name: google_kms_crypto_key.crypto_key.id + dependencies: + 
google_bigquery_dataset.my_dataset: |- + { + "dataset_id": "example_dataset", + "depends_on": [ + "${google_project_iam_member.permissions}" + ], + "description": "bar", + "friendly_name": "foo", + "location": "asia-northeast1" + } + google_kms_crypto_key.crypto_key: |- + { + "key_ring": "${google_kms_key_ring.key_ring.id}", + "name": "example-key" + } + google_kms_key_ring.key_ring: |- + { + "location": "us", + "name": "example-keyring" + } + google_project_iam_member.permissions: |- + { + "member": "serviceAccount:service-${data.google_project.project.number}@gcp-sa-bigquerydatatransfer.iam.gserviceaccount.com", + "project": "${data.google_project.project.project_id}", + "role": "roles/iam.serviceAccountTokenCreator" + } - name: salesforce_config manifest: |- { @@ -12754,9 +14230,7 @@ resources: "assets": "[\"asset-a\",\"asset-b\"]", "connector.authentication.oauth.clientId": "client-id", "connector.authentication.oauth.clientSecret": "client-secret", - "connector.authentication.password": "password", - "connector.authentication.securityToken": "security-token", - "connector.authentication.username": "username" + "connector.authentication.oauth.myDomain": "MyDomainName" }, "schedule": "first sunday of quarter 00:00" } @@ -12806,6 +14280,15 @@ resources: - (Required) If true, email notifications will be sent on transfer run failures. + encryption_configuration: |- + - + (Optional) + Represents the encryption configuration for a transfer. + Structure is documented below. + encryption_configuration.kms_key_name: |- + - + (Required) + The name of the KMS key used for encrypting BigQuery data. 
id: '- an identifier for the resource with format {{name}}' location: |- - @@ -13284,12 +14767,37 @@ resources: "friendly_name": "test", "location": "aws-us-east-1" } + - name: dataset + manifest: |- + { + "dataset_id": "example_dataset", + "description": "This is a test description", + "external_catalog_dataset_options": [ + { + "default_storage_location_uri": "gs://test_dataset/tables", + "parameters": { + "dataset_owner": "test_dataset_owner" + } + } + ], + "friendly_name": "test", + "location": "US", + "provider": "${google-beta}" + } + references: + provider: google-beta argumentDocs: access: |- - (Optional) An array of objects that define dataset access for one or more entities. Structure is documented below. + access.condition: |- + - + (Optional) + Condition for the binding. If CEL expression in this field is true, this + access binding will be considered. + Structure is documented below. access.dataset: |- - (Optional) @@ -13345,6 +14853,25 @@ resources: needs to be granted again via an update operation. Structure is documented below. allAuthenticatedUsers: ': All authenticated BigQuery users.' + condition.description: |- + - + (Optional) + Description of the expression. This is a longer text which describes the expression, + e.g. when hovered over it in a UI. + condition.expression: |- + - + (Required) + Textual representation of an expression in Common Expression Language syntax. + condition.location: |- + - + (Optional) + String indicating the location of the expression for error reporting, e.g. a file + name and a position in the file. + condition.title: |- + - + (Optional) + Title for the expression, i.e. a short string describing its purpose. + This can be used e.g. in UIs which allow to enter the expression. create: '- Default is 20 minutes.' creation_time: |- - @@ -13440,6 +14967,22 @@ resources: etag: |- - A hash of the resource. 
+ external_catalog_dataset_options: |- + - + (Optional, Beta) + Options defining open source compatible datasets living in the BigQuery catalog. Contains + metadata of open source database, schema or namespace represented by the current dataset. + Structure is documented below. + external_catalog_dataset_options.default_storage_location_uri: |- + - + (Optional, Beta) + The storage location URI for all tables in the dataset. Equivalent to hive metastore's + database locationUri. Maximum length of 1024 characters. + external_catalog_dataset_options.parameters: |- + - + (Optional, Beta) + A map of key value pairs defining the parameters and properties of the open source schema. + Maximum size of 2Mib. external_dataset_reference: |- - (Optional) @@ -13685,6 +15228,31 @@ resources: } argumentDocs: allAuthenticatedUsers: ': All authenticated BigQuery users.' + condition: |- + - + (Optional) + Condition for the binding. If CEL expression in this field is true, this + access binding will be considered. + Structure is documented below. + condition.description: |- + - + (Optional) + Description of the expression. This is a longer text which describes the expression, + e.g. when hovered over it in a UI. + condition.expression: |- + - + (Required) + Textual representation of an expression in Common Expression Language syntax. + condition.location: |- + - + (Optional) + String indicating the location of the expression for error reporting, e.g. a file + name and a position in the file. + condition.title: |- + - + (Optional) + Title for the expression, i.e. a short string describing its purpose. + This can be used e.g. in UIs which allow to enter the expression. create: '- Default is 20 minutes.' 
dataset: |- - @@ -14118,7 +15686,7 @@ resources: { "destination_encryption_configuration": [ { - "kms_key_name": "${google_kms_crypto_key.crypto_key.id}" + "kms_key_name": "example-key" } ], "destination_table": [ @@ -14143,12 +15711,11 @@ resources: } ], "depends_on": [ - "google_project_iam_member.encrypt_role" + "google_kms_crypto_key_iam_member.encrypt_role" ], "job_id": "job_copy" } references: - copy.destination_encryption_configuration.kms_key_name: google_kms_crypto_key.crypto_key.id copy.destination_table.dataset_id: google_bigquery_table.dest.dataset_id copy.destination_table.project_id: google_bigquery_table.dest.project copy.destination_table.table_id: google_bigquery_table.dest.table_id @@ -14176,11 +15743,11 @@ resources: "dataset_id": "${google_bigquery_dataset.dest.dataset_id}", "deletion_protection": false, "depends_on": [ - "google_project_iam_member.encrypt_role" + "google_kms_crypto_key_iam_member.encrypt_role" ], "encryption_configuration": [ { - "kms_key_name": "${google_kms_crypto_key.crypto_key.id}" + "kms_key_name": "example-key" } ], "schema": "[\n {\n \"name\": \"name\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"name\": \"post_abbr\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"name\": \"date\",\n \"type\": \"DATE\",\n \"mode\": \"NULLABLE\"\n }\n]\n", @@ -14194,20 +15761,10 @@ resources: "schema": "[\n {\n \"name\": \"name\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"name\": \"post_abbr\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"name\": \"date\",\n \"type\": \"DATE\",\n \"mode\": \"NULLABLE\"\n }\n]\n", "table_id": "job_copy_${count.index}_table" } - google_kms_crypto_key.crypto_key: |- - { - "key_ring": "${google_kms_key_ring.key_ring.id}", - "name": "example-key" - } - google_kms_key_ring.key_ring: |- - { - "location": "global", - "name": "example-keyring" - } - google_project_iam_member.encrypt_role: |- + google_kms_crypto_key_iam_member.encrypt_role: |- { 
+ "crypto_key_id": "example-key", "member": "serviceAccount:bq-${data.google_project.project.number}@bigquery-encryption.iam.gserviceaccount.com", - "project": "${data.google_project.project.project_id}", "role": "roles/cloudkms.cryptoKeyEncrypterDecrypter" } - name: job @@ -14857,11 +16414,6 @@ resources: (Optional) The geographic location where the transfer config should reside. Examples: US, EU, asia-northeast1. The default value is US. - multi_region_auxiliary: |- - - - (Optional, Deprecated) - Applicable only for reservations located within one of the BigQuery multi-regions (US or EU). - If set to true, this reservation is placed in the organization's secondary region which is designated for disaster recovery purposes. If false, this reservation is placed in the organization's default region. name: |- - (Required) @@ -15438,51 +16990,58 @@ resources: field type with RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API. - allow_resource_tags_on_deletion: |- - - (Optional) If set to true, it allows table - deletion when there are still resource tags attached. The default value is - false. avro_options.use_avro_logical_types: |- - (Optional) - If is set to true, indicates whether + - (Optional) If is set to true, indicates whether to interpret logical types as the corresponding BigQuery data type (for example, TIMESTAMP), instead of using the raw type (for example, INTEGER). - bigtable_options.column_family: (Optional) - A list of column families to expose in the table schema along with their types. This list restricts the column families that can be referenced in queries and specifies their value types. You can use this list to do type conversions - see the 'type' field for more details. If you leave this list empty, all column families are present in the table schema and their values are read as BYTES. 
During a query only the column families referenced in that query are read from Bigtable. Structure is documented below. - bigtable_options.column_family.column: (Optional) - A List of columns that should be exposed as individual fields as opposed to a list of (column name, value) pairs. All columns whose qualifier matches a qualifier in this list can be accessed as Other columns can be accessed as a list through column field. Structure is documented below. - bigtable_options.column_family.column.encoding: '(Optional) - The encoding of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. ''encoding'' can also be set at the column family level. However, the setting at this level takes precedence if ''encoding'' is set at both levels.' - bigtable_options.column_family.column.field_name: (Optional) - If the qualifier is not a valid BigQuery field identifier i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as the column field name and is used as field name in queries. - bigtable_options.column_family.column.only_read_latest: (Optional) - If this is set, only the latest version of value in this column are exposed. 'onlyReadLatest' can also be set at the column family level. However, the setting at this level takes precedence if 'onlyReadLatest' is set at both levels. - bigtable_options.column_family.column.qualifier_encoded: (Optional) - Qualifier of the column. Columns in the parent column family that has this exact qualifier are exposed as . field. If the qualifier is valid UTF-8 string, it can be specified in the qualifierString field. Otherwise, a base-64 encoded value must be set to qualifierEncoded. The column field name is the same as the column qualifier. However, if the qualifier is not a valid BigQuery field identifier i.e. 
does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as fieldName. - bigtable_options.column_family.column.qualifier_string: (Optional) - Qualifier string. - bigtable_options.column_family.column.type: '(Optional) - The type to convert the value in cells of this column. The values are expected to be encoded using HBase Bytes.toBytes function when using the BINARY encoding value. Following BigQuery types are allowed (case-sensitive): "BYTES", "STRING", "INTEGER", "FLOAT", "BOOLEAN", "JSON", Default type is "BYTES". ''type'' can also be set at the column family level. However, the setting at this level takes precedence if ''type'' is set at both levels.' - bigtable_options.column_family.encoding: '(Optional) - The encoding of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. This can be overridden for a specific column by listing that column in ''columns'' and specifying an encoding for it.' - bigtable_options.column_family.family_id: (Optional) - Identifier of the column family. - bigtable_options.column_family.only_read_latest: (Optional) - If this is set only the latest version of value are exposed for all columns in this column family. This can be overridden for a specific column by listing that column in 'columns' and specifying a different setting for that column. - bigtable_options.column_family.type: '(Optional) - The type to convert the value in cells of this column family. The values are expected to be encoded using HBase Bytes.toBytes function when using the BINARY encoding value. Following BigQuery types are allowed (case-sensitive): "BYTES", "STRING", "INTEGER", "FLOAT", "BOOLEAN", "JSON". Default type is BYTES. This can be overridden for a specific column by listing that column in ''columns'' and specifying a type for it.' 
- bigtable_options.ignore_unspecified_column_families: (Optional) - If field is true, then the column families that are not specified in columnFamilies list are not exposed in the table schema. Otherwise, they are read with BYTES type values. The default value is false. - bigtable_options.output_column_families_as_json: (Optional) - If field is true, then each column family will be read as a single JSON column. Otherwise they are read as a repeated cell structure containing timestamp/value tuples. The default value is false. - bigtable_options.read_rowkey_as_string: (Optional) - If field is true, then the rowkey column families will be read and converted to string. Otherwise they are read with BYTES type values and users need to manually cast them with CAST if necessary. The default value is false. + biglake_configuration: '- (Optional) Specifies the configuration of a BigLake managed table. Structure is documented below' + biglake_configuration.connection_id: |- + - (Required) The connection specifying the credentials to be used to + read and write to external storage, such as Cloud Storage. The connection_id can + have the form ".." or + projects//locations//connections/". + biglake_configuration.file_format: '- (Required) The file format the table data is stored in.' + biglake_configuration.storage_uri: |- + - (Required) The fully qualified location prefix of the external folder where table data + is stored. The '*' wildcard character is not allowed. The URI should be in the format "gs://bucket/path_to_table/" + biglake_configuration.table_format: '- (Required) The table format the metadata only snapshots are stored in.' + bigtable_options.column_family: '- (Optional) A list of column families to expose in the table schema along with their types. This list restricts the column families that can be referenced in queries and specifies their value types. You can use this list to do type conversions - see the ''type'' field for more details. 
If you leave this list empty, all column families are present in the table schema and their values are read as BYTES. During a query only the column families referenced in that query are read from Bigtable. Structure is documented below.' + bigtable_options.column_family.column: '- (Optional) A List of columns that should be exposed as individual fields as opposed to a list of (column name, value) pairs. All columns whose qualifier matches a qualifier in this list can be accessed as Other columns can be accessed as a list through column field. Structure is documented below.' + bigtable_options.column_family.column.encoding: '- (Optional) The encoding of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. ''encoding'' can also be set at the column family level. However, the setting at this level takes precedence if ''encoding'' is set at both levels.' + bigtable_options.column_family.column.field_name: '- (Optional) If the qualifier is not a valid BigQuery field identifier i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as the column field name and is used as field name in queries.' + bigtable_options.column_family.column.only_read_latest: '- (Optional) If this is set, only the latest version of value in this column are exposed. ''onlyReadLatest'' can also be set at the column family level. However, the setting at this level takes precedence if ''onlyReadLatest'' is set at both levels.' + bigtable_options.column_family.column.qualifier_encoded: '- (Optional) Qualifier of the column. Columns in the parent column family that has this exact qualifier are exposed as . field. If the qualifier is valid UTF-8 string, it can be specified in the qualifierString field. Otherwise, a base-64 encoded value must be set to qualifierEncoded. 
The column field name is the same as the column qualifier. However, if the qualifier is not a valid BigQuery field identifier i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as fieldName.' + bigtable_options.column_family.column.qualifier_string: '- (Optional) Qualifier string.' + bigtable_options.column_family.column.type: '- (Optional) The type to convert the value in cells of this column. The values are expected to be encoded using HBase Bytes.toBytes function when using the BINARY encoding value. Following BigQuery types are allowed (case-sensitive): "BYTES", "STRING", "INTEGER", "FLOAT", "BOOLEAN", "JSON", Default type is "BYTES". ''type'' can also be set at the column family level. However, the setting at this level takes precedence if ''type'' is set at both levels.' + bigtable_options.column_family.encoding: '- (Optional) The encoding of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. This can be overridden for a specific column by listing that column in ''columns'' and specifying an encoding for it.' + bigtable_options.column_family.family_id: '- (Optional) Identifier of the column family.' + bigtable_options.column_family.only_read_latest: '- (Optional) If this is set only the latest version of value are exposed for all columns in this column family. This can be overridden for a specific column by listing that column in ''columns'' and specifying a different setting for that column.' + bigtable_options.column_family.type: '- (Optional) The type to convert the value in cells of this column family. The values are expected to be encoded using HBase Bytes.toBytes function when using the BINARY encoding value. Following BigQuery types are allowed (case-sensitive): "BYTES", "STRING", "INTEGER", "FLOAT", "BOOLEAN", "JSON". Default type is BYTES. 
This can be overridden for a specific column by listing that column in ''columns'' and specifying a type for it.' + bigtable_options.ignore_unspecified_column_families: '- (Optional) If field is true, then the column families that are not specified in columnFamilies list are not exposed in the table schema. Otherwise, they are read with BYTES type values. The default value is false.' + bigtable_options.output_column_families_as_json: '- (Optional) If field is true, then each column family will be read as a single JSON column. Otherwise they are read as a repeated cell structure containing timestamp/value tuples. The default value is false.' + bigtable_options.read_rowkey_as_string: '- (Optional) If field is true, then the rowkey column families will be read and converted to string. Otherwise they are read with BYTES type values and users need to manually cast them with CAST if necessary. The default value is false.' clustering: |- - (Optional) Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order. column_references.referenced_column: |- - : (Required) The column in the primary key that are + - (Required) The column in the primary key that are referenced by the referencingColumn - column_references.referencing_column: ': (Required) The column that composes the foreign key.' + column_references.referencing_column: '- (Required) The column that composes the foreign key.' creation_time: '- The time when this table was created, in milliseconds since the epoch.' csv_options.allow_jagged_rows: |- - (Optional) - Indicates if BigQuery should accept rows + - (Optional) Indicates if BigQuery should accept rows that are missing trailing optional columns. csv_options.allow_quoted_newlines: |- - (Optional) - Indicates if BigQuery should allow + - (Optional) Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false. 
csv_options.encoding: |- - (Optional) - The character encoding of the data. The supported + - (Optional) The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. - csv_options.field_delimiter: (Optional) - The separator for fields in a CSV file. + csv_options.field_delimiter: '- (Optional) The separator for fields in a CSV file.' csv_options.quote: |- - (Required) - The value that is used to quote data sections in a + - (Required) The value that is used to quote data sections in a CSV file. If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allow_quoted_newlines property to true. @@ -15490,7 +17049,7 @@ resources: limitations with Terraform default values, this value is required to be explicitly set. csv_options.skip_leading_rows: |- - (Optional) - The number of rows at the top of a CSV + - (Optional) The number of rows at the top of a CSV file that BigQuery will skip when reading the data. dataset_id: |- - (Required) The dataset ID to create the table in. @@ -15520,30 +17079,74 @@ resources: milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed. + external_catalog_table_options: |- + - (Optional, [Beta] + (https://terraform.io/docs/providers/google/guides/provider_versions.html)) + Options defining open source compatible table. + Structure is documented below. + external_catalog_table_options.connection_id: |- + - (Optional, [Beta] + (https://terraform.io/docs/providers/google/guides/provider_versions.html)) + The connection specifying the credentials to be used to read external storage, + such as Azure Blob, Cloud Storage, or S3. The connection is needed to read the + open source table from BigQuery Engine. The connection_id can have the form + .. or projects//locations//connections/. 
+ external_catalog_table_options.parameters: |- + - (Optional, [Beta] + (https://terraform.io/docs/providers/google/guides/provider_versions.html)) + A map of key value pairs defining the parameters and properties of the open + source table. Corresponds with hive meta store table parameters. Maximum size + of 4Mib. + external_catalog_table_options.storage_descriptor: |- + - (Optional, [Beta] + (https://terraform.io/docs/providers/google/guides/provider_versions.html)) + A storage descriptor containing information about the physical storage of this + table. + Structure is documented below. + external_catalog_table_options.storage_descriptor.input_format: |- + - (Optional, [Beta] + (https://terraform.io/docs/providers/google/guides/provider_versions.html)) + Specifies the fully qualified class name of the InputFormat (e.g. "org.apache.hadoop.hive.ql.io.orc.OrcInputFormat"). + The maximum length is 128 characters. + external_catalog_table_options.storage_descriptor.location_uri: |- + - (Optional, [Beta] + (https://terraform.io/docs/providers/google/guides/provider_versions.html)) + The physical location of the table (e.g. 'gs://spark-dataproc-data/pangea-data/case_sensitive/' or 'gs://spark-dataproc-data/pangea-data/*'). + The maximum length is 2056 bytes. + external_catalog_table_options.storage_descriptor.output_format: |- + - (Optional, [Beta] + (https://terraform.io/docs/providers/google/guides/provider_versions.html)) + Specifies the fully qualified class name of the OutputFormat (e.g. "org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat"). + The maximum length is 128 characters. + external_catalog_table_options.storage_descriptor.serde_info: |- + - (Optional, [Beta] + (https://terraform.io/docs/providers/google/guides/provider_versions.html)) + Serializer and deserializer information. + Structure is documented below. external_data_configuration: |- - (Optional) Describes the data format, location, and other properties of a table stored outside of BigQuery. 
By defining these properties, the data source can then be queried as if it were a standard BigQuery table. Structure is documented below. external_data_configuration.autodetect: |- - - (Required) - Let BigQuery try to autodetect the schema + - (Required) Let BigQuery try to autodetect the schema and format of the table. external_data_configuration.avro_options: |- - (Optional) - Additional options if source_format is set to + - (Optional) Additional options if source_format is set to "AVRO". Structure is documented below. external_data_configuration.bigtable_options: |- - (Optional) - Additional properties to set if + - (Optional) Additional properties to set if source_format is set to "BIGTABLE". Structure is documented below. external_data_configuration.compression: |- - (Optional) - The compression type of the data source. + - (Optional) The compression type of the data source. Valid values are "NONE" or "GZIP". external_data_configuration.connection_id: |- - (Optional) - The connection specifying the credentials to be used to read + - (Optional) The connection specifying the credentials to be used to read external storage, such as Azure Blob, Cloud Storage, or S3. The connection_id can have the form {{project}}.{{location}}.{{connection_id}} or projects/{{project}}/locations/{{location}}/connections/{{connection_id}}. external_data_configuration.csv_options: |- - (Optional) - Additional properties to set if + - (Optional) Additional properties to set if source_format is set to "CSV". Structure is documented below. external_data_configuration.external_data_configuration.connection_id: |- , the @@ -15554,32 +17157,32 @@ resources: By default source URIs are expanded against the underlying storage. Other options include specifying manifest files. Only applicable to object storage systems. Docs external_data_configuration.google_sheets_options: |- - (Optional) - Additional options if + - (Optional) Additional options if source_format is set to "GOOGLE_SHEETS". 
Structure is documented below. external_data_configuration.hive_partitioning_options: |- - (Optional) - When set, configures hive partitioning + - (Optional) When set, configures hive partitioning support. Not all storage formats support hive partitioning -- requesting hive partitioning on an unsupported format will lead to an error, as will providing an invalid specification. Structure is documented below. external_data_configuration.ignore_unknown_values: |- - (Optional) - Indicates if BigQuery should + - (Optional) Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. - external_data_configuration.json_extension: '(Optional) - Used to indicate that a JSON variant, rather than normal JSON, is being used as the sourceFormat. This should only be used in combination with the JSON source format. Valid values are: GEOJSON.' + external_data_configuration.json_extension: '- (Optional) Used to indicate that a JSON variant, rather than normal JSON, is being used as the sourceFormat. This should only be used in combination with the JSON source format. Valid values are: GEOJSON.' external_data_configuration.json_options: |- - (Optional) - Additional properties to set if + - (Optional) Additional properties to set if source_format is set to "JSON". Structure is documented below. external_data_configuration.max_bad_records: |- - (Optional) - The maximum number of bad records that + - (Optional) The maximum number of bad records that BigQuery can ignore when reading data. external_data_configuration.metadata_cache_mode: '- (Optional) Metadata Cache Mode for the table. Set this to enable caching of metadata from external data source. Valid values are AUTOMATIC and MANUAL.' 
external_data_configuration.object_metadata: '- (Optional) Object Metadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the sourceUris. If object_metadata is set, source_format should be omitted.' external_data_configuration.parquet_options: |- - (Optional) - Additional properties to set if + - (Optional) Additional properties to set if source_format is set to "PARQUET". Structure is documented below. external_data_configuration.reference_file_schema_uri: '- (Optional) When creating an external table, the user can provide a reference file with the table schema. This is enabled for the following formats: AVRO, PARQUET, ORC.' external_data_configuration.schema: |- @@ -15595,7 +17198,7 @@ resources: datasource, after creation the computed schema will be stored in google_bigquery_table.schema external_data_configuration.source_format: |- - (Optional) - The data format. Please see sourceFormat under + - (Optional) The data format. Please see sourceFormat under ExternalDataConfiguration in Bigquery's public API documentation for supported formats. To use "GOOGLE_SHEETS" the scopes must include "https://www.googleapis.com/auth/drive.readonly". @@ -15603,32 +17206,32 @@ resources: - (Required) A list of the fully-qualified URIs that point to your data in Google Cloud. foreign_keys.column_references: |- - : (Required) The pair of the foreign key column and primary key column. + - (Required) The pair of the foreign key column and primary key column. Structure is documented below. - foreign_keys.name: ': (Optional) Set only if the foreign key constraint is named.' + foreign_keys.name: '- (Optional) Set only if the foreign key constraint is named.' foreign_keys.referenced_table: |- - : (Required) The table that holds the primary key + - (Required) The table that holds the primary key and is referenced by this foreign key. Structure is documented below. friendly_name: '- (Optional) A descriptive name for the table.' 
google_sheets_options.range: |- - (Optional) - Range of a sheet to query from. Only used when + - (Optional) Range of a sheet to query from. Only used when non-empty. At least one of range or skip_leading_rows must be set. Typical format: "sheet_name!top_left_cell_id:bottom_right_cell_id" For example: "sheet1!A1:B20" google_sheets_options.skip_leading_rows: |- - (Optional) - The number of rows at the top of the sheet + - (Optional) The number of rows at the top of the sheet that BigQuery will skip when reading the data. At least one of range or skip_leading_rows must be set. hive_partitioning_options.mode: |- - (Optional) - When set, what mode of hive partitioning to use when + - (Optional) When set, what mode of hive partitioning to use when reading data. The following modes are supported. hive_partitioning_options.require_partition_filter: |- - (Optional) If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified. hive_partitioning_options.source_uri_prefix: |- - (Optional) - When hive partition detection is requested, + - (Optional) When hive partition detection is requested, a common prefix for all source uris must be required. The prefix must end immediately before the partition key encoding begins. For example, consider files following this data layout. gs://bucket/path_to_table/dt=2019-06-01/country=USA/id=7/file.avro @@ -15636,8 +17239,8 @@ resources: partitioning is requested with either AUTO or STRINGS detection, the common prefix can be either of gs://bucket/path_to_table or gs://bucket/path_to_table/. Note that when mode is set to CUSTOM, you must encode the partition key schema within the source_uri_prefix by setting source_uri_prefix to gs://bucket/path_to_table/{key1:TYPE1}/{key2:TYPE2}/{key3:TYPE3}. - id: '- an identifier for the resource with format projects/{{project}}/datasets/{{dataset}}/tables/{{name}}' - json_options.encoding: (Optional) - The character encoding of the data.
The supported values are UTF-8, UTF-16BE, UTF-16LE, UTF-32BE, and UTF-32LE. The default value is UTF-8. + id: '- An identifier for the resource with format projects/{{project}}/datasets/{{dataset}}/tables/{{name}}' + json_options.encoding: '- (Optional) The character encoding of the data. The supported values are UTF-8, UTF-16BE, UTF-16LE, UTF-32BE, and UTF-32LE. The default value is UTF-8.' kms_key_version: '- The self link or full name of the kms key version used to encrypt this table.' labels: '- (Optional) A mapping of labels to assign to the resource.' last_modified_time: '- The time when this table was last modified, in milliseconds since the epoch.' @@ -15656,16 +17259,16 @@ resources: - (Optional) The maximum frequency at which this materialized view will be refreshed. The default value is 1800000 max_staleness: |- - : (Optional) The maximum staleness of data that could be + - (Optional) The maximum staleness of data that could be returned when the table (or stale MV) is queried. Staleness encoded as a string encoding of SQL IntervalValue type. num_bytes: '- The size of this table in bytes, excluding any data in the streaming buffer.' num_long_term_bytes: '- The number of bytes in the table that are considered "long-term storage".' num_rows: '- The number of rows of data in this table, excluding any data in the streaming buffer.' - parquet_options.enable_list_inference: (Optional) - Indicates whether to use schema inference specifically for Parquet LIST logical type. - parquet_options.enum_as_string: (Optional) - Indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default. - primary_key.columns: ': (Required) The columns that are composed of the primary key constraint.' + parquet_options.enable_list_inference: '- (Optional) Indicates whether to use schema inference specifically for Parquet LIST logical type.' 
+ parquet_options.enum_as_string: '- (Optional) Indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.' + primary_key.columns: '- (Required) The columns that are composed of the primary key constraint.' project: |- - (Optional) The ID of the project in which the resource belongs. If it is not provided, the provider project is used. @@ -15681,10 +17284,10 @@ resources: range_partitioning.range: |- - (Required) Information required to partition based on ranges. Structure is documented below. - referenced_table.dataset_id: ': (Required) The ID of the dataset containing this table.' - referenced_table.project_id: ': (Required) The ID of the project containing this table.' + referenced_table.dataset_id: '- (Required) The ID of the dataset containing this table.' + referenced_table.project_id: '- (Required) The ID of the project containing this table.' referenced_table.table_id: |- - : (Required) The ID of the table. The ID must contain only + - (Required) The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters. Certain operations allow suffixing of the table ID with a partition decorator, such as @@ -15701,6 +17304,24 @@ resources: expected to be the short name, for example "Production". schema: '- (Optional) A JSON schema for the table.' self_link: '- The URI of the created resource.' + serde_info.name: |- + - (Optional, [Beta] + (https://terraform.io/docs/providers/google/guides/provider_versions.html)) + Name of the SerDe. + The maximum length is 256 characters. + serde_info.parameters: |- + - (Optional, [Beta] + (https://terraform.io/docs/providers/google/guides/provider_versions.html)) + Key-value pairs that define the initialization parameters for the + serialization library. + Maximum size 10 Kib. 
+ serde_info.serialization_library: |- + - (Required, [Beta] + (https://terraform.io/docs/providers/google/guides/provider_versions.html)) + Specifies a fully-qualified class name of the serialization library that is + responsible for the translation of data between table representation and the + underlying low-level input and output format structures. + The maximum length is 256 characters. table_constraints: |- - (Optional) Defines the primary key and foreign keys. Structure is documented below. @@ -15722,11 +17343,11 @@ resources: CREATE MATERIALIZED VIEW mv1 AS REPLICA OF src_mv. Structure is documented below. table_replication_info.replication_interval_ms: |- - (Optional) - The interval at which the source + - (Optional) The interval at which the source materialized view is polled for updates. The default is 300000. - table_replication_info.source_dataset_id: (Required) - The ID of the source dataset. - table_replication_info.source_project_id: (Required) - The ID of the source project. - table_replication_info.source_table_id: (Required) - The ID of the source materialized view. + table_replication_info.source_dataset_id: '- (Required) The ID of the source dataset.' + table_replication_info.source_project_id: '- (Required) The ID of the source project.' + table_replication_info.source_table_id: '- (Required) The ID of the source materialized view.' terraform_labels: |- - The combination of labels configured directly on the resource and default labels configured on the provider. @@ -15734,7 +17355,7 @@ resources: - (Optional) If specified, configures time-based partitioning for this table. Structure is documented below. time_partitioning.expiration_ms: |- - - (Optional) Number of milliseconds for which to keep the + - (Optional) Number of milliseconds for which to keep the storage for a partition. 
time_partitioning.field: |- - (Optional) The field used to determine how to create a time-based @@ -15834,7 +17455,7 @@ resources: "zone": "us-central1-c" } ], - "deletion_protection": "true", + "deletion_protection": true, "name": "bt-instance" } - name: ap @@ -15863,7 +17484,7 @@ resources: "zone": "us-central1-b" } ], - "deletion_protection": "true", + "deletion_protection": true, "name": "bt-instance" } - name: ap @@ -15903,7 +17524,7 @@ resources: "zone": "us-central1-c" } ], - "deletion_protection": "true", + "deletion_protection": true, "name": "bt-instance" } - name: ap @@ -15937,7 +17558,7 @@ resources: "zone": "us-central1-b" } ], - "deletion_protection": "true", + "deletion_protection": true, "name": "bt-instance" } argumentDocs: @@ -16343,6 +17964,11 @@ resources: references: policy_data: data.google_iam_policy.admin.policy_data argumentDocs: + condition: '- (Optional) An IAM Condition for a given binding. Structure is documented below.' + condition.description: '- (Optional) An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.' + condition.expression: '- (Required) Textual representation of an expression in Common Expression Language syntax.' + condition.policy_data: '- (Required) The policy data generated by a google_iam_policy data source.' + condition.title: '- (Required) A title for the expression, i.e. a short string describing its purpose.' etag: '- (Computed) The etag of the instances''s IAM policy.' google_bigtable_instance_iam_binding: ': Authoritative for a given role. Updates the IAM policy to grant a role to a list of members. Other roles within the IAM policy for the instance are preserved.' google_bigtable_instance_iam_member: ': Non-authoritative. Updates the IAM policy to grant a role to a new member. Other members for the role for the instance are preserved.' 
@@ -16351,7 +17977,6 @@ resources: member/members: |- - (Required) Identities that will be granted the privilege in role. Each entry can have one of the following values: - policy_data: '- (Required) The policy data generated by a google_iam_policy data source.' project: |- - (Optional) The project in which the instance belongs. If it is not provided, Terraform will use the provider default. @@ -16381,7 +18006,12 @@ resources: "family": "family-first" }, { - "family": "family-second" + "family": "family-second", + "type": "intsum" + }, + { + "family": "family-third", + "type": " {\n\t\t\t\t\t\"aggregateType\": {\n\t\t\t\t\t\t\"max\": {},\n\t\t\t\t\t\t\"inputType\": {\n\t\t\t\t\t\t\t\"int64Type\": {\n\t\t\t\t\t\t\t\t\"encoding\": {\n\t\t\t\t\t\t\t\t\t\"bigEndianBytes\": {}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n" } ], "instance_name": "${google_bigtable_instance.instance.name}", @@ -16434,6 +18064,7 @@ resources: - (Optional) A list of predefined keys to split the table on. !> Warning: Modifying the split_keys of an existing table will cause Terraform to delete/recreate the entire google_bigtable_table resource. + type: '- (Optional) The type of the column family.' update: '- Default is 20 minutes.' importStatements: [] google_bigtable_table_iam_policy: @@ -16809,7 +18440,7 @@ resources: Optional. If creditTypesTreatment is INCLUDE_SPECIFIED_CREDITS, this is a list of credit types to be subtracted from gross cost to determine the spend for threshold calculations. See a list of acceptable credit type values. If creditTypesTreatment is not INCLUDE_SPECIFIED_CREDITS, this field must be empty. - Note: If the field has a value in the config and needs to be removed, the field has to be an emtpy array in the config. + Note: If the field has a value in the config and needs to be removed, the field has to be an empty array in the config. 
budget_filter.credit_types_treatment: |- - (Optional) @@ -16862,7 +18493,7 @@ resources: the parent account, usage from the parent account will be included. If the field is omitted, the report will include usage from the parent account and all subaccounts, if they exist. - Note: If the field has a value in the config and needs to be removed, the field has to be an emtpy array in the config. + Note: If the field has a value in the config and needs to be removed, the field has to be an empty array in the config. create: '- Default is 20 minutes.' custom_period.end_date: |- - @@ -16986,6 +18617,7 @@ resources: dependencies: google_project.project: |- { + "deletion_policy": "DELETE", "lifecycle": [ { "ignore_changes": [ @@ -17006,7 +18638,7 @@ resources: For example, "012345-567890-ABCDEF" or "". create: '- Default is 20 minutes.' delete: '- Default is 20 minutes.' - id: '- an identifier for the resource with format projects/{{project}}/billingInfo' + id: '- an identifier for the resource with format projects/{{project}}' project: |- - (Optional) The ID of the project in which the resource belongs. If it is not provided, the provider project is used. @@ -18097,6 +19729,9 @@ resources: - (Output) Reason for provisioning failures. + san_dnsnames: |- + - + The list of Subject Alternative Names of dnsName type defined in the certificate (see RFC 5280 4.2.1.6) scope: |- - (Optional) @@ -18752,6 +20387,466 @@ resources: A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". importStatements: [] + google_chronicle_data_access_label: + subCategory: Chronicle + description: A DataAccessLabel is a label on events to define user access to data. 
+ name: google_chronicle_data_access_label + title: "" + examples: + - name: example + manifest: |- + { + "data_access_label_id": "label-id", + "description": "label-description", + "instance": "00000000-0000-0000-0000-000000000000", + "location": "us", + "provider": "google-beta", + "udm_query": "principal.hostname=\"google.com\"" + } + argumentDocs: + author: |- + - + Output only. The user who created the data access label. + create: '- Default is 20 minutes.' + create_time: |- + - + Output only. The time at which the data access label was created. + data_access_label_id: |- + - + (Required) + Required. The ID to use for the data access label, which will become the label's + display name and the final component of the label's resource name. It must + only contain ASCII lowercase letters, numbers, and dashes; it must begin + with a letter, and it must not exceed 1000 characters. + delete: '- Default is 20 minutes.' + description: |- + - + (Optional) + Optional. A description of the data access label for a human reader. + display_name: |- + - + Output only. The short name displayed for the label as it appears on event data. This is same as data access label id. + id: '- an identifier for the resource with format projects/{{project}}/locations/{{location}}/instances/{{instance}}/dataAccessLabels/{{data_access_label_id}}' + instance: |- + - + (Required) + The unique identifier for the Chronicle instance, which is the same as the customer ID. + last_editor: |- + - + Output only. The user who last updated the data access label. + location: |- + - + (Required) + The location of the resource. This is the geographical region where the Chronicle instance resides, such as "us" or "europe-west2". + name: |- + - + The unique resource name of the data access label. This unique identifier is generated using values provided for the URL parameters. 
+ Format: + projects/{project}/locations/{location}/instances/{instance}/dataAccessLabels/{data_access_label_id} + project: |- + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + udm_query: |- + - + (Required) + A UDM query over event data. + update: '- Default is 20 minutes.' + update_time: |- + - + Output only. The time at which the data access label was last updated. + importStatements: [] + google_chronicle_data_access_scope: + subCategory: Chronicle + description: A DataAccessScope is a boolean expression of data access labels used to restrict access to data for users. + name: google_chronicle_data_access_scope + title: "" + examples: + - name: example + manifest: |- + { + "allowed_data_access_labels": [ + { + "log_type": "GCP_CLOUDAUDIT" + }, + { + "log_type": "GITHUB" + } + ], + "data_access_scope_id": "scope-id", + "description": "scope-description", + "instance": "00000000-0000-0000-0000-000000000000", + "location": "us", + "provider": "google-beta" + } + - name: example + manifest: |- + { + "allowed_data_access_labels": [ + { + "data_access_label": "${resource.google_chronicle_data_access_label.custom_data_access_label.data_access_label_id}" + } + ], + "data_access_scope_id": "scope-id", + "description": "scope-description", + "instance": "00000000-0000-0000-0000-000000000000", + "location": "us", + "provider": "google-beta" + } + references: + allowed_data_access_labels.data_access_label: resource.google_chronicle_data_access_label.custom_data_access_label.data_access_label_id + dependencies: + google_chronicle_data_access_label.custom_data_access_label: |- + { + "data_access_label_id": "label-id", + "instance": "00000000-0000-0000-0000-000000000000", + "location": "us", + "provider": "google-beta", + "udm_query": "principal.hostname=\"google.com\"" + } + - name: example + manifest: |- + { + "allowed_data_access_labels": [ + { + "asset_namespace": "my-namespace" + } + ], + 
"data_access_scope_id": "scope-id", + "description": "scope-description", + "instance": "00000000-0000-0000-0000-000000000000", + "location": "us", + "provider": "google-beta" + } + - name: example + manifest: |- + { + "allowed_data_access_labels": [ + { + "ingestion_label": [ + { + "ingestion_label_key": "ingestion_key", + "ingestion_label_value": "ingestion_value" + } + ] + } + ], + "data_access_scope_id": "scope-id", + "description": "scope-description", + "instance": "00000000-0000-0000-0000-000000000000", + "location": "us", + "provider": "google-beta" + } + - name: example + manifest: |- + { + "allow_all": true, + "data_access_scope_id": "scope-id", + "denied_data_access_labels": [ + { + "log_type": "GCP_CLOUDAUDIT" + }, + { + "data_access_label": "${resource.google_chronicle_data_access_label.custom_data_access_label.data_access_label_id}" + }, + { + "ingestion_label": [ + { + "ingestion_label_key": "ingestion_key", + "ingestion_label_value": "ingestion_value" + } + ] + }, + { + "asset_namespace": "my-namespace" + } + ], + "description": "scope-description", + "instance": "00000000-0000-0000-0000-000000000000", + "location": "us", + "provider": "google-beta" + } + references: + denied_data_access_labels.data_access_label: resource.google_chronicle_data_access_label.custom_data_access_label.data_access_label_id + dependencies: + google_chronicle_data_access_label.custom_data_access_label: |- + { + "data_access_label_id": "label-id", + "instance": "00000000-0000-0000-0000-000000000000", + "location": "us", + "provider": "google-beta", + "udm_query": "principal.hostname=\"google.com\"" + } + argumentDocs: + allow_all: |- + - + (Optional) + Optional. Whether or not the scope allows all labels, allow_all and + allowed_data_access_labels are mutually exclusive and one of them must be + present. denied_data_access_labels can still be used along with allow_all. 
+ When combined with denied_data_access_labels, access will be granted to all + data that doesn't have labels mentioned in denied_data_access_labels. E.g.: + A customer with scope with denied labels A and B and allow_all will be able + to see all data except data labeled with A and data labeled with B and data + with labels A and B. + allowed_data_access_labels: |- + - + (Optional) + The allowed labels for the scope. There has to be at + least one label allowed for the scope to be valid. + The logical operator for evaluation of the allowed labels is OR. + Either allow_all or allowed_data_access_labels needs to be provided. + E.g.: A customer with scope with allowed labels A and B will be able + to see data labeled with A or B or (A and B). + Structure is documented below. + allowed_data_access_labels.asset_namespace: |- + - + (Optional) + The asset namespace configured in the forwarder + of the customer's events. + allowed_data_access_labels.data_access_label: |- + - + (Optional) + The name of the data access label. + allowed_data_access_labels.display_name: |- + - + (Output) + Output only. The display name of the label. + Data access label and log types' names + will match the display name of the resource. + The asset namespace will match the namespace itself. + The ingestion key value pair will match the key of the tuple. + allowed_data_access_labels.ingestion_label: |- + - + (Optional) + Representation of an ingestion label type. + Structure is documented below. + allowed_data_access_labels.log_type: |- + - + (Optional) + The name of the log type. + author: |- + - + Output only. The user who created the data access scope. + create: '- Default is 20 minutes.' + create_time: |- + - + Output only. The time at which the data access scope was created. + data_access_scope_id: |- + - + (Required) + Required. The user provided scope id which will become the last part of the name + of the scope resource.
+ Needs to be compliant with https://google.aip.dev/122 + delete: '- Default is 20 minutes.' + denied_data_access_labels: |- + - + (Optional) + Optional. The denied labels for the scope. + The logical operator for evaluation of the denied labels is AND. + E.g.: A customer with scope with denied labels A and B won't be able + to see data labeled with A and data labeled with B + and data with labels A and B. + Structure is documented below. + denied_data_access_labels.asset_namespace: |- + - + (Optional) + The asset namespace configured in the forwarder + of the customer's events. + denied_data_access_labels.data_access_label: |- + - + (Optional) + The name of the data access label. + denied_data_access_labels.display_name: |- + - + (Output) + Output only. The display name of the label. + Data access label and log types's name + will match the display name of the resource. + The asset namespace will match the namespace itself. + The ingestion key value pair will match the key of the tuple. + denied_data_access_labels.ingestion_label: |- + - + (Optional) + Representation of an ingestion label type. + Structure is documented below. + denied_data_access_labels.log_type: |- + - + (Optional) + The name of the log type. + description: |- + - + (Optional) + Optional. A description of the data access scope for a human reader. + display_name: |- + - + Output only. The name to be used for display to customers of the data access scope. + id: '- an identifier for the resource with format projects/{{project}}/locations/{{location}}/instances/{{instance}}/dataAccessScopes/{{data_access_scope_id}}' + ingestion_label.ingestion_label_key: |- + - + (Required) + Required. The key of the ingestion label. Always required. + ingestion_label.ingestion_label_value: |- + - + (Optional) + Optional. The value of the ingestion label. Optional. An object + with no provided value and some key provided would match + against the given key and ANY value. 
+ instance: |- + - + (Required) + The unique identifier for the Chronicle instance, which is the same as the customer ID. + last_editor: |- + - + Output only. The user who last updated the data access scope. + location: |- + - + (Required) + The location of the resource. This is the geographical region where the Chronicle instance resides, such as "us" or "europe-west2". + name: |- + - + The unique full name of the data access scope. This unique identifier is generated using values provided for the URL parameters. + Format: + projects/{project}/locations/{location}/instances/{instance}/dataAccessScopes/{data_access_scope_id} + project: |- + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + update: '- Default is 20 minutes.' + update_time: |- + - + Output only. The time at which the data access scope was last updated. + importStatements: [] + google_chronicle_watchlist: + subCategory: Chronicle + description: A watchlist is a list of entities that allows for bulk operations over the included entities. 
+ name: google_chronicle_watchlist + title: "" + examples: + - name: example + manifest: |- + { + "description": "watchlist-description", + "display_name": "watchlist_name", + "entity_population_mechanism": [ + { + "manual": [ + {} + ] + } + ], + "instance": "00000000-0000-0000-0000-000000000000", + "location": "us", + "multiplying_factor": 1, + "provider": "google-beta", + "watchlist_id": "watchlist-id", + "watchlist_user_preferences": [ + { + "pinned": true + } + ] + } + - name: example + manifest: |- + { + "description": "watchlist-description", + "display_name": "watchlist-name", + "entity_population_mechanism": [ + { + "manual": [ + {} + ] + } + ], + "instance": "00000000-0000-0000-0000-000000000000", + "location": "us", + "multiplying_factor": 1, + "provider": "google-beta", + "watchlist_user_preferences": [ + { + "pinned": true + } + ] + } + argumentDocs: + create: '- Default is 20 minutes.' + create_time: |- + - + Output only. Time the watchlist was created. + delete: '- Default is 20 minutes.' + display_name: |- + - + (Required) + Required. Display name of the watchlist. + Note that it must be at least one character and less than 63 characters + (https://google.aip.dev/148). + entity_count: |- + - + Count of different types of entities in the watchlist. + Structure is documented below. + entity_count.asset: |- + - + (Output) + Output only. Count of asset type entities in the watchlist. + entity_count.user: |- + - + (Output) + Output only. Count of user type entities in the watchlist. + entity_population_mechanism: |- + - + (Required) + Mechanism to populate entities in the watchlist. + Structure is documented below. + entity_population_mechanism.description: |- + - + (Optional) + Optional. Description of the watchlist. + entity_population_mechanism.manual: |- + - + (Optional) + Entities are added manually. + entity_population_mechanism.multiplying_factor: |- + - + (Optional) + Optional. Weight applied to the risk score for entities + in this watchlist. 
+ The default is 1.0 if it is not specified. + entity_population_mechanism.project: |- + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + entity_population_mechanism.watchlist_id: |- + - + (Optional) + Optional. The ID to use for the watchlist, + which will become the final component of the watchlist's resource name. + This value should be 4-63 characters, and valid characters + are /a-z-/. + entity_population_mechanism.watchlist_user_preferences: |- + - + (Optional) + A collection of user preferences for watchlist UI configuration. + Structure is documented below. + id: '- an identifier for the resource with format projects/{{project}}/locations/{{location}}/instances/{{instance}}/watchlists/{{watchlist_id}}' + instance: |- + - + (Required) + The unique identifier for the Chronicle instance, which is the same as the customer ID. + location: |- + - + (Required) + The location of the resource. This is the geographical region where the Chronicle instance resides, such as "us" or "europe-west2". + name: |- + - + Identifier. Resource name of the watchlist. This unique identifier is generated using values provided for the URL parameters. + Format: + projects/{project}/locations/{location}/instances/{instance}/watchlists/{watchlist} + update: '- Default is 20 minutes.' + update_time: |- + - + Output only. Time the watchlist was last updated. + watchlist_user_preferences.pinned: |- + - + (Optional) + Optional. Whether the watchlist is pinned on the dashboard. + importStatements: [] google_cloud_asset_folder_feed: subCategory: Cloud Asset Inventory description: Describes a Cloud Asset Inventory feed used to to listen to asset updates. @@ -18792,6 +20887,7 @@ resources: dependencies: google_folder.my_folder: |- { + "deletion_protection": false, "display_name": "Networking", "parent": "organizations/123456789" } @@ -19520,6 +21616,44 @@ resources: - Last update timestamp in RFC 3339 text format. 
importStatements: [] + google_cloud_quotas_quota_adjuster_settings: + subCategory: Cloud Quotas + description: QuotaAdjusterSettings represents the preferred quota configuration specified for a project, folder or organization. + name: google_cloud_quotas_quota_adjuster_settings + title: "" + examples: + - name: adjuster_settings + manifest: |- + { + "enablement": "ENABLED", + "parent": "projects/104740170505", + "provider": "${google-beta}" + } + references: + provider: google-beta + argumentDocs: + create: '- Default is 20 minutes.' + delete: '- Default is 20 minutes.' + effective_container: |- + - + Fields to capture the hierarchy enablement. + The container (org/folder/project) that determines if the quota adjuster is set for this project/folder/org. We use the nearest-ancestor to determine the effective container. + The nearest ancestor (including this container) with enabled set (either true or false) will be returned. + effective_enablement: |- + - + Based on the effective container`s setting above, determines Whether this container has the quota adjuster enabled. + enablement: |- + - + (Required) + Required. The configured value of the enablement at the given resource. + Possible values are: ENABLED, DISABLED. + id: '- an identifier for the resource with format {{parent}}/locations/global/quotaAdjusterSettings' + parent: |- + - + (Required) + The parent of the quota preference. Allowed parents are "projects/[project-id / number]" or "folders/[folder-id / number]" or "organizations/[org-id / number]". + update: '- Default is 20 minutes.' + importStatements: [] google_cloud_quotas_quota_preference: subCategory: Cloud Quotas description: QuotaPreference represents the preferred quota configuration specified for a project, folder or organization. 
@@ -19873,6 +22007,55 @@ resources: } ] } + - name: default + manifest: |- + { + "location": "us-central1", + "metadata": [ + { + "annotations": { + "run.googleapis.com/launch-stage": "BETA" + } + } + ], + "name": "cloudrun-srv", + "provider": "${google-beta}", + "template": [ + { + "metadata": [ + { + "annotations": { + "autoscaling.knative.dev/maxScale": "1", + "run.googleapis.com/cpu-throttling": "false" + } + } + ], + "spec": [ + { + "containers": [ + { + "image": "gcr.io/cloudrun/hello", + "resources": [ + { + "limits": { + "cpu": "4", + "memory": "16Gi", + "nvidia.com/gpu": "1" + } + } + ] + } + ], + "node_selector": { + "run.googleapis.com/accelerator": "nvidia-l4" + } + } + ] + } + ] + } + references: + provider: google-beta - name: default manifest: |- { @@ -19906,7 +22089,7 @@ resources: google_sql_database_instance.instance: |- { "database_version": "MYSQL_5_7", - "deletion_protection": "true", + "deletion_protection": true, "name": "cloudrun-sql", "region": "us-east1", "settings": [ @@ -20009,13 +22192,6 @@ resources: } ], "location": "us-central1", - "metadata": [ - { - "annotations": { - "run.googleapis.com/launch-stage": "BETA" - } - } - ], "name": "cloudrun-srv", "provider": "${google-beta}", "template": [ @@ -20471,6 +22647,7 @@ resources: explicitly specified, otherwise to an implementation-defined value. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go + run.googleapis.com/accelerator: sets the type of GPU required by the Revision to run. run.googleapis.com/binary-authorization: sets the Binary Authorization. run.googleapis.com/binary-authorization-breakglass: sets the Binary Authorization breakglass. run.googleapis.com/client-name: sets the client name calling the Cloud Run API. @@ -20570,6 +22747,11 @@ resources: (Required) Containers defines the unit of execution for this Revision. Structure is documented below. 
+ spec.node_selector: |- + - + (Optional, Beta) + Node Selector describes the hardware requirements of the resources. + Use the following node selector keys to configure features on a Revision: spec.service_account_name: |- - (Optional) @@ -20746,12 +22928,12 @@ resources: This must match the Name of a Volume. volumes.csi: |- - - (Optional, Beta) + (Optional) A filesystem specified by the Container Storage Interface (CSI). Structure is documented below. volumes.empty_dir: |- - - (Optional, Beta) + (Optional) Ephemeral storage which can be backed by real disks (HD, SSD), network storage or memory (i.e. tmpfs). For now only in memory (tmpfs) is supported. It is ephemeral in the sense that when the sandbox is taken down, the data is destroyed with it (it does not persist across sandbox runs). Structure is documented below. volumes.name: |- @@ -20760,10 +22942,9 @@ resources: Volume's name. volumes.nfs: |- - - (Optional, Beta) + (Optional) A filesystem backed by a Network File System share. This filesystem requires the - run.googleapis.com/execution-environment annotation to be set to "gen2" and - run.googleapis.com/launch-stage set to "BETA" or "ALPHA". + run.googleapis.com/execution-environment annotation to be unset or set to "gen2" Structure is documented below. 
volumes.secret: |- - @@ -20825,6 +23006,7 @@ resources: - name: default manifest: |- { + "deletion_protection": false, "location": "us-central1", "name": "cloudrun-job", "template": [ @@ -20844,6 +23026,7 @@ resources: - name: default manifest: |- { + "deletion_protection": false, "lifecycle": [ { "ignore_changes": [ @@ -20880,6 +23063,7 @@ resources: - name: default manifest: |- { + "deletion_protection": false, "location": "us-central1", "name": "cloudrun-job", "template": [ @@ -20965,7 +23149,7 @@ resources: google_sql_database_instance.instance: |- { "database_version": "MYSQL_5_7", - "deletion_protection": "true", + "deletion_protection": true, "name": "cloudrun-sql", "region": "us-central1", "settings": [ @@ -20977,6 +23161,7 @@ resources: - name: default manifest: |- { + "deletion_protection": false, "location": "us-central1", "name": "cloudrun-job", "template": [ @@ -21030,6 +23215,7 @@ resources: - name: default manifest: |- { + "deletion_protection": false, "launch_stage": "GA", "location": "us-central1", "name": "cloudrun-job", @@ -21065,6 +23251,7 @@ resources: - name: default manifest: |- { + "deletion_protection": false, "depends_on": [ "${google_secret_manager_secret_version.secret-version-data}", "${google_secret_manager_secret_iam_member.secret-access}" @@ -21140,10 +23327,9 @@ resources: - name: default manifest: |- { - "launch_stage": "BETA", + "deletion_protection": false, "location": "us-central1", "name": "cloudrun-job", - "provider": "${google-beta}", "template": [ { "template": [ @@ -21175,11 +23361,10 @@ resources: } ] } - references: - provider: google-beta - name: default manifest: |- { + "deletion_protection": false, "location": "us-central1", "name": "cloudrun-job", "provider": "${google-beta}", @@ -21343,11 +23528,16 @@ resources: Number of executions created for this job. expire_time: |- - - For a deleted resource, the time after which it will be permamently deleted. 
+ For a deleted resource, the time after which it will be permanently deleted. gcs.bucket: |- - (Required) Name of the cloud storage bucket to back the volume. The resource service account must have permission to access the bucket. + gcs.mount_options: |- + - + (Optional, Beta) + A list of flags to pass to the gcsfuse command for configuring this volume. + Flags should be passed without leading dashes. gcs.read_only: |- - (Optional) @@ -21419,6 +23609,13 @@ resources: - (Optional) Arbitrary version identifier for the API client. + network_interfaces.deletion_protection: |- + - (Optional) Whether Terraform will be prevented from destroying the job. Defaults to true. + When aterraform destroy or terraform apply would delete the job, + the command will fail if this field is not set to false in Terraform state. + When the field is set to true or unset in Terraform state, a terraform apply + or terraform destroy that would delete the job will fail. + When the field is set to false, deleting the job is allowed. network_interfaces.labels: |- - (Optional) @@ -21656,13 +23853,13 @@ resources: Structure is documented below. volumes.empty_dir: |- - - (Optional, Beta) + (Optional) Ephemeral storage used as a shared volume. Structure is documented below. volumes.gcs: |- - - (Optional, Beta) - Cloud Storage bucket mounted as a volume using GCSFuse. This feature requires the launch stage to be set to ALPHA or BETA. + (Optional) + Cloud Storage bucket mounted as a volume using GCSFuse. Structure is documented below. volumes.name: |- - @@ -21670,8 +23867,8 @@ resources: Volume's name. volumes.nfs: |- - - (Optional, Beta) - NFS share mounted as a volume. This feature requires the launch stage to be set to ALPHA or BETA. + (Optional) + NFS share mounted as a volume. Structure is documented below. 
volumes.secret: |- - @@ -21745,6 +23942,7 @@ resources: - name: default manifest: |- { + "deletion_protection": false, "ingress": "INGRESS_TRAFFIC_ALL", "location": "us-central1", "name": "cloudrun-service", @@ -21761,6 +23959,7 @@ resources: - name: default manifest: |- { + "deletion_protection": false, "ingress": "INGRESS_TRAFFIC_ALL", "location": "us-central1", "name": "cloudrun-service", @@ -21785,6 +23984,7 @@ resources: - name: default manifest: |- { + "deletion_protection": false, "depends_on": [ "${google_secret_manager_secret_version.secret-version-data}" ], @@ -21881,7 +24081,7 @@ resources: google_sql_database_instance.instance: |- { "database_version": "MYSQL_5_7", - "deletion_protection": "true", + "deletion_protection": true, "name": "cloudrun-sql", "region": "us-central1", "settings": [ @@ -21893,6 +24093,7 @@ resources: - name: default manifest: |- { + "deletion_protection": false, "location": "us-central1", "name": "cloudrun-service", "template": [ @@ -21942,6 +24143,7 @@ resources: - name: default manifest: |- { + "deletion_protection": false, "launch_stage": "GA", "location": "us-central1", "name": "cloudrun-service", @@ -21973,6 +24175,48 @@ resources: - name: default manifest: |- { + "deletion_protection": false, + "ingress": "INGRESS_TRAFFIC_ALL", + "launch_stage": "BETA", + "location": "us-central1", + "name": "cloudrun-service", + "provider": "${google-beta}", + "template": [ + { + "containers": [ + { + "image": "us-docker.pkg.dev/cloudrun/container/hello", + "resources": [ + { + "limits": { + "cpu": "4", + "memory": "16Gi", + "nvidia.com/gpu": "1" + }, + "startup_cpu_boost": true + } + ] + } + ], + "node_selector": [ + { + "accelerator": "nvidia-l4" + } + ], + "scaling": [ + { + "max_instance_count": 1 + } + ] + } + ] + } + references: + provider: google-beta + - name: default + manifest: |- + { + "deletion_protection": false, "location": "us-central1", "name": "cloudrun-service", "template": [ @@ -22010,6 +24254,7 @@ resources: - name: 
default manifest: |- { + "deletion_protection": false, "depends_on": [ "${google_secret_manager_secret_version.secret-version-data}" ], @@ -22080,11 +24325,10 @@ resources: - name: default manifest: |- { + "deletion_protection": false, "ingress": "INGRESS_TRAFFIC_ALL", - "launch_stage": "BETA", "location": "us-central1", "name": "cloudrun-service", - "provider": "${google-beta}", "template": [ { "containers": [ @@ -22140,12 +24384,10 @@ resources: } ] } - references: - provider: google-beta - name: default manifest: |- { - "launch_stage": "BETA", + "deletion_protection": false, "location": "us-central1", "name": "cloudrun-service", "template": [ @@ -22187,8 +24429,8 @@ resources: - name: default manifest: |- { + "deletion_protection": false, "ingress": "INGRESS_TRAFFIC_ALL", - "launch_stage": "BETA", "location": "us-central1", "name": "cloudrun-service", "template": [ @@ -22253,6 +24495,70 @@ resources: ], "tier": "BASIC_HDD" } + - name: default + manifest: |- + { + "deletion_protection": false, + "depends_on": [ + "${time_sleep.wait_for_mesh}" + ], + "launch_stage": "BETA", + "location": "us-central1", + "name": "cloudrun-service", + "provider": "${google-beta}", + "template": [ + { + "containers": [ + { + "image": "us-docker.pkg.dev/cloudrun/container/hello" + } + ], + "service_mesh": [ + { + "mesh": "${google_network_services_mesh.mesh.id}" + } + ] + } + ] + } + references: + provider: google-beta + template.service_mesh.mesh: google_network_services_mesh.mesh.id + dependencies: + google_network_services_mesh.mesh: |- + { + "name": "network-services-mesh", + "provider": "${google-beta}" + } + time_sleep.wait_for_mesh: |- + { + "create_duration": "1m", + "depends_on": [ + "${google_network_services_mesh.mesh}" + ] + } + - name: default + manifest: |- + { + "deletion_protection": false, + "description": "The serving URL of this service will not perform any IAM check when invoked", + "ingress": "INGRESS_TRAFFIC_ALL", + "invoker_iam_disabled": true, + "location": 
"us-central1", + "name": "cloudrun-service", + "provider": "${google-beta}", + "template": [ + { + "containers": [ + { + "image": "us-docker.pkg.dev/cloudrun/container/hello" + } + ] + } + ] + } + references: + provider: google-beta argumentDocs: binary_authorization.breakglass_justification: |- - @@ -22407,11 +24713,16 @@ resources: A system-generated fingerprint for this version of the resource. May be used to detect modification conflict during updates. expire_time: |- - - For a deleted resource, the time after which it will be permamently deleted. + For a deleted resource, the time after which it will be permanently deleted. gcs.bucket: |- - (Required) GCS Bucket name + gcs.mount_options: |- + - + (Optional, Beta) + A list of flags to pass to the gcsfuse command for configuring this volume. + Flags should be passed without leading dashes. gcs.read_only: |- - (Optional) @@ -22529,7 +24840,23 @@ resources: - (Optional) Network tags applied to this Cloud Run service. - nfs.annotations: |- + nfs.path: |- + - + (Required) + Path that is exported by the NFS server. + nfs.read_only: |- + - + (Optional) + If true, mount the NFS volume as read only + nfs.server: |- + - + (Required) + Hostname or IP address of the NFS server + node_selector.accelerator: |- + - + (Required) + The GPU to attach to an instance. See https://cloud.google.com/run/docs/configuring/services/gpu for configuring GPU. + node_selector.annotations: |- - (Optional) Unstructured key value map that may be set by external tools to store and arbitrary metadata. They are not queryable and should be preserved when modifying objects. @@ -22538,38 +24865,49 @@ resources: This field follows Kubernetes annotations' namespacing, limits, and rules. Note: This field is non-authoritative, and will only manage the annotations present in your configuration. Please refer to the field effective_annotations for all of the annotations present on the resource. 
- nfs.binary_authorization: |- + node_selector.binary_authorization: |- - (Optional) Settings for the Binary Authorization feature. Structure is documented below. - nfs.client: |- + node_selector.client: |- - (Optional) Arbitrary identifier for the API client. - nfs.client_version: |- + node_selector.client_version: |- - (Optional) Arbitrary version identifier for the API client. - nfs.custom_audiences: |- + node_selector.custom_audiences: |- - (Optional) One or more custom audiences that you want this service to support. Specify each custom audience as the full URL in a string. The custom audiences are encoded in the token and used to authenticate requests. For more information, see https://cloud.google.com/run/docs/configuring/custom-audiences. - nfs.default_uri_disabled: |- + node_selector.default_uri_disabled: |- - (Optional, Beta) Disables public resolution of the default URI of this service. - nfs.description: |- + node_selector.deletion_protection: |- + - (Optional) Whether Terraform will be prevented from destroying the service. Defaults to true. + When aterraform destroy or terraform apply would delete the service, + the command will fail if this field is not set to false in Terraform state. + When the field is set to true or unset in Terraform state, a terraform apply + or terraform destroy that would delete the service will fail. + When the field is set to false, deleting the service is allowed. + node_selector.description: |- - (Optional) User-provided description of the Service. This field currently has a 512-character limit. - nfs.ingress: |- + node_selector.ingress: |- - (Optional) Provides the ingress settings for this Service. On output, returns the currently observed ingress settings, or INGRESS_TRAFFIC_UNSPECIFIED if no revision is active. Possible values are: INGRESS_TRAFFIC_ALL, INGRESS_TRAFFIC_INTERNAL_ONLY, INGRESS_TRAFFIC_INTERNAL_LOAD_BALANCER. 
- nfs.labels: |- + node_selector.invoker_iam_disabled: |- + - + (Optional) + Disables IAM permission check for run.routes.invoke for callers of this service. This feature is available by invitation only. For more information, visit https://cloud.google.com/run/docs/securing/managing-access#invoker_check. + node_selector.labels: |- - (Optional) Unstructured key value map that can be used to organize and categorize objects. User-provided labels are shared with Google's billing system, so they can be used to filter, or break down billing charges by team, component, @@ -22578,34 +24916,22 @@ resources: All system labels in v1 now have a corresponding field in v2 Service. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource. - nfs.launch_stage: |- + node_selector.launch_stage: |- - (Optional) The launch stage as defined by Google Cloud Platform Launch Stages. Cloud Run supports ALPHA, BETA, and GA. If no value is specified, GA is assumed. Set the launch stage to a preview stage on input to allow use of preview features in that stage. On read (or output), describes whether the resource uses preview features. For example, if ALPHA is provided as input, but only BETA and GA-level features are used, this field will be BETA on output. Possible values are: UNIMPLEMENTED, PRELAUNCH, EARLY_ACCESS, ALPHA, BETA, GA, DEPRECATED. - nfs.path: |- - - - (Required) - Path that is exported by the NFS server. - nfs.project: |- + node_selector.project: |- - (Optional) The ID of the project in which the resource belongs. If it is not provided, the provider project is used. - nfs.read_only: |- + node_selector.scaling: |- - (Optional) - If true, mount the NFS volume as read only - nfs.scaling: |- - - - (Optional, Beta) Scaling settings that apply to the whole service Structure is documented below. 
- nfs.server: |- - - - (Required) - Hostname or IP address of the NFS server - nfs.traffic: |- + node_selector.traffic: |- - (Optional) Specifies how to distribute traffic over a collection of Revisions belonging to the Service. If traffic is empty or not provided, defaults to 100% traffic to the latest Ready Revision. @@ -22635,7 +24961,7 @@ resources: resources.limits: |- - (Optional) - Only memory and CPU are supported. Use key cpu for CPU limit and memory for memory limit. Note: The only supported values for CPU are '1', '2', '4', and '8'. Setting 4 CPU requires at least 2Gi of memory. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go + Only memory, CPU, and nvidia.com/gpu are supported. Use key cpu for CPU limit, memory for memory limit, nvidia.com/gpu for gpu limit. Note: The only supported values for CPU are '1', '2', '4', and '8'. Setting 4 CPU requires at least 2Gi of memory. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go resources.startup_cpu_boost: |- - (Optional) @@ -22643,11 +24969,12 @@ resources: scaling.max_instance_count: |- - (Optional) - Maximum number of serving instances that this resource should have. + Maximum number of serving instances that this resource should have. Must not be less than minimum instance count. If absent, Cloud Run will calculate + a default value based on the project's available container instances quota in the region and specified instance size. scaling.min_instance_count: |- - (Optional) - Minimum number of serving instances that this resource should have. + Minimum number of serving instances that this resource should have. Defaults to 0. Must not be greater than maximum instance count. 
secret.default_mode: |- - (Optional) @@ -22661,6 +24988,10 @@ resources: - (Required) The name of the secret in Cloud Secret Manager. Format: {secret} if the secret is in the same project. projects/{project}/secrets/{secret} if the secret is in a different project. + service_mesh.mesh: |- + - + (Optional) + The Mesh resource name. For more information see https://cloud.google.com/service-mesh/docs/reference/network-services/rest/v1/projects.locations.meshes#resource:-mesh. startup_probe.failure_threshold: |- - (Optional) @@ -22736,6 +25067,11 @@ resources: (Optional) Sets the maximum number of requests that each serving instance can receive. If not specified or 0, defaults to 80 when requested CPU >= 1 and defaults to 1 when requested CPU < 1. + template.node_selector: |- + - + (Optional, Beta) + Node Selector describes the hardware requirements of the resources. + Structure is documented below. template.revision: |- - (Optional) @@ -22749,6 +25085,11 @@ resources: - (Optional) Email address of the IAM service account associated with the revision of the service. The service account represents the identity of the running revision, and determines what permissions the revision has. If not provided, the revision will use the project's default service account. + template.service_mesh: |- + - + (Optional, Beta) + Enables Cloud Service Mesh for this Revision. + Structure is documented below. template.session_affinity: |- - (Optional) @@ -22859,6 +25200,9 @@ resources: uri: |- - The main URI in which this Service is serving traffic. + urls: |- + - + All URLs serving traffic for this Service. value_source.secret_key_ref: |- - (Optional) @@ -22887,13 +25231,13 @@ resources: Structure is documented below. volumes.empty_dir: |- - - (Optional, Beta) + (Optional) Ephemeral storage used as a shared volume. Structure is documented below. volumes.gcs: |- - (Optional) - Cloud Storage bucket mounted as a volume using GCSFuse. 
This feature is only supported in the gen2 execution environment and requires launch-stage to be set to ALPHA or BETA. + Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment. Structure is documented below. volumes.name: |- - @@ -23378,6 +25722,128 @@ resources: } ] } + - name: http_target_oidc + manifest: |- + { + "http_target": [ + { + "header_overrides": [ + { + "header": [ + { + "key": "AddSomethingElse", + "value": "MyOtherValue" + } + ] + }, + { + "header": [ + { + "key": "AddMe", + "value": "MyValue" + } + ] + } + ], + "http_method": "POST", + "oidc_token": [ + { + "audience": "https://oidc.example.com", + "service_account_email": "${google_service_account.oidc_service_account.email}" + } + ], + "uri_override": [ + { + "host": "oidc.example.com", + "path_override": [ + { + "path": "/users/1234" + } + ], + "port": 8443, + "query_override": [ + { + "query_params": "qparam1=123\u0026qparam2=456" + } + ], + "scheme": "HTTPS", + "uri_override_enforce_mode": "IF_NOT_EXISTS" + } + ] + } + ], + "location": "us-central1", + "name": "cloud-tasks-queue-http-target-oidc" + } + references: + http_target.oidc_token.service_account_email: google_service_account.oidc_service_account.email + dependencies: + google_service_account.oidc_service_account: |- + { + "account_id": "example-oidc", + "display_name": "Tasks Queue OIDC Service Account" + } + - name: http_target_oauth + manifest: |- + { + "http_target": [ + { + "header_overrides": [ + { + "header": [ + { + "key": "AddSomethingElse", + "value": "MyOtherValue" + } + ] + }, + { + "header": [ + { + "key": "AddMe", + "value": "MyValue" + } + ] + } + ], + "http_method": "POST", + "oauth_token": [ + { + "scope": "openid https://www.googleapis.com/auth/userinfo.email", + "service_account_email": "${google_service_account.oauth_service_account.email}" + } + ], + "uri_override": [ + { + "host": "oauth.example.com", + "path_override": [ + { + "path": "/users/1234" 
+ } + ], + "port": 8443, + "query_override": [ + { + "query_params": "qparam1=123\u0026qparam2=456" + } + ], + "scheme": "HTTPS", + "uri_override_enforce_mode": "IF_NOT_EXISTS" + } + ] + } + ], + "location": "us-central1", + "name": "cloud-tasks-queue-http-target-oauth" + } + references: + http_target.oauth_token.service_account_email: google_service_account.oauth_service_account.email + dependencies: + google_service_account.oauth_service_account: |- + { + "account_id": "example-oauth", + "display_name": "Tasks Queue OAuth Service Account" + } argumentDocs: app_engine_routing_override: |- - @@ -23406,6 +25872,62 @@ resources: By default, the task is sent to the version which is the default version when the task is attempted. create: '- Default is 20 minutes.' delete: '- Default is 20 minutes.' + header_overrides.header: |- + - + (Required) + Header embodying a key and a value. + Structure is documented below. + header_overrides.header.key: |- + - + (Required) + The Key of the header. + header_overrides.header.value: |- + - + (Required) + The Value of the header. + http_target: |- + - + (Optional) + Modifies HTTP target for HTTP tasks. + Structure is documented below. + http_target.header_overrides: |- + - + (Optional) + HTTP target headers. + This map contains the header field names and values. + Headers will be set when running the CreateTask and/or BufferTask. + These headers represent a subset of the headers that will be configured for the task's HTTP request. + Some HTTP request headers will be ignored or replaced. + Headers which can have multiple values (according to RFC2616) can be specified using comma-separated values. + The size of the headers must be less than 80KB. Queue-level headers to override headers of all the tasks in the queue. + Structure is documented below. + http_target.http_method: |- + - + (Optional) + The HTTP method to use for the request. + When specified, it overrides HttpRequest for the task. 
+ Note that if the value is set to GET the body of the task will be ignored at execution time. + Possible values are: HTTP_METHOD_UNSPECIFIED, POST, GET, HEAD, PUT, DELETE, PATCH, OPTIONS. + http_target.oauth_token: |- + - + (Optional) + If specified, an OAuth token is generated and attached as the Authorization header in the HTTP request. + This type of authorization should generally be used only when calling Google APIs hosted on *.googleapis.com. + Note that both the service account email and the scope MUST be specified when using the queue-level authorization override. + Structure is documented below. + http_target.oidc_token: |- + - + (Optional) + If specified, an OIDC token is generated and attached as an Authorization header in the HTTP request. + This type of authorization can be used for many scenarios, including calling Cloud Run, or endpoints where you intend to validate the token yourself. + Note that both the service account email and the audience MUST be specified when using the queue-level authorization override. + Structure is documented below. + http_target.uri_override: |- + - + (Optional) + URI override. + When specified, overrides the execution URI for all the tasks in the queue. + Structure is documented below. id: '- an identifier for the resource with format projects/{{project}}/locations/{{location}}/queues/{{name}}' location: |- - @@ -23415,9 +25937,38 @@ resources: - (Optional) The queue name. + oauth_token.scope: |- + - + (Optional) + OAuth scope to be used for generating OAuth access token. + If not specified, "https://www.googleapis.com/auth/cloud-platform" will be used. + oauth_token.service_account_email: |- + - + (Required) + Service account email to be used for generating OAuth token. + The service account must be within the same project as the queue. + The caller must have iam.serviceAccounts.actAs permission for the service account. + oidc_token.audience: |- + - + (Optional) + Audience to be used when generating OIDC token. 
If not specified, the URI specified in target will be used. + oidc_token.service_account_email: |- + - + (Required) + Service account email to be used for generating OIDC token. + The service account must be within the same project as the queue. + The caller must have iam.serviceAccounts.actAs permission for the service account. + path_override.path: |- + - + (Optional) + The URI path (e.g., /users/1234). Default is an empty string. project: |- - (Optional) The ID of the project in which the resource belongs. If it is not provided, the provider project is used. + query_override.query_params: |- + - + (Optional) + The query parameters (e.g., qparam1=123&qparam2=456). Default is an empty string. rate_limits: |- - (Optional) @@ -23498,6 +26049,47 @@ resources: This field may contain any value between 0.0 and 1.0, inclusive. 0.0 is the default and means that no operations are logged. update: '- Default is 20 minutes.' + uri_override.host: |- + - + (Optional) + Host override. + When specified, replaces the host part of the task URL. + For example, if the task URL is "https://www.google.com", and host value + is set to "example.net", the overridden URI will be changed to "https://example.net". + Host value cannot be an empty string (INVALID_ARGUMENT). + uri_override.path_override: |- + - + (Optional) + URI path. + When specified, replaces the existing path of the task URL. + Setting the path value to an empty string clears the URI path segment. + Structure is documented below. + uri_override.port: |- + - + (Optional) + Port override. + When specified, replaces the port part of the task URI. + For instance, for a URI http://www.google.com/foo and port=123, the overridden URI becomes http://www.google.com:123/foo. + Note that the port value must be a positive integer. + Setting the port to 0 (Zero) clears the URI port. + uri_override.query_override: |- + - + (Optional) + URI query. + When specified, replaces the query part of the task URI. 
Setting the query value to an empty string clears the URI query segment. + Structure is documented below. + uri_override.scheme: |- + - + (Optional) + Scheme override. + When specified, the task URI scheme is replaced by the provided value (HTTP or HTTPS). + Possible values are: HTTP, HTTPS. + uri_override.uri_override_enforce_mode: |- + - + (Optional) + URI Override Enforce Mode + When specified, determines the Target UriOverride mode. If not specified, it defaults to ALWAYS. + Possible values are: ALWAYS, IF_NOT_EXISTS. importStatements: [] google_cloud_tasks_queue_iam_policy: subCategory: Cloud Tasks @@ -25421,11 +28013,11 @@ resources: worker_config.disk_size_gb: |- - (Optional) - Size of the disk attached to the worker, in GB. See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). Specify a value of up to 1000. If 0 is specified, Cloud Build will use a standard disk size. + Size of the disk attached to the worker, in GB. See diskSizeGb. Specify a value of up to 1000. If 0 is specified, Cloud Build will use a standard disk size. worker_config.machine_type: |- - (Optional) - Machine type of a worker, such as n1-standard-1. See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). If left blank, Cloud Build will use n1-standard-1. + Machine type of a worker, such as n1-standard-1. See machineType. If left blank, Cloud Build will use n1-standard-1. worker_config.no_external_ip: |- - (Optional) @@ -26839,6 +29431,10 @@ resources: - (Optional) Optional. The label to use when selecting Pods for the Deployment and Service resources. This label must already be present in both resources. + gateway_service_mesh.route_destinations: |- + - + (Optional) + Optional. Route destinations allow configuring the Gateway API HTTPRoute to be deployed to additional clusters. This option is available for multi-cluster service mesh set ups that require the route to exist in the clusters that call the service. 
If unspecified, the HTTPRoute will only be deployed to the Target cluster. gateway_service_mesh.route_update_wait_time: |- - (Optional) @@ -26924,6 +29520,14 @@ resources: - (Optional) Optional. A sequence of skaffold custom actions to invoke during execution of the predeploy job. + route_destinations.destination_ids: |- + - + (Required) + Required. The clusters where the Gateway API HTTPRoute resource will be deployed to. Valid entries include the associated entities IDs configured in the Target resource and "@self" to include the Target cluster. + route_destinations.propagate_service: |- + - + (Optional) + Optional. Whether to propagate the Kubernetes Service to the route destination clusters. The Service will always be deployed to the Target cluster even if the HTTPRoute is not. This option may be used to facilitiate successful DNS lookup in the route destination clusters. Can only be set to true if destinations are specified. runtime_config.cloud_run: |- - (Optional) @@ -27155,6 +29759,26 @@ resources: - (Optional) Membership of the GKE Hub-registered cluster to which to apply the Skaffold configuration. Format is projects/{project}/locations/{location}/memberships/{membership_name}. + anthos_clusters.membership: |- + - + (Optional) + Optional. Membership of the GKE Hub-registered cluster to which to apply the Skaffold configuration. Format is projects/{project}/locations/{location}/memberships/{membership_name}. + associated_entities: |- + - + (Optional) + Optional. Map of entity IDs to their associated entities. Associated entities allows specifying places other than the deployment target for specific features. For example, the Gateway API canary can be configured to deploy the HTTPRoute to a different cluster(s) than the deployment cluster using associated entities. An entity ID must consist of lower-case letters, numbers, and hyphens, start with a letter and end with a letter or a number, and have a max length of 63 characters. 
In other words, it must match the following regex: ^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$. + associated_entities.anthos_clusters: |- + - + (Optional) + Optional. Information specifying Anthos clusters as associated entities. + associated_entities.entity_id: |- + - + (Required) + The name for the key in the map for which this object is mapped to in the API + associated_entities.gke_clusters: |- + - + (Optional) + Optional. Information specifying GKE clusters as associated entities. create: '- Default is 20 minutes.' create_time: |- - @@ -27229,6 +29853,18 @@ resources: - (Optional) Optional. If set, used to configure a proxy to the Kubernetes server. + gke_clusters.cluster: |- + - + (Optional) + Optional. Information specifying a GKE Cluster. Format is projects/{project_id}/locations/{location_id}/clusters/{cluster_id}. + gke_clusters.internal_ip: |- + - + (Optional) + Optional. If true, cluster is accessed using the private IP address of the control plane endpoint. Otherwise, the default IP address of the control plane endpoint is used. The default IP address is the private IP address for clusters with private control-plane endpoints and the public IP address otherwise. Only specify this option when cluster is a private GKE cluster. + gke_clusters.proxy_url: |- + - + (Optional) + Optional. If set, used to configure a proxy to the Kubernetes server. id: '- an identifier for the resource with format projects/{{project}}/locations/{{location}}/targets/{{name}}' labels: |- - @@ -29131,7 +31767,7 @@ resources: secret_environment_variables.project_id: |- - (Required) - Project identifier (preferrably project number but can also be the project ID) of the project that contains the secret. If not set, it will be populated with the function's project assuming that the secret exists in the same project as of the function. + Project identifier (preferably project number but can also be the project ID) of the project that contains the secret. 
If not set, it will be populated with the function's project assuming that the secret exists in the same project as of the function. secret_environment_variables.secret: |- - (Required) @@ -29147,7 +31783,7 @@ resources: secret_volumes.project_id: |- - (Required) - Project identifier (preferrably project number but can also be the project ID) of the project that contains the secret. If not set, it will be populated with the function's project assuming that the secret exists in the same project as of the function. + Project identifier (preferably project number but can also be the project ID) of the project that contains the secret. If not set, it will be populated with the function's project assuming that the secret exists in the same project as of the function. secret_volumes.secret: |- - (Required) @@ -29605,7 +32241,7 @@ resources: { "config": [ { - "enable_private_ip_environment": true + "enable_private_environment": true } ], "name": "example-environment", @@ -29642,6 +32278,34 @@ resources: "name": "example-environment", "region": "us-central1" } + - name: example + manifest: |- + { + "config": [ + { + "node_config": [ + { + "composer_network_attachment": "${google_compute_network_attachment.example.id}" + } + ] + } + ], + "name": "example-environment", + "region": "us-central1" + } + references: + config.node_config.composer_network_attachment: google_compute_network_attachment.example.id + dependencies: + google_compute_network_attachment.example: |- + { + "lifecycle": [ + { + "ignore_changes": [ + "${producer_accept_lists}" + ] + } + ] + } - name: test manifest: |- { @@ -29667,11 +32331,6 @@ resources: - (Required) cidr_block must be specified in CIDR notation. - cidr_blocks.data_retention_config: |- - - - (Optional, Cloud Composer 2.0.23 or newer only) - Configuration setting for airflow data rentention mechanism. Structure is - documented below. 
cidr_blocks.display_name: |- - (Optional) @@ -29700,8 +32359,8 @@ resources: The Kubernetes Engine cluster used to run this environment. config.data_retention_config: |- - - (Optional, Cloud Composer 2.0.23 or later only) - Configuration setting for Airflow database retention mechanism. Structure is + (Optional, Cloud Composer 2.0.23 or newer only) + Configuration setting for airflow data rentention mechanism. Structure is documented below. config.database_config: |- - @@ -29710,12 +32369,12 @@ resources: by Apache Airflow software. config.enable_private_builds_only: |- - - (Optional, Beta, Cloud Composer 3 only) + (Optional, Cloud Composer 3 only) If true, builds performed during operations that install Python packages have only private connectivity to Google services. If false, the builds also have access to the internet. config.enable_private_environment: |- - - (Optional, Beta, Cloud Composer 3 only) + (Optional, Cloud Composer 3 only) If true, a private Composer environment will be created. config.encryption_config: |- - @@ -29781,7 +32440,7 @@ resources: (Optional) The Kubernetes workloads configuration for GKE cluster associated with the Cloud Composer environment. - create: '- Default is 60 minutes.' + create: '- Default is 120 minutes.' dag_processor.count: |- - (Required) @@ -29802,6 +32461,14 @@ resources: (Optional) The configuration setting for Task Logs. Structure is documented below. + data_retention_config.task_logs_retention_config.master_authorized_networks_config: |- + - + (Optional) + Configuration options for the master authorized networks feature. Enabled + master authorized networks will disallow all external traffic to access + Kubernetes master through HTTPS except traffic from the given CIDR blocks, + Google Compute Engine Public IPs and Google Prod IPs. Structure is + documented below. data_retention_config.task_logs_retention_config.storage_mode: |- - (Optional) @@ -29817,7 +32484,7 @@ resources: (Optional) Optional. 
Cloud SQL machine type used by Airflow database. It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8 or db-n1-standard-16. - delete: '- Default is 6 minutes.' + delete: '- Default is 30 minutes.' effective_labels: |- - All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services. @@ -29905,12 +32572,12 @@ resources: Name of the environment node_config.composer_internal_ipv4_cidr_block: |- - - (Optional, Beta, Cloud Composer 3 only) + (Optional, Cloud Composer 3 only) /20 IPv4 cidr range that will be used by Composer internal components. Cannot be updated. node_config.composer_network_attachment: |- - - (Optional, Beta, Cloud Composer 3 only) + (Optional, Cloud Composer 3 only) PSC (Private Service Connect) Network entry point. Customers can pre-create the Network Attachment and point Cloud Composer environment to use. It is possible to share network attachment among many environments, provided enough IP addresses are available. @@ -30095,8 +32762,7 @@ resources: separated by a hyphen, for example "core-dags_are_paused_at_creation". software_config.cloud_data_lineage_integration: |- - - (Optional, Beta, - Cloud Composer environments in versions composer-2.1.2-airflow-..* and newer) + (Optional, Cloud Composer environments in versions composer-2.1.2-airflow-..* and newer) The configuration for Cloud Data Lineage integration. Structure is documented below. ? software_config.composer-(([0-9]+)(\.[0-9]+\.[0-9]+(-preview\.[0-9]+)?)?|latest)-airflow-(([0-9]+)((\.[0-9]+)(\.[0-9]+)?)?(-build\.[0-9]+)?) @@ -30137,7 +32803,7 @@ resources: The number of schedulers for Airflow. software_config.web_server_plugins_mode: |- - - (Optional, Beta, Cloud Composer 3 only) + (Optional, Cloud Composer 3 only) Web server plugins configuration. Can be either 'ENABLED' or 'DISABLED'. Defaults to 'ENABLED'. 
storage_config: |- - @@ -30162,7 +32828,7 @@ resources: - (Required) The amount of memory (GB) for a single Airflow triggerer. - update: '- Default is 60 minutes.' + update: '- Default is 120 minutes.' web_server.cpu: |- - (Optional) @@ -30206,7 +32872,7 @@ resources: The amount of storage (GB) for a single Airflow worker. workloads_config.dag_processor: |- - - (Optional, Beta, Cloud Composer 3 only) + (Optional, Cloud Composer 3 only) Configuration for resources used by DAG processor. workloads_config.scheduler: |- - @@ -30239,12 +32905,10 @@ resources: }, "environment": "${google_composer_environment.environment.name}", "name": "test-config-map", - "provider": "${google-beta}", "region": "us-central1" } references: environment: google_composer_environment.environment.name - provider: google-beta dependencies: google_composer_environment.environment: |- { @@ -30258,7 +32922,6 @@ resources: } ], "name": "test-environment", - "provider": "${google-beta}", "region": "us-central1" } argumentDocs: @@ -30609,6 +33272,10 @@ resources: If the self_link is provided then zone and project are extracted from the self link. If only the name is used then zone and project must be defined as properties on the resource or provider. + interface: |- + - + (Optional) + The disk interface used for attaching this disk. mode: |- - (Optional) @@ -30990,7 +33657,7 @@ resources: scale_down_control.max_scaled_down_replicas: |- - (Optional) - A nested object resource + A nested object resource. Structure is documented below. scale_down_control.time_window_sec: |- - @@ -31000,7 +33667,7 @@ resources: scale_in_control.max_scaled_in_replicas: |- - (Optional) - A nested object resource + A nested object resource. Structure is documented below. 
scale_in_control.time_window_sec: |- - @@ -31404,6 +34071,7 @@ resources: { "iap": [ { + "enabled": true, "oauth2_client_id": "abc", "oauth2_client_secret": "xyz" } @@ -31604,7 +34272,15 @@ resources: "name": "backend-service", "outlier_detection": [ { - "consecutive_errors": 2 + "consecutive_errors": 2, + "consecutive_gateway_failure": 5, + "enforcing_consecutive_errors": 100, + "enforcing_consecutive_gateway_failure": 0, + "enforcing_success_rate": 100, + "max_ejection_percent": 10, + "success_rate_minimum_hosts": 5, + "success_rate_request_volume": 100, + "success_rate_stdev_factor": 1900 } ], "provider": "${google-beta}", @@ -31624,6 +34300,43 @@ resources: "name": "health-check", "provider": "${google-beta}" } + - name: default + manifest: |- + { + "health_checks": [ + "${google_compute_health_check.health_check.id}" + ], + "load_balancing_scheme": "EXTERNAL_MANAGED", + "locality_lb_policy": "RING_HASH", + "name": "backend-service", + "provider": "${google-beta}", + "session_affinity": "STRONG_COOKIE_AFFINITY", + "strong_session_affinity_cookie": [ + { + "name": "mycookie", + "ttl": [ + { + "nanos": 1111, + "seconds": 11 + } + ] + } + ] + } + references: + health_checks: google_compute_health_check.health_check.id + provider: google-beta + dependencies: + google_compute_health_check.health_check: |- + { + "http_health_check": [ + { + "port": 80 + } + ], + "name": "health-check", + "provider": "${google-beta}" + } - name: default manifest: |- { @@ -31683,6 +34396,13 @@ resources: ], "name": "health-check" } + - name: default + manifest: |- + { + "ip_address_selection_policy": "IPV6_ONLY", + "load_balancing_scheme": "EXTERNAL_MANAGED", + "name": "backend-service" + } argumentDocs: LEAST_REQUEST: |- : An O(1) algorithm which selects two random healthy @@ -31709,7 +34429,8 @@ resources: is selected in round robin order. WEIGHTED_MAGLEV: |- : Per-instance weighted Load Balancing via health check - reported weights. 
If set, the Backend Service must + reported weights. Only applicable to loadBalancingScheme + EXTERNAL. If set, the Backend Service must configure a non legacy HTTP-based Health Check, and health check replies are expected to contain non-standard HTTP response header field @@ -31720,7 +34441,7 @@ resources: instance either reported a valid weight or had UNAVAILABLE_WEIGHT. Otherwise, Load Balancing remains equal-weight. - This field is applicable to either: + locality_lb_policy is applicable to either: affinity_cookie_ttl_sec: |- - (Optional) @@ -31762,7 +34483,6 @@ resources: and CONNECTION (for TCP/SSL). See the Backend Services Overview for an explanation of load balancing modes. - From version 6.0.0 default value will be UTILIZATION to match default GCP value. Default value is UTILIZATION. Possible values are: UTILIZATION, RATE, CONNECTION. backend.capacity_scaler: |- @@ -32137,13 +34857,17 @@ resources: (Optional) Settings for enabling Cloud Identity Aware Proxy Structure is documented below. - iap.oauth2_client_id: |- + iap.enabled: |- - (Required) + Whether the serving infrastructure will authenticate and authorize all incoming requests. + iap.oauth2_client_id: |- + - + (Optional) OAuth2 Client ID for IAP iap.oauth2_client_secret: |- - - (Required) + (Optional) OAuth2 Client Secret for IAP Note: This property is sensitive and will not be displayed in the plan. iap.oauth2_client_secret_sha256: |- @@ -32163,6 +34887,11 @@ resources: (Required) Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. + ip_address_selection_policy: |- + - + (Optional) + Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). + Possible values are: IPV4_ONLY, PREFER_IPV6, IPV6_ONLY. load_balancing_scheme: |- - (Optional) @@ -32241,8 +34970,6 @@ resources: Settings controlling eviction of unhealthy hosts from the load balancing pool. 
Applicable backend service types can be a global backend service with the loadBalancingScheme set to INTERNAL_SELF_MANAGED or EXTERNAL_MANAGED. - From version 6.0.0 outlierDetection default terraform values will be removed to match default GCP value. - Default values are enforce by GCP without providing them. Structure is documented below. outlier_detection.base_ejection_time: |- - @@ -32386,7 +35113,25 @@ resources: (Optional) Type of session affinity to use. The default is NONE. Session affinity is not applicable if the protocol is UDP. - Possible values are: NONE, CLIENT_IP, CLIENT_IP_PORT_PROTO, CLIENT_IP_PROTO, GENERATED_COOKIE, HEADER_FIELD, HTTP_COOKIE. + Possible values are: NONE, CLIENT_IP, CLIENT_IP_PORT_PROTO, CLIENT_IP_PROTO, GENERATED_COOKIE, HEADER_FIELD, HTTP_COOKIE, STRONG_COOKIE_AFFINITY. + strong_session_affinity_cookie: |- + - + (Optional) + Describes the HTTP cookie used for stateful session affinity. This field is applicable and required if the sessionAffinity is set to STRONG_COOKIE_AFFINITY. + Structure is documented below. + strong_session_affinity_cookie.name: |- + - + (Optional) + Name of the cookie. + strong_session_affinity_cookie.path: |- + - + (Optional) + Path to set for the cookie. + strong_session_affinity_cookie.ttl: |- + - + (Optional) + Lifetime of the cookie. + Structure is documented below. timeout_sec: |- - (Optional) @@ -32634,7 +35379,7 @@ resources: async_primary_disk: |- - (Optional) - A nested object resource + A nested object resource. Structure is documented below. async_primary_disk.disk: |- - @@ -32903,7 +35648,7 @@ resources: storage_pool: |- - (Optional) - The URL of the storage pool in which the new disk is created. + The URL or the name of the storage pool in which the new disk is created. For example: terraform_labels: |- - @@ -33174,6 +35919,15 @@ resources: Only IPv4 is supported. 
This IP address can be either from your on-premise gateway or another Cloud provider's VPN gateway, it cannot be an IP address from Google Compute Engine. + interface.ipv6_address: |- + - + (Optional) + IPv6 address of the interface in the external VPN gateway. This IPv6 + address can be either from your on-premise gateway or another Cloud + provider's VPN gateway, it cannot be an IP address from Google Compute + Engine. Must specify an IPv6 address (not IPV4-mapped) using any format + described in RFC 4291 (e.g. 2001:db8:0:0:2d9:51:0:0). The output format + is RFC 5952 format (e.g. 2001:db8::2d9:51:0:0). label_fingerprint: |- - The fingerprint used for optimistic locking of this resource. Used @@ -33283,7 +36037,7 @@ resources: is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. - Example inputs include: ["22"], ["80","443"], and + Example inputs include: [22], [80, 443], and ["12345-12349"]. allow.protocol: |- - @@ -33310,7 +36064,7 @@ resources: is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. - Example inputs include: ["22"], ["80","443"], and + Example inputs include: [22], [80, 443], and ["12345-12349"]. deny.protocol: |- - @@ -33495,7 +36249,7 @@ resources: importStatements: [] google_compute_firewall_policy_association: subCategory: Compute Engine - description: Applies a hierarchical firewall policy to a target resource + description: Allows associating hierarchical firewall policies with the target where they are applied. 
name: google_compute_firewall_policy_association title: "" examples: @@ -33503,19 +36257,25 @@ resources: manifest: |- { "attachment_target": "${google_folder.folder.name}", - "firewall_policy": "${google_compute_firewall_policy.default.id}", + "firewall_policy": "${google_compute_firewall_policy.policy.id}", "name": "my-association" } references: attachment_target: google_folder.folder.name - firewall_policy: google_compute_firewall_policy.default.id + firewall_policy: google_compute_firewall_policy.policy.id dependencies: - google_compute_firewall_policy.default: |- + google_compute_firewall_policy.policy: |- { "description": "Example Resource", - "parent": "organizations/12345", + "parent": "organizations/123456789", "short_name": "my-policy" } + google_folder.folder: |- + { + "deletion_protection": false, + "display_name": "my-folder", + "parent": "organizations/123456789" + } argumentDocs: attachment_target: |- - @@ -33526,7 +36286,7 @@ resources: firewall_policy: |- - (Required) - The firewall policy ID of the association. + The firewall policy of the resource. id: '- an identifier for the resource with format locations/global/firewallPolicies/{{firewall_policy}}/associations/{{name}}' name: |- - @@ -33538,11 +36298,11 @@ resources: importStatements: [] google_compute_firewall_policy_rule: subCategory: Compute Engine - description: The Compute FirewallPolicyRule resource + description: Represents a rule that describes one or more match conditions along with the action to be taken when traffic matches this condition (allow or deny). 
name: google_compute_firewall_policy_rule title: "" examples: - - name: primary + - name: policy_rule manifest: |- { "action": "allow", @@ -33600,7 +36360,8 @@ resources: } google_folder.folder: |- { - "display_name": "policy", + "deletion_protection": false, + "display_name": "folder", "parent": "organizations/123456789" } google_network_security_address_group.basic_global_networksecurity_address_group: |- @@ -33611,7 +36372,7 @@ resources: "208.80.154.224/32" ], "location": "global", - "name": "policy", + "name": "address", "parent": "organizations/123456789", "type": "IPV4" } @@ -33621,11 +36382,15 @@ resources: (Required) The Action to perform when the client connection triggers the rule. Valid actions are "allow", "deny", "goto_next" and "apply_security_profile_group". create: '- Default is 20 minutes.' + creation_timestamp: |- + - + Creation timestamp in RFC3339 text format. delete: '- Default is 20 minutes.' direction: |- - (Required) - The direction in which this rule applies. Possible values: INGRESS, EGRESS + The direction in which this rule applies. + Possible values are: INGRESS, EGRESS. firewall_policy: |- - (Required) @@ -33641,27 +36406,38 @@ resources: layer4_configs.disabled: |- - (Optional) - Denotes whether the firewall policy rule is disabled. When set to true, the firewall policy rule is not enforced and traffic behaves as if it did not exist. If this is unspecified, the firewall policy rule will be enabled. + Denotes whether the firewall policy rule is disabled. + When set to true, the firewall policy rule is not enforced and traffic behaves as if it did not exist. + If this is unspecified, the firewall policy rule will be enabled. layer4_configs.enable_logging: |- - (Optional) - Denotes whether to enable logging for a particular rule. If logging is enabled, logs will be exported to the configured export destination in Stackdriver. Logs may be exported to BigQuery or Pub/Sub. Note: you cannot enable logging on "goto_next" rules. 
+ Denotes whether to enable logging for a particular rule. + If logging is enabled, logs will be exported to the configured export destination in Stackdriver. + Logs may be exported to BigQuery or Pub/Sub. + Note: you cannot enable logging on "goto_next" rules. layer4_configs.ip_protocol: |- - (Required) - The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number. + The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. + This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number. layer4_configs.ports: |- - (Optional) - An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. Example inputs include: ``. + An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. + Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. layer4_configs.security_profile_group: |- - (Optional) - A fully-qualified URL of a SecurityProfileGroup resource. Example: https://networksecurity.googleapis.com/v1/organizations/{organizationId}/locations/global/securityProfileGroups/my-security-profile-group. It must be specified if action = 'apply_security_profile_group' and cannot be specified for other actions. + A fully-qualified URL of a SecurityProfile resource instance. 
+ Example: https://networksecurity.googleapis.com/v1/projects/{project}/locations/{location}/securityProfileGroups/my-security-profile-group + Must be specified if action = 'apply_security_profile_group' and cannot be specified for other actions. layer4_configs.target_resources: |- - (Optional) - A list of network resource URLs to which this rule applies. This field allows you to control which network's VMs get this rule. If this field is left blank, all VMs within the organization will receive the rule. + A list of network resource URLs to which this rule applies. + This field allows you to control which network's VMs get this rule. + If this field is left blank, all VMs within the organization will receive the rule. layer4_configs.target_service_accounts: |- - (Optional) @@ -33669,64 +36445,475 @@ resources: layer4_configs.tls_inspect: |- - (Optional) - Boolean flag indicating if the traffic should be TLS decrypted. It can be set only if action = 'apply_security_profile_group' and cannot be set for other actions. + Boolean flag indicating if the traffic should be TLS decrypted. + Can be set only if action = 'apply_security_profile_group' and cannot be set for other actions. match: |- - (Required) A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced. + Structure is documented below. match.dest_address_groups: |- - (Optional) - Address groups which should be matched against the traffic destination. Maximum number of destination address groups is 10. Destination address groups is only supported in Egress rules. + Address groups which should be matched against the traffic destination. Maximum number of destination address groups is 10. match.dest_fqdns: |- - (Optional) - Domain names that will be used to match against the resolved domain name of destination of traffic. Can only be specified if DIRECTION is egress. 
+ Fully Qualified Domain Name (FQDN) which should be matched against traffic destination. Maximum number of destination fqdn allowed is 100. match.dest_ip_ranges: |- - (Optional) - CIDR IP address range. Maximum number of destination CIDR IP ranges allowed is 256. + CIDR IP address range. Maximum number of destination CIDR IP ranges allowed is 5000. match.dest_region_codes: |- - (Optional) - The Unicode country codes whose IP addresses will be used to match against the source of traffic. Can only be specified if DIRECTION is egress. + Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. match.dest_threat_intelligences: |- - (Optional) - Name of the Google Cloud Threat Intelligence list. + Names of Network Threat Intelligence lists. The IPs in these lists will be matched against traffic destination. match.layer4_configs: |- - (Required) Pairs of IP protocols and ports that the rule should match. + Structure is documented below. match.src_address_groups: |- - (Optional) - Address groups which should be matched against the traffic source. Maximum number of source address groups is 10. Source address groups is only supported in Ingress rules. + Address groups which should be matched against the traffic source. Maximum number of source address groups is 10. match.src_fqdns: |- - (Optional) - Domain names that will be used to match against the resolved domain name of source of traffic. Can only be specified if DIRECTION is ingress. + Fully Qualified Domain Name (FQDN) which should be matched against traffic source. Maximum number of source fqdn allowed is 100. match.src_ip_ranges: |- - (Optional) - CIDR IP address range. Maximum number of source CIDR IP ranges allowed is 256. + CIDR IP address range. Maximum number of source CIDR IP ranges allowed is 5000. 
match.src_region_codes: |- - (Optional) - The Unicode country codes whose IP addresses will be used to match against the source of traffic. Can only be specified if DIRECTION is ingress. + Region codes whose IP addresses will be used to match for source of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of source region codes allowed is 5000. match.src_threat_intelligences: |- - (Optional) - Name of the Google Cloud Threat Intelligence list. + Names of Network Threat Intelligence lists. The IPs in these lists will be matched against traffic source. priority: |- - (Required) - An integer indicating the priority of a rule in the list. The priority must be a positive value between 0 and 2147483647. Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest prority. + An integer indicating the priority of a rule in the list. + The priority must be a positive value between 0 and 2147483647. + Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest prority. rule_tuple_count: |- - Calculation of the complexity of a single firewall policy rule. update: '- Default is 20 minutes.' importStatements: [] + google_compute_firewall_policy_with_rules: + subCategory: Compute Engine + description: The Compute FirewallPolicy with rules resource. 
+ name: google_compute_firewall_policy_with_rules + title: "" + examples: + - name: firewall-policy-with-rules + manifest: |- + { + "description": "Terraform test", + "parent": "organizations/123456789", + "provider": "${google-beta}", + "rule": [ + { + "action": "allow", + "description": "tcp rule", + "direction": "EGRESS", + "enable_logging": true, + "match": [ + { + "dest_address_groups": [ + "${google_network_security_address_group.address_group_1.id}" + ], + "dest_fqdns": [ + "www.yyy.com", + "www.zzz.com" + ], + "dest_ip_ranges": [ + "11.100.0.1/32" + ], + "dest_region_codes": [ + "HK", + "IN" + ], + "dest_threat_intelligences": [ + "iplist-search-engines-crawlers", + "iplist-tor-exit-nodes" + ], + "layer4_config": [ + { + "ip_protocol": "tcp", + "ports": [ + 8080, + 7070 + ] + } + ] + } + ], + "priority": 1000, + "target_resources": [ + "https://www.googleapis.com/compute/beta/projects/${data.google_project.project.name}/global/networks/default" + ] + }, + { + "action": "deny", + "description": "udp rule", + "direction": "INGRESS", + "disabled": true, + "enable_logging": false, + "match": [ + { + "layer4_config": [ + { + "ip_protocol": "udp" + } + ], + "src_address_groups": [ + "${google_network_security_address_group.address_group_1.id}" + ], + "src_fqdns": [ + "www.abc.com", + "www.def.com" + ], + "src_ip_ranges": [ + "0.0.0.0/0" + ], + "src_region_codes": [ + "US", + "CA" + ], + "src_threat_intelligences": [ + "iplist-known-malicious-ips", + "iplist-public-clouds" + ] + } + ], + "priority": 2000 + }, + { + "action": "apply_security_profile_group", + "description": "security profile group rule", + "direction": "INGRESS", + "enable_logging": false, + "match": [ + { + "layer4_config": [ + { + "ip_protocol": "tcp" + } + ], + "src_ip_ranges": [ + "0.0.0.0/0" + ] + } + ], + "priority": 3000, + "rule_name": "tcp rule", + "security_profile_group": "//networksecurity.googleapis.com/${google_network_security_security_profile_group.security_profile_group_1.id}", + 
"target_service_accounts": [ + "test@google.com" + ], + "tls_inspect": true + } + ], + "short_name": "tf-fw-org-policy-with-rules" + } + references: + provider: google-beta + rule.match.dest_address_groups: google_network_security_address_group.address_group_1.id + rule.match.src_address_groups: google_network_security_address_group.address_group_1.id + dependencies: + google_network_security_address_group.address_group_1: |- + { + "capacity": 100, + "description": "Global address group", + "items": [ + "208.80.154.224/32" + ], + "location": "global", + "name": "tf-address-group", + "parent": "organizations/123456789", + "provider": "${google-beta}", + "type": "IPV4" + } + google_network_security_security_profile.security_profile_1: |- + { + "location": "global", + "name": "tf-security-profile", + "parent": "organizations/123456789", + "provider": "${google-beta}", + "type": "THREAT_PREVENTION" + } + google_network_security_security_profile_group.security_profile_group_1: |- + { + "description": "my description", + "name": "tf-security-profile-group", + "parent": "organizations/123456789", + "provider": "${google-beta}", + "threat_prevention_profile": "${google_network_security_security_profile.security_profile_1.id}" + } + argumentDocs: + create: '- Default is 20 minutes.' + creation_timestamp: |- + - + Creation timestamp in RFC3339 text format. + delete: '- Default is 20 minutes.' + fingerprint: |- + - + Fingerprint of the resource. This field is used internally during updates of this resource. + id: '- an identifier for the resource with format locations/global/firewallPolicies/{{policy_id}}' + layer4_config.description: |- + - + (Optional) + An optional description of this resource. + layer4_config.ip_protocol: |- + - + (Required) + The IP protocol to which this rule applies. The protocol + type is required when creating a firewall rule. 
+ This value can either be one of the following well + known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), + or the IP protocol number. + layer4_config.ports: |- + - + (Optional) + An optional list of ports to which this rule applies. This field + is only applicable for UDP or TCP protocol. Each entry must be + either an integer or a range. If not specified, this rule + applies to connections through any port. + Example inputs include: ["22"], ["80","443"], and + ["12345-12349"]. + match.dest_address_groups: |- + - + (Optional) + Address groups which should be matched against the traffic destination. + Maximum number of destination address groups is 10. + match.dest_fqdns: |- + - + (Optional) + Fully Qualified Domain Name (FQDN) which should be matched against + traffic destination. Maximum number of destination fqdn allowed is 100. + match.dest_ip_ranges: |- + - + (Optional) + Destination IP address range in CIDR format. Required for + EGRESS rules. + match.dest_region_codes: |- + - + (Optional) + Region codes whose IP addresses will be used to match for destination + of traffic. Should be specified as 2 letter country code defined as per + ISO 3166 alpha-2 country codes. ex."US" + Maximum number of destination region codes allowed is 5000. + match.dest_threat_intelligences: |- + - + (Optional) + Names of Network Threat Intelligence lists. + The IPs in these lists will be matched against traffic destination. + match.layer4_config: |- + - + (Required) + Pairs of IP protocols and ports that the rule should match. + Structure is documented below. + match.src_address_groups: |- + - + (Optional) + Address groups which should be matched against the traffic source. + Maximum number of source address groups is 10. + match.src_fqdns: |- + - + (Optional) + Fully Qualified Domain Name (FQDN) which should be matched against + traffic source. Maximum number of source fqdn allowed is 100. 
+ match.src_ip_ranges: |- + - + (Optional) + Source IP address range in CIDR format. Required for + INGRESS rules. + match.src_region_codes: |- + - + (Optional) + Region codes whose IP addresses will be used to match for source + of traffic. Should be specified as 2 letter country code defined as per + ISO 3166 alpha-2 country codes. ex."US" + Maximum number of source region codes allowed is 5000. + match.src_threat_intelligences: |- + - + (Optional) + Names of Network Threat Intelligence lists. + The IPs in these lists will be matched against traffic source. + parent: |- + - + (Required) + The parent of this FirewallPolicy in the Cloud Resource Hierarchy. + Format: organizations/{organization_id} or folders/{folder_id} + policy_id: |- + - + The unique identifier for the resource. This identifier is defined by the server. + predefined_rules: |- + - + A list of pre-define firewall policy rules. + Structure is documented below. + predefined_rules.action: |- + - + (Output) + The Action to perform when the client connection triggers the rule. Can currently be either + "allow", "deny", "apply_security_profile_group" or "goto_next". + predefined_rules.description: |- + - + (Output) + A description of the rule. + predefined_rules.direction: |- + - + (Output) + The direction in which this rule applies. If unspecified an INGRESS rule is created. + predefined_rules.disabled: |- + - + (Output) + Denotes whether the firewall policy rule is disabled. When set to true, + the firewall policy rule is not enforced and traffic behaves as if it did + not exist. If this is unspecified, the firewall policy rule will be + enabled. + predefined_rules.enable_logging: |- + - + (Output) + Denotes whether to enable logging for a particular rule. + If logging is enabled, logs will be exported to the + configured export destination in Stackdriver. + predefined_rules.match: |- + - + (Output) + A match condition that incoming traffic is evaluated against. 
If it evaluates to true, the corresponding 'action' is enforced. + Structure is documented below. + predefined_rules.priority: |- + - + (Output) + An integer indicating the priority of a rule in the list. The priority must be a value + between 0 and 2147483647. Rules are evaluated from highest to lowest priority where 0 is the + highest priority and 2147483647 is the lowest priority. + predefined_rules.rule_name: |- + - + (Output) + An optional name for the rule. This field is not a unique identifier + and can be updated. + predefined_rules.security_profile_group: |- + - + (Output) + A fully-qualified URL of a SecurityProfile resource instance. + Example: + https://networksecurity.googleapis.com/v1/projects/{project}/locations/{location}/securityProfileGroups/my-security-profile-group + Must be specified if action is 'apply_security_profile_group'. + predefined_rules.target_resources: |- + - + (Output) + A list of network resource URLs to which this rule applies. + This field allows you to control which network's VMs get + this rule. If this field is left blank, all VMs + within the organization will receive the rule. + predefined_rules.target_service_accounts: |- + - + (Output) + A list of service accounts indicating the sets of + instances that are applied with this rule. + predefined_rules.tls_inspect: |- + - + (Output) + Boolean flag indicating if the traffic should be TLS decrypted. + It can be set only if action = 'apply_security_profile_group' and cannot be set for other actions. + rule: |- + - + (Required) + A list of firewall policy rules. + Structure is documented below. + rule.action: |- + - + (Required) + The Action to perform when the client connection triggers the rule. Can currently be either + "allow", "deny", "apply_security_profile_group" or "goto_next". + rule.description: |- + - + (Optional) + A description of the rule. + rule.direction: |- + - + (Optional) + The direction in which this rule applies. If unspecified an INGRESS rule is created. 
+ Possible values are: INGRESS, EGRESS. + rule.disabled: |- + - + (Optional) + Denotes whether the firewall policy rule is disabled. When set to true, + the firewall policy rule is not enforced and traffic behaves as if it did + not exist. If this is unspecified, the firewall policy rule will be + enabled. + rule.enable_logging: |- + - + (Optional) + Denotes whether to enable logging for a particular rule. + If logging is enabled, logs will be exported to the + configured export destination in Stackdriver. + rule.match: |- + - + (Required) + A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced. + Structure is documented below. + rule.priority: |- + - + (Required) + An integer indicating the priority of a rule in the list. The priority must be a value + between 0 and 2147483647. Rules are evaluated from highest to lowest priority where 0 is the + highest priority and 2147483647 is the lowest priority. + rule.rule_name: |- + - + (Optional) + An optional name for the rule. This field is not a unique identifier + and can be updated. + rule.security_profile_group: |- + - + (Optional) + A fully-qualified URL of a SecurityProfile resource instance. + Example: + https://networksecurity.googleapis.com/v1/projects/{project}/locations/{location}/securityProfileGroups/my-security-profile-group + Must be specified if action is 'apply_security_profile_group'. + rule.target_resources: |- + - + (Optional) + A list of network resource URLs to which this rule applies. + This field allows you to control which network's VMs get + this rule. If this field is left blank, all VMs + within the organization will receive the rule. + rule.target_service_accounts: |- + - + (Optional) + A list of service accounts indicating the sets of + instances that are applied with this rule. + rule.tls_inspect: |- + - + (Optional) + Boolean flag indicating if the traffic should be TLS decrypted. 
+ It can be set only if action = 'apply_security_profile_group' and cannot be set for other actions. + rule_tuple_count: |- + - + Total count of all firewall policy rule tuples. A firewall policy can not exceed a set number of tuples. + self_link: |- + - + Server-defined URL for the resource. + self_link_with_id: |- + - + Server-defined URL for this resource with the resource id. + short_name: |- + - + (Required) + A textual name of the security policy. + update: '- Default is 20 minutes.' + importStatements: [] google_compute_forwarding_rule: subCategory: Compute Engine description: A ForwardingRule resource. @@ -36268,6 +39455,7 @@ resources: { "load_balancing_scheme": "EXTERNAL_MANAGED", "name": "global-rule", + "network_tier": "PREMIUM", "port_range": "80", "target": "${google_compute_target_http_proxy.default.id}" } @@ -36794,6 +39982,9 @@ resources: (Required) The value that the label must match. The value has a maximum length of 1024 characters. + forwarding_rule_id: |- + - + The unique identifier number for the resource. This identifier is defined by the server. id: '- an identifier for the resource with format projects/{{project}}/global/forwardingRules/{{name}}' ip_address: |- - @@ -36896,6 +40087,19 @@ resources: be used. For Private Service Connect forwarding rules that forward traffic to Google APIs, a network must be provided. + network_tier: |- + - + (Optional) + This signifies the networking tier used for configuring + this load balancer and can only take the following values: + PREMIUM, STANDARD. + For regional ForwardingRule, the valid values are PREMIUM and + STANDARD. For GlobalForwardingRule, the valid value is + PREMIUM. + If this field is not specified, it is assumed to be PREMIUM. + If IPAddress is specified, this value must be equal to the + networkTier of the Address. + Possible values are: PREMIUM, STANDARD. 
no_automate_dns_zone: |- - (Optional) @@ -37449,6 +40653,57 @@ resources: } references: provider: google-beta + - name: http-health-check-with-source-regions + manifest: |- + { + "check_interval_sec": 30, + "http_health_check": [ + { + "port": 80, + "port_specification": "USE_FIXED_PORT" + } + ], + "name": "http-health-check", + "source_regions": [ + "us-west1", + "us-central1", + "us-east5" + ] + } + - name: https-health-check-with-source-regions + manifest: |- + { + "check_interval_sec": 30, + "https_health_check": [ + { + "port": 80, + "port_specification": "USE_FIXED_PORT" + } + ], + "name": "https-health-check", + "source_regions": [ + "us-west1", + "us-central1", + "us-east5" + ] + } + - name: tcp-health-check-with-source-regions + manifest: |- + { + "check_interval_sec": 30, + "name": "tcp-health-check", + "source_regions": [ + "us-west1", + "us-central1", + "us-east5" + ], + "tcp_health_check": [ + { + "port": 80, + "port_specification": "USE_FIXED_PORT" + } + ] + } argumentDocs: USE_FIXED_PORT: ': The port number in port is used for health checking.' USE_NAMED_PORT: ': The portName is used for health checking.' @@ -37478,7 +40733,7 @@ resources: grpc_health_check: |- - (Optional) - A nested object resource + A nested object resource. Structure is documented below. grpc_health_check.grpc_service_name: |- - @@ -37509,7 +40764,7 @@ resources: http_health_check: |- - (Optional) - A nested object resource + A nested object resource. Structure is documented below. http_health_check.host: |- - @@ -37553,7 +40808,7 @@ resources: http2_health_check: |- - (Optional) - A nested object resource + A nested object resource. Structure is documented below. http2_health_check.host: |- - @@ -37597,7 +40852,7 @@ resources: https_health_check: |- - (Optional) - A nested object resource + A nested object resource. Structure is documented below. 
https_health_check.host: |- - @@ -37675,7 +40930,7 @@ resources: ssl_health_check: |- - (Optional) - A nested object resource + A nested object resource. Structure is documented below. ssl_health_check.port: |- - @@ -37715,7 +40970,7 @@ resources: tcp_health_check: |- - (Optional) - A nested object resource + A nested object resource. Structure is documented below. tcp_health_check.port: |- - @@ -37936,43 +41191,73 @@ resources: manifest: |- { "name": "example-image", - "raw_disk": [ + "source_disk": "${google_compute_disk.persistent.id}" + } + references: + source_disk: google_compute_disk.persistent.id + dependencies: + google_compute_disk.persistent: |- { - "source": "https://storage.googleapis.com/bosh-gce-raw-stemcells/bosh-stemcell-97.98-google-kvm-ubuntu-xenial-go_agent-raw-1557960142.tar.gz" + "image": "${data.google_compute_image.debian.self_link}", + "name": "example-disk", + "size": 10, + "type": "pd-ssd", + "zone": "us-central1-a" } - ] - } - name: example manifest: |- { "guest_os_features": [ { - "type": "SECURE_BOOT" + "type": "UEFI_COMPATIBLE" }, { - "type": "MULTI_IP_SUBNET" + "type": "VIRTIO_SCSI_MULTIQUEUE" + }, + { + "type": "GVNIC" + }, + { + "type": "SEV_CAPABLE" + }, + { + "type": "SEV_LIVE_MIGRATABLE_V2" } ], "name": "example-image", - "raw_disk": [ + "source_disk": "${google_compute_disk.persistent.id}" + } + references: + source_disk: google_compute_disk.persistent.id + dependencies: + google_compute_disk.persistent: |- { - "source": "https://storage.googleapis.com/bosh-gce-raw-stemcells/bosh-stemcell-97.98-google-kvm-ubuntu-xenial-go_agent-raw-1557960142.tar.gz" + "image": "${data.google_compute_image.debian.self_link}", + "name": "example-disk", + "size": 10, + "type": "pd-ssd", + "zone": "us-central1-a" } - ] - } - name: example manifest: |- { "name": "example-sl-image", - "raw_disk": [ - { - "source": "https://storage.googleapis.com/bosh-gce-raw-stemcells/bosh-stemcell-97.98-google-kvm-ubuntu-xenial-go_agent-raw-1557960142.tar.gz" - } - 
], + "source_disk": "${google_compute_disk.persistent.id}", "storage_locations": [ "us-central1" ] } + references: + source_disk: google_compute_disk.persistent.id + dependencies: + google_compute_disk.persistent: |- + { + "image": "${data.google_compute_image.debian.self_link}", + "name": "example-disk", + "size": 10, + "type": "pd-ssd", + "zone": "us-central1-a" + } argumentDocs: archive_size_bytes: |- - @@ -38013,7 +41298,7 @@ resources: - (Required) The type of supported feature. Read Enabling guest operating system features to see a list of available options. - Possible values are: MULTI_IP_SUBNET, SECURE_BOOT, SEV_CAPABLE, UEFI_COMPATIBLE, VIRTIO_SCSI_MULTIQUEUE, WINDOWS, GVNIC, SEV_LIVE_MIGRATABLE, SEV_SNP_CAPABLE, SUSPEND_RESUME_COMPATIBLE, TDX_CAPABLE, SEV_LIVE_MIGRATABLE_V2. + Possible values are: MULTI_IP_SUBNET, SECURE_BOOT, SEV_CAPABLE, UEFI_COMPATIBLE, VIRTIO_SCSI_MULTIQUEUE, WINDOWS, GVNIC, IDPF, SEV_LIVE_MIGRATABLE, SEV_SNP_CAPABLE, SUSPEND_RESUME_COMPATIBLE, TDX_CAPABLE, SEV_LIVE_MIGRATABLE_V2. id: '- an identifier for the resource with format projects/{{project}}/global/images/{{name}}' image_encryption_key: |- - @@ -38246,9 +41531,12 @@ resources: See the docs for how to become verified as a domain owner. advanced_machine_features: (Optional) - Configure Nested Virtualisation and Simultaneous Hyper Threading on this VM. Structure is documented below - advanced_machine_features.enable_nested_virtualization: (Optional) Defines whether the instance should have nested virtualization enabled. Defaults to false. - advanced_machine_features.threads_per_core: (Optional) he number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. - advanced_machine_features.visible_core_count: (Optional) The number of physical cores to expose to an instance. visible cores info (VC). + advanced_machine_features.enable_nested_virtualization: '- (Optional) Defines whether the instance should have nested virtualization enabled. 
Defaults to false.' + advanced_machine_features.enable_uefi_networking: '- (Optional) Whether to enable UEFI networking for instance creation.' + advanced_machine_features.performance_monitoring_unit: '- (Optional) The PMU is a hardware component within the CPU core that monitors how the processor runs code. Valid values for the level of PMU are STANDARD, ENHANCED, and ARCHITECTURAL.' + advanced_machine_features.threads_per_core: '- (Optional) The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1.' + advanced_machine_features.turbo_mode: '- (Optional) Turbo frequency mode to use for the instance. Supported modes are currently either ALL_CORE_MAX or unset (default).' + advanced_machine_features.visible_core_count: '- (Optional) The number of physical cores to expose to an instance. visible cores info (VC).' alias_ip_range.ip_cidr_range: |- - The IP CIDR range represented by this alias IP range. This IP CIDR range must belong to the specified subnetwork and cannot contain IP addresses reserved by @@ -38323,12 +41611,16 @@ resources: packets with non-matching source or destination IPs. This defaults to false. confidential_instance_config: (Optional) - Enable Confidential Mode on this VM. Structure is documented below - confidential_instance_config.confidential_instance_type: '(Optional) Defines the confidential computing technology the instance uses. SEV is an AMD feature. TDX is an Intel feature. One of the following values is required: SEV, SEV_SNP, TDX. on_host_maintenance can be set to MIGRATE if confidential_instance_type is set to SEV and min_cpu_platform is set to "AMD Milan". Otherwise, on_host_maintenance has to be set to TERMINATE or this will fail to create the VM. If SEV_SNP, currently min_cpu_platform has to be set to "AMD Milan" or this will fail to create the VM. TDX is only available in beta.' 
+ confidential_instance_config.confidential_instance_type: '(Optional) Defines the confidential computing technology the instance uses. SEV is an AMD feature. TDX is an Intel feature. One of the following values is required: SEV, SEV_SNP, TDX. on_host_maintenance can be set to MIGRATE if confidential_instance_type is set to SEV and min_cpu_platform is set to "AMD Milan". Otherwise, on_host_maintenance has to be set to TERMINATE or this will fail to create the VM. If SEV_SNP, currently min_cpu_platform has to be set to "AMD Milan" or this will fail to create the VM.' confidential_instance_config.enable_confidential_compute: (Optional) Defines whether the instance should have confidential compute enabled with AMD SEV. If enabled, on_host_maintenance can be set to MIGRATE if min_cpu_platform is set to "AMD Milan". Otherwise, on_host_maintenance has to be set to TERMINATE or this will fail to create the VM. cpu_platform: '- The CPU platform used by this instance.' create: '- Default is 20 minutes.' - current_status: '- The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see Instance life cycle.`,' - custom-NUMBER_OF_CPUS-AMOUNT_OF_MEMORY_MB: ', e.g. custom-6-20480 for 6 vCPU and 20GB of RAM.' + creation_timestamp: '- Creation timestamp in RFC3339 text format.' + current_status: '- The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see Instance life cycle.' + custom-NUMBER_OF_CPUS-AMOUNT_OF_MEMORY_MB: |- + , e.g. custom-6-20480 for 6 vCPU and 20GB of RAM. 
+ Because of current API limitations some custom machine types may get converted to different machine types (such as an equivalent standard type) and cause non-empty plans in your configuration. Use + lifecycle.ignore_changes on machine_type in these cases. delete: '- Default is 20 minutes.' deletion_protection: |- - (Optional) Enable deletion protection on this instance. Defaults to false. @@ -38336,7 +41628,7 @@ resources: description: '- (Optional) A brief description of this resource.' desired_status: |- - (Optional) Desired status of the instance. Either - "RUNNING" or "TERMINATED". + "RUNNING", "SUSPENDED" or "TERMINATED". disk.0.disk_encryption_key_sha256: |- - The RFC 4648 base64 encoded SHA-256 hash of the [customer-supplied encryption key] @@ -38350,11 +41642,10 @@ resources: guest_accelerator: |- - (Optional) List of the type and count of accelerator cards attached to the instance. Structure documented below. Note: GPU accelerators can only be used with on_host_maintenance option set to TERMINATE. - Note: This field uses attr-as-block mode to avoid - breaking users during the 0.12 upgrade. To explicitly send a list - of zero objects you must use the following syntax: - example=[] - For more details about this behavior, see this section. + Note: As of 6.0.0, argument syntax + is no longer supported for this field in favor of block syntax. + To dynamically set a list of guest accelerators, use dynamic blocks. + To set an empty list, use a single guest_accelerator block with count = 0. guest_accelerator.count: (Required) - The number of the guest accelerator cards exposed to this instance. guest_accelerator.type: (Required) - The accelerator type resource to expose to this instance. E.g. nvidia-tesla-k80. hostname: |- @@ -38395,11 +41686,12 @@ resources: for an update of throughput every 4 hours. To update your hyperdisk more frequently, you'll need to manually delete and recreate it. 
initialize_params.resource_manager_tags: '- (Optional) A tag is a key-value pair that can be attached to a Google Cloud resource. You can use tags to conditionally allow or deny policies based on whether a resource has a specific tag. This value is not returned by the API. In Terraform, this value cannot be updated and changing it will recreate the resource.' + initialize_params.resource_policies: '- (Optional) A list of self_links of resource policies to attach to the instance''s boot disk. Modifying this list will cause the instance to recreate, so any external values are not set until the user specifies this field. Currently a max of 1 resource policy is supported.' initialize_params.size: |- - (Optional) The size of the image in gigabytes. If not specified, it will inherit the size of its base image. initialize_params.storage_pool: |- - - (Optional) The URL of the storage pool in which the new disk is created. + - (Optional) The URL or the name of the storage pool in which the new disk is created. For example: initialize_params.type: '- (Optional) The GCE disk type. Such as pd-standard, pd-balanced or pd-ssd.' instance_id: '- The server-assigned unique identifier of this instance.' @@ -38421,6 +41713,7 @@ resources: ipv6_access_type: |- - One of EXTERNAL, INTERNAL to indicate whether the IP can be accessed from the Internet. This field is always inherited from its subnetwork. + key_revocation_action_type: '- (optional) Action to be taken when a customer''s encryption key is revoked. Supports STOP and NONE, with NONE being the default.' label_fingerprint: '- The unique fingerprint of the labels.' labels: |- - (Optional) A map of key/value label pairs to assign to the instance. @@ -38472,7 +41765,7 @@ resources: is not accessible from the Internet. If omitted, ssh provisioners will not work unless Terraform can send traffic to the instance's network (e.g. via tunnel or because it is running on another cloud instance on that network). 
- This block can be repeated multiple times. Structure documented below. + This block can be specified once per network_interface. Structure documented below. network_interface.alias_ip_range: |- - (Optional) An array of alias IP ranges for this network interface. Can only be specified for network @@ -38489,7 +41782,7 @@ resources: network_interface.network_ip: |- - (Optional) The private IP address to assign to the instance. If empty, the address will be automatically assigned. - network_interface.nic_type: '- (Optional) The type of vNIC to be used on this interface. Possible values: GVNIC, VIRTIO_NET.' + network_interface.nic_type: '- (Optional) The type of vNIC to be used on this interface. Possible values: GVNIC, VIRTIO_NET, IDPF. In the beta provider the additional values of MRDMA and IRDMA are supported.' network_interface.queue_count: '- (Optional) The networking queue count that''s specified by users for the network interface. Both Rx and Tx queues will be set to this number. It will be empty if not specified.' network_interface.security_policy: '- (Optional) Beta A full or partial URL to a security policy to add to this instance. If this field is set to an empty string it will remove the associated security policy.' network_interface.stack_type: '- (Optional) The stack type for this network interface to identify whether the IPv6 feature is enabled or not. Values are IPV4_IPV6 or IPV4_ONLY. If not specified, IPV4_ONLY will be used.' @@ -38503,7 +41796,7 @@ resources: in custom subnet mode, specifying the subnetwork is required. network_interface.subnetwork_project: |- - (Optional) The project in which the subnetwork belongs. - If the subnetwork is a self_link, this field is ignored in favor of the project + If the subnetwork is a self_link, this field is set to the project defined in the subnetwork self_link. If the subnetwork is a name and this field is not provided, the provider project is used. 
network_performance_config: |- @@ -38545,6 +41838,8 @@ resources: - (Optional) Specifies if the instance should be restarted if it was terminated by Compute Engine (not a user). Defaults to true. + scheduling.availability_domain: '- (Optional) Specifies the availability domain to place the instance in. The value must be a number between 1 and the number of availability domains specified in the spread placement policy attached to the instance.' + scheduling.host_error_timeout_seconds: '- (Optional) Beta Specifies the time in seconds for host error detection, the value must be within the range of [90, 330] with the increment of 30, if unset, the default behavior of host error recovery will be used.' scheduling.instance_termination_action: '- (Optional) Describe the type of termination action for VM. Can be STOP or DELETE. Read more on here' scheduling.local_ssd_recovery_timeout: |- - (Optional) (https://terraform.io/docs/providers/google/guides/provider_versions.html) Specifies the maximum amount of time a Local Ssd Vm should wait while recovery of the Local Ssd state is attempted. Its value should be in between 0 and 168 hours with hour granularity and the default value being 1 hour. Structure is documented below. @@ -38946,7 +42241,6 @@ resources: { "base_instance_name": "tf-sr-igm-instance", "name": "tf-sr-igm", - "provider": "${google-beta}", "standby_policy": [ { "initial_delay_sec": 30, @@ -38965,7 +42259,6 @@ resources: "zone": "us-central1-a" } references: - provider: google-beta version.instance_template: google_compute_instance_template.sr-igm.self_link argumentDocs: all_instances_config: |- @@ -39026,7 +42319,7 @@ resources: - (Optional) The ID of the project in which the resource belongs. If it is not provided, the provider project is used. self_link: '- The URL of the created resource.' - standby_policy: '- (Optional Beta) The standby policy for stopped and suspended instances. Structure is documented below. 
For more information, see the official documentation and API' + standby_policy: '- (Optional) The standby policy for stopped and suspended instances. Structure is documented below. For more information, see the official documentation.' standby_policy.initial_delay_sec: '- (Optional) - Specifies the number of seconds that the MIG should wait to suspend or stop a VM after that VM was created. The initial delay gives the initialization script the time to prepare your VM for a quick scale out. The value of initial delay must be between 0 and 3600 seconds. The default value is 0.' standby_policy.mode: '- (Optional) - Defines how a MIG resumes or starts VMs from a standby pool when the group scales out. Valid options are: MANUAL, SCALE_OUT_POOL. If MANUAL(default), you have full control over which VMs are stopped and suspended in the MIG. If SCALE_OUT_POOL, the MIG uses the VMs from the standby pools to accelerate the scale out by resuming or starting them and then automatically replenishes the standby pool with new VMs to maintain the target sizes.' stateful.has_stateful_config: '- A bit indicating whether the managed instance group has stateful configuration, that is, if you have configured any items in a stateful policy or in per-instance configs. The group might report that it has no stateful config even when there is still some preserved state on a managed instance, for example, if you have deleted all PICs but not yet applied those deletions.' @@ -39059,8 +42352,8 @@ resources: - (Optional), The number of instances (calculated as percentage) which are managed for this version. Conflicts with fixed. Note that when using percent, rounding will be in favor of explicitly set target_size values; a managed instance group with 2 instances and 2 versions, one of which has a target_size.percent of 60 will create 2 instances of that version. - target_stopped_size: '- (Optional Beta) The target number of stopped instances for this managed instance group.' 
- target_suspended_size: '- (Optional Beta) The target number of suspended instances for this managed instance group.' + target_stopped_size: '- (Optional) The target number of stopped instances for this managed instance group.' + target_suspended_size: '- (Optional) The target number of suspended instances for this managed instance group.' update: '- Default is 15 minutes.' update_policy: '- (Optional) The update policy for this managed instance group. Structure is documented below. For more information, see the official documentation and API.' update_policy.max_surge_fixed: '- (Optional), Specifies a fixed number of VM instances. This must be a positive integer. Conflicts with max_surge_percent. Both cannot be 0.' @@ -39077,7 +42370,7 @@ resources: version deals with a specific instance template, allowing canary release scenarios. Structure is documented below. version.instance_template: '- (Required) - The full URL to an instance template from which all new instances of this version will be created. It is recommended to reference instance templates through their unique id (self_link_unique attribute).' - version.name: '- (Required) - Version name.' + version.name: '- (Optional) - Version name.' version.target_size: '- (Optional) - The number of instances calculated as a fixed number or a percentage depending on the settings. Structure is documented below.' version_target.version_target: '- A bit indicating whether version target has been reached in this managed instance group, i.e. all instances are in their target version. Instances'' target version are specified by version field on Instance Group Manager.' 
wait_for_instances: |- @@ -39187,7 +42480,7 @@ resources: } google_container_cluster.my_cluster: |- { - "deletion_protection": "true", + "deletion_protection": true, "initial_node_count": 1, "ip_allocation_policy": [ { @@ -39225,7 +42518,7 @@ resources: } google_container_cluster.my_cluster: |- { - "deletion_protection": "true", + "deletion_protection": true, "initial_node_count": 1, "ip_allocation_policy": [ { @@ -39550,9 +42843,12 @@ resources: this instance template. This field can take the following values: PREMIUM, STANDARD or FIXED_STANDARD. If this field is not specified, it is assumed to be PREMIUM. advanced_machine_features: (Optional) - Configure Nested Virtualisation and Simultaneous Hyper Threading on this VM. Structure is documented below - advanced_machine_features.enable_nested_virtualization: (Optional) Defines whether the instance should have nested virtualization enabled. Defaults to false. - advanced_machine_features.threads_per_core: (Optional) The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. - advanced_machine_features.visible_core_count: (Optional, Beta) The number of physical cores to expose to an instance. visible cores info (VC). + advanced_machine_features.enable_nested_virtualization: '- (Optional) Defines whether the instance should have nested virtualization enabled. Defaults to false.' + advanced_machine_features.enable_uefi_networking: '- (Optional) Whether to enable UEFI networking for instance creation.' + advanced_machine_features.performance_monitoring_unit: '- (Optional) The PMU is a hardware component within the CPU core that monitors how the processor runs code. Valid values for the level of PMU are STANDARD, ENHANCED, and ARCHITECTURAL.' + advanced_machine_features.threads_per_core: '- (Optional) The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1.' 
+ advanced_machine_features.turbo_mode: '- (Optional) Turbo frequency mode to use for the instance. Supported modes are currently either ALL_CORE_MAX or unset (default).' + advanced_machine_features.visible_core_count: '- (Optional) The number of physical cores to expose to an instance. visible cores info (VC).' alias_ip_range.ip_cidr_range: |- - The IP CIDR range represented by this alias IP range. This IP CIDR range must belong to the specified subnetwork and cannot contain IP addresses reserved by @@ -39567,9 +42863,10 @@ resources: - (Optional) Whether to allow sending and receiving of packets with non-matching source or destination IPs. This defaults to false. confidential_instance_config: (Optional) - Enable Confidential Mode on this VM. Structure is documented below - confidential_instance_config.confidential_instance_type: '(Optional) Defines the confidential computing technology the instance uses. SEV is an AMD feature. TDX is an Intel feature. One of the following values is required: SEV, SEV_SNP, TDX. on_host_maintenance can be set to MIGRATE if confidential_instance_type is set to SEV and min_cpu_platform is set to "AMD Milan". Otherwise, on_host_maintenance has to be set to TERMINATE or this will fail to create the VM. If SEV_SNP, currently min_cpu_platform has to be set to "AMD Milan" or this will fail to create the VM. TDX is only available in beta.' + confidential_instance_config.confidential_instance_type: '(Optional) Defines the confidential computing technology the instance uses. SEV is an AMD feature. TDX is an Intel feature. One of the following values is required: SEV, SEV_SNP, TDX. on_host_maintenance can be set to MIGRATE if confidential_instance_type is set to SEV and min_cpu_platform is set to "AMD Milan". Otherwise, on_host_maintenance has to be set to TERMINATE or this will fail to create the VM. If SEV_SNP, currently min_cpu_platform has to be set to "AMD Milan" or this will fail to create the VM.' 
confidential_instance_config.enable_confidential_compute: (Optional) Defines whether the instance should have confidential compute enabled with AMD SEV. If enabled, on_host_maintenance can be set to MIGRATE if min_cpu_platform is set to "AMD Milan". Otherwise, on_host_maintenance has to be set to TERMINATE or this will fail to create the VM. create: '- Default is 4 minutes.' + creation_timestamp: '- Creation timestamp in RFC3339 text format.' custom-VCPUS-MEM_IN_MB: like custom-6-20480 for 6 vCPU and 20GB of RAM. delete: '- Default is 4 minutes.' description: '- (Optional) A brief description of this resource.' @@ -39658,6 +42955,7 @@ resources: ipv6_access_config.network_tier: |- - (Optional) The service-level to be provided for IPv6 traffic when the subnet has an external subnet. Only PREMIUM and STANDARD tier is valid for IPv6. + key_revocation_action_type: '- (optional) Action to be taken when a customer''s encryption key is revoked. Supports STOP and NONE, with NONE being the default.' labels: |- - (Optional) A set of key/value label pairs to assign to instances created from this template. @@ -39688,7 +42986,9 @@ resources: this blank, Terraform will auto-generate a unique name. name_prefix: |- - (Optional) Creates a unique name beginning with the specified - prefix. Conflicts with name. + prefix. Conflicts with name. Max length is 54 characters. + Prefixes with lengths longer than 37 characters will use a shortened + UUID that will be more prone to collisions. network_interface: |- - (Required) Networks to attach to instances created from this template. This can be specified multiple times for multiple networks. @@ -39699,7 +42999,7 @@ resources: is not accessible from the Internet (this means that ssh provisioners will not work unless you are running Terraform can send traffic to the instance's network (e.g. via tunnel or because it is running on another cloud instance - on that network). This block can be repeated multiple times. Structure documented below. 
+ on that network). This block can be specified once per network_interface. Structure documented below. network_interface.alias_ip_range: |- - (Optional) An array of alias IP ranges for this network interface. Can only be specified for network @@ -39716,7 +43016,7 @@ resources: network_interface.network_ip: |- - (Optional) The private IP address to assign to the instance. If empty, the address will be automatically assigned. - network_interface.nic_type: '- (Optional) The type of vNIC to be used on this interface. Possible values: GVNIC, VIRTIO_NET.' + network_interface.nic_type: '- (Optional) The type of vNIC to be used on this interface. Possible values: GVNIC, VIRTIO_NET. In the beta provider the additional values of MRDMA and IRDMA are supported.' network_interface.queue_count: '- (Optional) The networking queue count that''s specified by users for the network interface. Both Rx and Tx queues will be set to this number. It will be empty if not specified.' network_interface.stack_type: '- (Optional) The stack type for this network interface to identify whether the IPv6 feature is enabled or not. Values are IPV4_IPV6 or IPV4_ONLY. If not specified, IPV4_ONLY will be used.' network_interface.subnetwork: |- @@ -39767,6 +43067,8 @@ resources: - (Optional) Specifies whether the instance should be automatically restarted if it is terminated by Compute Engine (not terminated by a user). This defaults to true. + scheduling.availability_domain: '- (Optional) Specifies the availability domain to place the instance in. The value must be a number between 1 and the number of availability domains specified in the spread placement policy attached to the instance.' + scheduling.host_error_timeout_seconds: '- (Optional) Beta Specifies the time in seconds for host error detection, the value must be within the range of [90, 330] with the increment of 30, if unset, the default behavior of host error recovery will be used.' 
scheduling.instance_termination_action: '- (Optional) Describe the type of termination action for SPOT VM. Can be STOP or DELETE. Read more on here' scheduling.local_ssd_recovery_timeout: |- - (Optional) (https://terraform.io/docs/providers/google/guides/provider_versions.html) Specifies the maximum amount of time a Local Ssd Vm should wait while recovery of the Local Ssd state is attempted. Its value should be in between 0 and 168 hours with hour granularity and the default value being 1 hour. Structure is documented below. @@ -39899,9 +43201,10 @@ resources: Creation timestamp in RFC3339 text format. customer_name: |- - - (Required) + (Optional) Customer name, to put in the Letter of Authorization as the party authorized to request a - crossconnect. + crossconnect. This field is required for Dedicated and Partner Interconnect, should not be specified + for cross-cloud interconnect. delete: '- Default is 20 minutes.' description: |- - @@ -39985,23 +43288,16 @@ resources: bundle, not the speed of the entire bundle. Can take one of the following values: location: |- - - (Required) + (Optional) URL of the InterconnectLocation object that represents where this connection is to be provisioned. + Specifies the location inside Google's Networks, should not be passed in case of cross-cloud interconnect. macsec: |- - (Optional) Configuration that enables Media Access Control security (MACsec) on the Cloud Interconnect connection between Google and your on-premises router. Structure is documented below. - macsec.pre_shared_keys: |- - - - (Required) - A keychain placeholder describing a set of named key objects along with their - start times. A MACsec CKN/CAK is generated for each key in the key chain. - Google router automatically picks the key with the most recent startTime when establishing - or re-establishing a MACsec secure link. - Structure is documented below. 
- macsec.pre_shared_keys.fail_open: |- + macsec.fail_open: |- - (Optional) If set to true, the Interconnect connection is configured with a should-secure @@ -40009,22 +43305,14 @@ resources: traffic if the MKA session cannot be established. By default, the Interconnect connection is configured with a must-secure security policy that drops all traffic if the MKA session cannot be established with your router. - macsec.pre_shared_keys.name: |- + macsec.pre_shared_keys: |- - (Required) - A name for this pre-shared key. The name must be 1-63 characters long, and - comply with RFC1035. Specifically, the name must be 1-63 characters long and match - the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character - must be a lowercase letter, and all following characters must be a dash, lowercase - letter, or digit, except the last character, which cannot be a dash. - macsec.pre_shared_keys.start_time: |- - - - (Optional) - A RFC3339 timestamp on or after which the key is valid. startTime can be in the - future. If the keychain has a single key, startTime can be omitted. If the keychain - has multiple keys, startTime is mandatory for each key. The start times of keys must - be in increasing order. The start times of two consecutive keys must be at least 6 - hours apart. + A keychain placeholder describing a set of named key objects along with their + start times. A MACsec CKN/CAK is generated for each key in the key chain. + Google router automatically picks the key with the most recent startTime when establishing + or re-establishing a MACsec secure link. + Structure is documented below. macsec_enabled: |- - (Optional) @@ -40054,6 +43342,31 @@ resources: IP address configured on the customer side of the Interconnect link. The customer should configure this IP address during turnup when prompted by Google NOC. This can be used only for ping tests. 
+ pre_shared_keys.fail_open: |- + - + (Optional, Deprecated) + If set to true, the Interconnect connection is configured with a should-secure + MACsec security policy, that allows the Google router to fallback to cleartext + traffic if the MKA session cannot be established. By default, the Interconnect + connection is configured with a must-secure security policy that drops all traffic + if the MKA session cannot be established with your router. + pre_shared_keys.failOpen: is deprecated and will be removed in a future major release. Use other failOpen instead. + pre_shared_keys.name: |- + - + (Required) + A name for this pre-shared key. The name must be 1-63 characters long, and + comply with RFC1035. Specifically, the name must be 1-63 characters long and match + the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character + must be a lowercase letter, and all following characters must be a dash, lowercase + letter, or digit, except the last character, which cannot be a dash. + pre_shared_keys.start_time: |- + - + (Optional) + A RFC3339 timestamp on or after which the key is valid. startTime can be in the + future. If the keychain has a single key, startTime can be omitted. If the keychain + has multiple keys, startTime is mandatory for each key. The start times of keys must + be in increasing order. The start times of two consecutive keys must be at least 6 + hours apart. project: |- - (Optional) The ID of the project in which the resource belongs. If it is not provided, the provider project is used. @@ -40068,11 +43381,12 @@ resources: requested_features: |- - (Optional) - interconnects.list of features requested for this Interconnect connection. Options: MACSEC ( + interconnects.list of features requested for this Interconnect connection. Options: IF_MACSEC ( If specified then the connection is created on MACsec capable hardware ports. 
If not specified, the default value is false, which allocates non-MACsec capable ports first if - available). - Each value may be one of: MACSEC. + available). Note that MACSEC is still technically allowed for compatibility reasons, but it + does not work with the API, and will be removed in an upcoming major version. + Each value may be one of: MACSEC, IF_MACSEC. requested_link_count: |- - (Required) @@ -40781,6 +44095,40 @@ resources: "network_firewall_policy_enforcement_order": "BEFORE_CLASSIC_FIREWALL", "project": "my-project-name" } + - name: vpc_network + manifest: |- + { + "name": "vpc-network", + "project": "my-project-name", + "provider": "${google-beta}", + "routing_mode": "GLOBAL" + } + references: + provider: google-beta + - name: vpc_network + manifest: |- + { + "bgp_best_path_selection_mode": "STANDARD", + "name": "vpc-network", + "project": "my-project-name", + "provider": "${google-beta}", + "routing_mode": "GLOBAL" + } + references: + provider: google-beta + - name: vpc_network + manifest: |- + { + "bgp_always_compare_med": true, + "bgp_best_path_selection_mode": "STANDARD", + "bgp_inter_region_cost": "ADD_COST_TO_MED", + "name": "vpc-network", + "project": "my-project-name", + "provider": "${google-beta}", + "routing_mode": "GLOBAL" + } + references: + provider: google-beta argumentDocs: auto_create_subnetworks: |- - @@ -40790,6 +44138,21 @@ resources: 10.128.0.0/9 address range. When set to false, the network is created in "custom subnet mode" so the user can explicitly connect subnetwork resources. + bgp_always_compare_med: |- + - + (Optional, Beta) + Enables/disables the comparison of MED across routes with different Neighbor ASNs. + This value can only be set if the --bgp-best-path-selection-mode is STANDARD + bgp_best_path_selection_mode: |- + - + (Optional, Beta) + The BGP best selection algorithm to be employed. MODE can be LEGACY or STANDARD. + Possible values are: LEGACY, STANDARD. 
+ bgp_inter_region_cost: |- + - + (Optional, Beta) + Choice of the behavior of inter-regional cost and MED in the BPS algorithm. + Possible values are: DEFAULT, ADD_COST_TO_MED. create: '- Default is 20 minutes.' delete: '- Default is 20 minutes.' delete_default_routes_on_create: |- @@ -40842,8 +44205,18 @@ resources: Set the order that Firewall Rules and Firewall Policies are evaluated. Default value is AFTER_CLASSIC_FIREWALL. Possible values are: BEFORE_CLASSIC_FIREWALL, AFTER_CLASSIC_FIREWALL. + network_id: |- + - + The unique identifier for the resource. This identifier is defined by the server. + network_profile: |- + - + (Optional, Beta) + A full or partial URL of the network profile to apply to this network. + This field can be set only at resource creation time. For example, the + following are valid URLs: numeric_id: |- - + (Deprecated) The unique identifier for the resource. This identifier is defined by the server. project: |- - (Optional) The ID of the project in which the resource belongs. @@ -40903,6 +44276,7 @@ resources: google_project.accepted_producer_project: |- { "billing_account": "000000-0000000-0000000-000000", + "deletion_policy": "DELETE", "name": "prj-accepted", "org_id": "123456789", "project_id": "prj-accepted" @@ -40910,6 +44284,7 @@ resources: google_project.rejected_producer_project: |- { "billing_account": "000000-0000000-0000000-000000", + "deletion_policy": "DELETE", "name": "prj-rejected", "org_id": "123456789", "project_id": "prj-rejected" @@ -41409,7 +44784,7 @@ resources: argumentDocs: create: '- Default is 20 minutes.' delete: '- Default is 20 minutes.' 
- id: '- an identifier for the resource with format {{project}}/{{zone}}/{{network_endpoint_group}}/endpoints' + id: '- an identifier for the resource with format {{project}}/{{zone}}/{{network_endpoint_group}}' network_endpoint_group: |- - (Required) @@ -41501,26 +44876,27 @@ resources: name: google_compute_network_firewall_policy_association title: "" examples: - - name: primary + - name: default manifest: |- { "attachment_target": "${google_compute_network.network.id}", - "firewall_policy": "${google_compute_network_firewall_policy.network_firewall_policy.name}", - "name": "association", + "firewall_policy": "${google_compute_network_firewall_policy.policy.id}", + "name": "my-association", "project": "my-project-name" } references: attachment_target: google_compute_network.network.id - firewall_policy: google_compute_network_firewall_policy.network_firewall_policy.name + firewall_policy: google_compute_network_firewall_policy.policy.id dependencies: google_compute_network.network: |- { - "name": "network" + "auto_create_subnetworks": false, + "name": "my-network" } - google_compute_network_firewall_policy.network_firewall_policy: |- + google_compute_network_firewall_policy.policy: |- { "description": "Sample global network firewall policy", - "name": "policy", + "name": "my-policy", "project": "my-project-name" } argumentDocs: @@ -41533,23 +44909,22 @@ resources: firewall_policy: |- - (Required) - The firewall policy ID of the association. + The firewall policy of the resource. id: '- an identifier for the resource with format projects/{{project}}/global/firewallPolicies/{{firewall_policy}}/associations/{{name}}' name: |- - (Required) The name for an association. project: |- - - - (Optional) - The project for the resource + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. short_name: |- - The short name of the firewall policy of the association. 
importStatements: [] google_compute_network_firewall_policy_rule: subCategory: Compute Engine - description: The Compute NetworkFirewallPolicyRule resource + description: Represents a rule that describes one or more match conditions along with the action to be taken when traffic matches this condition (allow or deny). name: google_compute_network_firewall_policy_rule title: "" examples: @@ -41583,7 +44958,7 @@ resources: ], "src_secure_tags": [ { - "name": "tagValues/${google_tags_tag_value.basic_value.name}" + "name": "${google_tags_tag_value.basic_value.id}" } ], "src_threat_intelligences": [ @@ -41600,6 +44975,7 @@ resources: references: firewall_policy: google_compute_network_firewall_policy.basic_network_firewall_policy.name match.src_address_groups: google_network_security_address_group.basic_global_networksecurity_address_group.id + match.src_secure_tags.name: google_tags_tag_value.basic_value.id dependencies: google_compute_network.basic_network: |- { @@ -41619,7 +44995,7 @@ resources: "208.80.154.224/32" ], "location": "global", - "name": "policy", + "name": "address", "parent": "projects/my-project-name", "type": "IPV4" } @@ -41636,7 +45012,7 @@ resources: google_tags_tag_value.basic_value: |- { "description": "For valuename resources.", - "parent": "tagKeys/${google_tags_tag_key.basic_key.name}", + "parent": "${google_tags_tag_key.basic_key.id}", "short_name": "tagvalue" } argumentDocs: @@ -41645,11 +45021,15 @@ resources: (Required) The Action to perform when the client connection triggers the rule. Valid actions are "allow", "deny", "goto_next" and "apply_security_profile_group". create: '- Default is 20 minutes.' + creation_timestamp: |- + - + Creation timestamp in RFC3339 text format. delete: '- Default is 20 minutes.' direction: |- - (Required) - The direction in which this rule applies. Possible values: INGRESS, EGRESS + The direction in which this rule applies. + Possible values are: INGRESS, EGRESS. 
firewall_policy: |- - (Required) @@ -41658,123 +45038,624 @@ resources: kind: |- - Type of the resource. Always compute#firewallPolicyRule for firewall policy rules - layer4_configs.description: |- + layer4_configs.ip_protocol: |- + - + (Required) + The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. + This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number. + layer4_configs.ports: |- - (Optional) - An optional description for this resource. - layer4_configs.disabled: |- + An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. + Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. + match: |- + - + (Required) + A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced. + Structure is documented below. + match.dest_address_groups: |- - (Optional) - Denotes whether the firewall policy rule is disabled. When set to true, the firewall policy rule is not enforced and traffic behaves as if it did not exist. If this is unspecified, the firewall policy rule will be enabled. - layer4_configs.enable_logging: |- + Address groups which should be matched against the traffic destination. Maximum number of destination address groups is 10. + match.dest_fqdns: |- - (Optional) - Denotes whether to enable logging for a particular rule. If logging is enabled, logs will be exported to the configured export destination in Stackdriver. Logs may be exported to BigQuery or Pub/Sub. Note: you cannot enable logging on "goto_next" rules. - layer4_configs.ip_protocol: |- + Fully Qualified Domain Name (FQDN) which should be matched against traffic destination. Maximum number of destination fqdn allowed is 100. 
+ match.dest_ip_ranges: |- + - + (Optional) + CIDR IP address range. Maximum number of destination CIDR IP ranges allowed is 5000. + match.dest_region_codes: |- + - + (Optional) + Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. + match.dest_threat_intelligences: |- + - + (Optional) + Names of Network Threat Intelligence lists. The IPs in these lists will be matched against traffic destination. + match.layer4_configs: |- - (Required) - The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number. - layer4_configs.ports: |- + Pairs of IP protocols and ports that the rule should match. + Structure is documented below. + match.src_address_groups: |- - (Optional) - An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. Example inputs include: ``. - layer4_configs.project: |- + Address groups which should be matched against the traffic source. Maximum number of source address groups is 10. + match.src_fqdns: |- - (Optional) - The project for the resource - layer4_configs.rule_name: |- + Fully Qualified Domain Name (FQDN) which should be matched against traffic source. Maximum number of source fqdn allowed is 100. + match.src_ip_ranges: |- + - + (Optional) + CIDR IP address range. Maximum number of source CIDR IP ranges allowed is 5000. + match.src_region_codes: |- + - + (Optional) + Region codes whose IP addresses will be used to match for source of traffic. 
Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of source region codes allowed is 5000. + match.src_secure_tags: |- + - + (Optional) + List of secure tag values, which should be matched at the source of the traffic. For INGRESS rule, if all the srcSecureTag are INEFFECTIVE, and there is no srcIpRange, this rule will be ignored. Maximum number of source tag values allowed is 256. + Structure is documented below. + match.src_threat_intelligences: |- + - + (Optional) + Names of Network Threat Intelligence lists. The IPs in these lists will be matched against traffic source. + priority: |- + - + (Required) + An integer indicating the priority of a rule in the list. + The priority must be a positive value between 0 and 2147483647. + Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest priority. + rule_tuple_count: |- + - + Calculation of the complexity of a single firewall policy rule. + src_secure_tags.description: |- + - + (Optional) + An optional description for this resource. + src_secure_tags.disabled: |- + - + (Optional) + Denotes whether the firewall policy rule is disabled. + When set to true, the firewall policy rule is not enforced and traffic behaves as if it did not exist. + If this is unspecified, the firewall policy rule will be enabled. + src_secure_tags.enable_logging: |- + - + (Optional) + Denotes whether to enable logging for a particular rule. + If logging is enabled, logs will be exported to the configured export destination in Stackdriver. + Logs may be exported to BigQuery or Pub/Sub. + Note: you cannot enable logging on "goto_next" rules. + src_secure_tags.name: |- + - + (Optional) + Name of the secure tag, created with TagManager's TagValue API. + src_secure_tags.project: |- + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used.
+ src_secure_tags.rule_name: |- - (Optional) An optional name for the rule. This field is not a unique identifier and can be updated. - layer4_configs.security_profile_group: |- + src_secure_tags.security_profile_group: |- - (Optional) - A fully-qualified URL of a SecurityProfileGroup resource. Example: https://networksecurity.googleapis.com/v1/organizations/{organizationId}/locations/global/securityProfileGroups/my-security-profile-group. It must be specified if action = 'apply_security_profile_group' and cannot be specified for other actions. - layer4_configs.target_secure_tags: |- + A fully-qualified URL of a SecurityProfile resource instance. + Example: https://networksecurity.googleapis.com/v1/projects/{project}/locations/{location}/securityProfileGroups/my-security-profile-group + Must be specified if action = 'apply_security_profile_group' and cannot be specified for other actions. + src_secure_tags.state: |- + - + (Output) + State of the secure tag, either EFFECTIVE or INEFFECTIVE. A secure tag is INEFFECTIVE when it is deleted or its network is deleted. + src_secure_tags.target_secure_tags: |- - (Optional) - A list of secure tags that controls which instances the firewall rule applies to. If targetSecureTag are specified, then the firewall rule applies only to instances in the VPC network that have one of those EFFECTIVE secure tags, if all the target_secure_tag are in INEFFECTIVE state, then this rule will be ignored. targetSecureTag may not be set at the same time as targetServiceAccounts. If neither targetServiceAccounts nor targetSecureTag are specified, the firewall rule applies to all instances on the specified network. Maximum number of target label tags allowed is 256. - layer4_configs.target_service_accounts: |- + A list of secure tags that controls which instances the firewall rule applies to. 
+ If targetSecureTag are specified, then the firewall rule applies only to instances in the VPC network that have one of those EFFECTIVE secure tags, if all the targetSecureTag are in INEFFECTIVE state, then this rule will be ignored. + targetSecureTag may not be set at the same time as targetServiceAccounts. If neither targetServiceAccounts nor targetSecureTag are specified, the firewall rule applies to all instances on the specified network. Maximum number of target label tags allowed is 256. + Structure is documented below. + src_secure_tags.target_service_accounts: |- - (Optional) A list of service accounts indicating the sets of instances that are applied with this rule. - layer4_configs.tls_inspect: |- + src_secure_tags.tls_inspect: |- - (Optional) - Boolean flag indicating if the traffic should be TLS decrypted. It can be set only if action = 'apply_security_profile_group' and cannot be set for other actions. - match: |- + Boolean flag indicating if the traffic should be TLS decrypted. + Can be set only if action = 'apply_security_profile_group' and cannot be set for other actions. + target_secure_tags.name: |- + - + (Optional) + Name of the secure tag, created with TagManager's TagValue API. + target_secure_tags.state: |- + - + (Output) + State of the secure tag, either EFFECTIVE or INEFFECTIVE. A secure tag is INEFFECTIVE when it is deleted or its network is deleted. + update: '- Default is 20 minutes.' 
+ importStatements: [] + google_compute_network_firewall_policy_with_rules: + subCategory: Compute Engine + description: The Compute NetworkFirewallPolicy with rules resource + name: google_compute_network_firewall_policy_with_rules + title: "" + examples: + - name: network-firewall-policy-with-rules + manifest: |- + { + "description": "Terraform test", + "name": "tf-fw-policy-with-rules", + "provider": "${google-beta}", + "rule": [ + { + "action": "allow", + "description": "tcp rule", + "direction": "EGRESS", + "enable_logging": true, + "match": [ + { + "dest_address_groups": [ + "${google_network_security_address_group.address_group_1.id}" + ], + "dest_fqdns": [ + "www.yyy.com", + "www.zzz.com" + ], + "dest_ip_ranges": [ + "11.100.0.1/32" + ], + "dest_region_codes": [ + "HK", + "IN" + ], + "dest_threat_intelligences": [ + "iplist-search-engines-crawlers", + "iplist-tor-exit-nodes" + ], + "layer4_config": [ + { + "ip_protocol": "tcp", + "ports": [ + 8080, + 7070 + ] + } + ] + } + ], + "priority": 1000, + "target_secure_tag": [ + { + "name": "${google_tags_tag_value.secure_tag_value_1.id}" + } + ] + }, + { + "action": "deny", + "description": "udp rule", + "direction": "INGRESS", + "disabled": true, + "enable_logging": false, + "match": [ + { + "layer4_config": [ + { + "ip_protocol": "udp" + } + ], + "src_address_groups": [ + "${google_network_security_address_group.address_group_1.id}" + ], + "src_fqdns": [ + "www.abc.com", + "www.def.com" + ], + "src_ip_ranges": [ + "0.0.0.0/0" + ], + "src_region_codes": [ + "US", + "CA" + ], + "src_secure_tag": [ + { + "name": "${google_tags_tag_value.secure_tag_value_1.id}" + } + ], + "src_threat_intelligences": [ + "iplist-known-malicious-ips", + "iplist-public-clouds" + ] + } + ], + "priority": 2000 + }, + { + "action": "apply_security_profile_group", + "description": "security profile group rule", + "direction": "INGRESS", + "enable_logging": false, + "match": [ + { + "layer4_config": [ + { + "ip_protocol": "tcp" + } + ], + 
"src_ip_ranges": [ + "0.0.0.0/0" + ] + } + ], + "priority": 3000, + "rule_name": "tcp rule", + "security_profile_group": "//networksecurity.googleapis.com/${google_network_security_security_profile_group.security_profile_group_1.id}", + "target_service_accounts": [ + "test@google.com" + ], + "tls_inspect": true + } + ] + } + references: + provider: google-beta + rule.match.dest_address_groups: google_network_security_address_group.address_group_1.id + rule.match.src_address_groups: google_network_security_address_group.address_group_1.id + rule.match.src_secure_tag.name: google_tags_tag_value.secure_tag_value_1.id + rule.target_secure_tag.name: google_tags_tag_value.secure_tag_value_1.id + dependencies: + google_network_security_address_group.address_group_1: |- + { + "capacity": 100, + "description": "Global address group", + "items": [ + "208.80.154.224/32" + ], + "location": "global", + "name": "tf-address-group", + "parent": "${data.google_project.project.id}", + "provider": "${google-beta}", + "type": "IPV4" + } + google_network_security_security_profile.security_profile_1: |- + { + "location": "global", + "name": "tf-security-profile", + "parent": "organizations/123456789", + "provider": "${google-beta}", + "type": "THREAT_PREVENTION" + } + google_network_security_security_profile_group.security_profile_group_1: |- + { + "description": "my description", + "name": "tf-security-profile-group", + "parent": "organizations/123456789", + "provider": "${google-beta}", + "threat_prevention_profile": "${google_network_security_security_profile.security_profile_1.id}" + } + google_tags_tag_key.secure_tag_key_1: |- + { + "description": "Tag key", + "parent": "${data.google_project.project.id}", + "provider": "${google-beta}", + "purpose": "GCE_FIREWALL", + "purpose_data": { + "network": "${data.google_project.project.name}/default" + }, + "short_name": "tf-tag-key" + } + google_tags_tag_value.secure_tag_value_1: |- + { + "description": "Tag value", + "parent": 
"${google_tags_tag_key.secure_tag_key_1.id}", + "provider": "${google-beta}", + "short_name": "tf-tag-value" + } + argumentDocs: + create: '- Default is 20 minutes.' + creation_timestamp: |- + - + Creation timestamp in RFC3339 text format. + delete: '- Default is 20 minutes.' + fingerprint: |- + - + Fingerprint of the resource. This field is used internally during updates of this resource. + id: '- an identifier for the resource with format projects/{{project}}/global/firewallPolicies/{{name}}' + layer4_config.ip_protocol: |- - (Required) - A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced. + The IP protocol to which this rule applies. The protocol + type is required when creating a firewall rule. + This value can either be one of the following well + known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), + or the IP protocol number. + layer4_config.ports: |- + - + (Optional) + An optional list of ports to which this rule applies. This field + is only applicable for UDP or TCP protocol. Each entry must be + either an integer or a range. If not specified, this rule + applies to connections through any port. + Example inputs include: ["22"], ["80","443"], and + ["12345-12349"]. match.dest_address_groups: |- - (Optional) - Address groups which should be matched against the traffic destination. Maximum number of destination address groups is 10. Destination address groups is only supported in Egress rules. + Address groups which should be matched against the traffic destination. + Maximum number of destination address groups is 10. match.dest_fqdns: |- - (Optional) - Domain names that will be used to match against the resolved domain name of destination of traffic. Can only be specified if DIRECTION is egress. + Fully Qualified Domain Name (FQDN) which should be matched against + traffic destination. Maximum number of destination fqdn allowed is 100. 
match.dest_ip_ranges: |- - (Optional) - CIDR IP address range. Maximum number of destination CIDR IP ranges allowed is 5000. + Destination IP address range in CIDR format. Required for + EGRESS rules. match.dest_region_codes: |- - (Optional) - The Unicode country codes whose IP addresses will be used to match against the source of traffic. Can only be specified if DIRECTION is egress. + Region codes whose IP addresses will be used to match for destination + of traffic. Should be specified as 2 letter country code defined as per + ISO 3166 alpha-2 country codes. ex."US" + Maximum number of destination region codes allowed is 5000. match.dest_threat_intelligences: |- - (Optional) - Name of the Google Cloud Threat Intelligence list. - match.layer4_configs: |- + Names of Network Threat Intelligence lists. + The IPs in these lists will be matched against traffic destination. + match.layer4_config: |- - (Required) Pairs of IP protocols and ports that the rule should match. + Structure is documented below. match.src_address_groups: |- - (Optional) - Address groups which should be matched against the traffic source. Maximum number of source address groups is 10. Source address groups is only supported in Ingress rules. + Address groups which should be matched against the traffic source. + Maximum number of source address groups is 10. match.src_fqdns: |- - (Optional) - Domain names that will be used to match against the resolved domain name of source of traffic. Can only be specified if DIRECTION is ingress. + Fully Qualified Domain Name (FQDN) which should be matched against + traffic source. Maximum number of source fqdn allowed is 100. match.src_ip_ranges: |- - (Optional) - CIDR IP address range. Maximum number of source CIDR IP ranges allowed is 5000. + Source IP address range in CIDR format. Required for + INGRESS rules. match.src_region_codes: |- - (Optional) - The Unicode country codes whose IP addresses will be used to match against the source of traffic. 
Can only be specified if DIRECTION is ingress. - match.src_secure_tags: |- + Region codes whose IP addresses will be used to match for source + of traffic. Should be specified as 2 letter country code defined as per + ISO 3166 alpha-2 country codes. ex."US" + Maximum number of source region codes allowed is 5000. + match.src_secure_tag: |- - (Optional) - List of secure tag values, which should be matched at the source of the traffic. For INGRESS rule, if all the srcSecureTag are INEFFECTIVE, and there is no srcIpRange, this rule will be ignored. Maximum number of source tag values allowed is 256. + List of secure tag values, which should be matched at the source + of the traffic. + For INGRESS rule, if all the srcSecureTag are INEFFECTIVE, + and there is no srcIpRange, this rule will be ignored. + Maximum number of source tag values allowed is 256. + Structure is documented below. match.src_threat_intelligences: |- - (Optional) - Name of the Google Cloud Threat Intelligence list. - priority: |- + Names of Network Threat Intelligence lists. + The IPs in these lists will be matched against traffic source. + name: |- - (Required) - An integer indicating the priority of a rule in the list. The priority must be a positive value between 0 and 2147483647. Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest prority. - rule_tuple_count: |- + User-provided name of the Network firewall policy. + The name should be unique in the project in which the firewall policy is created. + The name must be 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular expression a-z? + which means the first character must be a lowercase letter, and all following characters must be a dash, + lowercase letter, or digit, except the last character, which cannot be a dash. + network_firewall_policy_id: |- - - Calculation of the complexity of a single firewall policy rule. 
- src_secure_tags.name: |- + The unique identifier for the resource. This identifier is defined by the server. + predefined_rules: |- + - + A list of firewall policy pre-defined rules. + Structure is documented below. + predefined_rules.action: |- + - + (Output) + The Action to perform when the client connection triggers the rule. Can currently be either + "allow", "deny", "apply_security_profile_group" or "goto_next". + predefined_rules.description: |- + - + (Output) + A description of the rule. + predefined_rules.direction: |- + - + (Output) + The direction in which this rule applies. If unspecified an INGRESS rule is created. + predefined_rules.disabled: |- + - + (Output) + Denotes whether the firewall policy rule is disabled. When set to true, + the firewall policy rule is not enforced and traffic behaves as if it did + not exist. If this is unspecified, the firewall policy rule will be + enabled. + predefined_rules.enable_logging: |- + - + (Output) + Denotes whether to enable logging for a particular rule. + If logging is enabled, logs will be exported to the + configured export destination in Stackdriver. + predefined_rules.match: |- + - + (Output) + A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced. + Structure is documented below. + predefined_rules.priority: |- + - + (Output) + An integer indicating the priority of a rule in the list. The priority must be a value + between 0 and 2147483647. Rules are evaluated from highest to lowest priority where 0 is the + highest priority and 2147483647 is the lowest priority. + predefined_rules.rule_name: |- + - + (Output) + An optional name for the rule. This field is not a unique identifier + and can be updated. + predefined_rules.security_profile_group: |- + - + (Output) + A fully-qualified URL of a SecurityProfile resource instance. 
+ Example: + https://networksecurity.googleapis.com/v1/projects/{project}/locations/{location}/securityProfileGroups/my-security-profile-group + Must be specified if action is 'apply_security_profile_group'. + predefined_rules.target_secure_tag: |- + - + (Output) + A list of secure tags that controls which instances the firewall rule + applies to. If targetSecureTag are specified, then the + firewall rule applies only to instances in the VPC network that have one + of those EFFECTIVE secure tags, if all the target_secure_tag are in + INEFFECTIVE state, then this rule will be ignored. + targetSecureTag may not be set at the same time as + targetServiceAccounts. + If neither targetServiceAccounts nor + targetSecureTag are specified, the firewall rule applies + to all instances on the specified network. + Maximum number of target label tags allowed is 256. + Structure is documented below. + predefined_rules.target_service_accounts: |- + - + (Output) + A list of service accounts indicating the sets of + instances that are applied with this rule. + predefined_rules.tls_inspect: |- + - + (Output) + Boolean flag indicating if the traffic should be TLS decrypted. + It can be set only if action = 'apply_security_profile_group' and cannot be set for other actions. + rule: |- - (Required) - Name of the secure tag, created with TagManager's TagValue API. @pattern tagValues/[0-9]+ - src_secure_tags.state: |- + A list of firewall policy rules. + Structure is documented below. + rule.action: |- - - [Output Only] State of the secure tag, either EFFECTIVE or INEFFECTIVE. A secure tag is INEFFECTIVE when it is deleted or its network is deleted. - target_secure_tags.name: |- + (Required) + The Action to perform when the client connection triggers the rule. Can currently be either + "allow", "deny", "apply_security_profile_group" or "goto_next". + rule.description: |- + - + (Optional) + A description of the rule. 
+ rule.direction: |- + - + (Optional) + The direction in which this rule applies. If unspecified an INGRESS rule is created. + Possible values are: INGRESS, EGRESS. + rule.disabled: |- + - + (Optional) + Denotes whether the firewall policy rule is disabled. When set to true, + the firewall policy rule is not enforced and traffic behaves as if it did + not exist. If this is unspecified, the firewall policy rule will be + enabled. + rule.enable_logging: |- + - + (Optional) + Denotes whether to enable logging for a particular rule. + If logging is enabled, logs will be exported to the + configured export destination in Stackdriver. + rule.match: |- - (Required) - Name of the secure tag, created with TagManager's TagValue API. @pattern tagValues/[0-9]+ - target_secure_tags.state: |- + A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced. + Structure is documented below. + rule.priority: |- + - + (Required) + An integer indicating the priority of a rule in the list. The priority must be a value + between 0 and 2147483647. Rules are evaluated from highest to lowest priority where 0 is the + highest priority and 2147483647 is the lowest priority. + rule.rule_name: |- + - + (Optional) + An optional name for the rule. This field is not a unique identifier + and can be updated. + rule.security_profile_group: |- - - [Output Only] State of the secure tag, either EFFECTIVE or INEFFECTIVE. A secure tag is INEFFECTIVE when it is deleted or its network is deleted. + (Optional) + A fully-qualified URL of a SecurityProfile resource instance. + Example: + https://networksecurity.googleapis.com/v1/projects/{project}/locations/{location}/securityProfileGroups/my-security-profile-group + Must be specified if action is 'apply_security_profile_group'. + rule.target_secure_tag: |- + - + (Optional) + A list of secure tags that controls which instances the firewall rule + applies to. 
If targetSecureTag are specified, then the + firewall rule applies only to instances in the VPC network that have one + of those EFFECTIVE secure tags, if all the target_secure_tag are in + INEFFECTIVE state, then this rule will be ignored. + targetSecureTag may not be set at the same time as + targetServiceAccounts. + If neither targetServiceAccounts nor + targetSecureTag are specified, the firewall rule applies + to all instances on the specified network. + Maximum number of target label tags allowed is 256. + Structure is documented below. + rule.target_service_accounts: |- + - + (Optional) + A list of service accounts indicating the sets of + instances that are applied with this rule. + rule.tls_inspect: |- + - + (Optional) + Boolean flag indicating if the traffic should be TLS decrypted. + It can be set only if action = 'apply_security_profile_group' and cannot be set for other actions. + rule_tuple_count: |- + - + Total count of all firewall policy rule tuples. A firewall policy can not exceed a set number of tuples. + self_link: |- + - + Server-defined URL for the resource. + self_link_with_id: |- + - + Server-defined URL for this resource with the resource id. + src_secure_tag.name: |- + - + (Optional) + Name of the secure tag, created with TagManager's TagValue API. + @pattern tagValues/[0-9]+ + src_secure_tag.state: |- + - + (Output) + [Output Only] State of the secure tag, either EFFECTIVE or + INEFFECTIVE. A secure tag is INEFFECTIVE when it is deleted + or its network is deleted. + target_secure_tag.description: |- + - + (Optional) + An optional description of this resource. + target_secure_tag.name: |- + - + (Optional) + Name of the secure tag, created with TagManager's TagValue API. + @pattern tagValues/[0-9]+ + target_secure_tag.project: |- + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. 
+ target_secure_tag.state: |- + - + (Output) + [Output Only] State of the secure tag, either EFFECTIVE or + INEFFECTIVE. A secure tag is INEFFECTIVE when it is deleted + or its network is deleted. update: '- Default is 20 minutes.' importStatements: [] google_compute_network_peering: @@ -41933,7 +45814,7 @@ resources: } google_container_cluster.private_cluster: |- { - "deletion_protection": "true", + "deletion_protection": true, "initial_node_count": 1, "ip_allocation_policy": [ { @@ -42091,6 +45972,7 @@ resources: } google_project.guest_project: |- { + "deletion_policy": "DELETE", "name": "project-name", "org_id": "123456789", "project_id": "project-id" @@ -42219,7 +46101,50 @@ resources: } ] } + - name: template + manifest: |- + { + "accelerators": [ + { + "accelerator_count": 4, + "accelerator_type": "nvidia-tesla-t4" + } + ], + "name": "soletenant-with-accelerators", + "node_type": "n1-node-96-624", + "region": "us-central1" + } + - name: template + manifest: |- + { + "disks": [ + { + "disk_count": 16, + "disk_size_gb": 375, + "disk_type": "local-ssd" + } + ], + "name": "soletenant-with-disks", + "node_type": "n2-node-80-640", + "region": "us-central1" + } argumentDocs: + accelerators: |- + - + (Optional) + List of the type and count of accelerator cards attached to the + node template + Structure is documented below. + accelerators.accelerator_count: |- + - + (Optional) + The number of the guest accelerator cards exposed to this + node template. + accelerators.accelerator_type: |- + - + (Optional) + Full or partial URL of the accelerator type resource to expose + to this node template. cpu_overcommit_type: |- - (Optional) @@ -42235,6 +46160,24 @@ resources: - (Optional) An optional textual description of the resource. + disks: |- + - + (Optional) + List of the type, size and count of disks attached to the + node template + Structure is documented below. + disks.disk_count: |- + - + (Optional) + Specifies the number of such disks. 
+ disks.disk_size_gb: |- + - + (Optional) + Specifies the size of the disk in base-2 GB. + disks.disk_type: |- + - + (Optional) + Specifies the desired disk type on the node. This disk type must be a local storage type (e.g.: local-ssd). Note that for nodeTemplates, this should be the name of the disk type and not its URL. id: '- an identifier for the resource with format projects/{{project}}/regions/{{region}}/nodeTemplates/{{name}}' name: |- - @@ -42409,6 +46352,7 @@ resources: } google_folder.security_policy_target: |- { + "deletion_protection": false, "display_name": "tf-test-secpol-", "parent": "organizations/123456789", "provider": "${google-beta}" @@ -43023,6 +46967,7 @@ resources: google_project.project: |- { "billing_account": "000000-0000000-0000000-000000", + "deletion_policy": "DELETE", "name": "your_project_id", "org_id": "123456789", "project_id": "your_project_id" @@ -43037,7 +46982,7 @@ resources: - (Required) Managed protection tier to be set. - Possible values are: CA_STANDARD, CA_ENTERPRISE_PAYGO. + Possible values are: CA_STANDARD, CA_ENTERPRISE_PAYGO, CA_ENTERPRISE_ANNUAL. create: '- Default is 20 minutes.' delete: '- Default is 20 minutes.' id: '- an identifier for the resource with format projects/{{project}}' @@ -43517,7 +47462,7 @@ resources: scale_down_control.max_scaled_down_replicas: |- - (Optional) - A nested object resource + A nested object resource. Structure is documented below. scale_down_control.time_window_sec: |- - @@ -43527,7 +47472,7 @@ resources: scale_in_control.max_scaled_in_replicas: |- - (Optional) - A nested object resource + A nested object resource. Structure is documented below. 
scale_in_control.time_window_sec: |- - @@ -43609,6 +47554,7 @@ resources: { "iap": [ { + "enabled": true, "oauth2_client_id": "abc", "oauth2_client_secret": "xyz" } @@ -43782,6 +47728,45 @@ resources: ], "name": "rbs-health-check" } + - name: default + manifest: |- + { + "health_checks": [ + "${google_compute_health_check.health_check.id}" + ], + "load_balancing_scheme": "INTERNAL_MANAGED", + "locality_lb_policy": "RING_HASH", + "name": "region-service", + "protocol": "HTTP", + "provider": "${google-beta}", + "region": "us-central1", + "session_affinity": "STRONG_COOKIE_AFFINITY", + "strong_session_affinity_cookie": [ + { + "name": "mycookie", + "ttl": [ + { + "nanos": 1111, + "seconds": 11 + } + ] + } + ] + } + references: + health_checks: google_compute_health_check.health_check.id + provider: google-beta + dependencies: + google_compute_health_check.health_check: |- + { + "http_health_check": [ + { + "port": 80 + } + ], + "name": "rbs-health-check", + "provider": "${google-beta}" + } - name: default manifest: |- { @@ -43900,6 +47885,31 @@ resources: } ] } + - name: default + manifest: |- + { + "health_checks": [ + "${google_compute_region_health_check.health_check.id}" + ], + "ip_address_selection_policy": "IPV6_ONLY", + "load_balancing_scheme": "EXTERNAL_MANAGED", + "name": "region-service", + "protocol": "HTTP", + "region": "us-central1" + } + references: + health_checks: google_compute_region_health_check.health_check.id + dependencies: + google_compute_region_health_check.health_check: |- + { + "name": "rbs-health-check", + "region": "us-central1", + "tcp_health_check": [ + { + "port": 80 + } + ] + } argumentDocs: LEAST_REQUEST: |- : An O(1) algorithm which selects two random healthy @@ -43926,7 +47936,8 @@ resources: is selected in round robin order. WEIGHTED_MAGLEV: |- : Per-instance weighted Load Balancing via health check - reported weights. If set, the Backend Service must + reported weights. Only applicable to loadBalancingScheme + EXTERNAL. 
If set, the Backend Service must configure a non legacy HTTP-based Health Check, and health check replies are expected to contain non-standard HTTP response header field @@ -43937,7 +47948,7 @@ resources: instance either reported a valid weight or had UNAVAILABLE_WEIGHT. Otherwise, Load Balancing remains equal-weight. - This field is applicable to either: + locality_lb_policy is applicable to either: affinity_cookie_ttl_sec: |- - (Optional) @@ -43957,8 +47968,7 @@ resources: Specifies the balancing mode for this backend. See the Backend Services Overview for an explanation of load balancing modes. - From version 6.0.0 default value will be UTILIZATION to match default GCP value. - Default value is CONNECTION. + Default value is UTILIZATION. Possible values are: UTILIZATION, RATE, CONNECTION. backend.capacity_scaler: |- - @@ -44219,7 +48229,6 @@ resources: (Optional) Time for which instance will be drained (not accept new connections, but still work to finish started). - From version 6.0.0 ConnectionDrainingTimeoutSec default value will be 300 to match default GCP value. connection_tracking_policy: |- - (Optional, Beta) @@ -44378,13 +48387,17 @@ resources: (Optional) Settings for enabling Cloud Identity Aware Proxy Structure is documented below. - iap.oauth2_client_id: |- + iap.enabled: |- - (Required) + Whether the serving infrastructure will authenticate and authorize all incoming requests. + iap.oauth2_client_id: |- + - + (Optional) OAuth2 Client ID for IAP iap.oauth2_client_secret: |- - - (Required) + (Optional) OAuth2 Client Secret for IAP Note: This property is sensitive and will not be displayed in the plan. iap.oauth2_client_secret_sha256: |- @@ -44404,6 +48417,11 @@ resources: (Required) Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. + ip_address_selection_policy: |- + - + (Optional) + Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). 
+ Possible values are: IPV4_ONLY, PREFER_IPV6, IPV6_ONLY. load_balancing_scheme: is set to INTERNAL_MANAGED locality_lb_policy: |- is set to MAGLEV or RING_HASH @@ -44456,8 +48474,6 @@ resources: Settings controlling eviction of unhealthy hosts from the load balancing pool. This field is applicable only when the load_balancing_scheme is set to INTERNAL_MANAGED and the protocol is set to HTTP, HTTPS, or HTTP2. - From version 6.0.0 outlierDetection default terraform values will be removed to match default GCP value. - Default values are enforce by GCP without providing them. Structure is documented below. outlier_detection.base_ejection_time: |- - @@ -44560,7 +48576,25 @@ resources: (Optional) Type of session affinity to use. The default is NONE. Session affinity is not applicable if the protocol is UDP. - Possible values are: NONE, CLIENT_IP, CLIENT_IP_PORT_PROTO, CLIENT_IP_PROTO, GENERATED_COOKIE, HEADER_FIELD, HTTP_COOKIE, CLIENT_IP_NO_DESTINATION. + Possible values are: NONE, CLIENT_IP, CLIENT_IP_PORT_PROTO, CLIENT_IP_PROTO, GENERATED_COOKIE, HEADER_FIELD, HTTP_COOKIE, CLIENT_IP_NO_DESTINATION, STRONG_COOKIE_AFFINITY. + strong_session_affinity_cookie: |- + - + (Optional) + Describes the HTTP cookie used for stateful session affinity. This field is applicable and required if the sessionAffinity is set to STRONG_COOKIE_AFFINITY. + Structure is documented below. + strong_session_affinity_cookie.name: |- + - + (Optional) + Name of the cookie. + strong_session_affinity_cookie.path: |- + - + (Optional) + Path to set for the cookie. + strong_session_affinity_cookie.ttl: |- + - + (Optional) + Lifetime of the cookie. + Structure is documented below. subsetting: |- - (Optional, Beta) @@ -44729,6 +48763,10 @@ resources: end_timestamp: |- - Commitment end time in RFC3339 text format. + existing_reservations: |- + - + (Optional) + Specifies the already existing reservations to attach to the Commitment. 
id: '- an identifier for the resource with format projects/{{project}}/regions/{{region}}/commitments/{{name}}' license_resource: |- - @@ -44907,7 +48945,7 @@ resources: async_primary_disk: |- - (Optional) - A nested object resource + A nested object resource. Structure is documented below. async_primary_disk.disk: |- - @@ -45451,7 +49489,7 @@ resources: grpc_health_check: |- - (Optional) - A nested object resource + A nested object resource. Structure is documented below. grpc_health_check.grpc_service_name: |- - @@ -45474,6 +49512,9 @@ resources: (Optional) Specifies how port is selected for health checking, can be one of the following values: + health_check_id: |- + - + The unique identifier number for the resource. This identifier is defined by the server. healthy_threshold: |- - (Optional) @@ -45482,7 +49523,7 @@ resources: http_health_check: |- - (Optional) - A nested object resource + A nested object resource. Structure is documented below. http_health_check.host: |- - @@ -45526,7 +49567,7 @@ resources: http2_health_check: |- - (Optional) - A nested object resource + A nested object resource. Structure is documented below. http2_health_check.host: |- - @@ -45570,7 +49611,7 @@ resources: https_health_check: |- - (Optional) - A nested object resource + A nested object resource. Structure is documented below. https_health_check.host: |- - @@ -45644,7 +49685,7 @@ resources: ssl_health_check: |- - (Optional) - A nested object resource + A nested object resource. Structure is documented below. ssl_health_check.port: |- - @@ -45684,7 +49725,7 @@ resources: tcp_health_check: |- - (Optional) - A nested object resource + A nested object resource. Structure is documented below. 
tcp_health_check.port: |- - @@ -45832,7 +49873,6 @@ resources: { "base_instance_name": "tf-sr-igm-instance", "name": "tf-sr-igm", - "provider": "${google-beta}", "region": "us-central1", "standby_policy": [ { @@ -45851,7 +49891,6 @@ resources: ] } references: - provider: google-beta version.instance_template: google_compute_instance_template.sr-igm.self_link argumentDocs: all_instances_config: |- @@ -45889,6 +49928,11 @@ resources: group. You can specify one or more values. For more information, see the official documentation. fingerprint: '- The fingerprint of the instance group manager.' id: '- an identifier for the resource with format projects/{{project}}/regions/{{region}}/instanceGroupManagers/{{name}}' + instance_flexibility_policy: '- (Optional) The flexibility policy for managed instance group. Instance flexibility allows managed instance group to create VMs from multiple types of machines. Instance flexibility configuration on managed instance group overrides instance template configuration. Structure is documented below.' + instance_flexibility_policy.instance_selections: '- (Optional), Named instance selections configuring properties that the group will use when creating new VMs. One can specify multiple instance selection to allow managed instance group to create VMs from multiple types of machines, based on preference and availability. Structure is documented below.' + instance_flexibility_policy.instance_selections.machine_types: '- (Required), A list of full machine-type names, e.g. "n1-standard-16".' + instance_flexibility_policy.instance_selections.name: '- (Required), Name of the instance selection, e.g. instance_selection_with_n1_machines_types. Instance selection names must be unique within the flexibility policy.' + instance_flexibility_policy.instance_selections.rank: '- (Optional), Preference of this instance selection. Lower number means higher preference. 
Managed instance group will first try to create a VM based on the machine-type with lowest rank and fallback to next rank based on availability. Machine types and instance selections with the same rank have the same preference.' instance_group: '- The full URL of the instance group created by the manager.' instance_lifecycle_policy.default_action_on_failure: '- (Optional), Default behavior for all instance or health check failures. Valid options are: REPAIR, DO_NOTHING. If DO_NOTHING then instances will not be repaired. If REPAIR (default), then failed instances will be repaired.' instance_lifecycle_policy.force_update_on_repair: '- (Optional), Specifies whether to apply the group''s latest configuration when repairing a VM. Valid options are: YES, NO. If YES and you updated the group''s instance template or per-instance configurations after the VM was created, then these changes are applied when VM is repaired. If NO (default), then updates are applied in accordance with the group''s update policy type.' @@ -45917,7 +49961,7 @@ resources: is not provided, the provider project is used. region: '- (Optional) The region where the managed instance group resides. If not provided, the provider region is used.' self_link: '- The URL of the created resource.' - standby_policy: '- (Optional Beta) The standby policy for stopped and suspended instances. Structure is documented below. For more information, see the official documentation and API' + standby_policy: '- (Optional) The standby policy for stopped and suspended instances. Structure is documented below. For more information, see the official documentation.' standby_policy.initial_delay_sec: '- (Optional) - Specifies the number of seconds that the MIG should wait to suspend or stop a VM after that VM was created. The initial delay gives the initialization script the time to prepare your VM for a quick scale out. The value of initial delay must be between 0 and 3600 seconds. The default value is 0.' 
standby_policy.mode: '- (Optional) - Defines how a MIG resumes or starts VMs from a standby pool when the group scales out. Valid options are: MANUAL, SCALE_OUT_POOL. If MANUAL(default), you have full control over which VMs are stopped and suspended in the MIG. If SCALE_OUT_POOL, the MIG uses the VMs from the standby pools to accelerate the scale out by resuming or starting them and then automatically replenishes the standby pool with new VMs to maintain the target sizes.' stateful.has_stateful_config: '- A bit indicating whether the managed instance group has stateful configuration, that is, if you have configured any items in a stateful policy or in per-instance configs. The group might report that it has no stateful config even when there is still some preserved state on a managed instance, for example, if you have deleted all PICs but not yet applied those deletions.' @@ -45949,8 +49993,8 @@ resources: - (Optional), The number of instances (calculated as percentage) which are managed for this version. Conflicts with fixed. Note that when using percent, rounding will be in favor of explicitly set target_size values; a managed instance group with 2 instances and 2 versions, one of which has a target_size.percent of 60 will create 2 instances of that version. - target_stopped_size: '- (Optional Beta) The target number of stopped instances for this managed instance group.' - target_suspended_size: '- (Optional Beta) The target number of suspended instances for this managed instance group.' + target_stopped_size: '- (Optional) The target number of stopped instances for this managed instance group.' + target_suspended_size: '- (Optional) The target number of suspended instances for this managed instance group.' update: '- Default is 15 minutes.' update_policy: '- (Optional) The update policy for this managed instance group. Structure is documented below. 
For more information, see the official documentation and API' update_policy.instance_redistribution_type: '- (Optional) - The instance redistribution policy for regional managed instance groups. Valid values are: "PROACTIVE", "NONE". If PROACTIVE (default), the group attempts to maintain an even distribution of VM instances across zones in the region. If NONE, proactive redistribution is disabled.' @@ -45968,7 +50012,7 @@ resources: version deals with a specific instance template, allowing canary release scenarios. Structure is documented below. version.instance_template: '- (Required) - The full URL to an instance template from which all new instances of this version will be created.' - version.name: '- (Required) - Version name.' + version.name: '- (Optional) - Version name.' version.target_size: '- (Optional) - The number of instances calculated as a fixed number or a percentage depending on the settings. Structure is documented below.' version_target.version_target: '- A bit indicating whether version target has been reached in this managed instance group, i.e. all instances are in their target version. Instances'' target version are specified by version field on Instance Group Manager.' wait_for_instances: |- @@ -46180,9 +50224,12 @@ resources: this instance template. This field can take the following values: PREMIUM, STANDARD or FIXED_STANDARD. If this field is not specified, it is assumed to be PREMIUM. advanced_machine_features: (Optional) - Configure Nested Virtualisation and Simultaneous Hyper Threading on this VM. Structure is documented below - advanced_machine_features.enable_nested_virtualization: (Optional) Defines whether the instance should have nested virtualization enabled. Defaults to false. - advanced_machine_features.threads_per_core: (Optional) The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. 
- advanced_machine_features.visible_core_count: (Optional, ) The number of physical cores to expose to an instance. visible cores info (VC). + advanced_machine_features.enable_nested_virtualization: '- (Optional) Defines whether the instance should have nested virtualization enabled. Defaults to false.' + advanced_machine_features.enable_uefi_networking: '- (Optional) Whether to enable UEFI networking for instance creation.' + advanced_machine_features.performance_monitoring_unit: '- (Optional) The PMU is a hardware component within the CPU core that monitors how the processor runs code. Valid values for the level of PMU are STANDARD, ENHANCED, and ARCHITECTURAL.' + advanced_machine_features.threads_per_core: '- (Optional) The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1.' + advanced_machine_features.turbo_mode: '- (Optional) Turbo frequency mode to use for the instance. Supported modes are currently either ALL_CORE_MAX or unset (default).' + advanced_machine_features.visible_core_count: '- (Optional) The number of physical cores to expose to an instance. visible cores info (VC).' alias_ip_range.ip_cidr_range: |- - The IP CIDR range represented by this alias IP range. This IP CIDR range must belong to the specified subnetwork and cannot contain IP addresses reserved by @@ -46197,9 +50244,10 @@ resources: - (Optional) Whether to allow sending and receiving of packets with non-matching source or destination IPs. This defaults to false. confidential_instance_config: (Optional) - Enable Confidential Mode on this VM. Structure is documented below - confidential_instance_config.confidential_instance_type: '(Optional) Defines the confidential computing technology the instance uses. SEV is an AMD feature. TDX is an Intel feature. One of the following values is required: SEV, SEV_SNP, TDX. on_host_maintenance can be set to MIGRATE if confidential_instance_type is set to SEV and min_cpu_platform is set to "AMD Milan". 
Otherwise, on_host_maintenance has to be set to TERMINATE or this will fail to create the VM. If SEV_SNP, currently min_cpu_platform has to be set to "AMD Milan" or this will fail to create the VM. TDX is only available in beta.' + confidential_instance_config.confidential_instance_type: '(Optional) Defines the confidential computing technology the instance uses. SEV is an AMD feature. TDX is an Intel feature. One of the following values is required: SEV, SEV_SNP, TDX. on_host_maintenance can be set to MIGRATE if confidential_instance_type is set to SEV and min_cpu_platform is set to "AMD Milan". Otherwise, on_host_maintenance has to be set to TERMINATE or this will fail to create the VM. If SEV_SNP, currently min_cpu_platform has to be set to "AMD Milan" or this will fail to create the VM.' confidential_instance_config.enable_confidential_compute: (Optional) Defines whether the instance should have confidential compute enabled with AMD SEV. If enabled, on_host_maintenance can be set to MIGRATE if min_cpu_platform is set to "AMD Milan". Otherwise, on_host_maintenance has to be set to TERMINATE or this will fail to create the VM. create: '- Default is 4 minutes.' + creation_timestamp: '- Creation timestamp in RFC3339 text format.' custom-VCPUS-MEM_IN_MB: like custom-6-20480 for 6 vCPU and 20GB of RAM. delete: '- Default is 4 minutes.' description: '- (Optional) A brief description of this resource.' @@ -46288,6 +50336,7 @@ resources: ipv6_access_config.network_tier: |- - (Optional) The service-level to be provided for IPv6 traffic when the subnet has an external subnet. Only PREMIUM and STANDARD tier is valid for IPv6. + key_revocation_action_type: '- (optional) Action to be taken when a customer''s encryption key is revoked. Supports STOP and NONE, with NONE being the default.' labels: |- - (Optional) A set of key/value label pairs to assign to instances created from this template. 
@@ -46322,7 +50371,9 @@ resources: this blank, Terraform will auto-generate a unique name. name_prefix: |- - (Optional) Creates a unique name beginning with the specified - prefix. Conflicts with name. + prefix. Conflicts with name. Max length is 54 characters. + Prefixes with lengths longer than 37 characters will use a shortened + UUID that will be more prone to collisions. network_interface: |- - (Required) Networks to attach to instances created from this template. This can be specified multiple times for multiple networks. @@ -46333,7 +50384,7 @@ resources: is not accessible from the Internet (this means that ssh provisioners will not work unless you are running Terraform can send traffic to the instance's network (e.g. via tunnel or because it is running on another cloud instance - on that network). This block can be repeated multiple times. Structure documented below. + on that network). This block can be specified once per network_interface. Structure documented below. network_interface.alias_ip_range: |- - (Optional) An array of alias IP ranges for this network interface. Can only be specified for network @@ -46349,7 +50400,7 @@ resources: network_interface.network_ip: |- - (Optional) The private IP address to assign to the instance. If empty, the address will be automatically assigned. - network_interface.nic_type: '- (Optional) The type of vNIC to be used on this interface. Possible values: GVNIC, VIRTIO_NET.' + network_interface.nic_type: '- (Optional) The type of vNIC to be used on this interface. Possible values: GVNIC, VIRTIO_NET. In the beta provider the additional values of MRDMA and IRDMA are supported.' network_interface.queue_count: '- (Optional) The networking queue count that''s specified by users for the network interface. Both Rx and Tx queues will be set to this number. It will be empty if not specified.' 
network_interface.stack_type: '- (Optional) The stack type for this network interface to identify whether the IPv6 feature is enabled or not. Values are IPV4_IPV6 or IPV4_ONLY. If not specified, IPV4_ONLY will be used.' network_interface.subnetwork: |- @@ -46396,6 +50447,8 @@ resources: - (Optional) Specifies whether the instance should be automatically restarted if it is terminated by Compute Engine (not terminated by a user). This defaults to true. + scheduling.availability_domain: '- (Optional) Specifies the availability domain to place the instance in. The value must be a number between 1 and the number of availability domains specified in the spread placement policy attached to the instance.' + scheduling.host_error_timeout_seconds: '- (Optional) Beta Specifies the time in seconds for host error detection, the value must be within the range of [90, 330] with the increment of 30, if unset, the default behavior of host error recovery will be used.' scheduling.instance_termination_action: '- (Optional) Describe the type of termination action for SPOT VM. Can be STOP or DELETE. Read more on here' scheduling.max_run_duration: '- (Optional) The duration of the instance. Instance will run and be terminated after then, the termination action could be defined in instance_termination_action. Only support DELETE instance_termination_action at this point. Structure is documented below.' scheduling.node_affinities: |- @@ -46596,6 +50649,9 @@ resources: (Optional) IPv4 address external endpoint. This can only be specified when network_endpoint_type of the NEG is INTERNET_IP_PORT. + network_endpoint_id: |- + - + The unique identifier number for the resource. This identifier is defined by the server. 
port: |- - (Required) @@ -46773,13 +50829,14 @@ resources: } ], "runtime": "nodejs", - "service": "appengine-network-endpoint-group", + "service": "appengine-neg", "version_id": "v1" } google_storage_bucket.appengine_neg: |- { "location": "US", - "name": "appengine-neg" + "name": "appengine-neg", + "uniform_bucket_level_access": true } google_storage_bucket_object.appengine_neg: |- { @@ -46811,6 +50868,11 @@ resources: "name": "psc-neg", "network": "${google_compute_network.default.self_link}", "network_endpoint_type": "PRIVATE_SERVICE_CONNECT", + "psc_data": [ + { + "producer_port": "88" + } + ], "psc_target_service": "${google_compute_service_attachment.default.self_link}", "region": "europe-west4", "subnetwork": "${google_compute_subnetwork.default.self_link}" @@ -46822,11 +50884,15 @@ resources: dependencies: google_compute_forwarding_rule.default: |- { - "all_ports": true, "backend_service": "${google_compute_region_backend_service.default.id}", "load_balancing_scheme": "INTERNAL", "name": "psc-forwarding-rule", "network": "${google_compute_network.default.name}", + "ports": [ + "80", + "88", + "443" + ], "region": "europe-west4", "subnetwork": "${google_compute_subnetwork.default.name}" } @@ -47050,6 +51116,18 @@ resources: project: |- - (Optional) The ID of the project in which the resource belongs. If it is not provided, the provider project is used. + psc_data: |- + - + (Optional) + This field is only used for PSC NEGs. + Structure is documented below. + psc_data.producer_port: |- + - + (Optional) + The PSC producer port to use when consumer PSC NEG connects to a producer. If + this flag isn't specified for a PSC NEG with endpoint type + private-service-connect, then PSC NEG will be connected to a first port in the + available PSC producer port range. 
psc_target_service: |- - (Optional) @@ -47153,27 +51231,28 @@ resources: name: google_compute_region_network_firewall_policy_association title: "" examples: - - name: primary + - name: default manifest: |- { - "attachment_target": "${google_compute_network.basic_network.id}", - "firewall_policy": "${google_compute_region_network_firewall_policy.basic_regional_network_firewall_policy.name}", - "name": "association", + "attachment_target": "${google_compute_network.network.id}", + "firewall_policy": "${google_compute_region_network_firewall_policy.policy.id}", + "name": "my-association", "project": "my-project-name", "region": "us-west1" } references: - attachment_target: google_compute_network.basic_network.id - firewall_policy: google_compute_region_network_firewall_policy.basic_regional_network_firewall_policy.name + attachment_target: google_compute_network.network.id + firewall_policy: google_compute_region_network_firewall_policy.policy.id dependencies: - google_compute_network.basic_network: |- + google_compute_network.network: |- { - "name": "network" + "auto_create_subnetworks": false, + "name": "my-network" } - google_compute_region_network_firewall_policy.basic_regional_network_firewall_policy: |- + google_compute_region_network_firewall_policy.policy: |- { "description": "Sample global network firewall policy", - "name": "policy", + "name": "my-policy", "project": "my-project-name", "region": "us-west1" } @@ -47187,16 +51266,15 @@ resources: firewall_policy: |- - (Required) - The firewall policy ID of the association. + The firewall policy of the resource. id: '- an identifier for the resource with format projects/{{project}}/regions/{{region}}/firewallPolicies/{{firewall_policy}}/associations/{{name}}' name: |- - (Required) The name for an association. project: |- - - - (Optional) - The project for the resource + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. 
region: |- - (Optional) @@ -47207,7 +51285,7 @@ resources: importStatements: [] google_compute_region_network_firewall_policy_rule: subCategory: Compute Engine - description: The Compute NetworkFirewallPolicyRule resource + description: Represents a rule that describes one or more match conditions along with the action to be taken when traffic matches this condition (allow or deny). name: google_compute_region_network_firewall_policy_rule title: "" examples: @@ -47241,7 +51319,7 @@ resources: ], "src_secure_tags": [ { - "name": "tagValues/${google_tags_tag_value.basic_value.name}" + "name": "${google_tags_tag_value.basic_value.id}" } ], "src_threat_intelligences": [ @@ -47259,6 +51337,7 @@ resources: references: firewall_policy: google_compute_region_network_firewall_policy.basic_regional_network_firewall_policy.name match.src_address_groups: google_network_security_address_group.basic_regional_networksecurity_address_group.id + match.src_secure_tags.name: google_tags_tag_value.basic_value.id dependencies: google_compute_network.basic_network: |- { @@ -47279,7 +51358,7 @@ resources: "208.80.154.224/32" ], "location": "us-west1", - "name": "policy", + "name": "address", "parent": "projects/my-project-name", "type": "IPV4" } @@ -47296,7 +51375,7 @@ resources: google_tags_tag_value.basic_value: |- { "description": "For valuename resources.", - "parent": "tagKeys/${google_tags_tag_key.basic_key.name}", + "parent": "${google_tags_tag_key.basic_key.id}", "short_name": "tagvalue" } argumentDocs: @@ -47305,11 +51384,15 @@ resources: (Required) The Action to perform when the client connection triggers the rule. Valid actions are "allow", "deny", "goto_next" and "apply_security_profile_group". create: '- Default is 20 minutes.' + creation_timestamp: |- + - + Creation timestamp in RFC3339 text format. delete: '- Default is 20 minutes.' direction: |- - (Required) - The direction in which this rule applies. 
Possible values: INGRESS, EGRESS + The direction in which this rule applies. + Possible values are: INGRESS, EGRESS. firewall_policy: |- - (Required) @@ -47318,127 +51401,594 @@ resources: kind: |- - Type of the resource. Always compute#firewallPolicyRule for firewall policy rules - layer4_configs.description: |- + layer4_configs.ip_protocol: |- + - + (Required) + The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. + This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number. + layer4_configs.ports: |- - (Optional) - An optional description for this resource. - layer4_configs.disabled: |- + An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. + Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. + match: |- + - + (Required) + A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced. + Structure is documented below. + match.dest_address_groups: |- - (Optional) - Denotes whether the firewall policy rule is disabled. When set to true, the firewall policy rule is not enforced and traffic behaves as if it did not exist. If this is unspecified, the firewall policy rule will be enabled. - layer4_configs.enable_logging: |- + Address groups which should be matched against the traffic destination. Maximum number of destination address groups is 10. + match.dest_fqdns: |- - (Optional) - Denotes whether to enable logging for a particular rule. If logging is enabled, logs will be exported to the configured export destination in Stackdriver. Logs may be exported to BigQuery or Pub/Sub. Note: you cannot enable logging on "goto_next" rules. 
- layer4_configs.ip_protocol: |- + Fully Qualified Domain Name (FQDN) which should be matched against traffic destination. Maximum number of destination fqdn allowed is 100. + match.dest_ip_ranges: |- + - + (Optional) + CIDR IP address range. Maximum number of destination CIDR IP ranges allowed is 5000. + match.dest_region_codes: |- + - + (Optional) + Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. + match.dest_threat_intelligences: |- + - + (Optional) + Names of Network Threat Intelligence lists. The IPs in these lists will be matched against traffic destination. + match.layer4_configs: |- - (Required) - The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number. - layer4_configs.ports: |- + Pairs of IP protocols and ports that the rule should match. + Structure is documented below. + match.src_address_groups: |- - (Optional) - An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. Example inputs include: ``. - layer4_configs.project: |- + Address groups which should be matched against the traffic source. Maximum number of source address groups is 10. + match.src_fqdns: |- - (Optional) - The project for the resource - layer4_configs.region: |- + Fully Qualified Domain Name (FQDN) which should be matched against traffic source. Maximum number of source fqdn allowed is 100. + match.src_ip_ranges: |- + - + (Optional) + CIDR IP address range. Maximum number of source CIDR IP ranges allowed is 5000. 
+ match.src_region_codes: |- + - + (Optional) + Region codes whose IP addresses will be used to match for source of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of source region codes allowed is 5000. + match.src_secure_tags: |- + - + (Optional) + List of secure tag values, which should be matched at the source of the traffic. For INGRESS rule, if all the srcSecureTag are INEFFECTIVE, and there is no srcIpRange, this rule will be ignored. Maximum number of source tag values allowed is 256. + Structure is documented below. + match.src_threat_intelligences: |- + - + (Optional) + Names of Network Threat Intelligence lists. The IPs in these lists will be matched against traffic source. + priority: |- + - + (Required) + An integer indicating the priority of a rule in the list. + The priority must be a positive value between 0 and 2147483647. + Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest priority. + rule_tuple_count: |- + - + Calculation of the complexity of a single firewall policy rule. + src_secure_tags.description: |- + - + (Optional) + An optional description for this resource. + src_secure_tags.disabled: |- + - + (Optional) + Denotes whether the firewall policy rule is disabled. + When set to true, the firewall policy rule is not enforced and traffic behaves as if it did not exist. + If this is unspecified, the firewall policy rule will be enabled. + src_secure_tags.enable_logging: |- + - + (Optional) + Denotes whether to enable logging for a particular rule. + If logging is enabled, logs will be exported to the configured export destination in Stackdriver. + Logs may be exported to BigQuery or Pub/Sub. + Note: you cannot enable logging on "goto_next" rules. + src_secure_tags.name: |- + - + (Optional) + Name of the secure tag, created with TagManager's TagValue API.
+ src_secure_tags.project: |- + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + src_secure_tags.region: |- - (Optional) The location of this resource. - layer4_configs.rule_name: |- + src_secure_tags.rule_name: |- - (Optional) An optional name for the rule. This field is not a unique identifier and can be updated. - layer4_configs.security_profile_group: |- + src_secure_tags.security_profile_group: |- - (Optional) - A fully-qualified URL of a SecurityProfileGroup resource. Example: https://networksecurity.googleapis.com/v1/organizations/{organizationId}/locations/global/securityProfileGroups/my-security-profile-group. It must be specified if action = 'apply_security_profile_group' and cannot be specified for other actions. - layer4_configs.target_secure_tags: |- + A fully-qualified URL of a SecurityProfile resource instance. + Example: https://networksecurity.googleapis.com/v1/projects/{project}/locations/{location}/securityProfileGroups/my-security-profile-group + Must be specified if action = 'apply_security_profile_group' and cannot be specified for other actions. + Security Profile Group and Firewall Policy Rule must be in the same scope. + src_secure_tags.state: |- + - + (Output) + State of the secure tag, either EFFECTIVE or INEFFECTIVE. A secure tag is INEFFECTIVE when it is deleted or its network is deleted. + src_secure_tags.target_secure_tags: |- - (Optional) - A list of secure tags that controls which instances the firewall rule applies to. If targetSecureTag are specified, then the firewall rule applies only to instances in the VPC network that have one of those EFFECTIVE secure tags, if all the target_secure_tag are in INEFFECTIVE state, then this rule will be ignored. targetSecureTag may not be set at the same time as targetServiceAccounts. If neither targetServiceAccounts nor targetSecureTag are specified, the firewall rule applies to all instances on the specified network. 
Maximum number of target label tags allowed is 256. - layer4_configs.target_service_accounts: |- + A list of secure tags that controls which instances the firewall rule applies to. + If targetSecureTag are specified, then the firewall rule applies only to instances in the VPC network that have one of those EFFECTIVE secure tags, if all the targetSecureTag are in INEFFECTIVE state, then this rule will be ignored. + targetSecureTag may not be set at the same time as targetServiceAccounts. If neither targetServiceAccounts nor targetSecureTag are specified, the firewall rule applies to all instances on the specified network. Maximum number of target label tags allowed is 256. + Structure is documented below. + src_secure_tags.target_service_accounts: |- - (Optional) A list of service accounts indicating the sets of instances that are applied with this rule. - layer4_configs.tls_inspect: |- + src_secure_tags.tls_inspect: |- - (Optional) - Boolean flag indicating if the traffic should be TLS decrypted. It can be set only if action = 'apply_security_profile_group' and cannot be set for other actions. - match: |- + Boolean flag indicating if the traffic should be TLS decrypted. + Can be set only if action = 'apply_security_profile_group' and cannot be set for other actions. + target_secure_tags.name: |- + - + (Optional) + Name of the secure tag, created with TagManager's TagValue API. + target_secure_tags.state: |- + - + (Output) + State of the secure tag, either EFFECTIVE or INEFFECTIVE. A secure tag is INEFFECTIVE when it is deleted or its network is deleted. + update: '- Default is 20 minutes.' 
+ importStatements: [] + google_compute_region_network_firewall_policy_with_rules: + subCategory: Compute Engine + description: The Compute NetworkFirewallPolicy with rules resource + name: google_compute_region_network_firewall_policy_with_rules + title: "" + examples: + - name: region-network-firewall-policy-with-rules + manifest: |- + { + "description": "Terraform test", + "name": "tf-region-fw-policy-with-rules", + "provider": "${google-beta}", + "region": "us-west2", + "rule": [ + { + "action": "allow", + "description": "tcp rule", + "direction": "EGRESS", + "enable_logging": true, + "match": [ + { + "dest_address_groups": [ + "${google_network_security_address_group.address_group_1.id}" + ], + "dest_fqdns": [ + "www.yyy.com", + "www.zzz.com" + ], + "dest_ip_ranges": [ + "11.100.0.1/32" + ], + "dest_region_codes": [ + "HK", + "IN" + ], + "dest_threat_intelligences": [ + "iplist-search-engines-crawlers", + "iplist-tor-exit-nodes" + ], + "layer4_config": [ + { + "ip_protocol": "tcp", + "ports": [ + 8080, + 7070 + ] + } + ] + } + ], + "priority": 1000, + "target_secure_tag": [ + { + "name": "${google_tags_tag_value.secure_tag_value_1.id}" + } + ] + }, + { + "action": "deny", + "description": "udp rule", + "direction": "INGRESS", + "disabled": true, + "enable_logging": false, + "match": [ + { + "layer4_config": [ + { + "ip_protocol": "udp" + } + ], + "src_address_groups": [ + "${google_network_security_address_group.address_group_1.id}" + ], + "src_fqdns": [ + "www.abc.com", + "www.def.com" + ], + "src_ip_ranges": [ + "0.0.0.0/0" + ], + "src_region_codes": [ + "US", + "CA" + ], + "src_secure_tag": [ + { + "name": "${google_tags_tag_value.secure_tag_value_1.id}" + } + ], + "src_threat_intelligences": [ + "iplist-known-malicious-ips", + "iplist-public-clouds" + ] + } + ], + "priority": 2000, + "rule_name": "test-rule" + } + ] + } + references: + provider: google-beta + rule.match.dest_address_groups: google_network_security_address_group.address_group_1.id + 
rule.match.src_address_groups: google_network_security_address_group.address_group_1.id + rule.match.src_secure_tag.name: google_tags_tag_value.secure_tag_value_1.id + rule.target_secure_tag.name: google_tags_tag_value.secure_tag_value_1.id + dependencies: + google_network_security_address_group.address_group_1: |- + { + "capacity": 100, + "description": "Regional address group", + "items": [ + "208.80.154.224/32" + ], + "location": "us-west2", + "name": "tf-address-group", + "parent": "${data.google_project.project.id}", + "provider": "${google-beta}", + "type": "IPV4" + } + google_tags_tag_key.secure_tag_key_1: |- + { + "description": "Tag key", + "parent": "${data.google_project.project.id}", + "provider": "${google-beta}", + "purpose": "GCE_FIREWALL", + "purpose_data": { + "network": "${data.google_project.project.name}/default" + }, + "short_name": "tf-tag-key" + } + google_tags_tag_value.secure_tag_value_1: |- + { + "description": "Tag value", + "parent": "${google_tags_tag_key.secure_tag_key_1.id}", + "provider": "${google-beta}", + "short_name": "tf-tag-value" + } + argumentDocs: + create: '- Default is 20 minutes.' + creation_timestamp: |- + - + Creation timestamp in RFC3339 text format. + delete: '- Default is 20 minutes.' + fingerprint: |- + - + Fingerprint of the resource. This field is used internally during updates of this resource. + id: '- an identifier for the resource with format projects/{{project}}/regions/{{region}}/firewallPolicies/{{name}}' + layer4_config.ip_protocol: |- - (Required) - A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced. + The IP protocol to which this rule applies. The protocol + type is required when creating a firewall rule. + This value can either be one of the following well + known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), + or the IP protocol number. 
+ layer4_config.ports: |- + - + (Optional) + An optional list of ports to which this rule applies. This field + is only applicable for UDP or TCP protocol. Each entry must be + either an integer or a range. If not specified, this rule + applies to connections through any port. + Example inputs include: ["22"], ["80","443"], and + ["12345-12349"]. match.dest_address_groups: |- - (Optional) - Address groups which should be matched against the traffic destination. Maximum number of destination address groups is 10. Destination address groups is only supported in Egress rules. + Address groups which should be matched against the traffic destination. + Maximum number of destination address groups is 10. match.dest_fqdns: |- - (Optional) - Domain names that will be used to match against the resolved domain name of destination of traffic. Can only be specified if DIRECTION is egress. + Fully Qualified Domain Name (FQDN) which should be matched against + traffic destination. Maximum number of destination fqdn allowed is 100. match.dest_ip_ranges: |- - (Optional) - CIDR IP address range. Maximum number of destination CIDR IP ranges allowed is 5000. + Destination IP address range in CIDR format. Required for + EGRESS rules. match.dest_region_codes: |- - (Optional) - The Unicode country codes whose IP addresses will be used to match against the source of traffic. Can only be specified if DIRECTION is egress. + Region codes whose IP addresses will be used to match for destination + of traffic. Should be specified as 2 letter country code defined as per + ISO 3166 alpha-2 country codes. ex."US" + Maximum number of destination region codes allowed is 5000. match.dest_threat_intelligences: |- - (Optional) - Name of the Google Cloud Threat Intelligence list. - match.layer4_configs: |- + Names of Network Threat Intelligence lists. + The IPs in these lists will be matched against traffic destination. 
+ match.layer4_config: |- - (Required) Pairs of IP protocols and ports that the rule should match. + Structure is documented below. match.src_address_groups: |- - (Optional) - Address groups which should be matched against the traffic source. Maximum number of source address groups is 10. Source address groups is only supported in Ingress rules. + Address groups which should be matched against the traffic source. + Maximum number of source address groups is 10. match.src_fqdns: |- - (Optional) - Domain names that will be used to match against the resolved domain name of source of traffic. Can only be specified if DIRECTION is ingress. + Fully Qualified Domain Name (FQDN) which should be matched against + traffic source. Maximum number of source fqdn allowed is 100. match.src_ip_ranges: |- - (Optional) - CIDR IP address range. Maximum number of source CIDR IP ranges allowed is 5000. + Source IP address range in CIDR format. Required for + INGRESS rules. match.src_region_codes: |- - (Optional) - The Unicode country codes whose IP addresses will be used to match against the source of traffic. Can only be specified if DIRECTION is ingress. - match.src_secure_tags: |- + Region codes whose IP addresses will be used to match for source + of traffic. Should be specified as 2 letter country code defined as per + ISO 3166 alpha-2 country codes. ex."US" + Maximum number of source region codes allowed is 5000. + match.src_secure_tag: |- - (Optional) - List of secure tag values, which should be matched at the source of the traffic. For INGRESS rule, if all the srcSecureTag are INEFFECTIVE, and there is no srcIpRange, this rule will be ignored. Maximum number of source tag values allowed is 256. + List of secure tag values, which should be matched at the source + of the traffic. + For INGRESS rule, if all the srcSecureTag are INEFFECTIVE, + and there is no srcIpRange, this rule will be ignored. + Maximum number of source tag values allowed is 256. 
+ Structure is documented below. match.src_threat_intelligences: |- - (Optional) - Name of the Google Cloud Threat Intelligence list. - priority: |- + Names of Network Threat Intelligence lists. + The IPs in these lists will be matched against traffic source. + name: |- - (Required) - An integer indicating the priority of a rule in the list. The priority must be a positive value between 0 and 2147483647. Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest prority. - rule_tuple_count: |- + User-provided name of the Network firewall policy. + The name should be unique in the project in which the firewall policy is created. + The name must be 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular expression a-z? + which means the first character must be a lowercase letter, and all following characters must be a dash, + lowercase letter, or digit, except the last character, which cannot be a dash. + network_firewall_policy_id: |- - - Calculation of the complexity of a single firewall policy rule. - src_secure_tags.name: |- + The unique identifier for the resource. This identifier is defined by the server. + predefined_rules: |- + - + A list of firewall policy pre-defined rules. + Structure is documented below. + predefined_rules.action: |- + - + (Output) + The Action to perform when the client connection triggers the rule. Can currently be either + "allow", "deny", "apply_security_profile_group" or "goto_next". + predefined_rules.description: |- + - + (Output) + A description of the rule. + predefined_rules.direction: |- + - + (Output) + The direction in which this rule applies. If unspecified an INGRESS rule is created. + predefined_rules.disabled: |- + - + (Output) + Denotes whether the firewall policy rule is disabled. When set to true, + the firewall policy rule is not enforced and traffic behaves as if it did + not exist. 
If this is unspecified, the firewall policy rule will be + enabled. + predefined_rules.enable_logging: |- + - + (Output) + Denotes whether to enable logging for a particular rule. + If logging is enabled, logs will be exported to the + configured export destination in Stackdriver. + predefined_rules.match: |- + - + (Output) + A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced. + Structure is documented below. + predefined_rules.priority: |- + - + (Output) + An integer indicating the priority of a rule in the list. The priority must be a value + between 0 and 2147483647. Rules are evaluated from highest to lowest priority where 0 is the + highest priority and 2147483647 is the lowest priority. + predefined_rules.rule_name: |- + - + (Output) + An optional name for the rule. This field is not a unique identifier + and can be updated. + predefined_rules.security_profile_group: |- + - + (Output) + A fully-qualified URL of a SecurityProfile resource instance. + Example: + https://networksecurity.googleapis.com/v1/projects/{project}/locations/{location}/securityProfileGroups/my-security-profile-group + Must be specified if action is 'apply_security_profile_group'. + predefined_rules.target_secure_tag: |- + - + (Output) + A list of secure tags that controls which instances the firewall rule + applies to. If targetSecureTag are specified, then the + firewall rule applies only to instances in the VPC network that have one + of those EFFECTIVE secure tags, if all the target_secure_tag are in + INEFFECTIVE state, then this rule will be ignored. + targetSecureTag may not be set at the same time as + targetServiceAccounts. + If neither targetServiceAccounts nor + targetSecureTag are specified, the firewall rule applies + to all instances on the specified network. + Maximum number of target label tags allowed is 256. + Structure is documented below. 
+ predefined_rules.target_service_accounts: |- + - + (Output) + A list of service accounts indicating the sets of + instances that are applied with this rule. + predefined_rules.tls_inspect: |- + - + (Output) + Boolean flag indicating if the traffic should be TLS decrypted. + It can be set only if action = 'apply_security_profile_group' and cannot be set for other actions. + rule: |- - (Required) - Name of the secure tag, created with TagManager's TagValue API. @pattern tagValues/[0-9]+ - src_secure_tags.state: |- + A list of firewall policy rules. + Structure is documented below. + rule.action: |- - - [Output Only] State of the secure tag, either EFFECTIVE or INEFFECTIVE. A secure tag is INEFFECTIVE when it is deleted or its network is deleted. - target_secure_tags.name: |- + (Required) + The Action to perform when the client connection triggers the rule. Can currently be either + "allow", "deny", "apply_security_profile_group" or "goto_next". + rule.description: |- + - + (Optional) + A description of the rule. + rule.direction: |- + - + (Optional) + The direction in which this rule applies. If unspecified an INGRESS rule is created. + Possible values are: INGRESS, EGRESS. + rule.disabled: |- + - + (Optional) + Denotes whether the firewall policy rule is disabled. When set to true, + the firewall policy rule is not enforced and traffic behaves as if it did + not exist. If this is unspecified, the firewall policy rule will be + enabled. + rule.enable_logging: |- + - + (Optional) + Denotes whether to enable logging for a particular rule. + If logging is enabled, logs will be exported to the + configured export destination in Stackdriver. + rule.match: |- - (Required) - Name of the secure tag, created with TagManager's TagValue API. @pattern tagValues/[0-9]+ - target_secure_tags.state: |- + A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced. + Structure is documented below. 
+ rule.priority: |- + - + (Required) + An integer indicating the priority of a rule in the list. The priority must be a value + between 0 and 2147483647. Rules are evaluated from highest to lowest priority where 0 is the + highest priority and 2147483647 is the lowest priority. + rule.rule_name: |- + - + (Optional) + An optional name for the rule. This field is not a unique identifier + and can be updated. + rule.security_profile_group: |- + - + (Optional) + A fully-qualified URL of a SecurityProfile resource instance. + Example: + https://networksecurity.googleapis.com/v1/projects/{project}/locations/{location}/securityProfileGroups/my-security-profile-group + Must be specified if action is 'apply_security_profile_group'. + rule.target_secure_tag: |- + - + (Optional) + A list of secure tags that controls which instances the firewall rule + applies to. If targetSecureTag are specified, then the + firewall rule applies only to instances in the VPC network that have one + of those EFFECTIVE secure tags, if all the target_secure_tag are in + INEFFECTIVE state, then this rule will be ignored. + targetSecureTag may not be set at the same time as + targetServiceAccounts. + If neither targetServiceAccounts nor + targetSecureTag are specified, the firewall rule applies + to all instances on the specified network. + Maximum number of target label tags allowed is 256. + Structure is documented below. + rule.target_service_accounts: |- - - [Output Only] State of the secure tag, either EFFECTIVE or INEFFECTIVE. A secure tag is INEFFECTIVE when it is deleted or its network is deleted. + (Optional) + A list of service accounts indicating the sets of + instances that are applied with this rule. + rule.tls_inspect: |- + - + (Optional) + Boolean flag indicating if the traffic should be TLS decrypted. + It can be set only if action = 'apply_security_profile_group' and cannot be set for other actions. + rule_tuple_count: |- + - + Total count of all firewall policy rule tuples. 
A firewall policy can not exceed a set number of tuples. + self_link: |- + - + Server-defined URL for the resource. + self_link_with_id: |- + - + Server-defined URL for this resource with the resource id. + src_secure_tag.name: |- + - + (Optional) + Name of the secure tag, created with TagManager's TagValue API. + @pattern tagValues/[0-9]+ + src_secure_tag.state: |- + - + (Output) + [Output Only] State of the secure tag, either EFFECTIVE or + INEFFECTIVE. A secure tag is INEFFECTIVE when it is deleted + or its network is deleted. + target_secure_tag.description: |- + - + (Optional) + An optional description of this resource. + target_secure_tag.name: |- + - + (Optional) + Name of the secure tag, created with TagManager's TagValue API. + @pattern tagValues/[0-9]+ + target_secure_tag.project: |- + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + target_secure_tag.region: |- + - + (Optional) + The region of this resource. + target_secure_tag.state: |- + - + (Output) + [Output Only] State of the secure tag, either EFFECTIVE or + INEFFECTIVE. A secure tag is INEFFECTIVE when it is deleted + or its network is deleted. update: '- Default is 20 minutes.' importStatements: [] google_compute_region_per_instance_config: @@ -47645,6 +52195,301 @@ resources: State will be removed on the next instance recreation or update. update: '- Default is 20 minutes.' importStatements: [] + google_compute_region_resize_request: + subCategory: Compute Engine + description: Represents a Regional Managed Instance Group Resize Request Resize Requests are the Managed Instance Group implementation of Dynamic Workload Scheduler Flex Start. 
+ name: google_compute_region_resize_request + title: "" + examples: + - name: a3_resize_request + manifest: |- + { + "description": "Test resize request resource", + "instance_group_manager": "${google_compute_region_instance_group_manager.a3_dws.name}", + "name": "a3-dws", + "provider": "${google-beta}", + "region": "us-central1", + "requested_run_duration": [ + { + "nanos": 0, + "seconds": 14400 + } + ], + "resize_by": 2 + } + references: + instance_group_manager: google_compute_region_instance_group_manager.a3_dws.name + provider: google-beta + dependencies: + google_compute_region_instance_group_manager.a3_dws: |- + { + "base_instance_name": "a3-dws", + "distribution_policy_target_shape": "ANY_SINGLE_ZONE", + "distribution_policy_zones": [ + "us-central1-a", + "us-central1-b", + "us-central1-c", + "us-central1-f" + ], + "instance_lifecycle_policy": [ + { + "default_action_on_failure": "DO_NOTHING" + } + ], + "name": "a3-dws", + "provider": "${google-beta}", + "region": "us-central1", + "update_policy": [ + { + "instance_redistribution_type": "NONE", + "max_surge_fixed": 0, + "max_unavailable_fixed": 6, + "minimal_action": "REPLACE", + "type": "OPPORTUNISTIC" + } + ], + "version": [ + { + "instance_template": "${google_compute_region_instance_template.a3_dws.self_link}" + } + ], + "wait_for_instances": false + } + google_compute_region_instance_template.a3_dws: |- + { + "can_ip_forward": false, + "description": "This template is used to create a mig instance that is compatible with DWS resize requests.", + "disk": [ + { + "auto_delete": true, + "boot": true, + "disk_size_gb": "960", + "disk_type": "pd-ssd", + "mode": "READ_WRITE", + "source_image": "cos-cloud/cos-105-lts" + } + ], + "guest_accelerator": [ + { + "count": 8, + "type": "nvidia-h100-80gb" + } + ], + "instance_description": "A3 GPU", + "machine_type": "a3-highgpu-8g", + "name": "a3-dws", + "network_interface": [ + { + "network": "default" + } + ], + "provider": "${google-beta}", + "region": 
"us-central1", + "reservation_affinity": [ + { + "type": "NO_RESERVATION" + } + ], + "scheduling": [ + { + "automatic_restart": false, + "on_host_maintenance": "TERMINATE" + } + ], + "shielded_instance_config": [ + { + "enable_integrity_monitoring": true, + "enable_vtpm": true + } + ] + } + argumentDocs: + create: '- Default is 20 minutes.' + creation_timestamp: |- + - + The creation timestamp for this resize request in RFC3339 text format. + delete: '- Default is 20 minutes.' + description: |- + - + (Optional) + An optional description of this resize-request. + error.errors: |- + - + (Output) + The array of errors encountered while processing this operation. + Structure is documented below. + error.errors.code: |- + - + (Output) + The error type identifier for this error. + error.errors.error_details: |- + - + (Output) + An array of messages that contain the error details. There is a set of defined message types to use for providing details.The syntax depends on the error code. For example, QuotaExceededInfo will have details when the error code is QUOTA_EXCEEDED. + Structure is documented below. + error.errors.location: |- + - + (Output) + Indicates the field in the request that caused the error. This property is optional. + error.errors.message: |- + - + (Output) + An optional, human-readable error message. + error_details.error_info: |- + - + (Output) + A nested object resource. + Structure is documented below. + error_details.help: |- + - + (Output) + A nested object resource. + Structure is documented below. + error_details.localized_message: |- + - + (Output) + A nested object resource. + Structure is documented below. + error_details.quota_info: |- + - + (Output) + A nested object resource. + Structure is documented below. + error_info.domain: |- + - + (Output) + The logical grouping to which the "reason" belongs. The error domain is typically the registered service name of the tool or product that generates the error. Example: "pubsub.googleapis.com". 
+ error_info.metadatas: |- + - + (Output) + Additional structured details about this error. + error_info.reason: |- + - + (Output) + The reason of the error. This is a constant value that identifies the proximate cause of the error. Error reasons are unique within a particular domain of errors. + help.links: |- + - + (Output) + A nested object resource. + Structure is documented below. + help.links.description: |- + - + (Output) + Describes what the link offers. + help.links.url: |- + - + (Output) + The URL of the link. + id: '- an identifier for the resource with format projects/{{project}}/regions/{{region}}/instanceGroupManagers/{{instance_group_manager}}/resizeRequests/{{name}}' + instance_group_manager: |- + - + (Required) + The reference of the regional instance group manager this ResizeRequest is a part of. + last_attempt.error: |- + - + (Output) + Fatal errors encountered during the queueing or provisioning phases of the ResizeRequest that caused the transition to the FAILED state. Contrary to the lastAttempt errors, this field is final and errors are never removed from here, as the ResizeRequest is not going to retry. + Structure is documented below. + last_attempt.error.errors: |- + - + (Output) + The array of errors encountered while processing this operation. + Structure is documented below. + last_attempt.error.errors.code: |- + - + (Output) + The error type identifier for this error. + last_attempt.error.errors.error_details: |- + - + (Output) + An array of messages that contain the error details. There is a set of defined message types to use for providing details.The syntax depends on the error code. For example, QuotaExceededInfo will have details when the error code is QUOTA_EXCEEDED. + Structure is documented below. + last_attempt.error.errors.location: |- + - + (Output) + Indicates the field in the request that caused the error. This property is optional. 
+ last_attempt.error.errors.message: |- + - + (Output) + An optional, human-readable error message. + localized_message.locale: |- + - + (Output) + The locale used following the specification defined at https://www.rfc-editor.org/rfc/bcp/bcp47.txt. Examples are: "en-US", "fr-CH", "es-MX" + localized_message.message: |- + - + (Output) + The localized error message in the above locale. + name: |- + - + (Required) + The name of this resize request. The name must be 1-63 characters long, and comply with RFC1035. + project: |- + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + quota_info.dimensions: |- + - + (Output) + The map holding related quota dimensions + quota_info.future_limit: |- + - + (Output) + Future quota limit being rolled out. The limit's unit depends on the quota type or metric. + quota_info.limit: |- + - + (Output) + Current effective quota limit. The limit's unit depends on the quota type or metric. + quota_info.limit_name: |- + - + (Output) + The name of the quota limit. + quota_info.metric_name: |- + - + (Output) + The Compute Engine quota metric name. + quota_info.rollout_status: |- + - + (Output) + Rollout status of the future quota limit. + region: |- + - + (Required) + The reference of the compute region scoping this request. + requested_run_duration: |- + - + (Optional) + Requested run duration for instances that will be created by this request. At the end of the run duration instances will be deleted. + Structure is documented below. + requested_run_duration.nanos: |- + - + (Optional) + Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented with a 0 seconds field and a positive nanos field. Must be from 0 to 999,999,999 inclusive. + requested_run_duration.seconds: |- + - + (Required) + Span of time at a resolution of a second. Must be from 600 to 604800 inclusive. 
Note: minimum and maximum allowed range for requestedRunDuration is 10 minutes (600 seconds) and 7 days(604800 seconds) correspondingly. + resize_by: |- + - + (Required) + The number of instances to be created by this resize request. The group's target size will be increased by this number. + state: |- + - + Current state of the request. + status: |- + - + Status of the request. + Structure is documented below. + status.error: |- + - + (Output) + Fatal errors encountered during the queueing or provisioning phases of the ResizeRequest that caused the transition to the FAILED state. Contrary to the lastAttempt errors, this field is final and errors are never removed from here, as the ResizeRequest is not going to retry. + Structure is documented below. + status.last_attempt: |- + - + (Output) + Information about the last attempt to fulfill the request. The value is temporary since the ResizeRequest can retry, as long as it's still active and the last attempt value can either be cleared or replaced with a different error. Since ResizeRequest retries infrequently, the value may be stale and no longer show an active problem. The value is cleared when ResizeRequest transitions to the final state (becomes inactive). If the final state is FAILED the error describing it will be storred in the "error" field only. + Structure is documented below. + importStatements: [] google_compute_region_security_policy: subCategory: Compute Engine description: Represents a Region Cloud Armor Security Policy resource. 
@@ -47702,10 +52547,65 @@ resources: } references: provider: google-beta + - name: region-sec-policy-with-rules + manifest: |- + { + "description": "basic region security policy with multiple rules", + "name": "my-sec-policy-with-rules", + "provider": "${google-beta}", + "rules": [ + { + "action": "deny", + "match": [ + { + "expr": [ + { + "expression": "request.path.matches(\"/login.html\") \u0026\u0026 token.recaptcha_session.score \u003c 0.2" + } + ] + } + ], + "priority": "1000" + }, + { + "action": "deny", + "description": "default rule", + "match": [ + { + "config": [ + { + "src_ip_ranges": [ + "*" + ] + } + ], + "versioned_expr": "SRC_IPS_V1" + } + ], + "priority": "2147483647" + } + ], + "type": "CLOUD_ARMOR" + } + references: + provider: google-beta argumentDocs: ADVANCED: ', ADVANCED_PREVIEW, STANDARD.' + ALL: ', IP, HTTP_HEADER, XFF_IP, HTTP_COOKIE, HTTP_PATH, SNI, REGION_CODE, TLS_JA3_FINGERPRINT, USER_IP.' CLOUD_ARMOR: ', CLOUD_ARMOR_EDGE, CLOUD_ARMOR_NETWORK.' IPV4: ', IPV6, TCP, UDP.' + ban_threshold.count: |- + - + (Optional) + Number of HTTP(S) requests for calculating the threshold. + ban_threshold.interval_sec: |- + - + (Optional) + Interval over which the threshold is computed. + config.src_ip_ranges: |- + - + (Optional) + CIDR IP address range. Maximum number of srcIpRanges allowed is 10. create: '- Default is 20 minutes.' ddos_protection_config: |- - @@ -47721,27 +52621,294 @@ resources: - (Optional) An optional description of this resource. Provide this property when you create the resource. + enforce_on_key_configs.enforce_on_key_name: |- + - + (Optional) + Rate limit key name applicable only for the following key types: + HTTP_HEADER -- Name of the HTTP header whose value is taken as the key value. + HTTP_COOKIE -- Name of the HTTP cookie whose value is taken as the key value. + enforce_on_key_configs.enforce_on_key_type: |- + - + (Optional) + Determines the key to enforce the rateLimitThreshold on. 
Possible values are: + expr.expression: |- + - + (Required) + Textual representation of an expression in Common Expression Language syntax. The application context of the containing message determines which well-known feature set of CEL is supported. fingerprint: |- - Fingerprint of this resource. This field is used internally during updates of this resource. id: '- an identifier for the resource with format projects/{{project}}/regions/{{region}}/securityPolicies/{{name}}' + match.config: |- + - + (Optional) + The configuration options available when specifying versionedExpr. + This field must be specified if versionedExpr is specified and cannot be specified if versionedExpr is not specified. + Structure is documented below. + match.expr: |- + - + (Optional) + User defined CEVAL expression. A CEVAL expression is used to specify match criteria such as origin.ip, source.region_code and contents in the request header. See Sample expressions for examples. + Structure is documented below. + match.versioned_expr: |- + - + (Optional) + Preconfigured versioned expression. If this field is specified, config must also be specified. + Available preconfigured expressions along with their requirements are: SRC_IPS_V1 - must specify the corresponding srcIpRange field in config. + Possible values are: SRC_IPS_V1. name: |- - (Required) Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression a-z? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. + network_match.dest_ip_ranges: |- + - + (Optional) + Destination IPv4/IPv6 addresses or CIDR prefixes, in standard text format. + network_match.dest_ports: |- + - + (Optional) + Destination port numbers for TCP/UDP/SCTP. 
Each element can be a 16-bit unsigned decimal number (e.g. "80") or range (e.g. "0-1023"). + network_match.ip_protocols: |- + - + (Optional) + IPv4 protocol / IPv6 next header (after extension headers). Each element can be an 8-bit unsigned decimal number (e.g. "6"), range (e.g. "253-254"), or one of the following protocol names: "tcp", "udp", "icmp", "esp", "ah", "ipip", or "sctp". + network_match.src_asns: |- + - + (Optional) + BGP Autonomous System Number associated with the source IP address. + network_match.src_ip_ranges: |- + - + (Optional) + Source IPv4/IPv6 addresses or CIDR prefixes, in standard text format. + network_match.src_ports: |- + - + (Optional) + Source port numbers for TCP/UDP/SCTP. Each element can be a 16-bit unsigned decimal number (e.g. "80") or range (e.g. "0-1023"). + network_match.src_region_codes: |- + - + (Optional) + Two-letter ISO 3166-1 alpha-2 country code associated with the source IP address. + network_match.user_defined_fields: |- + - + (Optional) + User-defined fields. Each element names a defined field and lists the matching values for that field. + Structure is documented below. policy_id: |- - The unique identifier for the resource. This identifier is defined by the server. + preconfigured_waf_config.exclusion: |- + - + (Optional) + An exclusion to apply during preconfigured WAF evaluation. + Structure is documented below. + preconfigured_waf_config.exclusion.request_cookie: |- + - + (Optional) + Request cookie whose value will be excluded from inspection during preconfigured WAF evaluation. + Structure is documented below. + preconfigured_waf_config.exclusion.request_header: |- + - + (Optional) + Request header whose value will be excluded from inspection during preconfigured WAF evaluation. + Structure is documented below. + preconfigured_waf_config.exclusion.request_query_param: |- + - + (Optional) + Request query parameter whose value will be excluded from inspection during preconfigured WAF evaluation. 
+ Note that the parameter can be in the query string or in the POST body. + Structure is documented below. + preconfigured_waf_config.exclusion.request_uri: |- + - + (Optional) + Request URI from the request line to be excluded from inspection during preconfigured WAF evaluation. + When specifying this field, the query or fragment part should be excluded. + Structure is documented below. + preconfigured_waf_config.exclusion.target_rule_ids: |- + - + (Optional) + A list of target rule IDs under the WAF rule set to apply the preconfigured WAF exclusion. + If omitted, it refers to all the rule IDs under the WAF rule set. + preconfigured_waf_config.exclusion.target_rule_set: |- + - + (Required) + Target WAF rule set to apply the preconfigured WAF exclusion. project: |- - (Optional) The ID of the project in which the resource belongs. If it is not provided, the provider project is used. + rate_limit_options.ban_duration_sec: |- + - + (Optional) + Can only be specified if the action for the rule is "rate_based_ban". + If specified, determines the time (in seconds) the traffic will continue to be banned by the rate limit after the rate falls below the threshold. + rate_limit_options.ban_threshold: |- + - + (Optional) + Can only be specified if the action for the rule is "rate_based_ban". + If specified, the key will be banned for the configured 'banDurationSec' when the number of requests that exceed the 'rateLimitThreshold' also exceed this 'banThreshold'. + Structure is documented below. + rate_limit_options.conform_action: |- + - + (Optional) + Action to take for requests that are under the configured rate limit threshold. + Valid option is "allow" only. + rate_limit_options.enforce_on_key: |- + - + (Optional) + Determines the key to enforce the rateLimitThreshold on. 
Possible values are: + rate_limit_options.enforce_on_key_configs: |- + - + (Optional) + If specified, any combination of values of enforceOnKeyType/enforceOnKeyName is treated as the key on which ratelimit threshold/action is enforced. + You can specify up to 3 enforceOnKeyConfigs. + If enforceOnKeyConfigs is specified, enforceOnKey must not be specified. + Structure is documented below. + rate_limit_options.enforce_on_key_name: |- + - + (Optional) + Rate limit key name applicable only for the following key types: + HTTP_HEADER -- Name of the HTTP header whose value is taken as the key value. + HTTP_COOKIE -- Name of the HTTP cookie whose value is taken as the key value. + rate_limit_options.exceed_action: |- + - + (Optional) + Action to take for requests that are above the configured rate limit threshold, to deny with a specified HTTP response code. + Valid options are deny(STATUS), where valid values for STATUS are 403, 404, 429, and 502. + rate_limit_options.rate_limit_threshold: |- + - + (Optional) + Threshold at which to begin ratelimiting. + Structure is documented below. + rate_limit_threshold.count: |- + - + (Optional) + Number of HTTP(S) requests for calculating the threshold. + rate_limit_threshold.interval_sec: |- + - + (Optional) + Interval over which the threshold is computed. region: |- - (Optional) The Region in which the created Region Security Policy should reside. If it is not provided, the provider region is used. + request_cookie.operator: |- + - + (Required) + You can specify an exact match or a partial match by using a field operator and a field value. + Available options: + EQUALS: The operator matches if the field value equals the specified value. + STARTS_WITH: The operator matches if the field value starts with the specified value. + ENDS_WITH: The operator matches if the field value ends with the specified value. + CONTAINS: The operator matches if the field value contains the specified value. 
+ EQUALS_ANY: The operator matches if the field value is any value. + Possible values are: CONTAINS, ENDS_WITH, EQUALS, EQUALS_ANY, STARTS_WITH. + request_cookie.value: |- + - + (Optional) + A request field matching the specified value will be excluded from inspection during preconfigured WAF evaluation. + The field value must be given if the field operator is not EQUALS_ANY, and cannot be given if the field operator is EQUALS_ANY. + request_header.operator: |- + - + (Required) + You can specify an exact match or a partial match by using a field operator and a field value. + Available options: + EQUALS: The operator matches if the field value equals the specified value. + STARTS_WITH: The operator matches if the field value starts with the specified value. + ENDS_WITH: The operator matches if the field value ends with the specified value. + CONTAINS: The operator matches if the field value contains the specified value. + EQUALS_ANY: The operator matches if the field value is any value. + Possible values are: CONTAINS, ENDS_WITH, EQUALS, EQUALS_ANY, STARTS_WITH. + request_header.value: |- + - + (Optional) + A request field matching the specified value will be excluded from inspection during preconfigured WAF evaluation. + The field value must be given if the field operator is not EQUALS_ANY, and cannot be given if the field operator is EQUALS_ANY. + request_query_param.operator: |- + - + (Required) + You can specify an exact match or a partial match by using a field operator and a field value. + Available options: + EQUALS: The operator matches if the field value equals the specified value. + STARTS_WITH: The operator matches if the field value starts with the specified value. + ENDS_WITH: The operator matches if the field value ends with the specified value. + CONTAINS: The operator matches if the field value contains the specified value. + EQUALS_ANY: The operator matches if the field value is any value. 
+ Possible values are: CONTAINS, ENDS_WITH, EQUALS, EQUALS_ANY, STARTS_WITH. + request_query_param.value: |- + - + (Optional) + A request field matching the specified value will be excluded from inspection during preconfigured WAF evaluation. + The field value must be given if the field operator is not EQUALS_ANY, and cannot be given if the field operator is EQUALS_ANY. + request_uri.operator: |- + - + (Required) + You can specify an exact match or a partial match by using a field operator and a field value. + Available options: + EQUALS: The operator matches if the field value equals the specified value. + STARTS_WITH: The operator matches if the field value starts with the specified value. + ENDS_WITH: The operator matches if the field value ends with the specified value. + CONTAINS: The operator matches if the field value contains the specified value. + EQUALS_ANY: The operator matches if the field value is any value. + Possible values are: CONTAINS, ENDS_WITH, EQUALS, EQUALS_ANY, STARTS_WITH. + request_uri.value: |- + - + (Optional) + A request field matching the specified value will be excluded from inspection during preconfigured WAF evaluation. + The field value must be given if the field operator is not EQUALS_ANY, and cannot be given if the field operator is EQUALS_ANY. + rules: |- + - + (Optional) + The set of rules that belong to this policy. There must always be a default rule (rule with priority 2147483647 and match "*"). If no rules are provided when creating a security policy, a default rule with action "allow" will be added. + Structure is documented below. + rules.action: |- + - + (Required) + The Action to perform when the rule is matched. The following are the valid actions: + rules.description: |- + - + (Optional) + An optional description of this resource. Provide this property when you create the resource. + rules.match: |- + - + (Optional) + A match condition that incoming traffic is evaluated against. 
+ If it evaluates to true, the corresponding 'action' is enforced. + Structure is documented below. + rules.network_match: |- + - + (Optional) + A match condition that incoming packets are evaluated against for CLOUD_ARMOR_NETWORK security policies. If it matches, the corresponding 'action' is enforced. + The match criteria for a rule consists of built-in match fields (like 'srcIpRanges') and potentially multiple user-defined match fields ('userDefinedFields'). + Field values may be extracted directly from the packet or derived from it (e.g. 'srcRegionCodes'). Some fields may not be present in every packet (e.g. 'srcPorts'). A user-defined field is only present if the base header is found in the packet and the entire field is in bounds. + Each match field may specify which values can match it, listing one or more ranges, prefixes, or exact values that are considered a match for the field. A field value must be present in order to match a specified match field. If no match values are specified for a match field, then any field value is considered to match it, and it's not required to be present. For strings specifying '*' is also equivalent to match all. + For a packet to match a rule, all specified match fields must match the corresponding field values derived from the packet. + Example: + networkMatch: srcIpRanges: - "192.0.2.0/24" - "198.51.100.0/24" userDefinedFields: - name: "ipv4_fragment_offset" values: - "1-0x1fff" + The above match condition matches packets with a source IP in 192.0.2.0/24 or 198.51.100.0/24 and a user-defined field named "ipv4_fragment_offset" with a value between 1 and 0x1fff inclusive + Structure is documented below. + rules.preconfigured_waf_config: |- + - + (Optional) + Preconfigured WAF configuration to be applied for the rule. + If the rule does not evaluate preconfigured WAF rules, i.e., if evaluatePreconfiguredWaf() is not used, this field will have no effect. + Structure is documented below. 
+ rules.preview: |- + - + (Optional) + If set to true, the specified action is not enforced. + rules.priority: |- + - + (Required) + An integer indicating the priority of a rule in the list. + The priority must be a positive value between 0 and 2147483647. + Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest priority. + rules.rate_limit_options: |- + - + (Optional) + Must be specified if the action is "rate_based_ban" or "throttle". Cannot be specified for any other actions. + Structure is documented below. self_link: |- - Server-defined URL for the resource. @@ -47782,6 +52949,10 @@ resources: - (Optional) Size of the field in bytes. Valid values: 1-4. + user_defined_fields.values: |- + - + (Optional) + Matching values of the field. Each element can be a 32-bit unsigned decimal or hexadecimal (starting with "0x") number (e.g. "64") or range (e.g. "0x400-0x7ff"). importStatements: [] google_compute_region_security_policy_rule: subCategory: Compute Engine @@ -47895,6 +53066,75 @@ resources: "region": "us-west2", "type": "CLOUD_ARMOR" } + - name: default_rule + manifest: |- + { + "action": "deny", + "description": "new rule", + "match": [ + { + "config": [ + { + "src_ip_ranges": [ + "*" + ] + } + ], + "versioned_expr": "SRC_IPS_V1" + } + ], + "priority": "2147483647", + "provider": "${google-beta}", + "region": "us-west2", + "security_policy": "${google_compute_region_security_policy.default.name}" + } + references: + provider: google-beta + security_policy: google_compute_region_security_policy.default.name + dependencies: + google_compute_region_security_policy.default: |- + { + "description": "basic region security policy", + "name": "policywithdefaultrule", + "provider": "${google-beta}", + "region": "us-west2", + "type": "CLOUD_ARMOR" + } + - name: policy_rule + manifest: |- + { + "action": "allow", + "description": "new rule", + "match": [ + { + "config": [ + { + "src_ip_ranges": [ + "10.10.0.0/16" + 
] + } + ], + "versioned_expr": "SRC_IPS_V1" + } + ], + "preview": true, + "priority": 100, + "provider": "${google-beta}", + "region": "us-west2", + "security_policy": "${google_compute_region_security_policy.default.name}" + } + references: + provider: google-beta + security_policy: google_compute_region_security_policy.default.name + dependencies: + google_compute_region_security_policy.default: |- + { + "description": "basic region security policy", + "name": "policywithdefaultrule", + "provider": "${google-beta}", + "region": "us-west2", + "type": "CLOUD_ARMOR" + } - name: policy_rule manifest: |- { @@ -48492,7 +53732,13 @@ resources: These are in the same namespace as the managed SSL certificates. name_prefix: |- - (Optional) Creates a unique name beginning with the - specified prefix. Conflicts with name. + specified prefix. Conflicts with name. Max length is 54 characters. + Prefixes with lengths longer than 37 characters will use a shortened + UUID that will be more prone to collisions. 
+ Resulting name for a name_prefix <= 37 characters: + name_prefix + YYYYmmddHHSSssss + 8 digit incremental counter + Resulting name for a name_prefix 38 - 54 characters: + name_prefix + YYmmdd + 3 digit incremental counter private_key: |- - (Required) @@ -48573,6 +53819,67 @@ resources: ], "region": "us-central1" } + - name: default + manifest: |- + { + "http_keep_alive_timeout_sec": 600, + "name": "test-http-keep-alive-timeout-proxy", + "region": "us-central1", + "url_map": "${google_compute_region_url_map.default.id}" + } + references: + url_map: google_compute_region_url_map.default.id + dependencies: + google_compute_region_backend_service.default: |- + { + "health_checks": [ + "${google_compute_region_health_check.default.id}" + ], + "load_balancing_scheme": "INTERNAL_MANAGED", + "name": "backend-service", + "port_name": "http", + "protocol": "HTTP", + "region": "us-central1", + "timeout_sec": 10 + } + google_compute_region_health_check.default: |- + { + "http_health_check": [ + { + "port": 80 + } + ], + "name": "http-health-check", + "region": "us-central1" + } + google_compute_region_url_map.default: |- + { + "default_service": "${google_compute_region_backend_service.default.id}", + "host_rule": [ + { + "hosts": [ + "mysite.com" + ], + "path_matcher": "allpaths" + } + ], + "name": "url-map", + "path_matcher": [ + { + "default_service": "${google_compute_region_backend_service.default.id}", + "name": "allpaths", + "path_rule": [ + { + "paths": [ + "/*" + ], + "service": "${google_compute_region_backend_service.default.id}" + } + ] + } + ], + "region": "us-central1" + } - name: default manifest: |- { @@ -48604,6 +53911,14 @@ resources: - (Optional) An optional description of this resource. + http_keep_alive_timeout_sec: |- + - + (Optional) + Specifies how long to keep a connection open, after completing a response, + while there is no matching traffic (in seconds). If an HTTP keepalive is + not specified, a default value (600 seconds) will be used. 
For Regional + HTTP(S) load balancer, the minimum allowed value is 5 seconds and the + maximum allowed value is 600 seconds. id: '- an identifier for the resource with format projects/{{project}}/regions/{{region}}/targetHttpProxies/{{name}}' name: |- - @@ -48711,6 +54026,79 @@ resources: ], "region": "us-central1" } + - name: default + manifest: |- + { + "http_keep_alive_timeout_sec": 600, + "name": "test-http-keep-alive-timeout-proxy", + "region": "us-central1", + "ssl_certificates": [ + "${google_compute_region_ssl_certificate.default.id}" + ], + "url_map": "${google_compute_region_url_map.default.id}" + } + references: + ssl_certificates: google_compute_region_ssl_certificate.default.id + url_map: google_compute_region_url_map.default.id + dependencies: + google_compute_region_backend_service.default: |- + { + "health_checks": [ + "${google_compute_region_health_check.default.id}" + ], + "load_balancing_scheme": "INTERNAL_MANAGED", + "name": "backend-service", + "port_name": "http", + "protocol": "HTTP", + "region": "us-central1", + "timeout_sec": 10 + } + google_compute_region_health_check.default: |- + { + "http_health_check": [ + { + "port": 80 + } + ], + "name": "http-health-check", + "region": "us-central1" + } + google_compute_region_ssl_certificate.default: |- + { + "certificate": "${file(\"path/to/certificate.crt\")}", + "name": "my-certificate", + "private_key": "${file(\"path/to/private.key\")}", + "region": "us-central1" + } + google_compute_region_url_map.default: |- + { + "default_service": "${google_compute_region_backend_service.default.id}", + "description": "a description", + "host_rule": [ + { + "hosts": [ + "mysite.com" + ], + "path_matcher": "allpaths" + } + ], + "name": "url-map", + "path_matcher": [ + { + "default_service": "${google_compute_region_backend_service.default.id}", + "name": "allpaths", + "path_rule": [ + { + "paths": [ + "/*" + ], + "service": "${google_compute_region_backend_service.default.id}" + } + ] + } + ], + "region": 
"us-central1" + } - name: default manifest: |- { @@ -48884,6 +54272,14 @@ resources: - (Optional) An optional description of this resource. + http_keep_alive_timeout_sec: |- + - + (Optional) + Specifies how long to keep a connection open, after completing a response, + while there is no matching traffic (in seconds). If an HTTP keepalive is + not specified, a default value (600 seconds) will be used. For Regioanl + HTTP(S) load balancer, the minimum allowed value is 5 seconds and the + maximum allowed value is 600 seconds. id: '- an identifier for the resource with format projects/{{project}}/regions/{{region}}/targetHttpsProxies/{{name}}' name: |- - @@ -51457,7 +56853,7 @@ resources: importStatements: [] google_compute_resize_request: subCategory: Compute Engine - description: Represents a Managed Instance Group Resize Request + description: Represents a Managed Instance Group Resize Request Resize Requests are the Managed Instance Group implementation of Dynamic Workload Scheduler Flex Start. name: google_compute_resize_request title: "" examples: @@ -51556,62 +56952,61 @@ resources: error.errors: |- - (Output) - [Output Only] The array of errors encountered while processing this operation. + The array of errors encountered while processing this operation. Structure is documented below. error.errors.code: |- - (Output) - [Output Only] The error type identifier for this error. + The error type identifier for this error. error.errors.error_details: |- - (Output) - [Output Only] An optional list of messages that contain the error details. There is a set of defined message types to use for providing details.The syntax depends on the error code. For example, QuotaExceededInfo will have details when the error code is QUOTA_EXCEEDED. + An array of messages that contain the error details. There is a set of defined message types to use for providing details.The syntax depends on the error code. 
For example, QuotaExceededInfo will have details when the error code is QUOTA_EXCEEDED. Structure is documented below. error.errors.location: |- - (Output) - Output Only] Indicates the field in the request that caused the error. This property is optional. + Indicates the field in the request that caused the error. This property is optional. error.errors.message: |- - (Output) - [Output Only] An optional, human-readable error message. + An optional, human-readable error message. error_details.error_info: |- - (Output) - [Output Only] + A nested object resource. Structure is documented below. error_details.help: |- - (Output) - [Output Only] + A nested object resource. Structure is documented below. error_details.localized_message: |- - (Output) - [Output Only] + A nested object resource. Structure is documented below. error_details.quota_info: |- - (Output) - [Output Only] + A nested object resource. Structure is documented below. error_info.domain: |- - (Output) - The logical grouping to which the "reason" belongs. The error domain is typically the registered service name of the tool or product that generates the error. Example: "pubsub.googleapis.com". If the error is generated by some common infrastructure, the error domain must be a globally unique value that identifies the infrastructure. For Google API infrastructure, the error domain is "googleapis.com". + The logical grouping to which the "reason" belongs. The error domain is typically the registered service name of the tool or product that generates the error. Example: "pubsub.googleapis.com". error_info.metadatas: |- - (Output) Additional structured details about this error. - Keys must match /[a-z][a-zA-Z0-9-_]+/ but should ideally be lowerCamelCase. Also they must be limited to 64 characters in length. When identifying the current value of an exceeded limit, the units should be contained in the key, not the value. 
For example, rather than {"instanceLimit": "100/request"}, should be returned as, {"instanceLimitPerRequest": "100"}, if the client exceeds the number of instances that can be created in a single (batch) request. error_info.reason: |- - (Output) - The reason of the error. This is a constant value that identifies the proximate cause of the error. Error reasons are unique within a particular domain of errors. This should be at most 63 characters and match a regular expression of [A-Z][A-Z0-9_]+[A-Z0-9], which represents UPPER_SNAKE_CASE. + The reason of the error. This is a constant value that identifies the proximate cause of the error. Error reasons are unique within a particular domain of errors. help.links: |- - (Output) - [Output Only] + A nested object resource. Structure is documented below. help.links.description: |- - @@ -51625,36 +57020,34 @@ resources: instance_group_manager: |- - (Required) - The name of the managed instance group. The name should conform to RFC1035 or be a resource ID. - Authorization requires the following IAM permission on the specified resource instanceGroupManager: - *compute.instanceGroupManagers.update + The reference of the instance group manager this ResizeRequest is a part of. last_attempt.error: |- - (Output) - [Output only] Fatal errors encountered during the queueing or provisioning phases of the ResizeRequest that caused the transition to the FAILED state. Contrary to the lastAttempt errors, this field is final and errors are never removed from here, as the ResizeRequest is not going to retry. + Fatal errors encountered during the queueing or provisioning phases of the ResizeRequest that caused the transition to the FAILED state. Contrary to the lastAttempt errors, this field is final and errors are never removed from here, as the ResizeRequest is not going to retry. Structure is documented below. last_attempt.error.errors: |- - (Output) - [Output Only] The array of errors encountered while processing this operation. 
+ The array of errors encountered while processing this operation. Structure is documented below. last_attempt.error.errors.code: |- - (Output) - [Output Only] The error type identifier for this error. + The error type identifier for this error. last_attempt.error.errors.error_details: |- - (Output) - [Output Only] An optional list of messages that contain the error details. There is a set of defined message types to use for providing details.The syntax depends on the error code. For example, QuotaExceededInfo will have details when the error code is QUOTA_EXCEEDED. + An array of messages that contain the error details. There is a set of defined message types to use for providing details.The syntax depends on the error code. For example, QuotaExceededInfo will have details when the error code is QUOTA_EXCEEDED. Structure is documented below. last_attempt.error.errors.location: |- - (Output) - Output Only] Indicates the field in the request that caused the error. This property is optional. + Indicates the field in the request that caused the error. This property is optional. last_attempt.error.errors.message: |- - (Output) - [Output Only] An optional, human-readable error message. + An optional, human-readable error message. localized_message.locale: |- - (Output) @@ -51706,32 +57099,32 @@ resources: requested_run_duration.seconds: |- - (Required) - Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + Span of time at a resolution of a second. Must be from 600 to 604800 inclusive. Note: minimum and maximum allowed range for requestedRunDuration is 10 minutes (600 seconds) and 7 days(604800 seconds) correspondingly. resize_by: |- - (Required) The number of instances to be created by this resize request. The group's target size will be increased by this number. state: |- - - [Output only] Current state of the request. 
+ Current state of the request. status: |- - - [Output only] Status of the request. + Status of the request. + Structure is documented below. status.error: |- - (Output) - [Output only] Fatal errors encountered during the queueing or provisioning phases of the ResizeRequest that caused the transition to the FAILED state. Contrary to the lastAttempt errors, this field is final and errors are never removed from here, as the ResizeRequest is not going to retry. + Fatal errors encountered during the queueing or provisioning phases of the ResizeRequest that caused the transition to the FAILED state. Contrary to the lastAttempt errors, this field is final and errors are never removed from here, as the ResizeRequest is not going to retry. + Structure is documented below. status.last_attempt: |- - (Output) - [Output only] Information about the last attempt to fulfill the request. The value is temporary since the ResizeRequest can retry, as long as it's still active and the last attempt value can either be cleared or replaced with a different error. Since ResizeRequest retries infrequently, the value may be stale and no longer show an active problem. The value is cleared when ResizeRequest transitions to the final state (becomes inactive). If the final state is FAILED the error describing it will be storred in the "error" field only. + Information about the last attempt to fulfill the request. The value is temporary since the ResizeRequest can retry, as long as it's still active and the last attempt value can either be cleared or replaced with a different error. Since ResizeRequest retries infrequently, the value may be stale and no longer show an active problem. The value is cleared when ResizeRequest transitions to the final state (becomes inactive). If the final state is FAILED the error describing it will be stored in the "error" field only. + Structure is documented below. zone: |- - (Required) - Name of the compute zone scoping this request. Name should conform to RFC1035. 
+ The reference of the compute zone scoping this request. importStatements: [] google_compute_resource_policy: subCategory: Compute Engine @@ -52320,13 +57713,25 @@ resources: URL to an instance that should handle matching packets. You can specify this as a full or partial URL. For example: next_hop_instance_zone: . + next_hop_inter_region_cost: |- + - + (Beta) + Internal fixed region-to-region cost that Google Cloud calculates based on factors such as network performance, distance, and available bandwidth between regions. next_hop_ip: |- - (Optional) Network IP address of an instance that should handle matching packets. + next_hop_med: |- + - + (Beta) + Multi-Exit Discriminator, a BGP route metric that indicates the desirability of a particular route in a network. next_hop_network: |- - URL to a Network that should handle matching packets. + next_hop_origin: |- + - + (Beta) + Indicates the origin of the route. Can be IGP (Interior Gateway Protocol), EGP (Exterior Gateway Protocol), or INCOMPLETE. next_hop_vpn_tunnel: |- - (Optional) @@ -52637,6 +58042,11 @@ resources: google_compute_address.address: |- { "count": 2, + "lifecycle": [ + { + "create_before_destroy": true + } + ], "name": "nat-manual-ip-${count.index}", "region": "${google_compute_subnetwork.subnet.region}" } @@ -52885,6 +58295,11 @@ resources: (Optional) Timeout (in seconds) for ICMP connections. Defaults to 30s if not set. id: '- an identifier for the resource with format {{project}}/{{region}}/{{router}}/{{name}}' + initial_nat_ips: |- + - + (Optional) + Self-links of NAT IPs to be used as initial value for creation alongside a RouterNatAddress resource. + Conflicts with natIps and drainNatIps. Only valid if natIpAllocateOption is set to MANUAL_ONLY. log_config: |- - (Optional) @@ -52925,6 +58340,9 @@ resources: (Optional) Self-links of NAT IPs. Only valid if natIpAllocateOption is set to MANUAL_ONLY. 
+ If this field is used alongside with a count created list of address resources google_compute_address.foobar.*.self_link, + the access level resource for the address resource must have a lifecycle block with create_before_destroy = true so + the number of resources can be increased/decreased without triggering the resourceInUseByAnotherResource error. project: |- - (Optional) The ID of the project in which the resource belongs. If it is not provided, the provider project is used. @@ -53033,6 +58451,103 @@ resources: Timeout (in seconds) for UDP connections. Defaults to 30s if not set. update: '- Default is 20 minutes.' importStatements: [] + google_compute_router_nat_address: + subCategory: Compute Engine + description: A resource used to set the list of IP addresses to be used in a NAT service and manage the draining of destroyed IPs. + name: google_compute_router_nat_address + title: "" + examples: + - name: nat_address + manifest: |- + { + "nat_ips": "${google_compute_address.address.*.self_link}", + "region": "${google_compute_router_nat.router_nat.region}", + "router": "${google_compute_router.router.name}", + "router_nat": "${google_compute_router_nat.router_nat.name}" + } + references: + region: google_compute_router_nat.router_nat.region + router: google_compute_router.router.name + router_nat: google_compute_router_nat.router_nat.name + dependencies: + google_compute_address.address: |- + { + "count": 3, + "lifecycle": [ + { + "create_before_destroy": true + } + ], + "name": "nat-manual-ip-${count.index}", + "region": "${google_compute_subnetwork.subnet.region}" + } + google_compute_network.net: |- + { + "name": "my-network" + } + google_compute_router.router: |- + { + "name": "my-router", + "network": "${google_compute_network.net.id}", + "region": "${google_compute_subnetwork.subnet.region}" + } + google_compute_router_nat.router_nat: |- + { + "initial_nat_ips": [ + "${google_compute_address.address[0].self_link}" + ], + "name": "my-router-nat", + 
"nat_ip_allocate_option": "MANUAL_ONLY", + "region": "${google_compute_router.router.region}", + "router": "${google_compute_router.router.name}", + "source_subnetwork_ip_ranges_to_nat": "LIST_OF_SUBNETWORKS", + "subnetwork": [ + { + "name": "${google_compute_subnetwork.subnet.id}", + "source_ip_ranges_to_nat": [ + "ALL_IP_RANGES" + ] + } + ] + } + google_compute_subnetwork.subnet: |- + { + "ip_cidr_range": "10.0.0.0/16", + "name": "my-subnetwork", + "network": "${google_compute_network.net.id}", + "region": "us-central1" + } + argumentDocs: + create: '- Default is 20 minutes.' + delete: '- Default is 20 minutes.' + drain_nat_ips: |- + - + (Optional) + A list of URLs of the IP resources to be drained. These IPs must be + valid static external IPs that have been assigned to the NAT. + id: '- an identifier for the resource with format projects/{{project}}/regions/{{region}}/routers/{{router}}/{{router_nat}}' + nat_ips: |- + - + (Required) + Self-links of NAT IPs to be used in a Nat service. Only valid if the referenced RouterNat + natIpAllocateOption is set to MANUAL_ONLY. + project: |- + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + region: |- + - + (Optional) + Region where the NAT service reside. + router: |- + - + (Required) + The name of the Cloud Router in which the referenced NAT service is configured. + router_nat: |- + - + (Required) + The name of the Nat service in which this address will be configured. + update: '- Default is 20 minutes.' + importStatements: [] google_compute_router_peer: subCategory: Compute Engine description: BGP information that must be configured into the routing stack to establish BGP peering. @@ -53480,6 +58995,25 @@ resources: If set to DISABLED, BFD is disabled for this BGP peer. Possible values are: ACTIVE, DISABLED, PASSIVE. create: '- Default is 20 minutes.' + custom_learned_ip_ranges: |- + - + (Optional) + The custom learned route IP address range. 
Must be a valid CIDR-formatted prefix. + If an IP address is provided without a subnet mask, it is interpreted as, for IPv4, + a /32 singular IP address range, and, for IPv6, /128. + Structure is documented below. + custom_learned_ip_ranges.range: |- + - + (Required) + The IP range to learn. The value must be a + CIDR-formatted string. + custom_learned_route_priority: |- + - + (Optional) + The user-defined custom learned route priority for a BGP session. + This value is applied to all custom learned route ranges for the session. + You can choose a value from 0 to 65535. If you don't provide a value, + Google Cloud assigns a priority of 100 to the ranges. delete: '- Default is 20 minutes.' enable: |- - @@ -54055,6 +59589,7 @@ resources: value, only the type/subtype needs to be specified, and the parameters should be excluded. layer_7_ddos_defense_config.enable: '- (Optional) If set to true, enables CAAP for L7 DDoS detection.' layer_7_ddos_defense_config.rule_visibility: '- (Optional) Rule visibility can be one of the following:' + layer_7_ddos_defense_config.threshold_configs: '- (Optional) Configuration options for layer7 adaptive protection for various customizable thresholds. Structure is documented below.' match.config: |- - (Optional) The configuration options available when specifying versioned_expr. This field must be specified if versioned_expr is specified and cannot be specified if versioned_expr is not specified. @@ -54119,7 +59654,7 @@ resources: rule.match: |- - (Required) A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding action is enforced. Structure is documented below. - rule.preconfigured_waf_config: '- (Optional, Beta) Preconfigured WAF configuration to be applied for the rule. If the rule does not evaluate preconfigured WAF rules, i.e., if evaluatePreconfiguredWaf() is not used, this field will have no effect. Structure is documented below.' 
+ rule.preconfigured_waf_config: '- (Optional) Preconfigured WAF configuration to be applied for the rule. If the rule does not evaluate preconfigured WAF rules, i.e., if evaluatePreconfiguredWaf() is not used, this field will have no effect. Structure is documented below.' rule.preview: |- - (Optional) When set to true, the action specified above is not enforced. Stackdriver logs for requests that trigger a preview action are annotated as such. @@ -54133,7 +59668,19 @@ resources: - (Optional) Can be specified if the action is redirect. Cannot be specified for other actions. Structure is documented below. self_link: '- The URI of the created resource.' + threshold_configs.auto_deploy_confidence_threshold: '- (Optional) Confidence threshold above which Adaptive Protection''s auto-deploy takes actions.' + threshold_configs.auto_deploy_expiration_sec: '- (Optional) Duration over which Adaptive Protection''s auto-deployed actions last.' + threshold_configs.auto_deploy_impacted_baseline_threshold: '- (Optional) Impacted baseline threshold below which Adaptive Protection''s auto-deploy takes actions.' + threshold_configs.auto_deploy_load_threshold: '- (Optional) Load threshold above which Adaptive Protection automatically deploy threshold based on the backend load threshold and detect a new rule during an alerted attack.' + threshold_configs.detection_absolute_qps: '- (Optional) Detection threshold based on absolute QPS.' + threshold_configs.detection_load_threshold: '- (Optional) Detection threshold based on the backend service''s load.' + threshold_configs.detection_relative_to_baseline_qps: '- (Optional) Detection threshold based on QPS relative to the average of baseline traffic.' + threshold_configs.name: '- The name of config. The name must be 1-63 characters long, and comply with RFC1035. The name must be unique within the security policy.' 
+ threshold_configs.traffic_granularity_configs: '- (Optional) Configuration options for enabling Adaptive Protection to work on the specified service granularity. Structure is documented below.' throttle: ': limit client traffic to the configured threshold. Configure parameters for this action in rate_limit_options. Requires rate_limit_options to be set for this.' + traffic_granularity_configs.enable_each_unique_value: '- (Optional) If enabled, traffic matching each unique value for the specified type constitutes a separate traffic unit. It can only be set to true if value is empty.' + traffic_granularity_configs.type: '- The type of this configuration, a granular traffic unit can be one of the following:' + traffic_granularity_configs.value: '- (Optional) Requests that match this value constitute a granular traffic unit.' type: '- The type indicates the intended use of the security policy. This field can be set only at resource creation time.' importStatements: [] google_compute_security_policy_rule: @@ -54175,7 +59722,7 @@ resources: - name: default_rule manifest: |- { - "action": "allow", + "action": "deny", "description": "default rule", "match": [ { @@ -54351,6 +59898,24 @@ resources: - (Optional) A list of site keys to be used during the validation of reCAPTCHA session-tokens. The provided site keys need to be created from reCAPTCHA API under the same project where the security policy is created. + header_action: |- + - + (Optional) + Optional, additional actions that are performed on headers. This field is only supported in Global Security Policies of type CLOUD_ARMOR. + Structure is documented below. + header_action.request_headers_to_adds: |- + - + (Optional) + The list of request headers to add or overwrite if they're already present. + Structure is documented below. + header_action.request_headers_to_adds.header_name: |- + - + (Optional) + The name of the header to set. 
+ header_action.request_headers_to_adds.header_value: |- + - + (Optional) + The value to set the named header to. id: '- an identifier for the resource with format projects/{{project}}/global/securityPolicies/{{security_policy}}/priority/{{priority}}' match: |- - @@ -54496,6 +60061,19 @@ resources: - (Optional) Interval over which the threshold is computed. + redirect_options: |- + - + (Optional) + Parameters defining the redirect action. Cannot be specified for any other actions. This field is only supported in Global Security Policies of type CLOUD_ARMOR. + Structure is documented below. + redirect_options.target: |- + - + (Optional) + Target for the redirect action. This is required if the type is EXTERNAL_302 and cannot be specified for GOOGLE_RECAPTCHA. + redirect_options.type: |- + - + (Optional) + Type of the redirect action. request_cookie.operator: |- - (Required) @@ -54936,10 +60514,22 @@ resources: An array of the consumer forwarding rules connected to this service attachment. Structure is documented below. + connected_endpoints.consumer_network: |- + - + (Output) + The url of the consumer network. connected_endpoints.endpoint: |- - (Output) The URL of the consumer forwarding rule. + connected_endpoints.propagated_connection_count: |- + - + (Output) + The number of consumer Network Connectivity Center spokes that the connected Private Service Connect endpoint has propagated to. + connected_endpoints.psc_connection_id: |- + - + (Output) + The PSC connection id of the connected endpoint. connected_endpoints.status: |- - (Output) @@ -55016,6 +60606,14 @@ resources: project: |- - (Optional) The ID of the project in which the resource belongs. If it is not provided, the provider project is used. + propagated_connection_limit: |- + - + (Optional) + The number of consumer spokes that connected Private Service Connect endpoints can be propagated to through Network Connectivity Center. 
+ This limit lets the service producer limit how many propagated Private Service Connect connections can be established to this service attachment from a single consumer. + If the connection preference of the service attachment is ACCEPT_MANUAL, the limit applies to each project or network that is listed in the consumer accept list. + If the connection preference of the service attachment is ACCEPT_AUTOMATIC, the limit applies to each project that contains a connected endpoint. + If unspecified, the default propagated connection limit is 250. reconcile_connections: |- - (Optional) @@ -55453,7 +61051,13 @@ resources: These are in the same namespace as the managed SSL certificates. name_prefix: |- - (Optional) Creates a unique name beginning with the - specified prefix. Conflicts with name. + specified prefix. Conflicts with name. Max length is 54 characters. + Prefixes with lengths longer than 37 characters will use a shortened + UUID that will be more prone to collisions. + Resulting name for a name_prefix <= 37 characters: + name_prefix + YYYYmmddHHSSssss + 8 digit incremental counter + Resulting name for a name_prefix 38 - 54 characters: + name_prefix + YYmmdd + 3 digit incremental counter private_key: |- - (Required) @@ -55708,6 +61312,86 @@ resources: "name": "net-cidr-overlap", "provider": "${google-beta}" } + - name: subnetwork-reserved-internal-range + manifest: |- + { + "name": "subnetwork-reserved-internal-range", + "network": "${google_compute_network.default.id}", + "provider": "${google-beta}", + "region": "us-central1", + "reserved_internal_range": "networkconnectivity.googleapis.com/${google_network_connectivity_internal_range.reserved.id}" + } + references: + network: google_compute_network.default.id + provider: google-beta + dependencies: + google_compute_network.default: |- + { + "auto_create_subnetworks": false, + "name": "network-reserved-internal-range", + "provider": "${google-beta}" + } + google_network_connectivity_internal_range.reserved: 
|- + { + "name": "reserved", + "network": "${google_compute_network.default.id}", + "peering": "FOR_SELF", + "prefix_length": 24, + "provider": "${google-beta}", + "target_cidr_range": [ + "10.0.0.0/8" + ], + "usage": "FOR_VPC" + } + - name: subnetwork-reserved-secondary-range + manifest: |- + { + "name": "subnetwork-reserved-secondary-range", + "network": "${google_compute_network.default.id}", + "provider": "${google-beta}", + "region": "us-central1", + "reserved_internal_range": "networkconnectivity.googleapis.com/${google_network_connectivity_internal_range.reserved.id}", + "secondary_ip_range": [ + { + "range_name": "secondary", + "reserved_internal_range": "networkconnectivity.googleapis.com/${google_network_connectivity_internal_range.reserved_secondary.id}" + } + ] + } + references: + network: google_compute_network.default.id + provider: google-beta + dependencies: + google_compute_network.default: |- + { + "auto_create_subnetworks": false, + "name": "network-reserved-secondary-range", + "provider": "${google-beta}" + } + google_network_connectivity_internal_range.reserved: |- + { + "name": "reserved-primary", + "network": "${google_compute_network.default.id}", + "peering": "FOR_SELF", + "prefix_length": 24, + "provider": "${google-beta}", + "target_cidr_range": [ + "10.0.0.0/8" + ], + "usage": "FOR_VPC" + } + google_network_connectivity_internal_range.reserved_secondary: |- + { + "name": "reserved-secondary", + "network": "${google_compute_network.default.id}", + "peering": "FOR_SELF", + "prefix_length": 16, + "provider": "${google-beta}", + "target_cidr_range": [ + "10.0.0.0/8" + ], + "usage": "FOR_VPC" + } argumentDocs: allow_subnet_cidr_routes_overlap: |- - @@ -55741,11 +61425,12 @@ resources: The internal IPv6 address range that is assigned to this subnetwork. ip_cidr_range: |- - - (Required) + (Optional) The range of internal addresses that are owned by this subnetwork. Provide this property when you create the subnetwork. 
For example, 10.0.0.0/8 or 192.168.0.0/16. Ranges must be unique and non-overlapping within a network. Only IPv4 is supported. + Field is optional when reserved_internal_range is defined, otherwise required. ipv6_access_type: |- - (Optional) @@ -55830,17 +61515,22 @@ resources: purpose: |- - (Optional) - The purpose of the resource. This field can be either PRIVATE_RFC_1918, REGIONAL_MANAGED_PROXY, GLOBAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT or PRIVATE_NAT(Beta). + The purpose of the resource. This field can be either PRIVATE, REGIONAL_MANAGED_PROXY, GLOBAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT or PRIVATE_NAT(Beta). A subnet with purpose set to REGIONAL_MANAGED_PROXY is a user-created subnetwork that is reserved for regional Envoy-based load balancers. A subnetwork in a given region with purpose set to GLOBAL_MANAGED_PROXY is a proxy-only subnet and is shared between all the cross-regional Envoy-based load balancers. A subnetwork with purpose set to PRIVATE_SERVICE_CONNECT reserves the subnet for hosting a Private Service Connect published service. A subnetwork with purpose set to PRIVATE_NAT is used as source range for Private NAT gateways. Note that REGIONAL_MANAGED_PROXY is the preferred setting for all regional Envoy load balancers. - If unspecified, the purpose defaults to PRIVATE_RFC_1918. + If unspecified, the purpose defaults to PRIVATE. region: |- - (Optional) The GCP region for this subnetwork. + reserved_internal_range: |- + - + (Optional) + The ID of the reserved internal range. Must be prefixed with networkconnectivity.googleapis.com + E.g. networkconnectivity.googleapis.com/projects/{project}/locations/global/internalRanges/{rangeId} role: |- - (Optional) @@ -55863,11 +61553,12 @@ resources: Structure is documented below. secondary_ip_range.ip_cidr_range: |- - - (Required) + (Optional) The range of IP addresses belonging to this subnetwork secondary range. Provide this property when you create the subnetwork. 
Ranges must be unique and non-overlapping with all primary and secondary IP ranges within a network. Only IPv4 is supported. + Field is optional when reserved_internal_range is defined, otherwise required. secondary_ip_range.range_name: |- - (Required) @@ -55875,6 +61566,11 @@ resources: when adding an alias IP range to a VM instance. The name must be 1-63 characters long, and comply with RFC1035. The name must be unique within the subnetwork. + secondary_ip_range.reserved_internal_range: |- + - + (Optional) + The ID of the reserved internal range. Must be prefixed with networkconnectivity.googleapis.com + E.g. networkconnectivity.googleapis.com/projects/{project}/locations/global/internalRanges/{rangeId} self_link: '- The URI of the created resource.' send_secondary_ip_range_if_empty: |- - (Optional) Controls the removal behavior of secondary_ip_range. @@ -55889,6 +61585,9 @@ resources: The stack type for this subnet to identify whether the IPv6 feature is enabled or not. If not specified IPV4_ONLY will be used. Possible values are: IPV4_ONLY, IPV4_IPV6. + subnetwork_id: |- + - + The unique identifier number for the resource. This identifier is defined by the server. update: '- Default is 20 minutes.' importStatements: [] google_compute_subnetwork_iam_policy: @@ -56295,10 +61994,13 @@ resources: (Optional) Specifies how long to keep a connection open, after completing a response, while there is no matching traffic (in seconds). If an HTTP keepalive is - not specified, a default value (610 seconds) will be used. For Global - external HTTP(S) load balancer, the minimum allowed value is 5 seconds and - the maximum allowed value is 1200 seconds. For Global external HTTP(S) - load balancer (classic), this option is not available publicly. + not specified, a default value will be used. For Global + external HTTP(S) load balancer, the default value is 610 seconds, the + minimum allowed value is 5 seconds and the maximum allowed value is 1200 + seconds. 
For cross-region internal HTTP(S) load balancer, the default + value is 600 seconds, the minimum allowed value is 5 seconds, and the + maximum allowed value is 600 seconds. For Global external HTTP(S) load + balancer (classic), this option is not available publicly. id: '- an identifier for the resource with format projects/{{project}}/global/targetHttpProxies/{{name}}' name: |- - @@ -56664,10 +62366,13 @@ resources: (Optional) Specifies how long to keep a connection open, after completing a response, while there is no matching traffic (in seconds). If an HTTP keepalive is - not specified, a default value (610 seconds) will be used. For Global - external HTTP(S) load balancer, the minimum allowed value is 5 seconds and - the maximum allowed value is 1200 seconds. For Global external HTTP(S) - load balancer (classic), this option is not available publicly. + not specified, a default value will be used. For Global + external HTTP(S) load balancer, the default value is 610 seconds, the + minimum allowed value is 5 seconds and the maximum allowed value is 1200 + seconds. For cross-region internal HTTP(S) load balancer, the default + value is 600 seconds, the minimum allowed value is 5 seconds, and the + maximum allowed value is 600 seconds. For Global external HTTP(S) load + balancer (classic), this option is not available publicly. id: '- an identifier for the resource with format projects/{{project}}/global/targetHttpsProxies/{{name}}' name: |- - @@ -56712,6 +62417,10 @@ resources: INTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED loadBalancingScheme consult ServerTlsPolicy documentation. If left blank, communications are not encrypted. + If you remove this field from your configuration at the same time as + deleting or recreating a referenced ServerTlsPolicy resource, you will + receive a resourceInUseByAnotherResource error. Use lifecycle.create_before_destroy + within the ServerTlsPolicy resource to avoid this. 
ssl_certificates: |- - (Optional) @@ -58082,7 +63791,7 @@ resources: "5xx" ], "override_response_code": 502, - "path": "/*" + "path": "/internal_error.html" } ], "error_service": "${google_compute_backend_bucket.error.id}" @@ -58110,14 +63819,14 @@ resources: "5xx" ], "override_response_code": 404, - "path": "/login" + "path": "/login_error.html" }, { "match_response_codes": [ "503" ], "override_response_code": 502, - "path": "/example" + "path": "/bad_gateway.html" } ], "error_service": "${google_compute_backend_bucket.error.id}" @@ -58135,14 +63844,14 @@ resources: "4xx" ], "override_response_code": 401, - "path": "/register" + "path": "/login.html" } ], "error_service": "${google_compute_backend_bucket.error.id}" } ], "paths": [ - "/*" + "/private/*" ], "service": "${google_compute_backend_service.example.id}" } @@ -58268,7 +63977,7 @@ resources: default_custom_error_response_policy: |- - (Optional, Beta) - defaultCustomErrorResponsePolicy specifies how the Load Balancer returns error responses when BackendServiceor BackendBucket responds with an error. + defaultCustomErrorResponsePolicy specifies how the Load Balancer returns error responses when BackendService or BackendBucket responds with an error. This policy takes effect at the PathMatcher level and applies only when no policy has been defined for the error code at lower levels like RouteRule and PathRule within this PathMatcher. If an error code does not have a policy defined in defaultCustomErrorResponsePolicy, then a policy defined for the error code in UrlMap.defaultCustomErrorResponsePolicy takes effect. 
For example, consider a UrlMap with the following configuration: UrlMap.defaultCustomErrorResponsePolicy is configured with policies for 5xx and 4xx errors @@ -58676,7 +64385,7 @@ resources: path_matcher.default_custom_error_response_policy: |- - (Optional, Beta) - defaultCustomErrorResponsePolicy specifies how the Load Balancer returns error responses when BackendServiceor BackendBucket responds with an error. + defaultCustomErrorResponsePolicy specifies how the Load Balancer returns error responses when BackendService or BackendBucket responds with an error. This policy takes effect at the PathMatcher level and applies only when no policy has been defined for the error code at lower levels like RouteRule and PathRule within this PathMatcher. If an error code does not have a policy defined in defaultCustomErrorResponsePolicy, then a policy defined for the error code in UrlMap.defaultCustomErrorResponsePolicy takes effect. For example, consider a UrlMap with the following configuration: UrlMap.defaultCustomErrorResponsePolicy is configured with policies for 5xx and 4xx errors @@ -58744,7 +64453,7 @@ resources: path_rule.custom_error_response_policy: |- - (Optional, Beta) - customErrorResponsePolicy specifies how the Load Balancer returns error responses when BackendServiceor BackendBucket responds with an error. + customErrorResponsePolicy specifies how the Load Balancer returns error responses when BackendService or BackendBucket responds with an error. If a policy for an error code is not configured for the PathRule, a policy for the error code configured in pathMatcher.defaultCustomErrorResponsePolicy is applied. If one is not specified in pathMatcher.defaultCustomErrorResponsePolicy, the policy configured in UrlMap.defaultCustomErrorResponsePolicy takes effect. 
For example, consider a UrlMap with the following configuration: UrlMap.defaultCustomErrorResponsePolicy are configured with policies for 5xx and 4xx errors @@ -59911,7 +65620,7 @@ resources: (Optional) Binary Authorization configuration. Structure is documented below. - fleet.deletion_policy: '- (Optional) Policy to determine what flags to send on delete.' + fleet.deletion_policy: '- (Optional) Policy to determine what flags to send on delete. Possible values: DELETE, DELETE_IGNORE_ERRORS' fleet.description: |- - (Optional) @@ -59943,6 +65652,11 @@ resources: (Optional) Support for proxy configuration. Structure is documented below. + fleet.security_posture_config: |- + - + (Optional, Deprecated) + Enable/Disable Security Posture API features for the cluster. + Structure is documented below. id: '- an identifier for the resource with format projects/{{project}}/locations/{{location}}/attachedClusters/{{name}}' kubernetes_version: |- - @@ -60015,6 +65729,11 @@ resources: reconciling: |- - If set, there are currently changes in flight to the cluster. + security_posture_config.vulnerability_mode: |- + - + (Required) + Sets the mode of the Kubernetes security posture API's workload vulnerability scanning. + Possible values are: VULNERABILITY_DISABLED, VULNERABILITY_ENTERPRISE. state: |- - The current state of the cluster. Possible values: @@ -60689,6 +66408,14 @@ resources: ] } ], + "kubelet_config": [ + { + "cpu_cfs_quota": true, + "cpu_cfs_quota_period": "100ms", + "cpu_manager_policy": "none", + "pod_pids_limit": 1024 + } + ], "location": "us-west1", "management": [ { @@ -61253,6 +66980,22 @@ resources: - (Optional) The tenancy for the instance. Possible values: TENANCY_UNSPECIFIED, DEFAULT, DEDICATED, HOST + kubelet_config.cpu_cfs_quota: |- + - + (Optional) + Whether or not to enable CPU CFS quota. Defaults to true. + kubelet_config.cpu_cfs_quota_period: |- + - + (Optional) + Optional. The CPU CFS quota period to use for the node. Defaults to "100ms". 
+ kubelet_config.cpu_manager_policy: |- + - + (Optional) + The CpuManagerPolicy to use for the node. Defaults to "none". + kubelet_config.pod_pids_limit: |- + - + (Optional) + Optional. The maximum number of PIDs in each pod running on the node. The limit scales automatically based on underlying machine size if left unset. location: |- - (Required) @@ -61269,6 +67012,10 @@ resources: - (Optional) Optional. Annotations on the node pool. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. Key can have 2 segments: prefix (optional) and name (required), separated by a slash (/). Prefix must be a DNS subdomain. Name must be 63 characters or less, begin and end with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics between. + max_pods_constraint.kubelet_config: |- + - + (Optional) + The kubelet configuration for the node pool. max_pods_constraint.management: |- - (Optional) @@ -62110,6 +67857,8 @@ resources: CGROUP_MODE_UNSPECIFIED: ': CGROUP_MODE_UNSPECIFIED is when unspecified cgroup configuration is used. The default for the GKE node OS image will be used.' CGROUP_MODE_V1: ': CGROUP_MODE_V1 specifies to use cgroupv1 for the cgroup configuration on the node image.' CGROUP_MODE_V2: ': CGROUP_MODE_V2 specifies to use cgroupv2 for the cgroup configuration on the node image.' + EPHEMERAL_KEY_ENCRYPTION: ': The given node will opt-in for using ephemeral key for encrypting Local SSDs. The Local SSDs will not be able to recover data in case of node crash.' + STANDARD_ENCRYPTION: ': The given node will be encrypted using keys managed by Google infrastructure and the keys wll be deleted when the node is deleted.' additional_pod_ranges_config.pod_range_names: '- (Required) The names of the Pod ranges to add to the cluster.' addons_config: |- - (Optional) The configuration for addons supported by GKE. @@ -62161,6 +67910,12 @@ resources: otherwise nothing will happen. 
It can only be disabled if the nodes already do not have network policies enabled. Defaults to disabled; set disabled = false to enable. + addons_config.parallelstore_csi_driver_config: |- + - (Optional) The status of the Parallelstore CSI driver addon, + which allows the usage of a Parallelstore instances as volumes. + It is disabled by default for Standard clusters; set enabled = true to enable. + It is enabled by default for Autopilot clusters with version 1.29 or later; set enabled = true to enable it explicitly. + See Enable the Parallelstore CSI driver for more information. addons_config.ray_cluster_logging_config.enabled: |- and ray_cluster_monitoring_config.enabled which control Ray Cluster logging @@ -62178,8 +67933,7 @@ resources: The status of the Stateful HA addon, which provides automatic configurable failover for stateful applications. It is disabled by default for Standard clusters. Set enabled = true to enable. advanced_datapath_observability_config.enable_metrics: '- (Required) Whether or not to enable advanced datapath metrics.' - advanced_datapath_observability_config.enable_relay: '- (Optional) Whether or not Relay is enabled.' - advanced_datapath_observability_config.relay_mode: '- (Optional, Deprecated) Mode used to make Relay available. Deprecated in favor of enable_relay field. Remove this attribute''s configuration as this field will be removed in the next major release and enable_relay will become a required field.' + advanced_datapath_observability_config.enable_relay: '- (Required) Whether or not Relay is enabled.' advanced_machine_features.enable_nested_virtualization: '- (Optional) Defines whether the instance should have nested virtualization enabled. Defaults to false.' advanced_machine_features.threads_per_core: '- (Required) The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.' 
allow_net_admin: |- @@ -62193,7 +67947,7 @@ resources: authenticator_groups_config.security_group: '- (Required) The name of the RBAC security group for use with Google security groups in Kubernetes RBAC. Group name must be in format gke-security-groups@yourdomain.com.' auto_provisioning_defaults.boot_disk_kms_key: '- (Optional) The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: https://cloud.google.com/compute/docs/disks/customer-managed-encryption' auto_provisioning_defaults.disk_size: '- (Optional) Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB. Defaults to 100' - auto_provisioning_defaults.disk_type: '- (Optional) Type of the disk attached to each node (e.g. ''pd-standard'', ''pd-ssd'' or ''pd-balanced''). Defaults to pd-standard' + auto_provisioning_defaults.disk_type: '- (Optional) Type of the disk attached to each node (e.g. ''pd-standard'', ''pd-ssd'', ''pd-balanced'', or ''hyperdisk-balanced''). Defaults to hyperdisk-balanced if hyperdisk-balanced is supported and pd-balanced is not supported for the machine type; otherwise defaults to pd-balanced.' auto_provisioning_defaults.image_type: '- (Optional) The default image type used by NAP once a new node pool is being created. Please note that according to the official documentation the value must be one of the [COS_CONTAINERD, COS, UBUNTU_CONTAINERD, UBUNTU]. NOTE : COS AND UBUNTU are deprecated as of GKE 1.24' auto_provisioning_defaults.management: '- (Optional) NodeManagement configuration for this NodePool. Structure is documented below.' 
auto_provisioning_defaults.min_cpu_platform: |- @@ -62264,6 +68018,12 @@ resources: (Required) - Enable Confidential GKE Nodes for this node pool, to enforce encryption of data in-use. containerd_config.private_registry_access_config: '(Optional) - Configuration for private container registries. There are two fields in this config:' + control_plane_endpoints_config: |- + - (Optional) Configuration for all of the cluster's control plane endpoints. + Structure is documented below. + control_plane_endpoints_config.dns_endpoint_config: '- (Optional) DNS endpoint configuration.' + control_plane_endpoints_config.dns_endpoint_config.allow_external_traffic: '- (Optional) Controls whether user traffic is allowed over this endpoint. Note that GCP-managed services may still use the endpoint even if this is false.' + control_plane_endpoints_config.dns_endpoint_config.endpoint: '- (Output) The cluster''s DNS endpoint.' cost_management_config: |- - (Optional) Configuration for the Cost Allocation feature. @@ -62289,17 +68049,21 @@ resources: default_snat_status.disabled: '- (Required) Whether the cluster disables default in-node sNAT rules. In-node sNAT rules will be disabled when defaultSnatStatus is disabled.When disabled is set to false, default IP masquerade rules will be applied to the nodes to prevent sNAT on cluster internal traffic' delete: '- Default is 40 minutes.' deletion_protection: |- - - (Optional) Whether or not to allow Terraform to destroy - the cluster. Unless this field is set to false in Terraform state, a - terraform destroy or terraform apply that would delete the cluster will fail. + - (Optional) Whether Terraform will be prevented from + destroying the cluster. Deleting this cluster via terraform destroy or + terraform apply will only succeed if this field is false in the Terraform + state. description: '- (Optional) Description of the cluster.' dns_config: |- - (Optional) Configuration for Using Cloud DNS for GKE. Structure is documented below. 
- dns_config.additive_vpc_scope_dns_domain: '- (Optional, Beta) This will enable Cloud DNS additive VPC scope. Must provide a domain name that is unique within the VPC. For this to work cluster_dns = "CLOUD_DNS" and cluster_dns_scope = "CLUSTER_SCOPE" must both be set as well.' + dns_config.additive_vpc_scope_dns_domain: '- (Optional) This will enable Cloud DNS additive VPC scope. Must provide a domain name that is unique within the VPC. For this to work cluster_dns = "CLOUD_DNS" and cluster_dns_scope = "CLUSTER_SCOPE" must both be set as well.' dns_config.cluster_dns: '- (Optional) Which in-cluster DNS provider should be used. PROVIDER_UNSPECIFIED (default) or PLATFORM_DEFAULT or CLOUD_DNS.' dns_config.cluster_dns_domain: '- (Optional) The suffix used for all cluster service records.' dns_config.cluster_dns_scope: '- (Optional) The scope of access to cluster DNS records. DNS_SCOPE_UNSPECIFIED (default) or CLUSTER_SCOPE or VPC_SCOPE.' + effective_labels: |- + - + All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services. enable_autopilot: |- - (Optional) Enable Autopilot for this cluster. Defaults to false. Note that when this option is enabled, certain features of Standard GKE are not available. @@ -62309,7 +68073,7 @@ resources: - (Optional) Whether CiliumClusterWideNetworkPolicy is enabled on this cluster. Defaults to false. enable_fqdn_network_policy: |- - - (Optional, Beta) + - (Optional) Whether FQDN Network Policy is enabled on this cluster. Users who enable this feature for existing Standard clusters must restart the GKE Dataplane V2 anetd DaemonSet after enabling it. See the Enable FQDN Network Policy in an existing cluster for more information. enable_intranode_visibility: |- - (Optional) @@ -62339,6 +68103,11 @@ resources: See the official documentation. enabled: (Required) - Enables private registry config. 
If set to false, all other fields in this object must not be set. endpoint: '- The IP address of this cluster''s Kubernetes master.' + enterprise_config: |- + - (Optional) + Configuration for [Enterprise edition].(https://cloud.google.com/kubernetes-engine/enterprise/docs/concepts/gke-editions). Structure is documented below. + enterprise_config.0.cluster_tier: '- The effective tier of the cluster.' + enterprise_config.desired_tier: '- (Optional) Sets the tier of the cluster. Available options include STANDARD and ENTERPRISE.' ephemeral_storage_config.local_ssd_count: (Required) - Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD is 375 GB in size. If zero, it means to disable using local SSDs as ephemeral storage. ephemeral_storage_local_ssd_config.local_ssd_count: (Required) - Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD is 375 GB in size. If zero, it means to disable using local SSDs as ephemeral storage. fast_socket.enabled: (Required) - Whether or not the NCCL Fast Socket is enabled @@ -62368,6 +68137,8 @@ resources: guest_accelerator.gpu_sharing_config: (Optional) - Configuration for GPU sharing. Structure is documented below. guest_accelerator.type: (Required) - The accelerator type resource to expose to this instance. E.g. nvidia-tesla-k80. gvnic.enabled: (Required) - Whether or not the Google Virtual NIC (gVNIC) is enabled + hugepages_config.hugepage_size_1g: '- (Optional) Amount of 1G hugepages.' + hugepages_config.hugepage_size_2m: '- (Optional) Amount of 2M hugepages.' id: '- an identifier for the resource with format projects/{{project}}/locations/{{zone}}/clusters/{{name}}' identity_service_config: '- (Optional). Structure is documented below.' identity_service_config.enabled: '- (Optional) Whether to enable the Identity Service component. It is disabled by default. Set enabled=true to enable.' @@ -62424,13 +68195,18 @@ resources: such as "300ms". 
Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". The value must be a positive duration. kubelet_config.cpu_manager_policy: |- - - (Required) The CPU management policy on the node. See + - (Optional) The CPU management policy on the node. See K8S CPU Management Policies. - One of "none" or "static". Defaults to none when kubelet_config is unset. + One of "none" or "static". If unset (or set to the empty string ""), the API will treat the field as if set to "none". + Prior to the 6.4.0 this field was marked as required. The workaround for the required field + is setting the empty string "", which will function identically to not setting this field. + kubelet_config.insecure_kubelet_readonly_port_enabled: '- (Optional) Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to FALSE. Possible values: TRUE, FALSE.' + kubelet_config.pod_pids_limit: '- (Optional) Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304.' label_fingerprint: '- The fingerprint of the set of labels for this cluster.' linux_node_config.cgroup_mode: |- - (Optional) Possible cgroup modes that can be used. Accepted values are: + linux_node_config.hugepages_config: '- (Optional) Amounts for 2M and 1G hugepages. Structure is documented below.' linux_node_config.sysctls: |- - (Optional) The Linux kernel parameters to be applied to the nodes and all pods running on the nodes. Specified as a map from the key, such as @@ -62507,6 +68283,7 @@ resources: - (Optional) External network that can access Kubernetes master through HTTPS. Must be specified in CIDR notation. master_authorized_networks_config.cidr_blocks.display_name: '- (Optional) Field for users to identify CIDR blocks.' + master_authorized_networks_config.cidr_blocks.private_endpoint_enforcement_enabled: '- (Optional) Whether authorized networks is enforced on the private endpoint or not.' 
master_authorized_networks_config.gcp_public_cidrs_access_enabled: |- - (Optional) Whether Kubernetes master is accessible via Google Compute Engine Public IPs. @@ -62600,8 +68377,10 @@ resources: node_config.guest_accelerator: |- - (Optional) List of the type and count of accelerator cards attached to the instance. Structure documented below. - To support removal of guest_accelerators in Terraform 0.12 this field is an - Attribute as Block + Note: As of 6.0.0, argument syntax + is no longer supported for this field in favor of block syntax. + To dynamically set a list of guest accelerators, use dynamic blocks. + To set an empty list, use a single guest_accelerator block with count = 0. node_config.gvnic: |- - (Optional) Google Virtual NIC (gVNIC) is a virtual network interface. Installing the gVNIC driver allows for more efficient traffic transmission across the Google network infrastructure. @@ -62623,6 +68402,9 @@ resources: node_config.local_ssd_count: |- - (Optional) The amount of local SSD disks that will be attached to each cluster node. Defaults to 0. + node_config.local_ssd_encryption_mode: |- + - (Optional) Possible Local SSD encryption modes: + Accepted values are: node_config.logging_variant: (Optional) Parameter for specifying the type of logging agent used in a node pool. This will override any cluster-wide default value. Valid values include DEFAULT and MAX_THROUGHPUT. See Increasing logging agent throughput for more information. node_config.machine_type: |- - (Optional) The name of a Google Compute Engine machine type. @@ -62667,6 +68449,7 @@ resources: - (Optional) A boolean that represents whether the underlying node VMs are spot. See the official documentation for more information. Defaults to false. + node_config.storage_pools: '- (Optional) The list of Storage Pools where boot disks are provisioned.' node_config.tags: |- - (Optional) The list of instance tags applied to all nodes. 
Tags are used to identify valid sources or targets for network firewalls. @@ -62698,6 +68481,7 @@ resources: - (Optional) Node pool configs that apply to auto-provisioned node pools in autopilot clusters and node auto-provisioning-enabled clusters. Structure is documented below. + node_pool_auto_config.linux_node_config: (Optional) - Linux system configuration for the cluster's automatically provisioned node pools. Only cgroup_mode field is supported in node_pool_auto_config. Structure is documented below. node_pool_auto_config.network_tags: (Optional) - The network tag config for the cluster's automatically provisioned node pools. Structure is documented below. node_pool_auto_config.node_kubelet_config: |- - (Optional) Kubelet configuration for Autopilot clusters. Currently, only insecure_kubelet_readonly_port_enabled is supported here. @@ -62705,7 +68489,7 @@ resources: node_pool_auto_config.resource_manager_tags: '- (Optional) A map of resource manager tag keys and values to be attached to the nodes for managing Compute Engine firewalls using Network Firewall Policies. Tags must be according to specifications found here. A maximum of 5 tag key-value pairs can be specified. Existing tags will be replaced with new values. Tags must be in one of the following formats ([KEY]=[VALUE]) 1. tagKeys/{tag_key_id}=tagValues/{tag_value_id} 2. {org_id}/{tag_key_name}={tag_value_name} 3. {project_id}/{tag_key_name}={tag_value_name}.' node_pool_defaults: '- (Optional) Default NodePool settings for the entire cluster. These settings are overridden if specified on the specific NodePool object. Structure is documented below.' node_pool_defaults.node_config_defaults: (Optional) - Subset of NodeConfig message that has defaults. - node_pool_defaults.node_config_defaults.gcfs_config: (Optional, Beta) The default Google Container Filesystem (GCFS) configuration at the cluster level. e.g. enable image streaming across all the node pools within the cluster. 
Structure is documented below. + node_pool_defaults.node_config_defaults.gcfs_config: (Optional) The default Google Container Filesystem (GCFS) configuration at the cluster level. e.g. enable image streaming across all the node pools within the cluster. Structure is documented below. node_pool_defaults.node_config_defaults.insecure_kubelet_readonly_port_enabled: '(Optional) Controls whether the kubelet read-only port is enabled for newly created node pools in the cluster. It is strongly recommended to set this to FALSE. Possible values: TRUE, FALSE.' node_pool_defaults.node_config_defaults.logging_variant: (Optional) The type of logging agent that is deployed by default for newly created node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT. See Increasing logging agent throughput for more information. node_version: |- @@ -62717,8 +68501,6 @@ resources: when fuzzy versions are used. See the google_container_engine_versions data source's version_prefix field to approximate fuzzy versions in a Terraform-compatible way. To update nodes in other node pools, use the version attribute on the node pool. - none.insecure_kubelet_readonly_port_enabled: '- (Optional) Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to FALSE. Possible values: TRUE, FALSE.' - none.pod_pids_limit: '- (Optional) Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304.' notification_config: '- (Optional) Configuration for the cluster upgrade notifications feature. Structure is documented below.' notification_config.pubsub: (Required) - The pubsub config for the cluster's upgrade notifications. notification_config.pubsub.enabled: (Required) - Whether or not the notification config is enabled @@ -62828,7 +68610,7 @@ resources: secondary_boot_disks.disk_image: (Required) - Path to disk image to create the secondary boot disk from. 
After using the gke-disk-image-builder, this argument should be global/images/DISK_IMAGE_NAME. secondary_boot_disks.mode: (Optional) - Mode for how the secondary boot disk is used. An example mode is CONTAINER_IMAGE_CACHE. secret_manager_config: |- - - (Optional, Beta) Configuration for the + - (Optional) Configuration for the SecretManagerConfig feature. Structure is documented below. secret_manager_config.enabled: (Required) - Enable the Secret Manager add-on for this cluster. @@ -62858,6 +68640,9 @@ resources: taint.effect: (Required) Effect for taint. Accepted values are NO_SCHEDULE, PREFER_NO_SCHEDULE, and NO_EXECUTE. taint.key: (Required) Key for taint. taint.value: (Required) Value for taint. + terraform_labels: |- + - + The combination of labels configured directly on the resource and default labels configured on the provider. tpu_ipv4_cidr_block: |- - The IP address range of the Cloud TPUs in this cluster, in CIDR @@ -62984,6 +68769,8 @@ resources: '"NO_RESERVATION"': ': Do not consume from any reserved capacity.' '"SPECIFIC_RESERVATION"': ': Must consume from a specific reservation. Must specify key value fields for specifying the reservations.' '"UNSPECIFIED"': ': Default value. This should not be used.' + EPHEMERAL_KEY_ENCRYPTION: ': The given node will opt-in for using ephemeral key for encrypting Local SSDs. The Local SSDs will not be able to recover data in case of node crash.' + STANDARD_ENCRYPTION: ': The given node will be encrypted using keys managed by Google infrastructure and the keys wll be deleted when the node is deleted.' additional_node_network_configs.network: '- Name of the VPC where the additional interface belongs.' additional_node_network_configs.subnetwork: '- Name of the subnetwork where the additional interface belongs.' additional_pod_network_configs.max_pods_per_node: '- The maximum number of pods per node which use this pod network.' 
@@ -63012,6 +68799,9 @@ resources: batch_node_count: '- (Optional) Number of blue nodes to drain in a batch.' batch_percentage: '- (Optional) Percentage of the blue pool nodes to drain in a batch.' batch_soak_duration: '- (Optionial) Soak time after each batch gets drained.' + blue_green_settings.local_ssd_encryption_mode: |- + - (Optional) Possible Local SSD encryption modes: + Accepted values are: blue_green_settings.node_pool_soak_duration: |- - (Optional) Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up. @@ -65786,6 +71576,43 @@ resources: "topic": "projects//topics/${google_pubsub_topic.actions.name}" } ] + }, + { + "tag_resources": [ + { + "lower_data_risk_to_low": true, + "profile_generations_to_tag": [ + "PROFILE_GENERATION_NEW", + "PROFILE_GENERATION_UPDATE" + ], + "tag_conditions": [ + { + "sensitivity_score": [ + { + "score": "SENSITIVITY_HIGH" + } + ], + "tag": [ + { + "namespaced_value": "123456/environment/prod" + } + ] + }, + { + "sensitivity_score": [ + { + "score": "SENSITIVITY_LOW" + } + ], + "tag": [ + { + "namespaced_value": "123456/environment/test" + } + ] + } + ] + } + ] } ], "inspect_templates": [ @@ -65826,10 +71653,26 @@ resources: ], "parent": "projects/my-project-name" } + google_project_iam_member.tag_role: |- + { + "member": "serviceAccount:service-${data.google_project.project.number}@dlp-api.iam.gserviceaccount.com", + "project": "", + "role": "roles/resourcemanager.tagUser" + } google_pubsub_topic.actions: |- { "name": "fake-topic" } + google_tags_tag_key.tag_key: |- + { + "parent": "${data.google_project.project.id}", + "short_name": "environment" + } + google_tags_tag_value.tag_value: |- + { + "parent": "${google_tags_tag_key.tag_key.id}", + "short_name": "prod" + } - name: org_running manifest: |- { @@ -65947,6 +71790,11 @@ resources: { "cadence": [ { + "inspect_template_modified_cadence": [ + { + "frequency": "UPDATE_FREQUENCY_DAILY" + } + ], "schema_modified_cadence": [ { 
"frequency": "UPDATE_FREQUENCY_DAILY", @@ -66371,6 +72219,11 @@ resources: (Optional) Publish a message into the Pub/Sub topic. Structure is documented below. + actions.tag_resources: |- + - + (Optional) + Publish a message into the Pub/Sub topic. + Structure is documented below. big_query_target.cadence: |- - (Optional) @@ -66390,6 +72243,11 @@ resources: (Optional) Required. The tables the discovery cadence applies to. The first target with a matching filter will be the one to apply to a table Structure is documented below. + cadence.inspect_template_modified_cadence: |- + - + (Optional) + Governs when to update data profiles when the inspection rules defined by the InspectTemplate change. If not set, changing the template will not cause a data profile to update. + Structure is documented below. cadence.schema_modified_cadence: |- - (Optional) @@ -66692,7 +72550,7 @@ resources: org_config: |- - (Optional) - A nested object resource + A nested object resource. Structure is documented below. org_config.location: |- - @@ -66751,6 +72609,11 @@ resources: (Optional) The type of events to consider when deciding if the table's schema has been modified and should have the profile updated. Defaults to NEW_COLUMN. Each value may be one of: SCHEMA_NEW_COLUMNS, SCHEMA_REMOVED_COLUMNS. + sensitivity_score.score: |- + - + (Required) + The sensitivity score applied to the resource. + Possible values are: SENSITIVITY_LOW, SENSITIVITY_MODERATE, SENSITIVITY_HIGH. status: |- - (Optional) @@ -66796,6 +72659,34 @@ resources: - (Optional) if unset, this property matches all tables + tag.namespaced_value: |- + - + (Optional) + The namespaced name for the tag value to attach to resources. Must be in the format {parent_id}/{tag_key_short_name}/{short_name}, for example, "123456/environment/prod". + tag_conditions.sensitivity_score: |- + - + (Optional) + Conditions attaching the tag to a resource on its profile having this sensitivity score. + Structure is documented below. 
+ tag_conditions.tag: |- + - + (Optional) + The tag value to attach to resources. + Structure is documented below. + tag_resources.lower_data_risk_to_low: |- + - + (Optional) + Whether applying a tag to a resource should lower the risk of the profile for that resource. For example, in conjunction with an IAM deny policy, you can deny all principals a permission if a tag value is present, mitigating the risk of the resource. This also lowers the data risk of resources at the lower levels of the resource hierarchy. For example, reducing the data risk of a table data profile also reduces the data risk of the constituent column data profiles. + tag_resources.profile_generations_to_tag: |- + - + (Optional) + The profile generations for which the tag should be attached to resources. If you attach a tag to only new profiles, then if the sensitivity score of a profile subsequently changes, its tag doesn't change. By default, this field includes only new profiles. To include both new and updated profiles for tagging, this field should explicitly include both PROFILE_GENERATION_NEW and PROFILE_GENERATION_UPDATE. + Each value may be one of: PROFILE_GENERATION_NEW, PROFILE_GENERATION_UPDATE. + tag_resources.tag_conditions: |- + - + (Optional) + The tags to associate with different conditions. + Structure is documented below. 
targets: |- - (Optional) @@ -69907,6 +75798,134 @@ resources: ], "service": "servicenetworking.googleapis.com" } + - name: existing-mysql + manifest: |- + { + "connection_profile_id": "destination-cp", + "depends_on": [ + "${google_sql_database_instance.destination_csql}" + ], + "display_name": "destination-cp_display", + "labels": { + "foo": "bar" + }, + "location": "us-central1", + "mysql": [ + { + "cloud_sql_id": "destination-csql" + } + ] + } + dependencies: + google_sql_database_instance.destination_csql: |- + { + "database_version": "MYSQL_5_7", + "deletion_protection": false, + "name": "destination-csql", + "settings": [ + { + "deletion_protection_enabled": false, + "tier": "db-n1-standard-1" + } + ] + } + - name: existing-psql + manifest: |- + { + "connection_profile_id": "destination-cp", + "depends_on": [ + "${google_sql_database_instance.destination_csql}" + ], + "display_name": "destination-cp_display", + "labels": { + "foo": "bar" + }, + "location": "us-central1", + "postgresql": [ + { + "cloud_sql_id": "destination-csql" + } + ] + } + dependencies: + google_sql_database_instance.destination_csql: |- + { + "database_version": "POSTGRES_15", + "deletion_protection": false, + "name": "destination-csql", + "settings": [ + { + "deletion_protection_enabled": false, + "tier": "db-custom-2-13312" + } + ] + } + - name: existing-alloydb + manifest: |- + { + "connection_profile_id": "destination-cp", + "depends_on": [ + "${google_alloydb_cluster.destination_alloydb}", + "${google_alloydb_instance.destination_alloydb_primary}" + ], + "display_name": "destination-cp_display", + "labels": { + "foo": "bar" + }, + "location": "us-central1", + "postgresql": [ + { + "alloydb_cluster_id": "destination-alloydb" + } + ] + } + dependencies: + google_alloydb_cluster.destination_alloydb: |- + { + "cluster_id": "destination-alloydb", + "database_version": "POSTGRES_15", + "initial_user": [ + { + "password": "destination-alloydb", + "user": "destination-alloydb" + } + ], + 
"location": "us-central1", + "network_config": [ + { + "network": "${google_compute_network.default.id}" + } + ] + } + google_alloydb_instance.destination_alloydb_primary: |- + { + "cluster": "${google_alloydb_cluster.destination_alloydb.name}", + "depends_on": [ + "${google_service_networking_connection.vpc_connection}" + ], + "instance_id": "destination-alloydb-primary", + "instance_type": "PRIMARY" + } + google_compute_global_address.private_ip_alloc: |- + { + "address_type": "INTERNAL", + "name": "destination-alloydb", + "network": "${google_compute_network.default.id}", + "prefix_length": 16, + "purpose": "VPC_PEERING" + } + google_compute_network.default: |- + { + "name": "destination-alloydb" + } + google_service_networking_connection.vpc_connection: |- + { + "network": "${google_compute_network.default.id}", + "reserved_peering_ranges": [ + "${google_compute_global_address.private_ip_alloc.name}" + ], + "service": "servicenetworking.googleapis.com" + } argumentDocs: alloydb: |- - @@ -70070,12 +76089,12 @@ resources: If the source is a Cloud SQL database, use this field to provide the Cloud SQL instance ID of the source. mysql.host: |- - - (Required) - Required. The IP or hostname of the source MySQL database. + (Optional) + The IP or hostname of the source MySQL database. mysql.password: |- - - (Required) - Required. Input only. The password for the user that Database Migration Service will be using to connect to the database. + (Optional) + Input only. The password for the user that Database Migration Service will be using to connect to the database. This field is not returned on request, and the value is encrypted when stored in Database Migration Service. Note: This property is sensitive and will not be displayed in the plan. mysql.password_set: |- @@ -70084,8 +76103,8 @@ resources: Output only. Indicates If this connection profile password is stored. mysql.port: |- - - (Required) - Required. The network port of the source MySQL database. 
+ (Optional) + The network port of the source MySQL database. mysql.ssl: |- - (Optional) @@ -70093,8 +76112,8 @@ resources: Structure is documented below. mysql.username: |- - - (Required) - Required. The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. + (Optional) + The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. name: |- - The name of this connection profile resource in the form of projects/{project}/locations/{location}/connectionProfiles/{connectionProfile}. @@ -70154,22 +76173,26 @@ resources: (Optional) Specifies connection parameters required specifically for PostgreSQL databases. Structure is documented below. + postgresql.alloydb_cluster_id: |- + - + (Optional) + If the connected database is an AlloyDB instance, use this field to provide the AlloyDB cluster ID. postgresql.cloud_sql_id: |- - (Optional) If the source is a Cloud SQL database, use this field to provide the Cloud SQL instance ID of the source. postgresql.host: |- - - (Required) - Required. The IP or hostname of the source MySQL database. + (Optional) + The IP or hostname of the source MySQL database. postgresql.network_architecture: |- - (Output) Output only. If the source is a Cloud SQL database, this field indicates the network architecture it's associated with. postgresql.password: |- - - (Required) - Required. Input only. The password for the user that Database Migration Service will be using to connect to the database. + (Optional) + Input only. The password for the user that Database Migration Service will be using to connect to the database. This field is not returned on request, and the value is encrypted when stored in Database Migration Service. Note: This property is sensitive and will not be displayed in the plan. postgresql.password_set: |- @@ -70178,8 +76201,8 @@ resources: Output only. 
Indicates If this connection profile password is stored. postgresql.port: |- - - (Required) - Required. The network port of the source MySQL database. + (Optional) + The network port of the source MySQL database. postgresql.ssl: |- - (Optional) @@ -70187,8 +76210,8 @@ resources: Structure is documented below. postgresql.username: |- - - (Required) - Required. The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. + (Optional) + The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. primary_instance_settings.database_flags: |- - (Optional) @@ -70343,6 +76366,539 @@ resources: and default labels configured on the provider. update: '- Default is 60 minutes.' importStatements: [] + google_database_migration_service_migration_job: + subCategory: DatabaseMigrationService + description: A migration job definition. 
+ name: google_database_migration_service_migration_job + title: "" + examples: + - name: mysqltomysql + manifest: |- + { + "destination": "${google_database_migration_service_connection_profile.destination_cp.name}", + "display_name": "my-migrationid_display", + "dump_flags": [ + { + "dump_flags": [ + { + "name": "max-allowed-packet", + "value": "1073741824" + } + ] + } + ], + "dump_type": "LOGICAL", + "labels": { + "foo": "bar" + }, + "location": "us-central1", + "migration_job_id": "my-migrationid", + "performance_config": [ + { + "dump_parallel_level": "MAX" + } + ], + "source": "${google_database_migration_service_connection_profile.source_cp.name}", + "type": "CONTINUOUS", + "vpc_peering_connectivity": [ + { + "vpc": "${google_compute_network.default.id}" + } + ] + } + references: + destination: google_database_migration_service_connection_profile.destination_cp.name + source: google_database_migration_service_connection_profile.source_cp.name + vpc_peering_connectivity.vpc: google_compute_network.default.id + dependencies: + google_compute_network.default: |- + { + "name": "destination-csql" + } + google_database_migration_service_connection_profile.destination_cp: |- + { + "connection_profile_id": "destination-cp", + "depends_on": [ + "${google_sql_database_instance.destination_csql}" + ], + "display_name": "destination-cp_display", + "labels": { + "foo": "bar" + }, + "location": "us-central1", + "mysql": [ + { + "cloud_sql_id": "destination-csql" + } + ] + } + google_database_migration_service_connection_profile.source_cp: |- + { + "connection_profile_id": "source-cp", + "depends_on": [ + "${google_sql_user.source_sqldb_user}" + ], + "display_name": "source-cp_display", + "labels": { + "foo": "bar" + }, + "location": "us-central1", + "mysql": [ + { + "cloud_sql_id": "source-csql", + "host": "${google_sql_database_instance.source_csql.ip_address.0.ip_address}", + "password": "${google_sql_user.source_sqldb_user.password}", + "port": 3306, + "ssl": [ + { + 
"ca_certificate": "${google_sql_ssl_cert.source_sql_client_cert.server_ca_cert}", + "client_certificate": "${google_sql_ssl_cert.source_sql_client_cert.cert}", + "client_key": "${google_sql_ssl_cert.source_sql_client_cert.private_key}" + } + ], + "username": "${google_sql_user.source_sqldb_user.name}" + } + ] + } + google_sql_database_instance.destination_csql: |- + { + "database_version": "MYSQL_5_7", + "deletion_protection": false, + "name": "destination-csql", + "settings": [ + { + "deletion_protection_enabled": false, + "tier": "db-n1-standard-1" + } + ] + } + google_sql_database_instance.source_csql: |- + { + "database_version": "MYSQL_5_7", + "deletion_protection": false, + "name": "source-csql", + "settings": [ + { + "deletion_protection_enabled": false, + "tier": "db-n1-standard-1" + } + ] + } + google_sql_ssl_cert.source_sql_client_cert: |- + { + "common_name": "cert", + "depends_on": [ + "${google_sql_database_instance.source_csql}" + ], + "instance": "${google_sql_database_instance.source_csql.name}" + } + google_sql_user.source_sqldb_user: |- + { + "depends_on": [ + "${google_sql_ssl_cert.source_sql_client_cert}" + ], + "instance": "${google_sql_database_instance.source_csql.name}", + "name": "username", + "password": "password" + } + - name: psqltopsql + manifest: |- + { + "destination": "${google_database_migration_service_connection_profile.destination_cp.name}", + "display_name": "my-migrationid_display", + "labels": { + "foo": "bar" + }, + "location": "us-central1", + "migration_job_id": "my-migrationid", + "source": "${google_database_migration_service_connection_profile.source_cp.name}", + "static_ip_connectivity": [ + {} + ], + "type": "CONTINUOUS" + } + references: + destination: google_database_migration_service_connection_profile.destination_cp.name + source: google_database_migration_service_connection_profile.source_cp.name + dependencies: + google_database_migration_service_connection_profile.destination_cp: |- + { + 
"connection_profile_id": "destination-cp", + "depends_on": [ + "${google_sql_database_instance.destination_csql}" + ], + "display_name": "destination-cp_display", + "labels": { + "foo": "bar" + }, + "location": "us-central1", + "postgresql": [ + { + "cloud_sql_id": "destination-csql" + } + ] + } + google_database_migration_service_connection_profile.source_cp: |- + { + "connection_profile_id": "source-cp", + "depends_on": [ + "${google_sql_user.source_sqldb_user}" + ], + "display_name": "source-cp_display", + "labels": { + "foo": "bar" + }, + "location": "us-central1", + "postgresql": [ + { + "cloud_sql_id": "source-csql", + "host": "${google_sql_database_instance.source_csql.ip_address.0.ip_address}", + "password": "${google_sql_user.source_sqldb_user.password}", + "port": 3306, + "ssl": [ + { + "ca_certificate": "${google_sql_ssl_cert.source_sql_client_cert.server_ca_cert}", + "client_certificate": "${google_sql_ssl_cert.source_sql_client_cert.cert}", + "client_key": "${google_sql_ssl_cert.source_sql_client_cert.private_key}" + } + ], + "username": "${google_sql_user.source_sqldb_user.name}" + } + ] + } + google_sql_database_instance.destination_csql: |- + { + "database_version": "POSTGRES_15", + "deletion_protection": false, + "name": "destination-csql", + "settings": [ + { + "deletion_protection_enabled": false, + "tier": "db-custom-2-13312" + } + ] + } + google_sql_database_instance.source_csql: |- + { + "database_version": "POSTGRES_15", + "deletion_protection": false, + "name": "source-csql", + "settings": [ + { + "deletion_protection_enabled": false, + "tier": "db-custom-2-13312" + } + ] + } + google_sql_ssl_cert.source_sql_client_cert: |- + { + "common_name": "cert", + "depends_on": [ + "${google_sql_database_instance.source_csql}" + ], + "instance": "${google_sql_database_instance.source_csql.name}" + } + google_sql_user.source_sqldb_user: |- + { + "depends_on": [ + "${google_sql_ssl_cert.source_sql_client_cert}" + ], + "instance": 
"${google_sql_database_instance.source_csql.name}", + "name": "username", + "password": "password" + } + - name: psqltoalloydb + manifest: |- + { + "destination": "${google_database_migration_service_connection_profile.destination_cp.name}", + "display_name": "my-migrationid_display", + "labels": { + "foo": "bar" + }, + "location": "us-central1", + "migration_job_id": "my-migrationid", + "source": "${google_database_migration_service_connection_profile.source_cp.name}", + "static_ip_connectivity": [ + {} + ], + "type": "CONTINUOUS" + } + references: + destination: google_database_migration_service_connection_profile.destination_cp.name + source: google_database_migration_service_connection_profile.source_cp.name + dependencies: + google_alloydb_cluster.destination_alloydb: |- + { + "cluster_id": "destination-alloydb", + "database_version": "POSTGRES_15", + "initial_user": [ + { + "password": "destination-alloydb", + "user": "destination-alloydb" + } + ], + "location": "us-central1", + "network_config": [ + { + "network": "${google_compute_network.default.id}" + } + ] + } + google_alloydb_instance.destination_alloydb_primary: |- + { + "cluster": "${google_alloydb_cluster.destination_alloydb.name}", + "depends_on": [ + "${google_service_networking_connection.vpc_connection}" + ], + "instance_id": "destination-alloydb-primary", + "instance_type": "PRIMARY" + } + google_compute_global_address.private_ip_alloc: |- + { + "address_type": "INTERNAL", + "name": "destination-alloydb", + "network": "${google_compute_network.default.id}", + "prefix_length": 16, + "purpose": "VPC_PEERING" + } + google_compute_network.default: |- + { + "name": "destination-alloydb" + } + google_database_migration_service_connection_profile.destination_cp: |- + { + "connection_profile_id": "destination-cp", + "depends_on": [ + "${google_alloydb_cluster.destination_alloydb}", + "${google_alloydb_instance.destination_alloydb_primary}" + ], + "display_name": "destination-cp_display", + "labels": { + 
"foo": "bar" + }, + "location": "us-central1", + "postgresql": [ + { + "alloydb_cluster_id": "destination-alloydb" + } + ] + } + google_database_migration_service_connection_profile.source_cp: |- + { + "connection_profile_id": "source-cp", + "depends_on": [ + "${google_sql_user.source_sqldb_user}" + ], + "display_name": "source-cp_display", + "labels": { + "foo": "bar" + }, + "location": "us-central1", + "postgresql": [ + { + "cloud_sql_id": "source-csql", + "host": "${google_sql_database_instance.source_csql.ip_address.0.ip_address}", + "password": "${google_sql_user.source_sqldb_user.password}", + "port": 3306, + "ssl": [ + { + "ca_certificate": "${google_sql_ssl_cert.source_sql_client_cert.server_ca_cert}", + "client_certificate": "${google_sql_ssl_cert.source_sql_client_cert.cert}", + "client_key": "${google_sql_ssl_cert.source_sql_client_cert.private_key}" + } + ], + "username": "${google_sql_user.source_sqldb_user.name}" + } + ] + } + google_service_networking_connection.vpc_connection: |- + { + "network": "${google_compute_network.default.id}", + "reserved_peering_ranges": [ + "${google_compute_global_address.private_ip_alloc.name}" + ], + "service": "servicenetworking.googleapis.com" + } + google_sql_database_instance.source_csql: |- + { + "database_version": "POSTGRES_15", + "deletion_protection": false, + "name": "source-csql", + "settings": [ + { + "deletion_protection_enabled": false, + "tier": "db-custom-2-13312" + } + ] + } + google_sql_ssl_cert.source_sql_client_cert: |- + { + "common_name": "cert", + "depends_on": [ + "${google_sql_database_instance.source_csql}" + ], + "instance": "${google_sql_database_instance.source_csql.name}" + } + google_sql_user.source_sqldb_user: |- + { + "depends_on": [ + "${google_sql_ssl_cert.source_sql_client_cert}" + ], + "instance": "${google_sql_database_instance.source_csql.name}", + "name": "username", + "password": "password" + } + argumentDocs: + create: '- Default is 60 minutes.' 
+ create_time: |- + - + Output only. The timestamp when the resource was created. A timestamp in RFC3339 UTC 'Zulu' format, accurate to nanoseconds. Example: '2014-10-02T15:01:23.045123456Z'. + delete: '- Default is 60 minutes.' + destination: |- + - + (Required) + The name of the destination connection profile resource in the form of projects/{project}/locations/{location}/connectionProfiles/{destinationConnectionProfile}. + display_name: |- + - + (Optional) + The migration job display name. + dump_flags: |- + - + (Optional) + The initial dump flags. + Structure is documented below. + dump_flags.dump_flags: |- + - + (Optional) + A list of dump flags + Structure is documented below. + dump_flags.dump_flags.name: |- + - + (Optional) + The name of the flag + dump_flags.dump_flags.value: |- + - + (Optional) + The vale of the flag + dump_path: |- + - + (Optional) + The path to the dump file in Google Cloud Storage, + in the format: (gs://[BUCKET_NAME]/[OBJECT_NAME]). + This field and the "dump_flags" field are mutually exclusive. + dump_type: |- + - + (Optional) + The type of the data dump. Supported for MySQL to CloudSQL for MySQL + migrations only. + Possible values are: LOGICAL, PHYSICAL. + effective_labels: for all of the labels present on the resource. + error: |- + - + Output only. The error details in case of state FAILED. + Structure is documented below. + error.code: |- + - + (Output) + The status code, which should be an enum value of google.rpc.Code. + error.details: |- + - + (Output) + A list of messages that carry the error details. + error.message: |- + - + (Output) + Human readable message indicating details about the current status. + id: '- an identifier for the resource with format projects/{{project}}/locations/{{location}}/migrationJobs/{{migration_job_id}}' + labels: |- + - + (Optional) + The resource labels for migration job to use to annotate any related underlying resources such as Compute Engine VMs. 
+ location: |- + - + (Optional) + The location where the migration job should reside. + migration_job_id: |- + - + (Required) + The ID of the migration job. + name: |- + - + The name of this migration job resource in the form of projects/{project}/locations/{location}/migrationJobs/{migrationJob}. + performance_config: |- + - + (Optional) + Data dump parallelism settings used by the migration. + Structure is documented below. + performance_config.dump_parallel_level: |- + - + (Optional) + Initial dump parallelism level. + Possible values are: MIN, OPTIMAL, MAX. + phase: |- + - + The current migration job phase. + project: |- + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + reverse_ssh_connectivity: |- + - + (Optional) + The details of the VPC network that the source database is located in. + Structure is documented below. + reverse_ssh_connectivity.vm: |- + - + (Optional) + The name of the virtual machine (Compute Engine) used as the bastion server + for the SSH tunnel. + reverse_ssh_connectivity.vm_ip: |- + - + (Optional) + The IP of the virtual machine (Compute Engine) used as the bastion server + for the SSH tunnel. + reverse_ssh_connectivity.vm_port: |- + - + (Optional) + The forwarding port of the virtual machine (Compute Engine) used as the + bastion server for the SSH tunnel. + reverse_ssh_connectivity.vpc: |- + - + (Optional) + The name of the VPC to peer with the Cloud SQL private network. + source: |- + - + (Required) + The name of the source connection profile resource in the form of projects/{project}/locations/{location}/connectionProfiles/{sourceConnectionProfile}. + state: |- + - + The current migration job state. + static_ip_connectivity: |- + - + (Optional) + If set to an empty object ({}), the source database will allow incoming + connections from the public IP of the destination database. 
+ You can retrieve the public IP of the Cloud SQL instance from the + Cloud SQL console or using Cloud SQL APIs. + terraform_labels: |- + - + The combination of labels configured directly on the resource + and default labels configured on the provider. + type: |- + - + (Required) + The type of the migration job. + Possible values are: ONE_TIME, CONTINUOUS. + update: '- Default is 60 minutes.' + vpc_peering_connectivity: |- + - + (Optional) + The details of the VPC network that the source database is located in. + Structure is documented below. + vpc_peering_connectivity.vpc: |- + - + (Optional) + The name of the VPC network to peer with the Cloud SQL private network. + importStatements: [] google_database_migration_service_private_connection: subCategory: DatabaseMigrationService description: The PrivateConnection resource is used to establish private connectivity between Database Migration Service and a customer's network. @@ -73607,6 +80163,693 @@ resources: google_dataproc_autoscaling_policy_iam_binding can be used per role. Note that custom roles must be of the format [projects|organizations]/{parent-name}/roles/{role-name}. importStatements: [] + google_dataproc_batch: + subCategory: Dataproc + description: Dataproc Serverless Batches lets you run Spark workloads without requiring you to provision and manage your own Dataproc cluster. 
+ name: google_dataproc_batch + title: "" + examples: + - name: example_batch_spark + manifest: |- + { + "batch_id": "tf-test-batch", + "environment_config": [ + { + "execution_config": [ + { + "network_tags": [ + "tag1" + ], + "subnetwork_uri": "default", + "ttl": "3600s" + } + ] + } + ], + "labels": { + "batch_test": "terraform" + }, + "location": "us-central1", + "runtime_config": [ + { + "properties": { + "spark.dynamicAllocation.enabled": "false", + "spark.executor.instances": "2" + } + } + ], + "spark_batch": [ + { + "args": [ + "10" + ], + "jar_file_uris": [ + "file:///usr/lib/spark/examples/jars/spark-examples.jar" + ], + "main_class": "org.apache.spark.examples.SparkPi" + } + ] + } + - name: example_batch_spark + manifest: |- + { + "batch_id": "dataproc-batch", + "depends_on": [ + "${google_kms_crypto_key_iam_member.crypto_key_member_1}" + ], + "environment_config": [ + { + "execution_config": [ + { + "kms_key": "example-key", + "network_tags": [ + "tag1" + ], + "network_uri": "default", + "service_account": "${data.google_project.project.number}-compute@developer.gserviceaccount.com", + "staging_bucket": "${google_storage_bucket.bucket.name}", + "ttl": "3600s" + } + ], + "peripherals_config": [ + { + "metastore_service": "${google_dataproc_metastore_service.ms.name}", + "spark_history_server_config": [ + { + "dataproc_cluster": "${google_dataproc_cluster.basic.id}" + } + ] + } + ] + } + ], + "labels": { + "batch_test": "terraform" + }, + "location": "us-central1", + "runtime_config": [ + { + "properties": { + "spark.dynamicAllocation.enabled": "false", + "spark.executor.instances": "2" + }, + "version": "2.2" + } + ], + "spark_batch": [ + { + "args": [ + "10" + ], + "jar_file_uris": [ + "file:///usr/lib/spark/examples/jars/spark-examples.jar" + ], + "main_class": "org.apache.spark.examples.SparkPi" + } + ] + } + references: + environment_config.execution_config.staging_bucket: google_storage_bucket.bucket.name + 
environment_config.peripherals_config.metastore_service: google_dataproc_metastore_service.ms.name + environment_config.peripherals_config.spark_history_server_config.dataproc_cluster: google_dataproc_cluster.basic.id + dependencies: + google_dataproc_cluster.basic: |- + { + "cluster_config": [ + { + "endpoint_config": [ + { + "enable_http_port_access": true + } + ], + "master_config": [ + { + "disk_config": [ + { + "boot_disk_size_gb": 35 + } + ], + "machine_type": "e2-standard-2", + "num_instances": 1 + } + ], + "metastore_config": [ + { + "dataproc_metastore_service": "${google_dataproc_metastore_service.ms.name}" + } + ], + "software_config": [ + { + "override_properties": { + "dataproc:dataproc.allow.zero.workers": "true", + "spark:spark.history.fs.logDirectory": "gs://${google_storage_bucket.bucket.name}/*/spark-job-history" + } + } + ] + } + ], + "name": "dataproc-batch", + "region": "us-central1" + } + google_dataproc_metastore_service.ms: |- + { + "hive_metastore_config": [ + { + "version": "3.1.2" + } + ], + "location": "us-central1", + "maintenance_window": [ + { + "day_of_week": "SUNDAY", + "hour_of_day": 2 + } + ], + "port": 9080, + "service_id": "dataproc-batch", + "tier": "DEVELOPER" + } + google_kms_crypto_key_iam_member.crypto_key_member_1: |- + { + "crypto_key_id": "example-key", + "member": "serviceAccount:service-${data.google_project.project.number}@dataproc-accounts.iam.gserviceaccount.com", + "role": "roles/cloudkms.cryptoKeyEncrypterDecrypter" + } + google_storage_bucket.bucket: |- + { + "force_destroy": true, + "location": "US", + "name": "dataproc-bucket", + "uniform_bucket_level_access": true + } + - name: example_batch_sparsql + manifest: |- + { + "batch_id": "tf-test-batch", + "environment_config": [ + { + "execution_config": [ + { + "subnetwork_uri": "default" + } + ] + } + ], + "location": "us-central1", + "runtime_config": [ + { + "properties": { + "spark.dynamicAllocation.enabled": "false", + "spark.executor.instances": "2" + } + } 
+ ], + "spark_sql_batch": [ + { + "jar_file_uris": [ + "file:///usr/lib/spark/examples/jars/spark-examples.jar" + ], + "query_file_uri": "gs://dataproc-examples/spark-sql/natality/cigarette_correlations.sql", + "query_variables": { + "name": "value" + } + } + ] + } + - name: example_batch_pyspark + manifest: |- + { + "batch_id": "tf-test-batch", + "environment_config": [ + { + "execution_config": [ + { + "subnetwork_uri": "default" + } + ] + } + ], + "location": "us-central1", + "pyspark_batch": [ + { + "archive_uris": [ + "https://storage.googleapis.com/terraform-batches/animals.txt.tar.gz#unpacked", + "https://storage.googleapis.com/terraform-batches/animals.txt.jar", + "https://storage.googleapis.com/terraform-batches/animals.txt" + ], + "args": [ + "10" + ], + "file_uris": [ + "https://storage.googleapis.com/terraform-batches/people.txt" + ], + "jar_file_uris": [ + "file:///usr/lib/spark/examples/jars/spark-examples.jar" + ], + "main_python_file_uri": "https://storage.googleapis.com/terraform-batches/test_util.py", + "python_file_uris": [ + "gs://dataproc-examples/pyspark/hello-world/hello-world.py" + ] + } + ], + "runtime_config": [ + { + "properties": { + "spark.dynamicAllocation.enabled": "false", + "spark.executor.instances": "2" + } + } + ] + } + - name: example_batch_sparkr + manifest: |- + { + "batch_id": "tf-test-batch", + "environment_config": [ + { + "execution_config": [ + { + "network_tags": [ + "tag1" + ], + "subnetwork_uri": "default", + "ttl": "3600s" + } + ] + } + ], + "labels": { + "batch_test": "terraform" + }, + "location": "us-central1", + "runtime_config": [ + { + "properties": { + "spark.dynamicAllocation.enabled": "false", + "spark.executor.instances": "2" + } + } + ], + "spark_r_batch": [ + { + "args": [ + "https://storage.googleapis.com/terraform-batches/flights.csv" + ], + "main_r_file_uri": "https://storage.googleapis.com/terraform-batches/spark-r-flights.r" + } + ] + } + - name: example_batch_autotuning + manifest: |- + { + 
"batch_id": "tf-test-batch", + "environment_config": [ + { + "execution_config": [ + { + "subnetwork_uri": "default", + "ttl": "3600s" + } + ] + } + ], + "labels": { + "batch_test": "terraform" + }, + "location": "us-central1", + "runtime_config": [ + { + "autotuning_config": [ + { + "scenarios": [ + "SCALING", + "MEMORY" + ] + } + ], + "cohort": "tf-dataproc-batch-example", + "properties": { + "spark.dynamicAllocation.enabled": "false", + "spark.executor.instances": "2" + }, + "version": "2.2" + } + ], + "spark_batch": [ + { + "args": [ + "10" + ], + "jar_file_uris": [ + "file:///usr/lib/spark/examples/jars/spark-examples.jar" + ], + "main_class": "org.apache.spark.examples.SparkPi" + } + ] + } + argumentDocs: + approximate_usage.accelerator_type: |- + - + (Output) + Accelerator type being used, if any + approximate_usage.milli_accelerator_seconds: |- + - + (Output) + Accelerator usage in (milliAccelerator x seconds) + approximate_usage.milli_dcu_seconds: |- + - + (Output) + DCU (Dataproc Compute Units) usage in (milliDCU x seconds) + approximate_usage.shuffle_storage_gb_seconds: |- + - + (Output) + Shuffle storage usage in (GB x seconds) + autotuning_config.scenarios: |- + - + (Optional) + Optional. Scenarios for which tunings are applied. + Each value may be one of: SCALING, BROADCAST_HASH_JOIN, MEMORY. + batch_id: |- + - + (Optional) + The ID to use for the batch, which will become the final component of the batch's resource name. + This value must be 4-63 characters. Valid characters are /[a-z][0-9]-/. + create: '- Default is 10 minutes.' + create_time: |- + - + The time when the batch was created. + creator: |- + - + The email address of the user who created the batch. + current_usage.accelerator_type: |- + - + (Output) + Accelerator type being used, if any. + current_usage.milli_accelerator: |- + - + (Output) + Milli (one-thousandth) accelerator.. + current_usage.milli_dcu: |- + - + (Output) + Milli (one-thousandth) Dataproc Compute Units (DCUs). 
+ current_usage.milli_dcu_premium: |- + - + (Output) + Milli (one-thousandth) Dataproc Compute Units (DCUs) charged at premium tier. + current_usage.shuffle_storage_gb: |- + - + (Output) + Shuffle Storage in gigabytes (GB). + current_usage.shuffle_storage_gb_premium: |- + - + (Output) + Shuffle Storage in gigabytes (GB) charged at premium tier. + current_usage.snapshot_time: |- + - + (Output) + The timestamp of the usage snapshot. + delete: '- Default is 5 minutes.' + effective_labels: for all of the labels present on the resource. + environment_config: |- + - + (Optional) + Environment configuration for the batch execution. + Structure is documented below. + environment_config.execution_config: |- + - + (Optional) + Execution configuration for a workload. + Structure is documented below. + environment_config.peripherals_config: |- + - + (Optional) + Peripherals configuration that workload has access to. + Structure is documented below. + execution_config.kms_key: |- + - + (Optional) + The Cloud KMS key to use for encryption. + execution_config.network_tags: |- + - + (Optional) + Tags used for network traffic control. + execution_config.network_uri: |- + - + (Optional) + Network configuration for workload execution. + execution_config.service_account: |- + - + (Optional) + Service account that used to execute workload. + execution_config.staging_bucket: |- + - + (Optional) + A Cloud Storage bucket used to stage workload dependencies, config files, and store + workload output and other ephemeral data, such as Spark history files. If you do not specify a staging bucket, + Cloud Dataproc will determine a Cloud Storage location according to the region where your workload is running, + and then create and manage project-level, per-location staging and temporary buckets. + This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket. + execution_config.subnetwork_uri: |- + - + (Optional) + Subnetwork configuration for workload execution. 
+ execution_config.ttl: |- + - + (Optional) + The duration after which the workload will be terminated. + When the workload exceeds this duration, it will be unconditionally terminated without waiting for ongoing + work to finish. If ttl is not specified for a batch workload, the workload will be allowed to run until it + exits naturally (or run forever without exiting). If ttl is not specified for an interactive session, + it defaults to 24 hours. If ttl is not specified for a batch that uses 2.1+ runtime version, it defaults to 4 hours. + Minimum value is 10 minutes; maximum value is 14 days. If both ttl and idleTtl are specified (for an interactive session), + the conditions are treated as OR conditions: the workload will be terminated when it has been idle for idleTtl or + when ttl has been exceeded, whichever occurs first. + id: '- an identifier for the resource with format projects/{{project}}/locations/{{location}}/batches/{{batch_id}}' + labels: |- + - + (Optional) + The labels to associate with this batch. + location: |- + - + (Optional) + The location in which the batch will be created in. + name: |- + - + The resource name of the batch. + operation: |- + - + The resource name of the operation associated with this batch. + peripherals_config.metastore_service: |- + - + (Optional) + Resource name of an existing Dataproc Metastore service. + peripherals_config.spark_history_server_config: |- + - + (Optional) + The Spark History Server configuration for the workload. + Structure is documented below. + project: |- + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + pyspark_batch: |- + - + (Optional) + PySpark batch config. + Structure is documented below. + pyspark_batch.archive_uris: |- + - + (Optional) + HCFS URIs of archives to be extracted into the working directory of each executor. + Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. 
+ pyspark_batch.args: |- + - + (Optional) + The arguments to pass to the driver. Do not include arguments that can be set as batch + properties, such as --conf, since a collision can occur that causes an incorrect batch submission. + pyspark_batch.file_uris: |- + - + (Optional) + HCFS URIs of files to be placed in the working directory of each executor. + pyspark_batch.jar_file_uris: |- + - + (Optional) + HCFS URIs of jar files to add to the classpath of the Spark driver and tasks. + pyspark_batch.main_python_file_uri: |- + - + (Optional) + The HCFS URI of the main Python file to use as the Spark driver. Must be a .py file. + pyspark_batch.python_file_uris: |- + - + (Optional) + HCFS file URIs of Python files to pass to the PySpark framework. + Supported file types: .py, .egg, and .zip. + runtime_config: |- + - + (Optional) + Runtime configuration for the batch execution. + Structure is documented below. + runtime_config.autotuning_config: |- + - + (Optional) + Optional. Autotuning configuration of the workload. + Structure is documented below. + runtime_config.cohort: |- + - + (Optional) + Optional. Cohort identifier. Identifies families of the workloads having the same shape, e.g. daily ETL jobs. + runtime_config.container_image: |- + - + (Optional) + Optional custom container image for the job runtime environment. If not specified, a default container image will be used. + runtime_config.effective_properties: |- + - + (Output) + A mapping of property names to values, which are used to configure workload execution. + runtime_config.properties: |- + - + (Optional) + A mapping of property names to values, which are used to configure workload execution. + runtime_config.version: |- + - + (Optional) + Version of the batch runtime. + runtime_info: |- + - + Runtime information about batch execution. + Structure is documented below. 
+ runtime_info.approximate_usage: |- + - + (Output) + Approximate workload resource usage, calculated when the workload completes(see Dataproc Serverless pricing) + Structure is documented below. + runtime_info.current_usage: |- + - + (Output) + Snapshot of current workload resource usage(see Dataproc Serverless pricing) + Structure is documented below. + runtime_info.diagnostic_output_uri: |- + - + (Output) + A URI pointing to the location of the diagnostics tarball. + runtime_info.endpoints: |- + - + (Output) + Map of remote access endpoints (such as web interfaces and APIs) to their URIs. + runtime_info.output_uri: |- + - + (Output) + A URI pointing to the location of the stdout and stderr of the workload. + spark_batch: |- + - + (Optional) + Spark batch config. + Structure is documented below. + spark_batch.archive_uris: |- + - + (Optional) + HCFS URIs of archives to be extracted into the working directory of each executor. + Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. + spark_batch.args: |- + - + (Optional) + The arguments to pass to the driver. Do not include arguments that can be set as batch + properties, such as --conf, since a collision can occur that causes an incorrect batch submission. + spark_batch.file_uris: |- + - + (Optional) + HCFS URIs of files to be placed in the working directory of each executor. + spark_batch.jar_file_uris: |- + - + (Optional) + HCFS URIs of jar files to add to the classpath of the Spark driver and tasks. + spark_batch.main_class: |- + - + (Optional) + The name of the driver main class. The jar file that contains the class must be in the + classpath or specified in jarFileUris. + spark_batch.main_jar_file_uri: |- + - + (Optional) + The HCFS URI of the jar file that contains the main class. + spark_history_server_config.dataproc_cluster: |- + - + (Optional) + Resource name of an existing Dataproc Cluster to act as a Spark History Server for the workload. 
+ spark_r_batch: |- + - + (Optional) + SparkR batch config. + Structure is documented below. + spark_r_batch.archive_uris: |- + - + (Optional) + HCFS URIs of archives to be extracted into the working directory of each executor. + Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. + spark_r_batch.args: |- + - + (Optional) + The arguments to pass to the driver. Do not include arguments that can be set as batch + properties, such as --conf, since a collision can occur that causes an incorrect batch submission. + spark_r_batch.file_uris: |- + - + (Optional) + HCFS URIs of files to be placed in the working directory of each executor. + spark_r_batch.main_r_file_uri: |- + - + (Optional) + The HCFS URI of the main R file to use as the driver. Must be a .R or .r file. + spark_sql_batch: |- + - + (Optional) + Spark SQL batch config. + Structure is documented below. + spark_sql_batch.jar_file_uris: |- + - + (Optional) + HCFS URIs of jar files to be added to the Spark CLASSPATH. + spark_sql_batch.query_file_uri: |- + - + (Optional) + The HCFS URI of the script that contains Spark SQL queries to execute. + spark_sql_batch.query_variables: |- + - + (Optional) + Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";). + state: |- + - + The state of the batch. For possible values, see the API documentation. + state_history: |- + - + Historical state information for the batch. + Structure is documented below. + state_history.state: |- + - + (Output) + The state of the batch at this point in history. For possible values, see the API documentation. + state_history.state_message: |- + - + (Output) + Details about the state at this point in history. + state_history.state_start_time: |- + - + (Output) + The time when the batch entered the historical state. + state_message: |- + - + Batch state details, such as a failure description if the state is FAILED. 
+ state_time: |- + - + Batch state details, such as a failure description if the state is FAILED. + terraform_labels: |- + - + The combination of labels configured directly on the resource + and default labels configured on the provider. + update: '- Default is 20 minutes.' + uuid: |- + - + A batch UUID (Unique Universal Identifier). The service generates this value when it creates the batch. + importStatements: [] google_dataproc_cluster: subCategory: Dataproc description: Manages a Cloud Dataproc cluster resource. @@ -73790,6 +81033,7 @@ resources: cluster_config.gce_cluster_config: |- (Optional) Common config settings for resources of Google Compute Engine cluster instances, applicable to all instances in the cluster. Structure defined below. + cluster_config.gce_cluster_config.confidential_instance_config: '- (Optional) Confidential Instance Config for clusters using Confidential VMs' cluster_config.gce_cluster_config.internal_ip_only: |- - (Optional) By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. If set to true, all @@ -73956,6 +81200,7 @@ resources: - (Computed) The list of labels (key/value pairs) to be applied to instances in the cluster. GCP generates some itself including goog-dataproc-cluster-name which is the name of the cluster. + enable_confidential_compute: '- (Optional) Defines whether the instance should have confidential compute enabled.' enable_kerberos: '- (Optional) Flag to indicate whether to Kerberize the cluster.' encryption_config.kms_key_name: |- - (Required) The Cloud KMS key name to use for PD disk encryption for @@ -74156,6 +81401,598 @@ resources: google_dataproc_cluster_iam_binding can be used per role. Note that custom roles must be of the format [projects|organizations]/{parent-name}/roles/{role-name}. 
importStatements: [] + google_dataproc_gdc_application_environment: + subCategory: Dataproc on GDC + description: An ApplicationEnvironment contains shared configuration that may be referenced by multiple SparkApplications. + name: google_dataproc_gdc_application_environment + title: "" + examples: + - name: application-environment + manifest: |- + { + "application_environment_id": "dp-tf-e2e-application-environment-basic", + "location": "us-west2", + "namespace": "default", + "project": "my-project", + "serviceinstance": "do-not-delete-dataproc-gdc-instance" + } + - name: application-environment + manifest: |- + { + "annotations": { + "an_annotation": "annotation_value" + }, + "application_environment_id": "dp-tf-e2e-application-environment", + "display_name": "An application environment", + "labels": { + "test-label": "label-value" + }, + "location": "us-west2", + "namespace": "default", + "project": "my-project", + "serviceinstance": "do-not-delete-dataproc-gdc-instance", + "spark_application_environment_config": [ + { + "default_properties": { + "spark.executor.memory": "4g" + }, + "default_version": "1.2" + } + ] + } + argumentDocs: + annotations: |- + - + (Optional) + The annotations to associate with this application environment. Annotations may be used to store client information, but are not used by the server. + Note: This field is non-authoritative, and will only manage the annotations present in your configuration. + Please refer to the field effective_annotations for all of the annotations present on the resource. + application_environment_id: |- + - + (Optional) + The id of the application environment + create: '- Default is 20 minutes.' + create_time: |- + - + The timestamp when the resource was created. + delete: '- Default is 20 minutes.' + display_name: |- + - + (Optional) + User-provided human-readable name to be used in user interfaces. 
+ effective_annotations: |- + - + All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services. + effective_labels: |- + - + All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services. + id: '- an identifier for the resource with format projects/{{project}}/locations/{{location}}/serviceInstances/{{serviceinstance}}/applicationEnvironments/{{application_environment_id}}' + labels: |- + - + (Optional) + The labels to associate with this application environment. Labels may be used for filtering and billing tracking. + Note: This field is non-authoritative, and will only manage the labels present in your configuration. + Please refer to the field effective_labels for all of the labels present on the resource. + location: |- + - + (Required) + The location of the application environment + name: |- + - + Identifier. The name of the application environment. Format: projects/{project}/locations/{location}/serviceInstances/{service_instance}/applicationEnvironments/{application_environment_id} + namespace: |- + - + (Optional) + The name of the namespace in which to create this ApplicationEnvironment. This namespace must already exist in the cluster + project: |- + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + serviceinstance: |- + - + (Required) + The id of the service instance to which this application environment belongs. + spark_application_environment_config: |- + - + (Optional) + Represents the SparkApplicationEnvironmentConfig. + Structure is documented below. + spark_application_environment_config.default_properties: |- + - + (Optional) + A map of default Spark properties to apply to workloads in this application environment. These defaults may be overridden by per-application properties. 
+ spark_application_environment_config.default_version: |- + - + (Optional) + The default Dataproc version to use for applications submitted to this application environment + terraform_labels: |- + - + The combination of labels configured directly on the resource + and default labels configured on the provider. + uid: |- + - + System generated unique identifier for this application environment, formatted as UUID4. + update: '- Default is 20 minutes.' + update_time: |- + - + The timestamp when the resource was most recently updated. + importStatements: [] + google_dataproc_gdc_service_instance: + subCategory: Dataproc on GDC + description: A service instance is an instance of the Dataproc operator running on a GDC cluster. + name: google_dataproc_gdc_service_instance + title: "" + examples: + - name: service-instance + manifest: |- + { + "display_name": "A service instance", + "gdce_cluster": [ + { + "gdce_cluster": "projects/gdce-cluster-monitoring/locations/us-west2/clusters/gdce-prism-prober-ord106" + } + ], + "labels": { + "test-label": "label-value" + }, + "location": "us-west2", + "project": "my-project", + "service_account": "dataprocgdc-cep-workflows@gdce-cluster-monitoring.iam.gserviceaccount.com", + "service_instance_id": "tf-e2e-service-instance" + } + argumentDocs: + create: '- Default is 20 minutes.' + create_time: |- + - + The timestamp when the resource was created. + delete: '- Default is 20 minutes.' + display_name: |- + - + (Optional) + User-provided human-readable name to be used in user interfaces. + effective_labels: |- + - + All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services. + effective_service_account: |- + - + Effective service account associated with ServiceInstance. This will be the service_account if specified. Otherwise, it will be an automatically created per-resource P4SA that also automatically has Fleet Workload. Identity bindings applied. 
+ gdce_cluster: |- + - + (Optional) + Gdce cluster information. + Structure is documented below. + gdce_cluster.gdce_cluster: |- + - + (Required) + Gdce cluster resource id. + id: '- an identifier for the resource with format projects/{{project}}/locations/{{location}}/serviceInstances/{{service_instance_id}}' + labels: |- + - + (Optional) + The labels to associate with this service instance. Labels may be used for filtering and billing tracking. + Note: This field is non-authoritative, and will only manage the labels present in your configuration. + Please refer to the field effective_labels for all of the labels present on the resource. + location: |- + - + (Required) + Location of the resource. + name: |- + - + Identifier. The name of the service instance. + project: |- + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + reconciling: |- + - + Whether the service instance is currently reconciling. True if the current state of the resource does not match the intended state, and the system is working to reconcile them, whether or not the change was user initiated. + requested_state: |- + - + The intended state to which the service instance is reconciling. Possible values: + service_account: |- + - + (Optional) + Requested service account to associate with ServiceInstance. + service_instance_id: |- + - + (Required) + Id of the service instance. + spark_service_instance_config: |- + - + (Optional) + Spark-specific service instance configuration. + state: |- + - + The current state. Possible values: + state_message: |- + - + A message explaining the current state. + terraform_labels: |- + - + The combination of labels configured directly on the resource + and default labels configured on the provider. + uid: |- + - + System generated unique identifier for this service instance, formatted as UUID4. + update: '- Default is 20 minutes.' 
+ update_time: |- + - + The timestamp when the resource was most recently updated. + importStatements: [] + google_dataproc_gdc_spark_application: + subCategory: Dataproc on GDC + description: A Spark application is a single Spark workload run on a GDC cluster. + name: google_dataproc_gdc_spark_application + title: "" + examples: + - name: spark-application + manifest: |- + { + "location": "us-west2", + "namespace": "default", + "project": "my-project", + "serviceinstance": "do-not-delete-dataproc-gdc-instance", + "spark_application_config": [ + { + "args": [ + "10000" + ], + "jar_file_uris": [ + "file:///usr/lib/spark/examples/jars/spark-examples.jar" + ], + "main_class": "org.apache.spark.examples.SparkPi" + } + ], + "spark_application_id": "tf-e2e-spark-app-basic" + } + - name: spark-application + manifest: |- + { + "annotations": { + "an_annotation": "annotation_value" + }, + "application_environment": "${google_dataproc_gdc_application_environment.app_env.name}", + "labels": { + "test-label": "label-value" + }, + "location": "us-west2", + "namespace": "default", + "project": "my-project", + "properties": { + "spark.executor.instances": "2" + }, + "serviceinstance": "do-not-delete-dataproc-gdc-instance", + "spark_application_config": [ + { + "archive_uris": [ + "file://usr/lib/spark/examples/spark-examples.jar" + ], + "file_uris": [ + "file:///usr/lib/spark/examples/jars/spark-examples.jar" + ], + "jar_file_uris": [ + "file:///usr/lib/spark/examples/jars/spark-examples.jar" + ], + "main_jar_file_uri": "file:///usr/lib/spark/examples/jars/spark-examples.jar" + } + ], + "spark_application_id": "tf-e2e-spark-app", + "version": "1.2" + } + references: + application_environment: google_dataproc_gdc_application_environment.app_env.name + dependencies: + google_dataproc_gdc_application_environment.app_env: |- + { + "application_environment_id": "tf-e2e-spark-app-env", + "location": "us-west2", + "namespace": "default", + "project": "my-project", + "serviceinstance": 
"do-not-delete-dataproc-gdc-instance" + } + - name: spark-application + manifest: |- + { + "dependency_images": [ + "gcr.io/some/image" + ], + "display_name": "A Pyspark application for a Terraform create test", + "location": "us-west2", + "namespace": "default", + "project": "my-project", + "pyspark_application_config": [ + { + "archive_uris": [ + "file://usr/lib/spark/examples/spark-examples.jar" + ], + "args": [ + "10" + ], + "file_uris": [ + "file://usr/lib/spark/examples/spark-examples.jar" + ], + "jar_file_uris": [ + "file:///usr/lib/spark/examples/jars/spark-examples.jar" + ], + "main_python_file_uri": "gs://goog-dataproc-initialization-actions-us-west2/conda/test_conda.py", + "python_file_uris": [ + "gs://goog-dataproc-initialization-actions-us-west2/conda/get-sys-exec.py" + ] + } + ], + "serviceinstance": "do-not-delete-dataproc-gdc-instance", + "spark_application_id": "tf-e2e-pyspark-app" + } + - name: spark-application + manifest: |- + { + "display_name": "A SparkR application for a Terraform create test", + "location": "us-west2", + "namespace": "default", + "project": "my-project", + "serviceinstance": "do-not-delete-dataproc-gdc-instance", + "spark_application_id": "tf-e2e-sparkr-app", + "spark_r_application_config": [ + { + "archive_uris": [ + "file://usr/lib/spark/examples/spark-examples.jar" + ], + "args": [ + "10" + ], + "file_uris": [ + "file://usr/lib/spark/examples/spark-examples.jar" + ], + "main_r_file_uri": "gs://some-bucket/something.R" + } + ] + } + - name: spark-application + manifest: |- + { + "display_name": "A SparkSql application for a Terraform create test", + "location": "us-west2", + "namespace": "default", + "project": "my-project", + "serviceinstance": "do-not-delete-dataproc-gdc-instance", + "spark_application_id": "tf-e2e-sparksql-app", + "spark_sql_application_config": [ + { + "jar_file_uris": [ + "file:///usr/lib/spark/examples/jars/spark-examples.jar" + ], + "query_list": [ + { + "queries": [ + "show tables;" + ] + } + ], + 
"script_variables": { + "MY_VAR": "1" + } + } + ] + } + - name: spark-application + manifest: |- + { + "display_name": "A SparkSql application for a Terraform create test", + "location": "us-west2", + "namespace": "default", + "project": "my-project", + "serviceinstance": "do-not-delete-dataproc-gdc-instance", + "spark_application_id": "tf-e2e-sparksql-app", + "spark_sql_application_config": [ + { + "jar_file_uris": [ + "file:///usr/lib/spark/examples/jars/spark-examples.jar" + ], + "query_file_uri": "gs://some-bucket/something.sql", + "script_variables": { + "MY_VAR": "1" + } + } + ] + } + argumentDocs: + annotations: |- + - + (Optional) + The annotations to associate with this application. Annotations may be used to store client information, but are not used by the server. + Note: This field is non-authoritative, and will only manage the annotations present in your configuration. + Please refer to the field effective_annotations for all of the annotations present on the resource. + application_environment: |- + - + (Optional) + An ApplicationEnvironment from which to inherit configuration properties. + create: '- Default is 20 minutes.' + create_time: |- + - + The timestamp when the resource was created. + delete: '- Default is 20 minutes.' + dependency_images: |- + - + (Optional) + List of container image uris for additional file dependencies. Dependent files are sequentially copied from each image. If a file with the same name exists in 2 images then the file from later image is used. + display_name: |- + - + (Optional) + User-provided human-readable name to be used in user interfaces. + effective_annotations: |- + - + All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services. + effective_labels: |- + - + All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services. 
+ id: '- an identifier for the resource with format projects/{{project}}/locations/{{location}}/serviceInstances/{{serviceinstance}}/sparkApplications/{{spark_application_id}}' + labels: |- + - + (Optional) + The labels to associate with this application. Labels may be used for filtering and billing tracking. + Note: This field is non-authoritative, and will only manage the labels present in your configuration. + Please refer to the field effective_labels for all of the labels present on the resource. + location: |- + - + (Required) + The location of the spark application. + monitoring_endpoint: |- + - + URL for a monitoring UI for this application (for eventual Spark PHS/UI support) Out of scope for private GA + name: |- + - + Identifier. The name of the application. Format: projects/{project}/locations/{location}/serviceInstances/{service_instance}/sparkApplications/{application} + namespace: |- + - + (Optional) + The Kubernetes namespace in which to create the application. This namespace must already exist on the cluster. + output_uri: |- + - + An HCFS URI pointing to the location of stdout and stdout of the application Mainly useful for Pantheon and gcloud Not in scope for private GA + project: |- + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + properties: |- + - + (Optional) + application-specific properties. + pyspark_application_config: |- + - + (Optional) + Represents the PySparkApplicationConfig. + Structure is documented below. + pyspark_application_config.archive_uris: |- + - + (Optional) + HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. + pyspark_application_config.args: |- + - + (Optional) + The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission. 
+ pyspark_application_config.file_uris: |- + - + (Optional) + HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks. + pyspark_application_config.jar_file_uris: |- + - + (Optional) + HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks. + pyspark_application_config.main_python_file_uri: |- + - + (Required) + The HCFS URI of the main Python file to use as the driver. Must be a .py file. + pyspark_application_config.python_file_uris: |- + - + (Optional) + HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip. + query_list.queries: |- + - + (Required) + The queries to run. + reconciling: |- + - + Whether the application is currently reconciling. True if the current state of the resource does not match the intended state, and the system is working to reconcile them, whether or not the change was user initiated. + serviceinstance: |- + - + (Required) + The id of the service instance to which this spark application belongs. + spark_application_config: |- + - + (Optional) + Represents the SparkApplicationConfig. + Structure is documented below. + spark_application_config.archive_uris: |- + - + (Optional) + HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. + spark_application_config.args: |- + - + (Optional) + The arguments to pass to the driver. Do not include arguments that can be set as application properties, such as --conf, since a collision can occur that causes an incorrect application submission. + spark_application_config.file_uris: |- + - + (Optional) + HCFS URIs of files to be placed in the working directory of each executor. + spark_application_config.jar_file_uris: |- + - + (Optional) + HCFS URIs of jar files to add to the classpath of the Spark driver and tasks. 
+ spark_application_config.main_class: |- + - + (Optional) + The name of the driver main class. The jar file that contains the class must be in the classpath or specified in jar_file_uris. + spark_application_config.main_jar_file_uri: |- + - + (Optional) + The HCFS URI of the jar file that contains the main class. + spark_application_id: |- + - + (Required) + The id of the application + spark_r_application_config: |- + - + (Optional) + Represents the SparkRApplicationConfig. + Structure is documented below. + spark_r_application_config.archive_uris: |- + - + (Optional) + HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. + spark_r_application_config.args: |- + - + (Optional) + The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission. + spark_r_application_config.file_uris: |- + - + (Optional) + HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks. + spark_r_application_config.main_r_file_uri: |- + - + (Required) + The HCFS URI of the main R file to use as the driver. Must be a .R file. + spark_sql_application_config: |- + - + (Optional) + Represents the SparkRApplicationConfig. + Structure is documented below. + spark_sql_application_config.jar_file_uris: |- + - + (Optional) + HCFS URIs of jar files to be added to the Spark CLASSPATH. + spark_sql_application_config.query_file_uri: |- + - + (Optional) + The HCFS URI of the script that contains SQL queries. + spark_sql_application_config.query_list: |- + - + (Optional) + Represents a list of queries. + Structure is documented below. + spark_sql_application_config.script_variables: |- + - + (Optional) + Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";). + state: |- + - + The current state. 
+ Possible values: + state_message: |- + - + A message explaining the current state. + terraform_labels: |- + - + The combination of labels configured directly on the resource + and default labels configured on the provider. + uid: |- + - + System generated unique identifier for this application, formatted as UUID4. + update: '- Default is 20 minutes.' + update_time: |- + - + The timestamp when the resource was most recently updated. + version: |- + - + (Optional) + The Dataproc version of this application. + importStatements: [] google_dataproc_job: subCategory: Dataproc description: Manages a job resource within a Dataproc cluster. @@ -74410,7 +82247,7 @@ resources: "rank": "1" } ], - "federation_id": "", + "federation_id": "metastore-fed", "location": "us-central1", "version": "3.1.2" } @@ -74426,7 +82263,7 @@ resources: } ], "location": "us-central1", - "service_id": "", + "service_id": "metastore-service", "tier": "DEVELOPER" } - name: default @@ -74444,7 +82281,7 @@ resources: "rank": "1" } ], - "federation_id": "", + "federation_id": "metastore-fed", "location": "us-central1", "version": "3.1.2" } @@ -74460,7 +82297,7 @@ resources: } ], "location": "us-central1", - "service_id": "", + "service_id": "metastore-service", "tier": "DEVELOPER" } argumentDocs: @@ -74600,6 +82437,29 @@ resources: "service_id": "metastore-srv", "tier": "DEVELOPER" } + - name: default + manifest: |- + { + "deletion_protection": true, + "hive_metastore_config": [ + { + "version": "2.3.6" + } + ], + "labels": { + "env": "test" + }, + "location": "us-central1", + "maintenance_window": [ + { + "day_of_week": "SUNDAY", + "hour_of_day": 2 + } + ], + "port": 9080, + "service_id": "metastore-srv", + "tier": "DEVELOPER" + } - name: default manifest: |- { @@ -74650,7 +82510,8 @@ resources: ] } ], - "service_id": "metastore-srv" + "service_id": "metastore-srv", + "tier": "DEVELOPER" } references: network_config.consumers.subnetwork: google_compute_subnetwork.subnet.id @@ -74789,6 +82650,7 @@ 
resources: } ], "location": "us-central1", + "provider": "${google-beta}", "scaling_config": [ { "autoscaling_config": [ @@ -74805,6 +82667,8 @@ resources: ], "service_id": "test-service" } + references: + provider: google-beta - name: test_resource manifest: |- { @@ -74815,6 +82679,7 @@ resources: } ], "location": "us-central1", + "provider": "${google-beta}", "scaling_config": [ { "autoscaling_config": [ @@ -74832,6 +82697,8 @@ resources: ], "service_id": "test-service" } + references: + provider: google-beta - name: test_resource manifest: |- { @@ -74842,6 +82709,7 @@ resources: } ], "location": "us-central1", + "provider": "${google-beta}", "scaling_config": [ { "autoscaling_config": [ @@ -74858,6 +82726,8 @@ resources: ], "service_id": "test-service" } + references: + provider: google-beta - name: test_resource manifest: |- { @@ -74924,6 +82794,10 @@ resources: Default value is MYSQL. Possible values are: MYSQL, SPANNER. delete: '- Default is 60 minutes.' + deletion_protection: |- + - + (Optional) + Indicates if the dataproc metastore should be protected against accidental deletions. effective_labels: |- - All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services. @@ -75340,7 +83214,7 @@ resources: config.labels: |- - (Optional) - The labels to associate with this template. These labels will be propagated to all jobs and clusters created by the workflow instance. Label keys must contain 1 to 63 characters, and must conform to (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a template. + The labels to associate with this template. These labels will be propagated to all jobs and clusters created by the workflow instance. Label keys must contain 1 to 63 characters, and must conform to RFC 1035. No more than 32 labels can be associated with a template. 
config.lifecycle_config: |- - (Optional) @@ -75376,7 +83250,7 @@ resources: config.staging_bucket: |- - (Optional) - A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). + A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets). config.temp_bucket: |- - (Optional) @@ -75424,7 +83298,7 @@ resources: gce_cluster_config.metadata: |- - (Optional) - The Compute Engine metadata entries to add to all instances (see (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)). + The Compute Engine metadata entries to add to all instances (see About VM metadata). gce_cluster_config.network: |- - (Optional) @@ -75460,7 +83334,7 @@ resources: gce_cluster_config.tags: |- - (Optional) - The Compute Engine tags to add to all instances (see (https://cloud.google.com/compute/docs/label-or-tag-resources#tags)). + The Compute Engine tags to add to all instances (see Manage tags for resources). gce_cluster_config.zone: |- - (Optional) @@ -75541,7 +83415,7 @@ resources: initialization_actions.execution_timeout: |- - (Optional) - Amount of time executable has to complete. 
Default is 10 minutes (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. + Amount of time executable has to complete. Default is 10 minutes (see JSON representation of JSON Mapping - Language Guide (proto 3)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. jobs: |- - (Required) @@ -75597,18 +83471,18 @@ resources: lifecycle_config.auto_delete_time: |- - (Optional) - The time when cluster will be auto-deleted (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + The time when cluster will be auto-deleted (see JSON representation of JSON Mapping - Language Guide (proto 3)). lifecycle_config.auto_delete_ttl: |- - (Optional) - The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of JSON Mapping - Language Guide (proto 3)). lifecycle_config.idle_delete_ttl: |- - (Optional) - The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json). + The duration to keep the cluster alive while idling (when no jobs are running). 
Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of JSON Mapping - Language Guide (proto 3). lifecycle_config.idle_start_time: |- - - Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of JSON Mapping - Language Guide (proto 3)). location: |- - (Required) @@ -75657,7 +83531,7 @@ resources: master_config.min_cpu_platform: |- - (Optional) - Specifies the minimum cpu platform for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + Specifies the minimum cpu platform for the Instance Group. See Minimum CPU platform. master_config.num_instances: |- - (Optional) @@ -76007,75 +83881,6 @@ resources: (Required) Required. List of allowed values for the parameter. importStatements: [] - google_datastore_index: - subCategory: Datastore - description: Describes a composite index for Firestore in Datastore Mode. - name: google_datastore_index - title: "" - examples: - - name: default - manifest: |- - { - "depends_on": [ - "${google_firestore_database.database}" - ], - "kind": "foo", - "properties": [ - { - "direction": "ASCENDING", - "name": "property_a" - }, - { - "direction": "ASCENDING", - "name": "property_b" - } - ] - } - dependencies: - google_firestore_database.database: |- - { - "delete_protection_state": "DELETE_PROTECTION_DISABLED", - "deletion_policy": "DELETE", - "location_id": "nam5", - "name": "(default)", - "project": "my-project-name", - "type": "DATASTORE_MODE" - } - argumentDocs: - ancestor: |- - - - (Optional) - Policy for including ancestors in the index. - Default value is NONE. 
- Possible values are: NONE, ALL_ANCESTORS. - create: '- Default is 20 minutes.' - delete: '- Default is 20 minutes.' - id: '- an identifier for the resource with format projects/{{project}}/indexes/{{index_id}}' - index_id: |- - - - The index id. - kind: |- - - - (Required) - The entity kind which the index applies to. - project: |- - - (Optional) The ID of the project in which the resource belongs. - If it is not provided, the provider project is used. - properties: |- - - - (Optional) - An ordered list of properties to index on. - Structure is documented below. - properties.direction: |- - - - (Required) - The direction the index should optimize for sorting. - Possible values are: ASCENDING, DESCENDING. - properties.name: |- - - - (Required) - The property name to index. - importStatements: [] google_datastream_connection_profile: subCategory: Datastream description: A set of reusable connection configurations to be used as a source or destination for a stream. @@ -76149,7 +83954,7 @@ resources: google_sql_database_instance.instance: |- { "database_version": "POSTGRES_14", - "deletion_protection": "true", + "deletion_protection": true, "name": "my-instance", "region": "us-central1", "settings": [ @@ -76243,7 +84048,7 @@ resources: google_sql_database_instance.instance: |- { "database_version": "POSTGRES_14", - "deletion_protection": "true", + "deletion_protection": true, "name": "my-instance", "region": "us-central1", "settings": [ @@ -76314,7 +84119,7 @@ resources: google_sql_database_instance.instance: |- { "database_version": "SQLSERVER_2019_STANDARD", - "deletion_protection": "true", + "deletion_protection": true, "name": "sql-server", "region": "us-central1", "root_password": "root-password", @@ -77053,7 +84858,7 @@ resources: "database": "postgres", "hostname": "hostname", "password": "pass", - "port": 3306, + "port": 5432, "username": "user" } ] @@ -77233,6 +85038,9 @@ resources: } ] } + ], + "transaction_logs": [ + {} ] } ] @@ -77279,7 +85087,7 @@ 
resources: google_sql_database_instance.instance: |- { "database_version": "SQLSERVER_2019_STANDARD", - "deletion_protection": "true", + "deletion_protection": true, "name": "sql-server", "region": "us-central1", "root_password": "root-password", @@ -77319,7 +85127,7 @@ resources: - name: default manifest: |- { - "backfill_all": [ + "backfill_none": [ {} ], "destination_config": [ @@ -77327,81 +85135,95 @@ resources: "bigquery_destination_config": [ { "data_freshness": "900s", - "single_target_dataset": [ + "source_hierarchy_datasets": [ { - "dataset_id": "${google_bigquery_dataset.postgres.id}" + "dataset_template": [ + { + "location": "us-central1" + } + ] } ] } ], - "destination_connection_profile": "${google_datastream_connection_profile.destination_connection_profile2.id}" + "destination_connection_profile": "${google_datastream_connection_profile.destination.id}" } ], - "display_name": "postgres to bigQuery", + "display_name": "SQL Server to BigQuery", "location": "us-central1", "source_config": [ { - "mysql_source_config": [ - {} - ], - "source_connection_profile": "${google_datastream_connection_profile.source_connection_profile.id}" + "source_connection_profile": "${google_datastream_connection_profile.source.id}", + "sql_server_source_config": [ + { + "change_tables": [ + {} + ], + "include_objects": [ + { + "schemas": [ + { + "schema": "schema", + "tables": [ + { + "table": "table" + } + ] + } + ] + } + ] + } + ] } ], - "stream_id": "postgres-bigquery" + "stream_id": "stream" } references: - destination_config.bigquery_destination_config.single_target_dataset.dataset_id: google_bigquery_dataset.postgres.id - destination_config.destination_connection_profile: google_datastream_connection_profile.destination_connection_profile2.id - source_config.source_connection_profile: google_datastream_connection_profile.source_connection_profile.id + destination_config.destination_connection_profile: google_datastream_connection_profile.destination.id + 
source_config.source_connection_profile: google_datastream_connection_profile.source.id dependencies: - google_bigquery_dataset.postgres: |- - { - "dataset_id": "postgres", - "description": "Database of postgres", - "friendly_name": "postgres", - "location": "us-central1" - } - google_datastream_connection_profile.destination_connection_profile2: |- + google_datastream_connection_profile.destination: |- { "bigquery_profile": [ {} ], - "connection_profile_id": "dest-profile", - "display_name": "Connection profile", + "connection_profile_id": "destination-profile", + "display_name": "BigQuery Destination", "location": "us-central1" } - google_datastream_connection_profile.source_connection_profile: |- + google_datastream_connection_profile.source: |- { "connection_profile_id": "source-profile", - "display_name": "Source connection profile", + "display_name": "SQL Server Source", "location": "us-central1", - "mysql_profile": [ + "sql_server_profile": [ { + "database": "${google_sql_database.db.name}", "hostname": "${google_sql_database_instance.instance.public_ip_address}", "password": "${google_sql_user.user.password}", + "port": 1433, "username": "${google_sql_user.user.name}" } ] } google_sql_database.db: |- { + "depends_on": [ + "${google_sql_user.user}" + ], "instance": "${google_sql_database_instance.instance.name}", "name": "db" } google_sql_database_instance.instance: |- { - "database_version": "MYSQL_8_0", - "deletion_protection": false, - "name": "instance-name", + "database_version": "SQLSERVER_2019_STANDARD", + "deletion_protection": true, + "name": "sql-server", "region": "us-central1", + "root_password": "root-password", "settings": [ { - "backup_configuration": [ - { - "binary_log_enabled": true, - "enabled": true - } - ], "ip_configuration": [ { "authorized_networks": [ @@ -77423,21 +85245,15 @@ resources: ] } ], - "tier": "db-f1-micro" + "tier": "db-custom-2-4096" } ] } google_sql_user.user: |- { - "host": "%", "instance": 
"${google_sql_database_instance.instance.name}", - "name": "my-user", - "password": "${random_password.pwd.result}" - } - random_password.pwd: |- - { - "length": 16, - "special": false + "name": "user", + "password": "password" } - name: default manifest: |- @@ -77445,18 +85261,15 @@ resources: "backfill_none": [ {} ], - "depends_on": [ - "${google_kms_crypto_key_iam_member.bigquery_key_user}" - ], "destination_config": [ { "bigquery_destination_config": [ { + "data_freshness": "900s", "source_hierarchy_datasets": [ { "dataset_template": [ { - "kms_key_name": "bigquery-kms-name", "location": "us-central1" } ] @@ -77464,72 +85277,84 @@ resources: ] } ], - "destination_connection_profile": "${google_datastream_connection_profile.destination_connection_profile.id}" + "destination_connection_profile": "${google_datastream_connection_profile.destination.id}" } ], - "display_name": "my stream", + "display_name": "MySQL to BigQuery", "location": "us-central1", "source_config": [ { "mysql_source_config": [ - {} + { + "gtid": [ + {} + ], + "include_objects": [ + { + "schemas": [ + { + "schema": "schema", + "tables": [ + { + "table": "table" + } + ] + } + ] + } + ] + } ], - "source_connection_profile": "${google_datastream_connection_profile.source_connection_profile.id}" + "source_connection_profile": "${google_datastream_connection_profile.source.id}" } ], - "stream_id": "my-stream" + "stream_id": "\u003c%= ctx[:vars]['stream_id'] %\u003e" } references: - destination_config.destination_connection_profile: google_datastream_connection_profile.destination_connection_profile.id - source_config.source_connection_profile: google_datastream_connection_profile.source_connection_profile.id + destination_config.destination_connection_profile: google_datastream_connection_profile.destination.id + source_config.source_connection_profile: google_datastream_connection_profile.source.id dependencies: - google_datastream_connection_profile.destination_connection_profile: |- + 
google_datastream_connection_profile.destination: |- { "bigquery_profile": [ {} ], - "connection_profile_id": "destination-profile", - "display_name": "Connection profile", + "connection_profile_id": "\u003c%= ctx[:vars]['destination_connection_profile_id'] %\u003e", + "display_name": "BigQuery Destination", "location": "us-central1" } - google_datastream_connection_profile.source_connection_profile: |- + google_datastream_connection_profile.source: |- { - "connection_profile_id": "source-profile", - "display_name": "Source connection profile", + "connection_profile_id": "\u003c%= ctx[:vars]['source_connection_profile_id'] %\u003e", + "display_name": "MySQL Source", "location": "us-central1", "mysql_profile": [ { + "database": "${google_sql_database.db.name}", "hostname": "${google_sql_database_instance.instance.public_ip_address}", "password": "${google_sql_user.user.password}", + "port": 1433, "username": "${google_sql_user.user.name}" } ] } - google_kms_crypto_key_iam_member.bigquery_key_user: |- - { - "crypto_key_id": "bigquery-kms-name", - "member": "serviceAccount:${data.google_bigquery_default_service_account.bq_sa.email}", - "role": "roles/cloudkms.cryptoKeyEncrypterDecrypter" - } google_sql_database.db: |- { + "depends_on": [ + "${google_sql_user.user}" + ], "instance": "${google_sql_database_instance.instance.name}", - "name": "db" + "name": "\u003c%= ctx[:vars]['database_name'] %\u003e" } google_sql_database_instance.instance: |- { "database_version": "MYSQL_8_0", - "deletion_protection": true, - "name": "my-instance", + "deletion_protection": "\u003c%= ctx[:vars]['deletion_protection'] %\u003e", + "name": "\u003c%= ctx[:vars]['mysql_name'] %\u003e", "region": "us-central1", + "root_password": "\u003c%= ctx[:vars]['mysql_root_password'] %\u003e", "settings": [ { - "backup_configuration": [ - { - "binary_log_enabled": true, - "enabled": true - } - ], "ip_configuration": [ { "authorized_networks": [ @@ -77551,50 +85376,38 @@ resources: ] } ], - "tier": 
"db-f1-micro" + "tier": "db-custom-2-4096" } ] } google_sql_user.user: |- { - "host": "%", "instance": "${google_sql_database_instance.instance.name}", - "name": "user", - "password": "${random_password.pwd.result}" - } - random_password.pwd: |- - { - "length": 16, - "special": false + "name": "\u003c%= ctx[:vars]['database_user'] %\u003e", + "password": "\u003c%= ctx[:vars]['database_password'] %\u003e" } - name: default manifest: |- { - "backfill_none": [ + "backfill_all": [ {} ], "destination_config": [ { "bigquery_destination_config": [ { - "append_only": [ - {} - ], - "source_hierarchy_datasets": [ + "data_freshness": "900s", + "single_target_dataset": [ { - "dataset_template": [ - { - "location": "us-central1" - } - ] + "dataset_id": "${google_bigquery_dataset.postgres.id}" } ] } ], - "destination_connection_profile": "${google_datastream_connection_profile.destination_connection_profile.id}" + "destination_connection_profile": "${google_datastream_connection_profile.destination_connection_profile2.id}" } ], - "display_name": "my stream", + "display_name": "postgres to bigQuery", "location": "us-central1", "source_config": [ { @@ -77604,18 +85417,275 @@ resources: "source_connection_profile": "${google_datastream_connection_profile.source_connection_profile.id}" } ], - "stream_id": "my-stream" + "stream_id": "postgres-bigquery" } references: - destination_config.destination_connection_profile: google_datastream_connection_profile.destination_connection_profile.id + destination_config.bigquery_destination_config.single_target_dataset.dataset_id: google_bigquery_dataset.postgres.id + destination_config.destination_connection_profile: google_datastream_connection_profile.destination_connection_profile2.id source_config.source_connection_profile: google_datastream_connection_profile.source_connection_profile.id dependencies: - google_datastream_connection_profile.destination_connection_profile: |- + google_bigquery_dataset.postgres: |- + { + "dataset_id": 
"postgres", + "description": "Database of postgres", + "friendly_name": "postgres", + "location": "us-central1" + } + google_datastream_connection_profile.destination_connection_profile2: |- { "bigquery_profile": [ {} ], - "connection_profile_id": "destination-profile", + "connection_profile_id": "dest-profile", + "display_name": "Connection profile", + "location": "us-central1" + } + google_datastream_connection_profile.source_connection_profile: |- + { + "connection_profile_id": "source-profile", + "display_name": "Source connection profile", + "location": "us-central1", + "mysql_profile": [ + { + "hostname": "${google_sql_database_instance.instance.public_ip_address}", + "password": "${google_sql_user.user.password}", + "username": "${google_sql_user.user.name}" + } + ] + } + google_sql_database.db: |- + { + "instance": "${google_sql_database_instance.instance.name}", + "name": "db" + } + google_sql_database_instance.instance: |- + { + "database_version": "MYSQL_8_0", + "deletion_protection": false, + "name": "instance-name", + "region": "us-central1", + "settings": [ + { + "backup_configuration": [ + { + "binary_log_enabled": true, + "enabled": true + } + ], + "ip_configuration": [ + { + "authorized_networks": [ + { + "value": "34.71.242.81" + }, + { + "value": "34.72.28.29" + }, + { + "value": "34.67.6.157" + }, + { + "value": "34.67.234.134" + }, + { + "value": "34.72.239.218" + } + ] + } + ], + "tier": "db-f1-micro" + } + ] + } + google_sql_user.user: |- + { + "host": "%", + "instance": "${google_sql_database_instance.instance.name}", + "name": "my-user", + "password": "${random_password.pwd.result}" + } + random_password.pwd: |- + { + "length": 16, + "special": false + } + - name: default + manifest: |- + { + "backfill_none": [ + {} + ], + "depends_on": [ + "${google_kms_crypto_key_iam_member.bigquery_key_user}" + ], + "destination_config": [ + { + "bigquery_destination_config": [ + { + "source_hierarchy_datasets": [ + { + "dataset_template": [ + { + 
"kms_key_name": "bigquery-kms-name", + "location": "us-central1" + } + ] + } + ] + } + ], + "destination_connection_profile": "${google_datastream_connection_profile.destination_connection_profile.id}" + } + ], + "display_name": "my stream", + "location": "us-central1", + "source_config": [ + { + "mysql_source_config": [ + {} + ], + "source_connection_profile": "${google_datastream_connection_profile.source_connection_profile.id}" + } + ], + "stream_id": "my-stream" + } + references: + destination_config.destination_connection_profile: google_datastream_connection_profile.destination_connection_profile.id + source_config.source_connection_profile: google_datastream_connection_profile.source_connection_profile.id + dependencies: + google_datastream_connection_profile.destination_connection_profile: |- + { + "bigquery_profile": [ + {} + ], + "connection_profile_id": "destination-profile", + "display_name": "Connection profile", + "location": "us-central1" + } + google_datastream_connection_profile.source_connection_profile: |- + { + "connection_profile_id": "source-profile", + "display_name": "Source connection profile", + "location": "us-central1", + "mysql_profile": [ + { + "hostname": "${google_sql_database_instance.instance.public_ip_address}", + "password": "${google_sql_user.user.password}", + "username": "${google_sql_user.user.name}" + } + ] + } + google_kms_crypto_key_iam_member.bigquery_key_user: |- + { + "crypto_key_id": "bigquery-kms-name", + "member": "serviceAccount:${data.google_bigquery_default_service_account.bq_sa.email}", + "role": "roles/cloudkms.cryptoKeyEncrypterDecrypter" + } + google_sql_database.db: |- + { + "instance": "${google_sql_database_instance.instance.name}", + "name": "db" + } + google_sql_database_instance.instance: |- + { + "database_version": "MYSQL_8_0", + "deletion_protection": true, + "name": "my-instance", + "region": "us-central1", + "settings": [ + { + "backup_configuration": [ + { + "binary_log_enabled": true, + "enabled": 
true + } + ], + "ip_configuration": [ + { + "authorized_networks": [ + { + "value": "34.71.242.81" + }, + { + "value": "34.72.28.29" + }, + { + "value": "34.67.6.157" + }, + { + "value": "34.67.234.134" + }, + { + "value": "34.72.239.218" + } + ] + } + ], + "tier": "db-f1-micro" + } + ] + } + google_sql_user.user: |- + { + "host": "%", + "instance": "${google_sql_database_instance.instance.name}", + "name": "user", + "password": "${random_password.pwd.result}" + } + random_password.pwd: |- + { + "length": 16, + "special": false + } + - name: default + manifest: |- + { + "backfill_none": [ + {} + ], + "destination_config": [ + { + "bigquery_destination_config": [ + { + "append_only": [ + {} + ], + "source_hierarchy_datasets": [ + { + "dataset_template": [ + { + "location": "us-central1" + } + ] + } + ] + } + ], + "destination_connection_profile": "${google_datastream_connection_profile.destination_connection_profile.id}" + } + ], + "display_name": "my stream", + "location": "us-central1", + "source_config": [ + { + "mysql_source_config": [ + {} + ], + "source_connection_profile": "${google_datastream_connection_profile.source_connection_profile.id}" + } + ], + "stream_id": "my-stream" + } + references: + destination_config.destination_connection_profile: google_datastream_connection_profile.destination_connection_profile.id + source_config.source_connection_profile: google_datastream_connection_profile.source_connection_profile.id + dependencies: + google_datastream_connection_profile.destination_connection_profile: |- + { + "bigquery_profile": [ + {} + ], + "connection_profile_id": "destination-profile", "display_name": "Connection profile", "location": "us-central1" } @@ -77991,11 +86061,19 @@ resources: (Optional) Tables in the database. Structure is documented below. + mysql_source_config.binary_log_position: |- + - + (Optional) + CDC reader reads from binary logs replication cdc method. 
mysql_source_config.exclude_objects: |- - (Optional) MySQL objects to exclude from the stream. Structure is documented below. + mysql_source_config.gtid: |- + - + (Optional) + CDC reader reads from gtid based replication. mysql_source_config.include_objects: |- - (Optional) @@ -78254,7 +86332,11 @@ resources: (Optional) If supplied, every created dataset will have its name prefixed by the provided value. The prefix and name will be separated by an underscore. i.e. _. - source_hierarchy_datasets.dataset_template.desired_state: '- (Optional) Desired state of the Stream. Set this field to RUNNING to start the stream, and PAUSED to pause the stream.' + source_hierarchy_datasets.dataset_template.desired_state: |- + - (Optional) Desired state of the Stream. Set this field to RUNNING to start the stream, + NOT_STARTED to create the stream without starting and PAUSED to pause + the stream from a RUNNING state. + Possible values: NOT_STARTED, RUNNING, PAUSED. Default: NOT_STARTED source_hierarchy_datasets.dataset_template.kms_key_name: |- - (Optional) @@ -78290,6 +86372,10 @@ resources: (Optional) Tables in the database. Structure is documented below. + sql_server_source_config.change_tables: |- + - + (Optional) + CDC reader reads from change tables. sql_server_source_config.exclude_objects: |- - (Optional) @@ -78308,6 +86394,10 @@ resources: - (Optional) Max concurrent CDC tasks. + sql_server_source_config.transaction_logs: |- + - + (Optional) + CDC reader reads from transaction logs. state: |- - The state of the stream. @@ -78483,6 +86573,698 @@ resources: Structure is documented below. update: '- Default is 60 minutes.' importStatements: [] + google_developer_connect_connection: + subCategory: Developer Connect + description: A connection for GitHub, GitHub Enterprise, GitLab, and GitLab Enterprise. 
+ name: google_developer_connect_connection + title: "" + examples: + - name: my-connection + manifest: |- + { + "connection_id": "tf-test-connection-new", + "depends_on": [ + "${google_project_iam_member.devconnect-secret}" + ], + "github_config": [ + { + "github_app": "FIREBASE" + } + ], + "location": "us-central1", + "provider": "${google-beta}" + } + references: + provider: google-beta + dependencies: + google_project_iam_member.devconnect-secret: |- + { + "member": "${google_project_service_identity.devconnect-p4sa.member}", + "project": "my-project-name", + "provider": "${google-beta}", + "role": "roles/secretmanager.admin" + } + google_project_service_identity.devconnect-p4sa: |- + { + "provider": "${google-beta}", + "service": "developerconnect.googleapis.com" + } + - name: my-connection + manifest: |- + { + "connection_id": "tf-test-connection-cred", + "github_config": [ + { + "authorizer_credential": [ + { + "oauth_token_secret_version": "projects/your-project/secrets/your-secret-id/versions/latest" + } + ], + "github_app": "DEVELOPER_CONNECT" + } + ], + "location": "us-central1" + } + - name: my-connection + manifest: |- + { + "connection_id": "my-connection", + "github_config": [ + { + "app_installation_id": 123123, + "authorizer_credential": [ + { + "oauth_token_secret_version": "${google_secret_manager_secret_version.github-token-secret-version.id}" + } + ], + "github_app": "DEVELOPER_CONNECT" + } + ], + "location": "us-central1" + } + references: + github_config.authorizer_credential.oauth_token_secret_version: google_secret_manager_secret_version.github-token-secret-version.id + dependencies: + google_project_service_identity.devconnect-p4sa: |- + { + "service": "developerconnect.googleapis.com" + } + google_secret_manager_secret.github-token-secret: |- + { + "replication": [ + { + "auto": [ + {} + ] + } + ], + "secret_id": "github-token-secret" + } + google_secret_manager_secret_iam_policy.policy: |- + { + "policy_data": 
"${data.google_iam_policy.p4sa-secretAccessor.policy_data}", + "secret_id": "${google_secret_manager_secret.github-token-secret.secret_id}" + } + google_secret_manager_secret_version.github-token-secret-version: |- + { + "secret": "${google_secret_manager_secret.github-token-secret.id}", + "secret_data": "${file(\"my-github-token.txt\")}" + } + - name: my-connection + manifest: |- + { + "connection_id": "tf-test-connection", + "github_config": [ + { + "authorizer_credential": [ + { + "oauth_token_secret_version": "projects/devconnect-terraform-creds/secrets/tf-test-do-not-change-github-oauthtoken-e0b9e7/versions/1" + } + ], + "github_app": "DEVELOPER_CONNECT" + } + ], + "location": "us-central1" + } + - name: my-connection + manifest: |- + { + "connection_id": "my-connection", + "github_config": [ + { + "app_installation_id": 123123, + "authorizer_credential": [ + { + "oauth_token_secret_version": "${google_secret_manager_secret_version.github-token-secret-version.id}" + } + ], + "github_app": "DEVELOPER_CONNECT" + } + ], + "location": "us-central1" + } + references: + github_config.authorizer_credential.oauth_token_secret_version: google_secret_manager_secret_version.github-token-secret-version.id + dependencies: + google_secret_manager_secret.github-token-secret: |- + { + "replication": [ + { + "auto": [ + {} + ] + } + ], + "secret_id": "github-token-secret" + } + google_secret_manager_secret_iam_policy.policy: |- + { + "policy_data": "${data.google_iam_policy.p4sa-secretAccessor.policy_data}", + "secret_id": "${google_secret_manager_secret.github-token-secret.secret_id}" + } + google_secret_manager_secret_version.github-token-secret-version: |- + { + "secret": "${google_secret_manager_secret.github-token-secret.id}", + "secret_data": "${file(\"my-github-token.txt\")}" + } + - name: my-connection + manifest: |- + { + "connection_id": "tf-test-connection", + "github_enterprise_config": [ + { + "app_id": 864434, + "app_installation_id": 837537, + "host_uri": 
"https://ghe.proctor-staging-test.com", + "private_key_secret_version": "projects/devconnect-terraform-creds/secrets/tf-test-ghe-do-not-change-ghe-private-key-f522d2/versions/latest", + "webhook_secret_secret_version": "projects/devconnect-terraform-creds/secrets/tf-test-ghe-do-not-change-ghe-webhook-secret-3c806f/versions/latest" + } + ], + "location": "us-central1" + } + - name: my-connection + manifest: |- + { + "connection_id": "my-connection", + "depends_on": [ + "${google_secret_manager_secret_iam_policy.policy-pk}", + "${google_secret_manager_secret_iam_policy.policy-whs}" + ], + "github_enterprise_config": [ + { + "app_id": 100, + "app_installation_id": 123123, + "host_uri": "https://ghe.com", + "private_key_secret_version": "${google_secret_manager_secret_version.private-key-secret-version.id}", + "webhook_secret_secret_version": "${google_secret_manager_secret_version.webhook-secret-secret-version.id}" + } + ], + "location": "us-central1" + } + references: + github_enterprise_config.private_key_secret_version: google_secret_manager_secret_version.private-key-secret-version.id + github_enterprise_config.webhook_secret_secret_version: google_secret_manager_secret_version.webhook-secret-secret-version.id + dependencies: + google_secret_manager_secret.private-key-secret: |- + { + "replication": [ + { + "auto": [ + {} + ] + } + ], + "secret_id": "ghe-pk-secret" + } + google_secret_manager_secret.webhook-secret-secret: |- + { + "replication": [ + { + "auto": [ + {} + ] + } + ], + "secret_id": "ghe-token-secret" + } + google_secret_manager_secret_iam_policy.policy-pk: |- + { + "policy_data": "${data.google_iam_policy.p4sa-secretAccessor.policy_data}", + "secret_id": "${google_secret_manager_secret.private-key-secret.secret_id}" + } + google_secret_manager_secret_iam_policy.policy-whs: |- + { + "policy_data": "${data.google_iam_policy.p4sa-secretAccessor.policy_data}", + "secret_id": "${google_secret_manager_secret.webhook-secret-secret.secret_id}" + } + 
google_secret_manager_secret_version.private-key-secret-version: |- + { + "secret": "${google_secret_manager_secret.private-key-secret.id}", + "secret_data": "${file(\"private-key.pem\")}" + } + google_secret_manager_secret_version.webhook-secret-secret-version: |- + { + "secret": "${google_secret_manager_secret.webhook-secret-secret.id}", + "secret_data": "\u003cwebhook-secret-data\u003e" + } + - name: my-connection + manifest: |- + { + "connection_id": "tf-test-connection", + "gitlab_config": [ + { + "authorizer_credential": [ + { + "user_token_secret_version": "projects/devconnect-terraform-creds/secrets/gitlab-auth-cred/versions/latest" + } + ], + "read_authorizer_credential": [ + { + "user_token_secret_version": "projects/devconnect-terraform-creds/secrets/gitlab-read-cred/versions/latest" + } + ], + "webhook_secret_secret_version": "projects/devconnect-terraform-creds/secrets/gitlab-webhook/versions/latest" + } + ], + "location": "us-central1" + } + - name: my-connection + manifest: |- + { + "connection_id": "tf-test-connection", + "gitlab_enterprise_config": [ + { + "authorizer_credential": [ + { + "user_token_secret_version": "projects/devconnect-terraform-creds/secrets/gitlab-enterprise-auth-cred/versions/latest" + } + ], + "host_uri": "https://gle-us-central1.gcb-test.com", + "read_authorizer_credential": [ + { + "user_token_secret_version": "projects/devconnect-terraform-creds/secrets/gitlab-enterprise-read-cred/versions/latest" + } + ], + "webhook_secret_secret_version": "projects/devconnect-terraform-creds/secrets/gitlab-enterprise-webhook/versions/latest" + } + ], + "location": "us-central1" + } + argumentDocs: + annotations: |- + - + (Optional) + Optional. Allows clients to store small amounts of arbitrary data. + Note: This field is non-authoritative, and will only manage the annotations present in your configuration. + Please refer to the field effective_annotations for all of the annotations present on the resource. 
+ authorizer_credential.oauth_token_secret_version: |- + - + (Required) + Required. A SecretManager resource containing the OAuth token that authorizes + the connection. Format: projects/*/secrets/*/versions/*. + authorizer_credential.user_token_secret_version: |- + - + (Required) + Required. A SecretManager resource containing the user token that authorizes + the Developer Connect connection. Format: + projects/*/secrets/*/versions/*. + authorizer_credential.username: |- + - + (Output) + Output only. The username associated with this token. + connection_id: |- + - + (Required) + Required. Id of the requesting object + If auto-generating Id server-side, remove this field and + connection_id from the method_signature of Create RPC + create: '- Default is 20 minutes.' + create_time: |- + - + Output only. [Output only] Create timestamp + crypto_key_config: |- + - + (Optional) + The crypto key configuration. This field is used by the Customer-managed + encryption keys (CMEK) feature. + Structure is documented below. + crypto_key_config.key_reference: |- + - + (Required) + Required. The name of the key which is used to encrypt/decrypt customer data. For key + in Cloud KMS, the key should be in the format of + projects/*/locations/*/keyRings/*/cryptoKeys/*. + delete: '- Default is 20 minutes.' + delete_time: |- + - + Output only. [Output only] Delete timestamp + disabled: |- + - + (Optional) + Optional. If disabled is set to true, functionality is disabled for this connection. + Repository based API methods and webhooks processing for repositories in + this connection will be disabled. + effective_annotations: |- + - + All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services. + effective_labels: |- + - + All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services. 
+ etag: |- + - + (Optional) + Optional. This checksum is computed by the server based on the value of other + fields, and may be sent on update and delete requests to ensure the + client has an up-to-date value before proceeding. + github_config: |- + - + (Optional) + Configuration for connections to github.com. + Structure is documented below. + github_config.app_installation_id: |- + - + (Optional) + Optional. GitHub App installation id. + github_config.authorizer_credential: |- + - + (Optional) + Represents an OAuth token of the account that authorized the Connection, + and associated metadata. + Structure is documented below. + github_config.github_app: |- + - + (Required) + Required. Immutable. The GitHub Application that was installed to the GitHub user or + organization. + Possible values: + GIT_HUB_APP_UNSPECIFIED + DEVELOPER_CONNECT + FIREBASE + github_config.installation_uri: |- + - + (Output) + Output only. The URI to navigate to in order to manage the installation associated + with this GitHubConfig. + github_enterprise_config: |- + - + (Optional) + Configuration for connections to an instance of GitHub Enterprise. + Structure is documented below. + github_enterprise_config.app_id: |- + - + (Optional) + Optional. ID of the GitHub App created from the manifest. + github_enterprise_config.app_installation_id: |- + - + (Optional) + Optional. ID of the installation of the GitHub App. + github_enterprise_config.app_slug: |- + - + (Output) + Output only. The URL-friendly name of the GitHub App. + github_enterprise_config.host_uri: |- + - + (Required) + Required. The URI of the GitHub Enterprise host this connection is for. + github_enterprise_config.installation_uri: |- + - + (Output) + Output only. The URI to navigate to in order to manage the installation associated + with this GitHubEnterpriseConfig. + github_enterprise_config.private_key_secret_version: |- + - + (Optional) + Optional. 
SecretManager resource containing the private key of the GitHub App, + formatted as projects/*/secrets/*/versions/*. + github_enterprise_config.server_version: |- + - + (Output) + Output only. GitHub Enterprise version installed at the host_uri. + github_enterprise_config.service_directory_config: |- + - + (Optional) + ServiceDirectoryConfig represents Service Directory configuration for a + connection. + Structure is documented below. + github_enterprise_config.ssl_ca_certificate: |- + - + (Optional) + Optional. SSL certificate to use for requests to GitHub Enterprise. + github_enterprise_config.webhook_secret_secret_version: |- + - + (Optional) + Optional. SecretManager resource containing the webhook secret of the GitHub App, + formatted as projects/*/secrets/*/versions/*. + gitlab_config: |- + - + (Optional) + Configuration for connections to gitlab.com. + Structure is documented below. + gitlab_config.authorizer_credential: |- + - + (Required) + Represents a personal access token that authorized the Connection, + and associated metadata. + Structure is documented below. + gitlab_config.read_authorizer_credential: |- + - + (Required) + Represents a personal access token that authorized the Connection, + and associated metadata. + Structure is documented below. + gitlab_config.webhook_secret_secret_version: |- + - + (Required) + Required. Immutable. SecretManager resource containing the webhook secret of a GitLab project, + formatted as projects/*/secrets/*/versions/*. This is used to validate + webhooks. + gitlab_enterprise_config: |- + - + (Optional) + Configuration for connections to an instance of GitLab Enterprise. + Structure is documented below. + gitlab_enterprise_config.authorizer_credential: |- + - + (Required) + Represents a personal access token that authorized the Connection, + and associated metadata. + Structure is documented below. + gitlab_enterprise_config.host_uri: |- + - + (Required) + Required. 
The URI of the GitLab Enterprise host this connection is for. + gitlab_enterprise_config.read_authorizer_credential: |- + - + (Required) + Represents a personal access token that authorized the Connection, + and associated metadata. + Structure is documented below. + gitlab_enterprise_config.server_version: |- + - + (Output) + Output only. Version of the GitLab Enterprise server running on the host_uri. + gitlab_enterprise_config.service_directory_config: |- + - + (Optional) + ServiceDirectoryConfig represents Service Directory configuration for a + connection. + Structure is documented below. + gitlab_enterprise_config.ssl_ca_certificate: |- + - + (Optional) + Optional. SSL Certificate Authority certificate to use for requests to GitLab + Enterprise instance. + gitlab_enterprise_config.webhook_secret_secret_version: |- + - + (Required) + Required. Immutable. SecretManager resource containing the webhook secret of a GitLab project, + formatted as projects/*/secrets/*/versions/*. This is used to validate + webhooks. + id: '- an identifier for the resource with format projects/{{project}}/locations/{{location}}/connections/{{connection_id}}' + installation_state: |- + - + Describes stage and necessary actions to be taken by the + user to complete the installation. Used for GitHub and GitHub Enterprise + based connections. + Structure is documented below. + installation_state.action_uri: |- + - + (Optional) + Output only. Link to follow for next action. Empty string if the installation is already + complete. + installation_state.message: |- + - + (Optional) + Output only. Message of what the user should do next to continue the installation. + Empty string if the installation is already complete. + installation_state.stage: |- + - + (Output) + Output only. Current step of the installation process. + Possible values: + STAGE_UNSPECIFIED + PENDING_CREATE_APP + PENDING_USER_OAUTH + PENDING_INSTALL_APP + COMPLETE + labels: |- + - + (Optional) + Optional. 
Labels as key value pairs + Note: This field is non-authoritative, and will only manage the labels present in your configuration. + Please refer to the field effective_labels for all of the labels present on the resource. + location: |- + - + (Required) + Resource ID segment making up resource name. It identifies the resource within its parent collection as described in https://google.aip.dev/122. + name: |- + - + Identifier. The resource name of the connection, in the format + projects/{project}/locations/{location}/connections/{connection_id}. + project: |- + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + read_authorizer_credential.user_token_secret_version: |- + - + (Required) + Required. A SecretManager resource containing the user token that authorizes + the Developer Connect connection. Format: + projects/*/secrets/*/versions/*. + read_authorizer_credential.username: |- + - + (Output) + Output only. The username associated with this token. + reconciling: |- + - + Output only. Set to true when the connection is being set up or updated in the + background. + service_directory_config.service: |- + - + (Required) + Required. The Service Directory service name. + Format: + projects/{project}/locations/{location}/namespaces/{namespace}/services/{service}. + terraform_labels: |- + - + The combination of labels configured directly on the resource + and default labels configured on the provider. + uid: |- + - + Output only. A system-assigned unique identifier for a the GitRepositoryLink. + update: '- Default is 20 minutes.' + update_time: |- + - + Output only. [Output only] Update timestamp + importStatements: [] + google_developer_connect_git_repository_link: + subCategory: Developer Connect + description: A git repository link to a parent connection. 
+ name: google_developer_connect_git_repository_link + title: "" + examples: + - name: my-repository + manifest: |- + { + "git_repository_link_id": "my-repo", + "location": "us-central1", + "parent_connection": "${google_developer_connect_connection.my-connection.connection_id}", + "remote_uri": "https://github.com/myuser/myrepo.git" + } + references: + parent_connection: google_developer_connect_connection.my-connection.connection_id + dependencies: + google_developer_connect_connection.my-connection: |- + { + "connection_id": "my-connection", + "github_config": [ + { + "app_installation_id": 123123, + "authorizer_credential": [ + { + "oauth_token_secret_version": "${google_secret_manager_secret_version.github-token-secret-version.id}" + } + ], + "github_app": "DEVELOPER_CONNECT" + } + ], + "location": "us-central1" + } + google_secret_manager_secret.github-token-secret: |- + { + "replication": [ + { + "auto": [ + {} + ] + } + ], + "secret_id": "github-token-secret" + } + google_secret_manager_secret_iam_policy.policy: |- + { + "policy_data": "${data.google_iam_policy.p4sa-secretAccessor.policy_data}", + "secret_id": "${google_secret_manager_secret.github-token-secret.secret_id}" + } + google_secret_manager_secret_version.github-token-secret-version: |- + { + "secret": "${google_secret_manager_secret.github-token-secret.id}", + "secret_data": "${file(\"my-github-token.txt\")}" + } + argumentDocs: + annotations: |- + - + (Optional) + Optional. Allows clients to store small amounts of arbitrary data. + Note: This field is non-authoritative, and will only manage the annotations present in your configuration. + Please refer to the field effective_annotations for all of the annotations present on the resource. + clone_uri: |- + - + (Required) + Required. Git Clone URI. + create: '- Default is 20 minutes.' + create_time: |- + - + Output only. [Output only] Create timestamp + delete: '- Default is 20 minutes.' + delete_time: |- + - + Output only. 
[Output only] Delete timestamp + effective_annotations: |- + - + All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services. + effective_labels: |- + - + All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services. + etag: |- + - + (Optional) + Optional. This checksum is computed by the server based on the value of other + fields, and may be sent on update and delete requests to ensure the + client has an up-to-date value before proceeding. + git_repository_link_id: |- + - + (Required) + Required. The ID to use for the repository, which will become the final component of + the repository's resource name. This ID should be unique in the connection. + Allows alphanumeric characters and any of -._~%!$&'()*+,;=@. + id: '- an identifier for the resource with format projects/{{project}}/locations/{{location}}/connections/{{parent_connection}}/gitRepositoryLinks/{{git_repository_link_id}}' + labels: |- + - + (Optional) + Optional. Labels as key value pairs + Note: This field is non-authoritative, and will only manage the labels present in your configuration. + Please refer to the field effective_labels for all of the labels present on the resource. + location: |- + - + (Required) + Resource ID segment making up resource name. It identifies the resource within its parent collection as described in https://google.aip.dev/122. See documentation for resource type developerconnect.googleapis.com/GitRepositoryLink. + name: |- + - + Identifier. Resource name of the repository, in the format + projects/*/locations/*/connections/*/gitRepositoryLinks/*. + parent_connection: |- + - + (Required) + Resource ID segment making up resource name. It identifies the resource within its parent collection as described in https://google.aip.dev/122. 
See documentation for resource type developerconnect.googleapis.com/GitRepositoryLink. + project: |- + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + reconciling: |- + - + Output only. Set to true when the connection is being set up or updated in the + background. + terraform_labels: |- + - + The combination of labels configured directly on the resource + and default labels configured on the provider. + uid: |- + - + Output only. A system-assigned unique identifier for a the GitRepositoryLink. + update: '- Default is 20 minutes.' + update_time: |- + - + Output only. [Output only] Update timestamp + importStatements: [] google_dialogflow_agent: subCategory: Dialogflow description: A Dialogflow agent is a virtual agent that handles conversations with your end-users. @@ -78600,6 +87382,25 @@ resources: "finish_digit": "#", "max_digits": 1 } + ], + "logging_settings": [ + { + "enable_consent_based_redaction": true, + "enable_interaction_logging": true, + "enable_stackdriver_logging": true + } + ], + "speech_settings": [ + { + "endpointer_sensitivity": 30, + "models": { + "count": "3", + "mass": "1.3kg", + "name": "wrench" + }, + "no_speech_timeout": "3.500s", + "use_timeout_based_endpointing": true + } ] } ], @@ -78665,6 +87466,14 @@ resources: - (Optional) Define behaviors for DTMF (dual tone multi frequency). DTMF settings does not override each other. DTMF settings set at different levels define DTMF detections running in parallel. Exposed at the following levels: + advanced_settings.logging_settings: |- + - + (Optional) + Settings for logging. Settings for Dialogflow History, Contact Center messages, StackDriver logs, and speech logging. Exposed at the following levels: + advanced_settings.speech_settings: |- + - + (Optional) + Settings for speech to text detection. 
Exposed at the following levels: audio_export_gcs_destination.uri: |- - (Optional) @@ -78707,7 +87516,7 @@ resources: Indicates if automatic spell correction is enabled in detect intent requests. enable_stackdriver_logging: |- - - (Optional) + (Optional, Deprecated) Determines whether this agent should log conversation queries. git_integration_settings: |- - @@ -78748,6 +87557,18 @@ resources: ~> Note: The first time you are deploying an Agent in your project you must configure location settings. This is a one time step but at the moment you can only configure location settings via the Dialogflow CX console. Another options is to use global location so you don't need to manually configure location settings. + logging_settings.enable_consent_based_redaction: |- + - + (Optional) + Enables consent-based end-user input redaction, if true, a pre-defined session parameter $session.params.conversation-redaction will be used to determine if the utterance should be redacted. + logging_settings.enable_interaction_logging: |- + - + (Optional) + Enables DF Interaction logging. + logging_settings.enable_stackdriver_logging: |- + - + (Optional) + Enables Google Cloud Logging. name: |- - The unique identifier of the agent. @@ -78758,6 +87579,24 @@ resources: - (Optional) Name of the SecuritySettings reference for the agent. Format: projects//locations//securitySettings/. + speech_settings.endpointer_sensitivity: |- + - + (Optional) + Sensitivity of the speech model that detects the end of speech. Scale from 0 to 100. + speech_settings.models: |- + - + (Optional) + Mapping from language to Speech-to-Text model. The mapped Speech-to-Text model will be selected for requests from its corresponding language. For more information, see Speech models. + An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. + speech_settings.no_speech_timeout: |- + - + (Optional) + Timeout before detecting no speech. 
+ A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". + speech_settings.use_timeout_based_endpointing: |- + - + (Optional) + Use timeout based endpointing, interpreting endpointer sensitivity as seconds of timeout value. speech_to_text_settings: |- - (Optional) @@ -79117,6 +87956,25 @@ resources: "finish_digit": "#", "max_digits": 1 } + ], + "logging_settings": [ + { + "enable_consent_based_redaction": true, + "enable_interaction_logging": true, + "enable_stackdriver_logging": true + } + ], + "speech_settings": [ + { + "endpointer_sensitivity": 30, + "models": { + "count": "3", + "mass": "1.3kg", + "name": "wrench" + }, + "no_speech_timeout": "3.500s", + "use_timeout_based_endpointing": true + } ] } ], @@ -79408,6 +88266,14 @@ resources: - (Optional) Define behaviors for DTMF (dual tone multi frequency). DTMF settings does not override each other. DTMF settings set at different levels define DTMF detections running in parallel. Exposed at the following levels: + advanced_settings.logging_settings: |- + - + (Optional) + Settings for logging. Settings for Dialogflow History, Contact Center messages, StackDriver logs, and speech logging. Exposed at the following levels: + advanced_settings.speech_settings: |- + - + (Optional) + Settings for speech to text detection. Exposed at the following levels: audio_export_gcs_destination.uri: |- - (Optional) @@ -79492,6 +88358,18 @@ resources: - (Optional) Custom metadata. Dialogflow doesn't impose any structure on this. + logging_settings.enable_consent_based_redaction: |- + - + (Optional) + Enables consent-based end-user input redaction, if true, a pre-defined session parameter $session.params.conversation-redaction will be used to determine if the utterance should be redacted. + logging_settings.enable_interaction_logging: |- + - + (Optional) + Enables DF Interaction logging. + logging_settings.enable_stackdriver_logging: |- + - + (Optional) + Enables Google Cloud Logging. 
messages.channel: |- - (Optional) @@ -79587,6 +88465,24 @@ resources: - (Optional) The new JSON-encoded value of the parameter. A null value clears the parameter. + speech_settings.endpointer_sensitivity: |- + - + (Optional) + Sensitivity of the speech model that detects the end of speech. Scale from 0 to 100. + speech_settings.models: |- + - + (Optional) + Mapping from language to Speech-to-Text model. The mapped Speech-to-Text model will be selected for requests from its corresponding language. For more information, see Speech models. + An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. + speech_settings.no_speech_timeout: |- + - + (Optional) + Timeout before detecting no speech. + A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". + speech_settings.use_timeout_based_endpointing: |- + - + (Optional) + Use timeout based endpointing, interpreting endpointer sensitivity as seconds of timeout value. 
telephony_transfer_call.phone_number: |- - (Required) @@ -81853,6 +90749,7 @@ resources: } google_project.agent_project: |- { + "deletion_policy": "DELETE", "name": "my-project", "org_id": "123456789", "project_id": "my-project" @@ -82021,47 +90918,98 @@ resources: "SOLUTION_TYPE_CHAT" ] } + - name: primary + manifest: |- + { + "chat_engine_config": [ + { + "dialogflow_agent_to_link": "${google_dialogflow_cx_agent.agent.id}" + } + ], + "collection_id": "default_collection", + "common_config": [ + { + "company_name": "test-company" + } + ], + "data_store_ids": [ + "${google_discovery_engine_data_store.test_data_store.data_store_id}" + ], + "display_name": "Chat engine", + "engine_id": "chat-engine-id", + "industry_vertical": "GENERIC", + "location": "${google_discovery_engine_data_store.test_data_store.location}" + } + references: + chat_engine_config.dialogflow_agent_to_link: google_dialogflow_cx_agent.agent.id + data_store_ids: google_discovery_engine_data_store.test_data_store.data_store_id + location: google_discovery_engine_data_store.test_data_store.location + dependencies: + google_dialogflow_cx_agent.agent: |- + { + "default_language_code": "en", + "display_name": "dialogflowcx-agent", + "location": "global", + "time_zone": "America/Los_Angeles" + } + google_discovery_engine_data_store.test_data_store: |- + { + "content_config": "NO_CONTENT", + "data_store_id": "data-store", + "display_name": "Structured datastore", + "industry_vertical": "GENERIC", + "location": "global", + "solution_types": [ + "SOLUTION_TYPE_CHAT" + ] + } argumentDocs: - chat_engine_config: |- - - - (Required) - Configurations for a chat Engine. - Structure is documented below. - chat_engine_config.agent_creation_config: |- - - - (Required) - The configuration to generate the Dialogflow agent that is associated to this Engine. - Structure is documented below. 
- chat_engine_config.agent_creation_config.business: |- + agent_creation_config.business: |- - (Optional) Name of the company, organization or other entity that the agent represents. Used for knowledge connector LLM prompt and for knowledge search. - chat_engine_config.agent_creation_config.common_config: |- + agent_creation_config.common_config: |- - (Optional) Common config spec that specifies the metadata of the engine. Structure is documented below. - chat_engine_config.agent_creation_config.default_language_code: |- + agent_creation_config.default_language_code: |- - (Required) The default language of the agent as a language tag. See Language Support for a list of the currently supported language codes. - chat_engine_config.agent_creation_config.industry_vertical: |- + agent_creation_config.industry_vertical: |- - (Optional) The industry vertical that the chat engine registers. Vertical on Engine has to match vertical of the DataStore linked to the engine. Default value is GENERIC. Possible values are: GENERIC. - chat_engine_config.agent_creation_config.location: |- + agent_creation_config.location: |- - (Optional) Agent location for Agent creation, currently supported values: global/us/eu, it needs to be the same region as the Chat Engine. - chat_engine_config.agent_creation_config.project: |- + agent_creation_config.project: |- - (Optional) The ID of the project in which the resource belongs. If it is not provided, the provider project is used. - chat_engine_config.agent_creation_config.time_zone: |- + agent_creation_config.time_zone: |- - (Required) The time zone of the agent from the time zone database, e.g., America/New_York, Europe/Paris. + chat_engine_config: |- + - + (Required) + Configurations for a chat Engine. + Structure is documented below. + chat_engine_config.agent_creation_config: |- + - + (Optional) + The configuration to generate the Dialogflow agent that is associated to this Engine. 
+ Exactly one of agent_creation_config or dialogflow_agent_to_link must be set. + Structure is documented below. + chat_engine_config.dialogflow_agent_to_link: |- + - + (Optional) + The resource name of an existing Dialogflow agent to link to this Chat Engine. Format: projects//locations//agents/. + Exactly one of agent_creation_config or dialogflow_agent_to_link must be set. chat_engine_metadata: |- - Additional information of the Chat Engine. @@ -82166,6 +91114,21 @@ resources: ] } argumentDocs: + chunking_config.layout_based_chunking_config: |- + - + (Optional) + Configuration for the layout based chunking. + Structure is documented below. + chunking_config.layout_based_chunking_config.chunk_size: |- + - + (Optional) + The token size limit for each chunk. + Supported values: 100-500 (inclusive). Default value: 500. + chunking_config.layout_based_chunking_config.include_ancestor_headings: |- + - + (Optional) + Whether to include appending different levels of headings to chunks from the middle of the document to prevent context loss. + Default value: False. content_config: |- - (Required) @@ -82189,6 +91152,10 @@ resources: - (Optional) Configurations applied to digital parser. + default_parsing_config.layout_parsing_config: |- + - + (Optional) + Configurations applied to layout parser. default_parsing_config.ocr_parsing_config: |- - (Optional) @@ -82208,6 +91175,11 @@ resources: (Optional) Configuration for Document understanding and enrichment. Structure is documented below. + document_processing_config.chunking_config: |- + - + (Optional) + Whether chunking mode is enabled. + Structure is documented below. document_processing_config.default_parsing_config: |- - (Optional) @@ -82233,7 +91205,7 @@ resources: - (Required) The industry vertical that the data store registers. - Possible values are: GENERIC, MEDIA. + Possible values are: GENERIC, MEDIA, HEALTHCARE_FHIR. 
location: |- - (Required) @@ -82254,6 +91226,10 @@ resources: (Optional) Configurations applied to digital parser. parsing_config_overrides.file_type: '- (Required) The identifier for this object. Format specified above.' + parsing_config_overrides.layout_parsing_config: |- + - + (Optional) + Configurations applied to layout parser. parsing_config_overrides.ocr_parsing_config: |- - (Optional) @@ -82277,7 +91253,7 @@ resources: - (Optional) The solutions that the data store enrolls. - Each value may be one of: SOLUTION_TYPE_RECOMMENDATION, SOLUTION_TYPE_SEARCH, SOLUTION_TYPE_CHAT. + Each value may be one of: SOLUTION_TYPE_RECOMMENDATION, SOLUTION_TYPE_SEARCH, SOLUTION_TYPE_CHAT, SOLUTION_TYPE_GENERATIVE_CHAT. update: '- Default is 20 minutes.' importStatements: [] google_discovery_engine_schema: @@ -82430,7 +91406,7 @@ resources: (Optional) The industry vertical that the engine registers. The restriction of the Engine industry vertical is based on DataStore: If unspecified, default to GENERIC. Vertical on Engine has to match vertical of the DataStore liniked to the engine. Default value is GENERIC. - Possible values are: GENERIC, MEDIA. + Possible values are: GENERIC, MEDIA, HEALTHCARE_FHIR. search_engine_config.project: |- - (Optional) The ID of the project in which the resource belongs. If it is not provided, the provider project is used. @@ -82450,6 +91426,148 @@ resources: - Timestamp the Engine was last updated. importStatements: [] + google_discovery_engine_target_site: + subCategory: Discovery Engine + description: TargetSite represents a URI pattern that the users want to confine their search. 
+ name: google_discovery_engine_target_site + title: "" + examples: + - name: basic + manifest: |- + { + "data_store_id": "${google_discovery_engine_data_store.basic.data_store_id}", + "exact_match": false, + "location": "${google_discovery_engine_data_store.basic.location}", + "provided_uri_pattern": "http://cloud.google.com/docs/*", + "type": "INCLUDE" + } + references: + data_store_id: google_discovery_engine_data_store.basic.data_store_id + location: google_discovery_engine_data_store.basic.location + dependencies: + google_discovery_engine_data_store.basic: |- + { + "content_config": "PUBLIC_WEBSITE", + "create_advanced_site_search": false, + "data_store_id": "data-store-id", + "display_name": "tf-test-basic-site-search-datastore", + "industry_vertical": "GENERIC", + "location": "global", + "skip_default_schema_creation": false, + "solution_types": [ + "SOLUTION_TYPE_SEARCH" + ] + } + - name: advanced + manifest: |- + { + "data_store_id": "${google_discovery_engine_data_store.advanced.data_store_id}", + "exact_match": false, + "location": "${google_discovery_engine_data_store.advanced.location}", + "provided_uri_pattern": "http://cloud.google.com/docs/*", + "type": "INCLUDE" + } + references: + data_store_id: google_discovery_engine_data_store.advanced.data_store_id + location: google_discovery_engine_data_store.advanced.location + dependencies: + google_discovery_engine_data_store.advanced: |- + { + "content_config": "PUBLIC_WEBSITE", + "create_advanced_site_search": true, + "data_store_id": "data-store-id", + "display_name": "tf-test-advanced-site-search-datastore", + "industry_vertical": "GENERIC", + "location": "global", + "skip_default_schema_creation": false, + "solution_types": [ + "SOLUTION_TYPE_SEARCH" + ] + } + argumentDocs: + create: '- Default is 60 minutes.' + data_store_id: |- + - + (Required) + The unique id of the data store. + delete: '- Default is 60 minutes.' 
+ exact_match: |- + - + (Optional) + If set to false, a uri_pattern is generated to include all pages whose + address contains the provided_uri_pattern. If set to true, an uri_pattern + is generated to try to be an exact match of the provided_uri_pattern or + just the specific page if the provided_uri_pattern is a specific one. + provided_uri_pattern is always normalized to generate the URI pattern to + be used by the search engine. + failure_reason: |- + - + Site search indexing failure reasons. + Structure is documented below. + failure_reason.quota_failure: |- + - + (Optional) + Site verification state indicating the ownership and validity. + Structure is documented below. + failure_reason.quota_failure.total_required_quota: |- + - + (Optional) + This number is an estimation on how much total quota this project + needs to successfully complete indexing. + generated_uri_pattern: |- + - + This is system-generated based on the provided_uri_pattern. + id: '- an identifier for the resource with format {{name}}' + indexing_status: |- + - + The indexing status. + location: |- + - + (Required) + The geographic location where the data store should reside. The value can + only be one of "global", "us" and "eu". + name: |- + - + The unique full resource name of the target site. Values are of the format + projects/{project}/locations/{location}/collections/{collection_id}/dataStores/{data_store_id}/siteSearchEngine/targetSites/{target_site_id}. + This field must be a UTF-8 encoded string with a length limit of 1024 + characters. + project: |- + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + provided_uri_pattern: |- + - + (Required) + The user provided URI pattern from which the generated_uri_pattern is + generated. + root_domain_uri: |- + - + Root domain of the provided_uri_pattern. + site_verification_info: |- + - + Site ownership and validity verification status. + Structure is documented below. 
+ site_verification_info.site_verification_state: |- + - + (Optional) + Site verification state indicating the ownership and validity. + Possible values are: VERIFIED, UNVERIFIED, EXEMPTED. + site_verification_info.verify_time: |- + - + (Optional) + Latest site verification time. + target_site_id: |- + - + The unique id of the target site. + type: |- + - + (Optional) + The possible target site types. + Possible values are: INCLUDE, EXCLUDE. + update_time: |- + - + The target site's last updated time. + importStatements: [] google_dns_managed_zone: subCategory: Cloud DNS description: A zone is a subtree of the DNS namespace under one administrative responsibility. @@ -82608,7 +91726,7 @@ resources: "disabled": true } ], - "deletion_protection": "true", + "deletion_protection": true, "initial_node_count": 1, "ip_allocation_policy": [ { @@ -83346,23 +92464,84 @@ resources: "dns_name": "prod.mydomain.com.", "name": "prod-zone" } + - name: a + manifest: |- + { + "managed_zone": "${google_dns_managed_zone.prod.name}", + "name": "backend.${google_dns_managed_zone.prod.dns_name}", + "routing_policy": [ + { + "health_check": "${google_compute_health_check.http-health-check.id}", + "primary_backup": [ + { + "backup_geo": [ + { + "health_checked_targets": [ + { + "external_endpoints": [ + "10.130.1.1" + ] + } + ], + "location": "us-west1" + } + ], + "primary": [ + { + "external_endpoints": [ + "10.128.1.1" + ] + } + ], + "trickle_ratio": 0.1 + } + ] + } + ], + "ttl": 300, + "type": "A" + } + references: + managed_zone: google_dns_managed_zone.prod.name + routing_policy.health_check: google_compute_health_check.http-health-check.id + dependencies: + google_compute_health_check.http-health-check: |- + { + "check_interval_sec": 30, + "description": "Health check via http", + "healthy_threshold": 4, + "http_health_check": [ + { + "port_specification": "USE_SERVING_PORT" + } + ], + "name": "http-health-check", + "timeout_sec": 5, + "unhealthy_threshold": 5 + } + 
google_dns_managed_zone.prod: |- + { + "dns_name": "prod.mydomain.com.", + "name": "prod-zone" + } argumentDocs: geo.health_checked_targets: |- - (Optional) For A and AAAA types only. The list of targets to be health checked. These can be specified along with rrdatas within this item. Structure is documented below. geo.location: '- (Required) The location name defined in Google Cloud.' geo.rrdatas: '- (Optional) Same as rrdatas above.' + health_checked_targets.external_endpoints: '- (Optional) The list of external endpoint addresses to health check.' health_checked_targets.internal_load_balancers: |- - - (Required) The list of internal load balancers to health check. - Structure is documented below. - health_checked_targets.internal_load_balancers.ip_address: '- (Required) The frontend IP address of the load balancer.' - health_checked_targets.internal_load_balancers.ip_protocol: '- (Required) The configured IP protocol of the load balancer. This value is case-sensitive. Possible values: ["tcp", "udp"]' - health_checked_targets.internal_load_balancers.load_balancer_type: '- (Optional) The type of load balancer. This value is case-sensitive. Possible values: ["regionalL4ilb", "regionalL7ilb", "globalL7ilb"]' - health_checked_targets.internal_load_balancers.network_url: '- (Required) The fully qualified url of the network in which the load balancer belongs. This should be formatted like projects/{project}/global/networks/{network} or https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network}.' - health_checked_targets.internal_load_balancers.port: '- (Required) The configured port of the load balancer.' - health_checked_targets.internal_load_balancers.project: '- (Required) The ID of the project in which the load balancer belongs.' - health_checked_targets.internal_load_balancers.region: '- (Optional) The region of the load balancer. Only needed for regional load balancers.' + - (Optional) The list of internal load balancers to health check. 
+ Structure is documented below. id: '- an identifier for the resource with format projects/{{project}}/managedZones/{{zone}}/rrsets/{{name}}/{{type}}' + internal_load_balancers.ip_address: '- (Required) The frontend IP address of the load balancer.' + internal_load_balancers.ip_protocol: '- (Required) The configured IP protocol of the load balancer. This value is case-sensitive. Possible values: ["tcp", "udp"]' + internal_load_balancers.load_balancer_type: '- (Optional) The type of load balancer. This value is case-sensitive. Possible values: ["regionalL4ilb", "regionalL7ilb", "globalL7ilb"]' + internal_load_balancers.network_url: '- (Required) The fully qualified url of the network in which the load balancer belongs. This should be formatted like projects/{project}/global/networks/{network} or https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network}.' + internal_load_balancers.port: '- (Required) The configured port of the load balancer.' + internal_load_balancers.project: '- (Required) The ID of the project in which the load balancer belongs.' + internal_load_balancers.region: '- (Optional) The region of the load balancer. Only needed for regional load balancers.' managed_zone: |- - (Required) The name of the zone in which this record set will reside. @@ -83386,6 +92565,7 @@ resources: routing_policy.geo: |- - (Optional) The configuration for Geolocation based routing policy. Structure is documented below. + routing_policy.health_check: '- (Optional) Specifies the health check (used with external endpoints).' routing_policy.primary_backup: |- - (Optional) The configuration for a failover policy with global to regional failover. Queries are responded to with the global primary targets, but if none of the primary targets are healthy, then we fallback to a regional failover policy. Structure is documented below. 
@@ -83466,7 +92646,7 @@ resources: "disabled": true } ], - "deletion_protection": "true", + "deletion_protection": true, "initial_node_count": 1, "ip_allocation_policy": [ { @@ -84832,7 +94012,7 @@ resources: delete: '- Default is 30 minutes.' details: |- - - A nested object resource + A nested object resource. Structure is documented below. details.cloud_router: |- - @@ -84938,6 +94118,7 @@ resources: - (Optional) A free-text description of the resource. Max length 1024 characters. + effective_labels: for all of the labels present on the resource. id: '- an identifier for the resource with format projects/{{project}}/locations/{{location}}/zones/{{zone}}/networks/{{network_id}}' labels: |- - @@ -84962,6 +94143,11 @@ resources: project: |- - (Optional) The ID of the project in which the resource belongs. If it is not provided, the provider project is used. + terraform_labels: |- + - + The combination of labels configured directly on the resource + and default labels configured on the provider. + update: '- Default is 20 minutes.' update_time: |- - The time when the subnet was last updated. @@ -85043,6 +94229,7 @@ resources: - (Optional) A free-text description of the resource. Max length 1024 characters. + effective_labels: for all of the labels present on the resource. id: '- an identifier for the resource with format projects/{{project}}/locations/{{location}}/zones/{{zone}}/subnets/{{subnet_id}}' ipv4_cidr: |- - @@ -85079,6 +94266,11 @@ resources: - (Required) A unique ID that identifies this subnet. + terraform_labels: |- + - + The combination of labels configured directly on the resource + and default labels configured on the provider. + update: '- Default is 20 minutes.' update_time: |- - The time when the subnet was last updated. @@ -85692,6 +94884,13 @@ resources: storage_bytes: |- - The size of the storage used by the backup. As backups share storage, this number is expected to change with backup creation/deletion. 
+ tags: |- + - + (Optional) + A map of resource manager tags. + Resource manager tag keys and values have the same definition as resource manager tags. + Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/{tag_value_id}. + The field is ignored (both PUT & PATCH) when empty. terraform_labels: |- - The combination of labels configured directly on the resource @@ -85785,11 +94984,8 @@ resources: } ], "protocol": "NFS_V4_1", - "provider": "${google-beta}", "tier": "ENTERPRISE" } - references: - provider: google-beta - name: instance manifest: |- { @@ -85864,7 +95060,20 @@ resources: The resource name of the backup, in the format projects/{projectId}/locations/{locationId}/backups/{backupId}, that this file share has been restored from. + fixed_iops.max_iops: |- + - + (Optional) + The number of IOPS to provision for the instance. + max_iops must be in multiple of 1000. id: '- an identifier for the resource with format projects/{{project}}/locations/{{location}}/instances/{{name}}' + iops_per_tb.max_iops_per_tb: |- + - + (Optional) + The instance max IOPS will be calculated by multiplying + the capacity of the instance (TB) by max_iops_per_tb, + and rounding to the nearest 1000. The instance max IOPS + will be changed dynamically based on the instance + capacity. name: |- - (Required) @@ -85883,6 +95092,14 @@ resources: DIRECT_PEERING. Default value is DIRECT_PEERING. Possible values are: DIRECT_PEERING, PRIVATE_SERVICE_ACCESS. + networks.deletion_protection_enabled: |- + - + (Optional) + Indicates whether the instance is protected against deletion. + networks.deletion_protection_reason: |- + - + (Optional) + The reason for enabling deletion protection. networks.description: |- - (Optional) @@ -85915,12 +95132,18 @@ resources: (Required) The name of the GCE VPC network to which the instance is connected. + networks.performance_config: |- + - + (Optional) + Performance configuration for the instance. 
If not provided, + the default performance settings will be used. + Structure is documented below. networks.project: |- - (Optional) The ID of the project in which the resource belongs. If it is not provided, the provider project is used. networks.protocol: |- - - (Optional, Beta) + (Optional) Either NFSv3, for using NFS version 3 as file sharing protocol, or NFSv4.1, for using NFS version 4.1 as file sharing protocol. NFSv4.1 can be used with HIGH_SCALE_SSD, ZONAL, REGIONAL and ENTERPRISE. @@ -85968,6 +95191,19 @@ resources: for not allowing root access. The default is NO_ROOT_SQUASH. Default value is NO_ROOT_SQUASH. Possible values are: NO_ROOT_SQUASH, ROOT_SQUASH. + performance_config.fixed_iops: |- + - + (Optional) + The instance will have a fixed provisioned IOPS value, + which will remain constant regardless of instance + capacity. + Structure is documented below. + performance_config.iops_per_tb: |- + - + (Optional) + The instance provisioned IOPS will change dynamically + based on the capacity of the instance. + Structure is documented below. 
terraform_labels: |- - The combination of labels configured directly on the resource @@ -86966,7 +96202,7 @@ resources: manifest: |- { "depends_on": [ - "${google_project_service.firebase_database}" + "${time_sleep.wait_60_seconds}" ], "instance_id": "rtdb-project-default-rtdb", "project": "${google_firebase_project.default.project}", @@ -86980,11 +96216,15 @@ resources: dependencies: google_firebase_project.default: |- { + "depends_on": [ + "${google_project_service.firebase}" + ], "project": "${google_project.default.project_id}", "provider": "${google-beta}" } google_project.default: |- { + "deletion_policy": "DELETE", "labels": { "firebase": "enabled" }, @@ -86993,12 +96233,27 @@ resources: "project_id": "rtdb-project", "provider": "${google-beta}" } + google_project_service.firebase: |- + { + "disable_on_destroy": false, + "project": "${google_project.default.project_id}", + "provider": "${google-beta}", + "service": "firebase.googleapis.com" + } google_project_service.firebase_database: |- { + "disable_on_destroy": false, "project": "${google_firebase_project.default.project}", "provider": "${google-beta}", "service": "firebasedatabase.googleapis.com" } + time_sleep.wait_60_seconds: |- + { + "create_duration": "60s", + "depends_on": [ + "${google_project_service.firebase_database}" + ] + } argumentDocs: create: '- Default is 20 minutes.' database_url: |- @@ -87006,7 +96261,7 @@ resources: The database URL in the form of https://{instance-id}.firebaseio.com for us-central1 instances or https://{instance-id}.{region}.firebasedatabase.app in other regions. delete: '- Default is 20 minutes.' - desired_state: '- (Optional) The intended database state.' + desired_state: '- (Optional) The intended database state. Possible values: ACTIVE, DISABLED.' 
id: '- an identifier for the resource with format projects/{{project}}/locations/{{region}}/instances/{{instance_id}}' instance_id: |- - @@ -87394,6 +96649,7 @@ resources: dependencies: google_cloud_run_v2_service.default: |- { + "deletion_protection": true, "ingress": "INGRESS_TRAFFIC_ALL", "location": "us-central1", "name": "cloud-run-service-via-hosting", @@ -87633,7 +96889,7 @@ resources: If it is not provided, the provider project is used. reconciling: |- - - if true, indicates that Hosting's systems are attmepting to + if true, indicates that Hosting's systems are attempting to make the CustomDomain's state match your preferred state. This is most frequently true when initially provisioning a CustomDomain or when creating a new SSL certificate to match an updated cert_preference @@ -87953,6 +97209,76 @@ resources: "provider": "${google-beta}", "site_id": "site-id" } + - name: default + manifest: |- + { + "config": [ + { + "headers": [ + { + "glob": "/headers/**", + "headers": { + "my-header": "my-value" + } + } + ] + } + ], + "provider": "${google-beta}", + "site_id": "${google_firebase_hosting_site.default.site_id}" + } + references: + provider: google-beta + site_id: google_firebase_hosting_site.default.site_id + dependencies: + google_firebase_hosting_release.default: |- + { + "message": "With custom headers", + "provider": "${google-beta}", + "site_id": "${google_firebase_hosting_site.default.site_id}", + "version_name": "${google_firebase_hosting_version.default.name}" + } + google_firebase_hosting_site.default: |- + { + "project": "my-project-name", + "provider": "${google-beta}", + "site_id": "site-id" + } + - name: default + manifest: |- + { + "config": [ + { + "headers": [ + { + "headers": { + "my-header": "my-value" + }, + "regex": "^~/headers$" + } + ] + } + ], + "provider": "${google-beta}", + "site_id": "${google_firebase_hosting_site.default.site_id}" + } + references: + provider: google-beta + site_id: 
google_firebase_hosting_site.default.site_id + dependencies: + google_firebase_hosting_release.default: |- + { + "message": "With custom headers", + "provider": "${google-beta}", + "site_id": "${google_firebase_hosting_site.default.site_id}", + "version_name": "${google_firebase_hosting_version.default.name}" + } + google_firebase_hosting_site.default: |- + { + "project": "my-project-name", + "provider": "${google-beta}", + "site_id": "site-id" + } - name: default manifest: |- { @@ -88015,6 +97341,7 @@ resources: dependencies: google_cloud_run_v2_service.default: |- { + "deletion_protection": true, "ingress": "INGRESS_TRAFFIC_ALL", "location": "us-central1", "name": "cloud-run-service-via-hosting", @@ -88111,6 +97438,12 @@ resources: (Optional) The configuration for the behavior of the site. This configuration exists in the firebase.json file. Structure is documented below. + config.headers: |- + - + (Optional) + An array of objects, where each object specifies a URL pattern that, if matched to the request URL path, + triggers Hosting to apply the specified custom response headers. + Structure is documented below. config.redirects: |- - (Optional) @@ -88125,6 +97458,18 @@ resources: Structure is documented below. create: '- Default is 20 minutes.' delete: '- Default is 20 minutes.' + headers.glob: |- + - + (Optional) + The user-supplied glob to match against the request URL path. + headers.headers: |- + - + (Required) + The additional headers to add to the response. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. + headers.regex: |- + - + (Optional) + The user-supplied RE2 regular expression to match against the request URL path. 
id: '- an identifier for the resource with format sites/{{site_id}}/versions/{{version_id}}' name: |- - @@ -88203,6 +97548,7 @@ resources: dependencies: google_project.default: |- { + "deletion_policy": "DELETE", "labels": { "firebase": "enabled" }, @@ -88223,7 +97569,7 @@ resources: If it is not provided, the provider project is used. project_number: |- - - The number of the google project that firebase is enabled on. + The number of the Google Project that Firebase is enabled on. importStatements: [] google_firebase_storage_bucket: subCategory: Cloud Storage for Firebase @@ -88366,19 +97712,32 @@ resources: - name: primary manifest: |- { - "lifecycle": [ + "name": "cloud.firestore", + "project": "my-project-name", + "ruleset_name": "projects/my-project-name/rulesets/${google_firebaserules_ruleset.firestore.name}" + } + dependencies: + google_firebaserules_ruleset.firestore: |- { - "replace_triggered_by": [ - "${google_firebaserules_ruleset.firestore}" + "project": "my-project-name", + "source": [ + { + "files": [ + { + "content": "service cloud.firestore {match /databases/{database}/documents { match /{document=**} { allow read, write: if false; } } }", + "name": "firestore.rules" + } + ] + } ] } - ], + - name: primary + manifest: |- + { "name": "cloud.firestore/database", "project": "my-project-name", "ruleset_name": "projects/my-project-name/rulesets/${google_firebaserules_ruleset.firestore.name}" } - references: - lifecycle.replace_triggered_by: google_firebaserules_ruleset.firestore dependencies: google_firebaserules_ruleset.firestore: |- { @@ -88805,7 +98164,7 @@ resources: Cloud KMS multi-region europe. See https://cloud.google.com/kms/docs/locations. This value should be the KMS key resource ID in the format of projects/{project_id}/locations/{kms_location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}. 
- How to retrive this resource ID is listed at + How to retrieve this resource ID is listed at https://cloud.google.com/kms/docs/getting-resource-ids#getting_the_id_for_a_key_and_version. concurrency_mode: |- - @@ -88927,6 +98286,7 @@ resources: } google_project.project: |- { + "deletion_policy": "DELETE", "name": "project-id", "org_id": "123456789", "project_id": "project-id" @@ -88971,6 +98331,7 @@ resources: } google_project.project: |- { + "deletion_policy": "DELETE", "name": "project-id", "org_id": "123456789", "project_id": "project-id" @@ -89015,6 +98376,7 @@ resources: } google_project.project: |- { + "deletion_policy": "DELETE", "name": "project-id", "org_id": "123456789", "project_id": "project-id" @@ -89059,6 +98421,7 @@ resources: } google_project.project: |- { + "deletion_policy": "DELETE", "name": "project-id", "org_id": "123456789", "project_id": "project-id" @@ -89389,6 +98752,31 @@ resources: "project": "my-project-name", "type": "FIRESTORE_NATIVE" } + - name: my-index + manifest: |- + { + "collection": "atestcollection", + "database": "${google_firestore_database.database.name}", + "fields": [ + { + "field_path": "__name__", + "order": "DESCENDING" + } + ], + "project": "my-project-name" + } + references: + database: google_firestore_database.database.name + dependencies: + google_firestore_database.database: |- + { + "delete_protection_state": "DELETE_PROTECTION_DISABLED", + "deletion_policy": "DELETE", + "location_id": "nam5", + "name": "database-id", + "project": "my-project-name", + "type": "FIRESTORE_NATIVE" + } argumentDocs: collection: |- - @@ -89482,6 +98870,15 @@ resources: } references: parent: google_folder.department1.name + - name: department1 + manifest: |- + { + "display_name": "Department 1", + "parent": "organizations/1234567", + "tags": { + "1234567/env": "staging" + } + } argumentDocs: create_time: |- - Timestamp when the Folder was created. Assigned by the server. 
@@ -89495,6 +98892,7 @@ resources: parent: |- - (Required) The resource name of the parent Folder or Organization. Must be of the form folders/{folder_id} or organizations/{org_id}. + tags: '- (Optional) A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored when empty. The field is immutable and causes resource replacement when mutated. This field is only set at create time and modifying this field after creation will trigger recreation. To apply tags to an existing resource, see the google_tags_tag_value resource.' importStatements: [] google_folder_access_approval_settings: subCategory: Access Approval @@ -89521,6 +98919,7 @@ resources: dependencies: google_folder.my_folder: |- { + "deletion_protection": false, "display_name": "my-folder", "parent": "organizations/123456789" } @@ -89544,6 +98943,7 @@ resources: dependencies: google_folder.my_folder: |- { + "deletion_protection": false, "display_name": "my-folder", "parent": "organizations/123456789" } @@ -89572,6 +98972,7 @@ resources: } google_project.my_project: |- { + "deletion_policy": "DELETE", "folder_id": "${google_folder.my_folder.name}", "name": "My Project", "project_id": "your-project-id" @@ -89762,6 +99163,204 @@ resources: update_time: '- (Computed) The timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds, representing when the variable was last updated. Example: "2016-10-09T12:33:37.578138407Z".' version: '- (Optional) Version of the Policy. Default version is 0.' importStatements: [] + google_gemini_code_repository_index: + subCategory: Gemini for Google Cloud + description: The resource for managing Code Repository Index for Gemini Code Assist. 
+ name: google_gemini_code_repository_index + title: "" + examples: + - name: example + manifest: |- + { + "code_repository_index_id": "", + "kms_key": "projects/projectExample/locations/locationExample/keyRings/keyRingExample/cryptoKeys/cryptoKeyExample", + "location": "us-central1", + "provider": "${google-beta}" + } + references: + provider: google-beta + argumentDocs: + code_repository_index_id: |- + - + (Required) + Required. Id of the Code Repository Index. + create: '- Default is 90 minutes.' + create_time: |- + - + Output only. Create time stamp. + delete: '- Default is 90 minutes.' + effective_labels: |- + - + All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services. + id: '- an identifier for the resource with format projects/{{project}}/locations/{{location}}/codeRepositoryIndexes/{{code_repository_index_id}}' + kms_key: |- + - + (Optional) + Optional. Immutable. Customer-managed encryption key name, in the format + projects//locations//keyRings//cryptoKeys/. + labels: |- + - + (Optional) + Optional. Labels as key value pairs. + Note: This field is non-authoritative, and will only manage the labels present in your configuration. + Please refer to the field effective_labels for all of the labels present on the resource. + location: |- + - + (Required) + The location of the Code Repository Index, for example us-central1. + name: |- + - + Immutable. Identifier. Name of Code Repository Index. + project: |- + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + state: |- + - + Output only. Code Repository Index instance State. + Possible values: + STATE_UNSPECIFIED + CREATING + ACTIVE + DELETING + SUSPENDED + terraform_labels: |- + - + The combination of labels configured directly on the resource + and default labels configured on the provider. + update: '- Default is 90 minutes.' 
+ update_time: |- + - + Output only. Update time stamp. + importStatements: [] + google_gemini_repository_group: + subCategory: Gemini for Google Cloud + description: The resource for managing Repository Group for Gemini Code Assist. + name: google_gemini_repository_group + title: "" + examples: + - name: example + manifest: |- + { + "code_repository_index": "", + "labels": { + "label1": "value1" + }, + "location": "us-central1", + "provider": "${google-beta}", + "repositories": [ + { + "branch_pattern": "main", + "resource": "projects/example-project/locations/us-central1/connections/example-connection/gitRepositoryLinks/example-repo" + } + ], + "repository_group_id": "gen-repository-group-" + } + references: + provider: google-beta + argumentDocs: + code_repository_index: |- + - + (Required) + Required. Id of the Code Repository Index. + create: '- Default is 30 minutes.' + create_time: |- + - + Output only. Create time stamp + delete: '- Default is 30 minutes.' + effective_labels: |- + - + All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services. + id: '- an identifier for the resource with format projects/{{project}}/locations/{{location}}/codeRepositoryIndexes/{{code_repository_index}}/repositoryGroups/{{repository_group_id}}' + location: |- + - + (Required) + The location of the Code Repository Index, for example us-central1. + name: |- + - + Immutable. Identifier. name of resource + repositories: |- + - + (Required) + Required. List of repositories to group + Structure is documented below. + repositories.branch_pattern: |- + - + (Required) + Required. The Git branch pattern used for indexing in RE2 syntax. + See https://github.com/google/re2/wiki/syntax for syntax. + repositories.labels: |- + - + (Optional) + Optional. Labels as key value pairs + Note: This field is non-authoritative, and will only manage the labels present in your configuration. 
+ Please refer to the field effective_labels for all of the labels present on the resource. + repositories.project: |- + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + repositories.resource: |- + - + (Required) + Required. The DeveloperConnect repository full resource name, relative resource name + or resource URL to be indexed. + repository_group_id: |- + - + (Required) + Required. Id of the Repository Group. + terraform_labels: |- + - + The combination of labels configured directly on the resource + and default labels configured on the provider. + update: '- Default is 30 minutes.' + update_time: |- + - + Output only. Update time stamp + importStatements: [] + google_gemini_repository_group_iam_policy: + subCategory: Gemini for Google Cloud + description: Collection of resources to manage IAM policy for Gemini for Google Cloud RepositoryGroup + name: google_gemini_repository_group_iam_policy + title: "" + examples: + - name: policy + manifest: |- + { + "code_repository_index": "${google_gemini_repository_group.example.code_repository_index}", + "location": "${google_gemini_repository_group.example.location}", + "policy_data": "${data.google_iam_policy.admin.policy_data}", + "project": "${google_gemini_repository_group.example.project}", + "provider": "${google-beta}", + "repository_group_id": "${google_gemini_repository_group.example.repository_group_id}" + } + references: + code_repository_index: google_gemini_repository_group.example.code_repository_index + location: google_gemini_repository_group.example.location + policy_data: data.google_iam_policy.admin.policy_data + project: google_gemini_repository_group.example.project + provider: google-beta + repository_group_id: google_gemini_repository_group.example.repository_group_id + argumentDocs: + etag: '- (Computed) The etag of the IAM policy.' + google_gemini_repository_group_iam_binding: ': Authoritative for a given role. 
Updates the IAM policy to grant a role to a list of members. Other roles within the IAM policy for the repositorygroup are preserved.' + google_gemini_repository_group_iam_member: ': Non-authoritative. Updates the IAM policy to grant a role to a new member. Other members for the role for the repositorygroup are preserved.' + google_gemini_repository_group_iam_policy: ': Authoritative. Sets the IAM policy for the repositorygroup and replaces any existing policy already attached.' + location: |- + - (Optional) The location of the Code Repository Index, for example us-central1. Used to find the parent resource to bind the IAM policy to. If not specified, + the value will be parsed from the identifier of the parent resource. If no location is provided in the parent identifier and no + location is specified, it is taken from the provider configuration. + member/members: |- + - (Required) Identities that will be granted the privilege in role. + Each entry can have one of the following values: + policy_data: |- + - (Required only by google_gemini_repository_group_iam_policy) The policy data generated by + a google_iam_policy data source. + project: |- + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the project will be parsed from the identifier of the parent resource. If no project is provided in the parent identifier and no project is specified, the provider project is used. + role: |- + - (Required) The role that should be applied. Only one + google_gemini_repository_group_iam_binding can be used per role. Note that custom roles must be of the format + [projects|organizations]/{parent-name}/roles/{role-name}. + importStatements: [] google_gke_backup_backup_plan: subCategory: Backup for GKE description: Represents a Backup Plan instance. 
@@ -89796,7 +99395,7 @@ resources: ] } ], - "deletion_protection": "true", + "deletion_protection": true, "initial_node_count": 1, "location": "us-central1", "name": "basic-cluster", @@ -89836,7 +99435,7 @@ resources: ] } ], - "deletion_protection": "true", + "deletion_protection": true, "enable_autopilot": true, "ip_allocation_policy": [ {} @@ -89892,7 +99491,7 @@ resources: ] } ], - "deletion_protection": "true", + "deletion_protection": true, "initial_node_count": 1, "location": "us-central1", "name": "cmek-cluster", @@ -89966,7 +99565,7 @@ resources: ] } ], - "deletion_protection": "true", + "deletion_protection": true, "initial_node_count": 1, "location": "us-central1", "name": "full-cluster", @@ -90031,7 +99630,7 @@ resources: ] } ], - "deletion_protection": "true", + "deletion_protection": true, "initial_node_count": 1, "location": "us-central1", "name": "permissive-cluster", @@ -90116,7 +99715,7 @@ resources: ] } ], - "deletion_protection": "true", + "deletion_protection": true, "initial_node_count": 1, "location": "us-central1", "name": "rpo-daily-cluster", @@ -90222,7 +99821,7 @@ resources: ] } ], - "deletion_protection": "true", + "deletion_protection": true, "initial_node_count": 1, "location": "us-central1", "name": "rpo-weekly-cluster", @@ -90583,7 +100182,7 @@ resources: ] } ], - "deletion_protection": "", + "deletion_protection": true, "initial_node_count": 1, "location": "us-central1", "name": "restore-all-ns-cluster", @@ -90659,7 +100258,7 @@ resources: ] } ], - "deletion_protection": "", + "deletion_protection": true, "initial_node_count": 1, "location": "us-central1", "name": "rollback-ns-cluster", @@ -90728,7 +100327,7 @@ resources: ] } ], - "deletion_protection": "", + "deletion_protection": true, "initial_node_count": 1, "location": "us-central1", "name": "rollback-app-cluster", @@ -90788,7 +100387,7 @@ resources: ] } ], - "deletion_protection": "", + "deletion_protection": true, "initial_node_count": 1, "location": "us-central1", "name": 
"all-groupkinds-cluster", @@ -90893,7 +100492,7 @@ resources: ] } ], - "deletion_protection": "", + "deletion_protection": true, "initial_node_count": 1, "location": "us-central1", "name": "rename-ns-cluster", @@ -90992,7 +100591,7 @@ resources: ] } ], - "deletion_protection": "", + "deletion_protection": true, "initial_node_count": 1, "location": "us-central1", "name": "transform-rule-cluster", @@ -91053,7 +100652,7 @@ resources: ] } ], - "deletion_protection": "", + "deletion_protection": true, "initial_node_count": 1, "location": "us-central1", "name": "gitops-mode-cluster", @@ -91148,7 +100747,7 @@ resources: ] } ], - "deletion_protection": "", + "deletion_protection": true, "initial_node_count": 1, "location": "us-central1", "name": "restore-order-cluster", @@ -91215,7 +100814,7 @@ resources: ] } ], - "deletion_protection": "", + "deletion_protection": true, "initial_node_count": 1, "location": "us-central1", "name": "volume-res-cluster", @@ -91659,7 +101258,6 @@ resources: } google_gke_hub_membership.membership: |- { - "description": "Membership", "endpoint": [ { "gke_cluster": [ @@ -92035,7 +101633,7 @@ resources: configmanagement.version: |- - (Optional) - Version of ACM installed + Version of Config Sync installed container_resources.limits: |- - (Optional) @@ -92201,7 +101799,7 @@ resources: - (Optional) The full, unique name of this Feature resource - oci.configmanagement.config_sync.oci.version: field is deprecated and will be removed in a future major release. Please use configmanagement.version field to specify the version of ACM installed instead. + oci.configmanagement.config_sync.oci.version: field is deprecated and will be removed in a future major release. Please use configmanagement.version field to specify the version of Config Sync installed instead. 
oci.gcp_service_account_email: |- - (Optional) @@ -92225,7 +101823,7 @@ resources: oci.version: |- - (Optional, Deprecated) - Version of ACM installed + Version of Config Sync installed pod_toleration.effect: |- - (Optional) @@ -92377,7 +101975,7 @@ resources: - (Optional) Configures the manner in which the template library is installed on the cluster. - Possible values are: INSTALATION_UNSPECIFIED, NOT_INSTALLED, ALL. + Possible values are: INSTALLATION_UNSPECIFIED, NOT_INSTALLED, ALL. terraform_labels: |- - The combination of labels configured directly on the resource @@ -92451,14 +102049,10 @@ resources: { "config_sync": [ { - "git": [ - { - "sync_repo": "https://github.com/hashicorp/terraform" - } - ] + "enabled": true } ], - "version": "1.6.2" + "management": "MANAGEMENT_AUTOMATIC" } ], "feature": "${google_gke_hub_feature.feature.name}", @@ -92503,18 +102097,15 @@ resources: { "config_sync": [ { - "oci": [ + "enabled": true, + "git": [ { - "gcp_service_account_email": "sa@project-id.iam.gserviceaccount.com", - "policy_dir": "config-connector", - "secret_type": "gcpserviceaccount", - "sync_repo": "us-central1-docker.pkg.dev/sample-project/config-repo/config-sync-gke:latest", - "sync_wait_secs": "20" + "sync_repo": "https://github.com/hashicorp/terraform" } ] } ], - "version": "1.15.1" + "version": "1.19.0" } ], "feature": "${google_gke_hub_feature.feature.name}", @@ -92555,14 +102146,28 @@ resources: - name: feature_member manifest: |- { - "feature": "${google_gke_hub_feature.feature.name}", - "location": "global", - "membership": "${google_gke_hub_membership.membership.membership_id}", - "mesh": [ + "configmanagement": [ { - "management": "MANAGEMENT_AUTOMATIC" + "config_sync": [ + { + "enabled": true, + "oci": [ + { + "gcp_service_account_email": "sa@project-id.iam.gserviceaccount.com", + "policy_dir": "config-connector", + "secret_type": "gcpserviceaccount", + "sync_repo": "us-central1-docker.pkg.dev/sample-project/config-repo/config-sync-gke:latest", + 
"sync_wait_secs": "20" + } + ] + } + ], + "version": "1.19.0" } - ] + ], + "feature": "${google_gke_hub_feature.feature.name}", + "location": "global", + "membership": "${google_gke_hub_membership.membership.membership_id}" } references: feature: google_gke_hub_feature.feature.name @@ -92576,8 +102181,11 @@ resources: } google_gke_hub_feature.feature: |- { + "labels": { + "foo": "bar" + }, "location": "global", - "name": "servicemesh" + "name": "configmanagement" } google_gke_hub_membership.membership: |- { @@ -92599,6 +102207,7 @@ resources: { "config_sync": [ { + "enabled": true, "git": [ { "sync_repo": "https://github.com/hashicorp/terraform" @@ -92606,7 +102215,7 @@ resources: ] } ], - "version": "1.6.2" + "version": "1.19.0" } ], "feature": "${google_gke_hub_feature.feature.name}", @@ -92647,6 +102256,46 @@ resources: "location": "us-central1", "membership_id": "my-membership" } + - name: feature_member + manifest: |- + { + "feature": "${google_gke_hub_feature.feature.name}", + "location": "global", + "membership": "${google_gke_hub_membership.membership.membership_id}", + "mesh": [ + { + "management": "MANAGEMENT_AUTOMATIC" + } + ] + } + references: + feature: google_gke_hub_feature.feature.name + membership: google_gke_hub_membership.membership.membership_id + dependencies: + google_container_cluster.cluster: |- + { + "initial_node_count": 1, + "location": "us-central1-a", + "name": "my-cluster" + } + google_gke_hub_feature.feature: |- + { + "location": "global", + "name": "servicemesh" + } + google_gke_hub_membership.membership: |- + { + "endpoint": [ + { + "gke_cluster": [ + { + "resource_link": "//container.googleapis.com/${google_container_cluster.cluster.id}" + } + ] + } + ], + "membership_id": "my-membership" + } - name: feature_member manifest: |- { @@ -92763,28 +102412,39 @@ resources: - (Optional) The set of namespaces to be exempted from the bundle. + config_sync.enabled: |- + - + (Optional) + Whether Config Sync is enabled in the cluster. 
This field was introduced in Terraform version + 5.41.0, and + needs to be set to true explicitly to install Config Sync. config_sync.git: |- - (Optional) Structure is documented below. config_sync.oci: |- - - (Optional) Supported from ACM versions 1.12.0 onwards. Structure is documented below. + (Optional) Supported from Config Sync versions 1.12.0 onwards. Structure is documented below. config_sync.prevent_drift: |- - (Optional) - Supported from ACM versions 1.10.0 onwards. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to "false", disables the Config Sync admission webhook and does not prevent drifts. + Supported from Config Sync versions 1.10.0 onwards. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. config_sync.source_format: |- - (Optional) Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + config_sync.stop_syncing: |- + - + (Optional) + Set to true to stop syncing configurations for a single cluster. This field is only available on clusters using Config Sync auto-upgrades or on Config Sync version 1.20.0 or later. Defaults: false. configmanagement: |- - (Optional) Config Management-specific spec. Structure is documented below. configmanagement.binauthz: |- - - (Optional) + (Optional, Deprecated) Binauthz configuration for the cluster. Structure is documented below. + This field will be ignored and should not be set. configmanagement.config_sync: |- - (Optional) @@ -92793,14 +102453,27 @@ resources: - (Optional) Hierarchy Controller configuration for the cluster. Structure is documented below. + Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. + Use open source Kubernetes Hierarchical Namespace Controller (HNC) instead. + Follow the instructions + to migrate from Hierarchy Controller to HNC. 
+ configmanagement.management: |- + - + (Optional) + Set this field to MANAGEMENT_AUTOMATIC to enable + Config Sync auto-upgrades, + and set this field to MANAGEMENT_MANUAL or MANAGEMENT_UNSPECIFIED to disable Config Sync auto-upgrades. + This field was introduced in Terraform version 5.41.0. configmanagement.policy_controller: |- - (Optional) Policy Controller configuration for the cluster. Structure is documented below. + Configuring Policy Controller through the configmanagement feature is no longer recommended. + Use the policycontroller feature instead. configmanagement.version: |- - (Optional) - Version of ACM installed. + Version of Config Sync installed. container_resources.limits: |- - (Optional) @@ -93203,7 +102876,7 @@ resources: dependencies: google_container_cluster.primary: |- { - "deletion_protection": "true", + "deletion_protection": true, "initial_node_count": 1, "location": "us-central1-a", "name": "basic-cluster", @@ -93234,7 +102907,7 @@ resources: dependencies: google_container_cluster.primary: |- { - "deletion_protection": "true", + "deletion_protection": true, "initial_node_count": 1, "location": "us-central1-a", "name": "basic-cluster", @@ -93258,7 +102931,7 @@ resources: - (Required) A JSON Web Token (JWT) issuer URI. issuer must start with https:// and // be a valid - with length <2000 characters. For example: https://container.googleapis.com/v1/projects/my-project/locations/us-west1/clusters/my-cluster (must be locations rather than zones). If the cluster is provisioned with Terraform, this is "https://container.googleapis.com/v1/${google_container_cluster.my-cluster.id}". + with length <2000 characters. For example: https://container.googleapis.com/v1/projects/my-project/locations/us-west1/clusters/my-cluster. If the cluster is provisioned with Terraform, this is "https://container.googleapis.com/v1/${google_container_cluster.my-cluster.id}". create: '- Default is 20 minutes.' delete: '- Default is 20 minutes.' 
description: |- @@ -93280,7 +102953,7 @@ resources: - (Required) Self-link of the GCP resource for the GKE cluster. - For example: //container.googleapis.com/projects/my-project/zones/us-west1-a/clusters/my-cluster. + For example: //container.googleapis.com/projects/my-project/locations/us-west1-a/clusters/my-cluster. It can be at the most 1000 characters in length. If the cluster is provisioned with Terraform, this can be "//container.googleapis.com/${google_container_cluster.my-cluster.id}" or google_container_cluster.my-cluster.id. @@ -93339,7 +103012,7 @@ resources: dependencies: google_container_cluster.primary: |- { - "deletion_protection": "true", + "deletion_protection": true, "initial_node_count": 1, "location": "us-central1-a", "name": "basic-cluster", @@ -93497,7 +103170,7 @@ resources: dependencies: google_container_cluster.primary: |- { - "deletion_protection": "true", + "deletion_protection": true, "initial_node_count": 1, "location": "us-central1-a", "name": "basic-cluster", @@ -94291,7 +103964,7 @@ resources: load_balancer.manual_lb_config: |- - (Optional) - A nested object resource + A nested object resource. Structure is documented below. load_balancer.port_config: |- - @@ -94372,7 +104045,7 @@ resources: network_config.island_mode_cidr: |- - (Optional) - A nested object resource + A nested object resource. Structure is documented below. network_config.island_mode_cidr.pod_address_cidr_blocks: |- - @@ -95246,12 +104919,12 @@ resources: load_balancer.manual_lb_config: |- - (Optional) - A nested object resource + A nested object resource. Structure is documented below. load_balancer.metal_lb_config: |- - (Optional) - A nested object resource + A nested object resource. Structure is documented below. load_balancer.port_config: |- - @@ -95450,7 +105123,7 @@ resources: network_config.island_mode_cidr: |- - (Optional) - A nested object resource + A nested object resource. Structure is documented below. 
network_config.multiple_network_interfaces_config: |- - @@ -96086,59 +105759,43 @@ resources: - The time the cluster was last updated, in RFC3339 text format. importStatements: [] - google_gkeonprem_vmware_cluster: + google_gkeonprem_vmware_admin_cluster: subCategory: Anthos On-Prem - description: A Google VMware User Cluster. - name: google_gkeonprem_vmware_cluster + description: A Google VMware Admin Cluster. + name: google_gkeonprem_vmware_admin_cluster title: "" examples: - - name: cluster-basic + - name: admin-cluster-basic manifest: |- { - "admin_cluster_membership": "projects/870316890899/locations/global/memberships/gkeonprem-terraform-test", - "annotations": {}, + "bootstrap_cluster_membership": "projects/870316890899/locations/global/memberships/gkeonprem-terraform-test", "control_plane_node": [ { "cpus": 4, - "memory": 8192, - "replicas": 1 + "memory": 8192 } ], - "description": "test cluster", + "description": "test admin cluster", + "image_type": "ubuntu_containerd", "load_balancer": [ { - "metal_lb_config": [ + "f5_config": [ { - "address_pools": [ - { - "addresses": [ - "10.251.135.19" - ], - "avoid_buggy_ips": true, - "manual_assign": "true", - "pool": "ingress-ip" - }, - { - "addresses": [ - "10.251.135.19" - ], - "avoid_buggy_ips": true, - "manual_assign": "true", - "pool": "lb-test-ip" - } - ] + "address": "10.251.135.22", + "partition": "test-parition", + "snat_pool": "test-snat-pool" } ], "vip_config": [ { - "control_plane_vip": "10.251.133.5", - "ingress_vip": "10.251.135.19" + "addons_vip": "10.251.135.19", + "control_plane_vip": "10.251.133.5" } ] } ], "location": "us-west1", - "name": "cluster-basic", + "name": "basic", "network_config": [ { "dhcp_ip_config": [ @@ -96154,12 +105811,35 @@ resources: ] } ], - "on_prem_version": "1.13.1-gke.35" + "on_prem_version": "1.31.0-gke.35", + "provider": "${google-beta}", + "vcenter": [ + { + "address": "10.0.0.1", + "ca_cert_data": "test ca cert data", + "cluster": "test cluster", + "data_disk": "test 
data disk", + "datacenter": "test data center", + "datastore": "test data store", + "folder": "test folder", + "resource_pool": "test resource pool" + } + ] } - - name: cluster-f5lb + references: + provider: google-beta + - name: admin-cluster-full manifest: |- { - "admin_cluster_membership": "projects/870316890899/locations/global/memberships/gkeonprem-terraform-test", + "addon_node": [ + { + "auto_resize_config": [ + { + "enabled": true + } + ] + } + ], "annotations": {}, "anti_affinity_groups": [ { @@ -96168,9 +105848,9 @@ resources: ], "authorization": [ { - "admin_users": [ + "viewer_users": [ { - "username": "testuser@gmail.com" + "username": "user1@gmail.com" } ] } @@ -96180,154 +105860,873 @@ resources: "enabled": true } ], + "bootstrap_cluster_membership": "projects/870316890899/locations/global/memberships/gkeonprem-terraform-test", "control_plane_node": [ { - "auto_resize_config": [ - { - "enabled": true - } - ], "cpus": 4, "memory": 8192, - "replicas": 1 - } - ], - "dataplane_v2": [ - { - "advanced_networking": true, - "dataplane_v2_enabled": true, - "windows_dataplane_v2_enabled": true + "replicas": 3 } ], - "description": "test cluster", - "disable_bundled_ingress": true, - "enable_control_plane_v2": true, + "description": "test admin cluster", + "image_type": "ubuntu_containerd", "load_balancer": [ { - "f5_config": [ + "manual_lb_config": [ { - "address": "10.0.0.1", - "partition": "test-partition", - "snat_pool": "test-snap-pool" + "addons_node_port": 30005, + "control_plane_node_port": 30006, + "ingress_http_node_port": 30007, + "ingress_https_node_port": 30008, + "konnectivity_server_node_port": 30009 } ], "vip_config": [ { - "control_plane_vip": "10.251.133.5", - "ingress_vip": "10.251.135.19" + "addons_vip": "10.251.135.19", + "control_plane_vip": "10.251.133.5" } ] } ], "location": "us-west1", - "name": "cluster-f5lb", + "name": "full", "network_config": [ { - "control_plane_v2_config": [ + "ha_control_plane_config": [ { 
"control_plane_ip_block": [ { - "gateway": "test-gateway", + "gateway": "10.0.0.3", "ips": [ { - "hostname": "test-hostname", - "ip": "10.0.0.1" + "hostname": "hostname", + "ip": "10.0.0.4" } ], - "netmask": "10.0.0.1/32" + "netmask": "10.0.0.3/32" } ] } ], - "dhcp_ip_config": [ - { - "enabled": true - } - ], - "pod_address_cidr_blocks": [ - "192.168.0.0/16" - ], - "service_address_cidr_blocks": [ - "10.96.0.0/12" - ], - "vcenter_network": "test-vcenter-network" - } - ], - "on_prem_version": "1.13.1-gke.35", - "storage": [ - { - "vsphere_csi_disabled": true - } - ], - "vm_tracking_enabled": true - } - - name: cluster-manuallb - manifest: |- - { - "admin_cluster_membership": "projects/870316890899/locations/global/memberships/gkeonprem-terraform-test", - "annotations": {}, - "anti_affinity_groups": [ - { - "aag_config_disabled": true - } - ], - "authorization": [ - { - "admin_users": [ - { - "username": "testuser@gmail.com" - } - ] - } - ], - "auto_repair_config": [ - { - "enabled": true - } - ], - "control_plane_node": [ - { - "auto_resize_config": [ - { - "enabled": true - } - ], - "cpus": 4, - "memory": 8192, - "replicas": 1 - } - ], - "dataplane_v2": [ - { - "advanced_networking": true, - "dataplane_v2_enabled": true, - "windows_dataplane_v2_enabled": true - } - ], - "description": "test cluster", - "enable_control_plane_v2": true, - "load_balancer": [ - { - "manual_lb_config": [ - { - "control_plane_node_port": 30007, - "ingress_http_node_port": 30005, - "ingress_https_node_port": 30006, - "konnectivity_server_node_port": 30008 - } - ], - "vip_config": [ - { - "control_plane_vip": "10.251.133.5", - "ingress_vip": "10.251.135.19" - } - ] - } - ], - "location": "us-west1", - "name": "cluster-manuallb", - "network_config": [ - { + "host_config": [ + { + "dns_search_domains": [ + "test-domain" + ], + "dns_servers": [ + "10.254.41.1" + ], + "ntp_servers": [ + "216.239.35.8" + ] + } + ], + "pod_address_cidr_blocks": [ + "192.168.0.0/16" + ], + 
"service_address_cidr_blocks": [ + "10.96.0.0/12" + ], + "static_ip_config": [ + { + "ip_blocks": [ + { + "gateway": "10.0.0.1", + "ips": [ + { + "hostname": "hostname", + "ip": "10.0.0.2" + } + ], + "netmask": "10.0.0.3/32" + } + ] + } + ], + "vcenter_network": "test-vcenter-network" + } + ], + "on_prem_version": "1.31.0-gke.35", + "platform_config": [ + { + "required_platform_version": "1.31.0" + } + ], + "provider": "${google-beta}", + "vcenter": [ + { + "address": "10.0.0.1", + "ca_cert_data": "test ca cert data", + "cluster": "test cluster", + "data_disk": "test data disk", + "datacenter": "test data center", + "datastore": "test data store", + "folder": "test folder", + "resource_pool": "test resource pool", + "storage_policy_name": "storage_policy_name" + } + ] + } + references: + provider: google-beta + - name: admin-cluster-metallb + manifest: |- + { + "bootstrap_cluster_membership": "projects/870316890899/locations/global/memberships/gkeonprem-terraform-test", + "control_plane_node": [ + { + "cpus": 4, + "memory": 8192 + } + ], + "description": "test admin cluster", + "image_type": "ubuntu_containerd", + "load_balancer": [ + { + "metal_lb_config": [ + { + "enabled": true + } + ], + "vip_config": [ + { + "addons_vip": "10.251.135.19", + "control_plane_vip": "10.251.133.5" + } + ] + } + ], + "location": "us-west1", + "name": "metallb", + "network_config": [ + { + "dhcp_ip_config": [ + { + "enabled": true + } + ], + "pod_address_cidr_blocks": [ + "192.168.0.0/16" + ], + "service_address_cidr_blocks": [ + "10.96.0.0/12" + ] + } + ], + "on_prem_version": "1.31.0-gke.35", + "provider": "${google-beta}", + "vcenter": [ + { + "address": "10.0.0.1", + "ca_cert_data": "test ca cert data", + "cluster": "test cluster", + "data_disk": "test data disk", + "datacenter": "test data center", + "datastore": "test data store", + "folder": "test folder", + "resource_pool": "test resource pool" + } + ] + } + references: + provider: google-beta + argumentDocs: + 
addon_node.auto_resize_config: |- + - + (Optional) + Specifies auto resize config. + Structure is documented below. + addon_node.auto_resize_config.enabled: |- + - + (Required) + Whether to enable controle plane node auto resizing. + anti_affinity_groups.aag_config_disabled: |- + - + (Required) + Spread nodes across at least three physical hosts (requires at least three + hosts). + Enabled by default. + authorization.viewer_users: |- + - + (Optional) + Users that will be granted the cluster-admin role on the cluster, providing + full access to the cluster. + Structure is documented below. + authorization.viewer_users.username: |- + - + (Required) + The name of the user, e.g. my-gcp-id@gmail.com. + auto_repair_config.enabled: |- + - + (Required) + Whether auto repair is enabled. + bundles.status: |- + - + (Output) + ResourceStatus representing detailed cluster state. + Structure is documented below. + bundles.version: |- + - + (Output) + The version of the bundle. + conditions.last_transition_time: |- + - + (Output) + Last time the condition transit from one status to another. + conditions.message: |- + - + (Output) + Human-readable message indicating details about last transition. + conditions.reason: |- + - + (Output) + Machine-readable message indicating details about last transition. + conditions.state: |- + - + (Output) + The lifecycle state of the condition. + conditions.type: |- + - + (Output) + Type of the condition. + (e.g., ClusterRunning, NodePoolRunning or ServerSidePreflightReady) + control_plane_node.cpus: |- + - + (Optional) + The number of vCPUs for the control-plane node of the admin cluster. + control_plane_node.memory: |- + - + (Optional) + The number of mebibytes of memory for the control-plane node of the admin cluster. + control_plane_node.replicas: |- + - + (Optional) + The number of control plane nodes for this VMware admin cluster. + create: '- Default is 60 minutes.' 
+ create_time: |- + - + The time the cluster was created, in RFC3339 text format. + delete: '- Default is 60 minutes.' + dhcp_ip_config.enabled: |- + - + (Required) + enabled is a flag to mark if DHCP IP allocation is + used for VMware admin clusters. + effective_annotations: |- + - + All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services. + endpoint: |- + - + The DNS name of VMware admin cluster's API server. + etag: |- + - + This checksum is computed by the server based on the value of other + fields, and may be sent on update and delete requests to ensure the + client has an up-to-date value before proceeding. + Allows clients to perform consistent read-modify-writes + through optimistic concurrency control. + f5_config.address: |- + - + (Optional) + The load balancer's IP address. + f5_config.partition: |- + - + (Optional) + The preexisting partition to be used by the load balancer. + This partition is usually created for the admin cluster for example: + 'my-f5-admin-partition'. + f5_config.snat_pool: |- + - + (Optional) + The pool name. Only necessary, if using SNAT. + fleet: |- + - + Fleet configuration for the cluster. + Structure is documented below. + fleet.membership: |- + - + (Output) + The name of the managed Fleet Membership resource associated to this cluster. + Membership names are formatted as + projects//locations//memberships/. + ha_control_plane_config.control_plane_ip_block: |- + - + (Optional) + Static IP addresses for the control plane nodes. + Structure is documented below. + ha_control_plane_config.control_plane_ip_block.gateway: |- + - + (Required) + The network gateway used by the VMware Admin Cluster. + ha_control_plane_config.control_plane_ip_block.ips: |- + - + (Required) + The node's network configurations used by the VMware Admin Cluster. + Structure is documented below. 
+ ha_control_plane_config.control_plane_ip_block.netmask: |- + - + (Required) + The netmask used by the VMware Admin Cluster. + host_config.dns_search_domains: |- + - + (Optional) + DNS search domains. + host_config.dns_servers: |- + - + (Optional) + DNS servers. + host_config.ntp_servers: |- + - + (Optional) + NTP servers. + id: '- an identifier for the resource with format projects/{{project}}/locations/{{location}}/vmwareAdminClusters/{{name}}' + ips.addon_node: |- + - + (Optional) + The VMware admin cluster addon node configuration. + Structure is documented below. + ips.annotations: |- + - + (Optional) + Annotations on the VMware Admin Cluster. + This field has the same restrictions as Kubernetes annotations. + The total size of all keys and values combined is limited to 256k. + Key can have 2 segments: prefix (optional) and name (required), + separated by a slash (/). + Prefix must be a DNS subdomain. + Name must be 63 characters or less, begin and end with alphanumerics, + with dashes (-), underscores (_), dots (.), and alphanumerics between. + ips.anti_affinity_groups: |- + - + (Optional) + AAGConfig specifies whether to spread VMware Admin Cluster nodes across at + least three physical hosts in the datacenter. + Structure is documented below. + ips.authorization: |- + - + (Optional) + The VMware admin cluster authorization configuration. + Structure is documented below. + ips.auto_repair_config: |- + - + (Optional) + Configuration for auto repairing. + Structure is documented below. + ips.bootstrap_cluster_membership: |- + - + (Optional) + The bootstrap cluster this VMware admin cluster belongs to. + ips.control_plane_node: |- + - + (Optional) + The VMware admin cluster control plane node configuration. + Structure is documented below. + ips.description: |- + - + (Optional) + A human readable description of this VMware admin cluster. + ips.effective_annotations: for all of the annotations present on the resource. 
+ ips.hostname: |- + - + (Optional) + Hostname of the machine. VM's name will be used if this field is empty. + ips.image_type: |- + - + (Optional) + The OS image type for the VMware admin cluster. + ips.ip: |- + - + (Required) + IP could be an IP address (like 1.2.3.4) or a CIDR (like 1.2.3.0/24). + ips.load_balancer: |- + - + (Optional) + Specifies the load balancer configuration for VMware admin cluster. + Structure is documented below. + ips.on_prem_version: |- + - + (Optional) + The Anthos clusters on the VMware version for the admin cluster. + ips.platform_config: |- + - + (Optional) + The VMware platform configuration. + Structure is documented below. + ips.project: |- + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + ips.vcenter: |- + - + (Optional) + Specifies vCenter config for the admin cluster. + Structure is documented below. + load_balancer.f5_config: |- + - + (Optional) + Configuration for F5 Big IP typed load balancers. + Structure is documented below. + load_balancer.manual_lb_config: |- + - + (Optional) + Manually configured load balancers. + Structure is documented below. + load_balancer.metal_lb_config: |- + - + (Optional) + Metal LB load balancers. + Structure is documented below. + load_balancer.vip_config: |- + - + (Required) + Specified the VMware Load Balancer Config + Structure is documented below. + local_name: |- + - + The object name of the VMwareAdminCluster custom resource on the + associated admin cluster. This field is used to support conflicting + names when enrolling existing clusters to the API. When used as a part of + cluster enrollment, this field will differ from the ID in the resource + name. For new clusters, this field will match the user provided cluster ID + and be visible in the last component of the resource name. It is not + modifiable. 
+ All users should use this name to access their cluster using gkectl or + kubectl and should expect to see the local name when viewing admin + cluster controller logs. + location: |- + - + (Required) + The location of the resource. + manual_lb_config.addons_node_port: |- + - + (Optional) + NodePort for add-ons server in the admin cluster. + manual_lb_config.control_plane_node_port: |- + - + (Optional) + NodePort for control plane service. The Kubernetes API server in the admin + cluster is implemented as a Service of type NodePort (ex. 30968). + manual_lb_config.ingress_http_node_port: |- + - + (Optional) + NodePort for ingress service's http. The ingress service in the admin + cluster is implemented as a Service of type NodePort (ex. 32527). + manual_lb_config.ingress_https_node_port: |- + - + (Optional) + NodePort for ingress service's https. The ingress service in the admin + cluster is implemented as a Service of type NodePort (ex. 30139). + manual_lb_config.konnectivity_server_node_port: |- + - + (Optional) + NodePort for konnectivity server service running as a sidecar in each + kube-apiserver pod (ex. 30564). + metal_lb_config.enabled: |- + - + (Optional) + Metal LB is enabled. + name: |- + - + (Required) + The VMware admin cluster resource name. + network_config: |- + - + (Required) + The VMware admin cluster network configuration. + Structure is documented below. + network_config.dhcp_ip_config: |- + - + (Optional) + Configuration settings for a DHCP IP configuration. + Structure is documented below. + network_config.ha_control_plane_config: |- + - + (Optional) + Configuration for HA admin cluster control plane. + Structure is documented below. + network_config.host_config: |- + - + (Optional) + Represents common network settings irrespective of the host's IP address. + Structure is documented below. + network_config.pod_address_cidr_blocks: |- + - + (Required) + All pods in the cluster are assigned an RFC1918 IPv4 address from these ranges. 
+ Only a single range is supported. This field cannot be changed after creation. + network_config.service_address_cidr_blocks: |- + - + (Required) + All services in the cluster are assigned an RFC1918 IPv4 address + from these ranges. Only a single range is supported. This field + cannot be changed after creation. + network_config.static_ip_config: |- + - + (Optional) + Configuration settings for a static IP configuration. + Structure is documented below. + network_config.vcenter_network: |- + - + (Optional) + vcenter_network specifies vCenter network name. + platform_config.bundles: |- + - + (Output) + The list of bundles installed in the admin cluster. + Structure is documented below. + platform_config.platform_version: |- + - + (Output) + The platform version e.g. 1.13.2. + platform_config.required_platform_version: |- + - + (Optional) + The required platform version e.g. 1.13.1. + If the current platform version is lower than the target version, + the platform version will be updated to the target version. + If the target version is not installed in the platform + (bundle versions), download the target version bundle. + platform_config.status: |- + - + (Output) + ResourceStatus representing detailed cluster state. + Structure is documented below. + reconciling: |- + - + If set, there are currently changes in flight to the VMware admin cluster. + state: |- + - + The lifecycle state of the VMware admin cluster. + static_ip_config.ip_blocks: |- + - + (Optional) + Represents the configuration values for static IP allocation to nodes. + Structure is documented below. + static_ip_config.ip_blocks.gateway: |- + - + (Required) + The network gateway used by the VMware Admin Cluster. + static_ip_config.ip_blocks.ips: |- + - + (Required) + The node's network configurations used by the VMware Admin Cluster. + Structure is documented below. + static_ip_config.ip_blocks.netmask: |- + - + (Required) + The netmask used by the VMware Admin Cluster. 
+ status: |- + - + ResourceStatus representing detailed cluster state. + Structure is documented below. + status.conditions: |- + - + (Output) + ResourceConditions provide a standard mechanism for higher-level status reporting from admin cluster controller. + Structure is documented below. + status.error_message: |- + - + (Output) + Human-friendly representation of the error message from the admin cluster + controller. The error message can be temporary as the admin cluster + controller creates a cluster or node pool. If the error message persists + for a longer period of time, it can be used to surface error message to + indicate real problems requiring user intervention. + uid: |- + - + The unique identifier of the VMware Admin Cluster. + update: '- Default is 60 minutes.' + update_time: |- + - + The time the cluster was last updated, in RFC3339 text format. + vcenter.address: |- + - + (Optional) + The vCenter IP address. + vcenter.ca_cert_data: |- + - + (Optional) + Contains the vCenter CA certificate public key for SSL verification. + vcenter.cluster: |- + - + (Optional) + The name of the vCenter cluster for the admin cluster. + vcenter.data_disk: |- + - + (Optional) + The name of the virtual machine disk (VMDK) for the admin cluster. + vcenter.datacenter: |- + - + (Optional) + The name of the vCenter datacenter for the admin cluster. + vcenter.datastore: |- + - + (Optional) + The name of the vCenter datastore for the admin cluster. + vcenter.folder: |- + - + (Optional) + The name of the vCenter folder for the admin cluster. + vcenter.resource_pool: |- + - + (Optional) + The name of the vCenter resource pool for the admin cluster. + vcenter.storage_policy_name: |- + - + (Optional) + The name of the vCenter storage policy for the user cluster. + vip_config.addons_vip: |- + - + (Optional) + The VIP to configure the load balancer for add-ons. 
+ vip_config.control_plane_vip: |- + - + (Required) + The VIP which you previously set aside for the Kubernetes + API of this VMware Admin Cluster. + importStatements: [] + google_gkeonprem_vmware_cluster: + subCategory: Anthos On-Prem + description: A Google VMware User Cluster. + name: google_gkeonprem_vmware_cluster + title: "" + examples: + - name: cluster-basic + manifest: |- + { + "admin_cluster_membership": "projects/870316890899/locations/global/memberships/gkeonprem-terraform-test", + "annotations": {}, + "control_plane_node": [ + { + "cpus": 4, + "memory": 8192, + "replicas": 1 + } + ], + "description": "test cluster", + "load_balancer": [ + { + "metal_lb_config": [ + { + "address_pools": [ + { + "addresses": [ + "10.251.135.19" + ], + "avoid_buggy_ips": true, + "manual_assign": "true", + "pool": "ingress-ip" + }, + { + "addresses": [ + "10.251.135.19" + ], + "avoid_buggy_ips": true, + "manual_assign": "true", + "pool": "lb-test-ip" + } + ] + } + ], + "vip_config": [ + { + "control_plane_vip": "10.251.133.5", + "ingress_vip": "10.251.135.19" + } + ] + } + ], + "location": "us-west1", + "name": "cluster-basic", + "network_config": [ + { + "dhcp_ip_config": [ + { + "enabled": true + } + ], + "pod_address_cidr_blocks": [ + "192.168.0.0/16" + ], + "service_address_cidr_blocks": [ + "10.96.0.0/12" + ] + } + ], + "on_prem_version": "1.13.1-gke.35" + } + - name: cluster-f5lb + manifest: |- + { + "admin_cluster_membership": "projects/870316890899/locations/global/memberships/gkeonprem-terraform-test", + "annotations": {}, + "anti_affinity_groups": [ + { + "aag_config_disabled": true + } + ], + "authorization": [ + { + "admin_users": [ + { + "username": "testuser@gmail.com" + } + ] + } + ], + "auto_repair_config": [ + { + "enabled": true + } + ], + "control_plane_node": [ + { + "auto_resize_config": [ + { + "enabled": true + } + ], + "cpus": 4, + "memory": 8192, + "replicas": 1 + } + ], + "dataplane_v2": [ + { + "advanced_networking": true, + 
"dataplane_v2_enabled": true, + "windows_dataplane_v2_enabled": true + } + ], + "description": "test cluster", + "disable_bundled_ingress": true, + "enable_control_plane_v2": true, + "load_balancer": [ + { + "f5_config": [ + { + "address": "10.0.0.1", + "partition": "test-partition", + "snat_pool": "test-snap-pool" + } + ], + "vip_config": [ + { + "control_plane_vip": "10.251.133.5", + "ingress_vip": "10.251.135.19" + } + ] + } + ], + "location": "us-west1", + "name": "cluster-f5lb", + "network_config": [ + { + "control_plane_v2_config": [ + { + "control_plane_ip_block": [ + { + "gateway": "test-gateway", + "ips": [ + { + "hostname": "test-hostname", + "ip": "10.0.0.1" + } + ], + "netmask": "10.0.0.1/32" + } + ] + } + ], + "dhcp_ip_config": [ + { + "enabled": true + } + ], + "pod_address_cidr_blocks": [ + "192.168.0.0/16" + ], + "service_address_cidr_blocks": [ + "10.96.0.0/12" + ], + "vcenter_network": "test-vcenter-network" + } + ], + "on_prem_version": "1.13.1-gke.35", + "storage": [ + { + "vsphere_csi_disabled": true + } + ], + "vm_tracking_enabled": true + } + - name: cluster-manuallb + manifest: |- + { + "admin_cluster_membership": "projects/870316890899/locations/global/memberships/gkeonprem-terraform-test", + "annotations": {}, + "anti_affinity_groups": [ + { + "aag_config_disabled": true + } + ], + "authorization": [ + { + "admin_users": [ + { + "username": "testuser@gmail.com" + } + ] + } + ], + "auto_repair_config": [ + { + "enabled": true + } + ], + "control_plane_node": [ + { + "auto_resize_config": [ + { + "enabled": true + } + ], + "cpus": 4, + "memory": 8192, + "replicas": 1 + } + ], + "dataplane_v2": [ + { + "advanced_networking": true, + "dataplane_v2_enabled": true, + "windows_dataplane_v2_enabled": true + } + ], + "description": "test cluster", + "enable_control_plane_v2": true, + "load_balancer": [ + { + "manual_lb_config": [ + { + "control_plane_node_port": 30007, + "ingress_http_node_port": 30005, + "ingress_https_node_port": 30006, + 
"konnectivity_server_node_port": 30008 + } + ], + "vip_config": [ + { + "control_plane_vip": "10.251.133.5", + "ingress_vip": "10.251.135.19" + } + ] + } + ], + "location": "us-west1", + "name": "cluster-manuallb", + "network_config": [ + { "host_config": [ { "dns_search_domains": [ @@ -97182,7 +107581,7 @@ resources: - (Required) The OS image to be used for each node in a node pool. - Currently cos, ubuntu, ubuntu_containerd and windows are supported. + Currently cos, cos_cgv2, ubuntu, ubuntu_cgv2, ubuntu_containerd and windows are supported. config.labels: |- - (Optional) @@ -97341,18 +107740,6 @@ resources: Tags to apply to VMs. Structure is documented below. importStatements: [] - google_google_project_iam_member_remove: - subCategory: Cloud Platform - description: Ensures that a member:role pairing does not exist in a project's IAM policy. - name: google_google_project_iam_member_remove - title: "" - argumentDocs: - member: |- - - (Required) The IAM principal that should not have the target role. - Each entry can have one of the following values: - project: '- (Required) The project id of the target project.' - role: '- (Required) The target role that should be removed.' - importStatements: [] google_healthcare_consent_store: subCategory: Cloud Healthcare description: The Consent Management API is a tool for tracking user consents and the documentation associated with the consents. @@ -97553,7 +107940,7 @@ resources: encryption_spec: |- - (Optional) - A nested object resource + A nested object resource. Structure is documented below. encryption_spec.kms_key_name: |- - @@ -97739,7 +108126,7 @@ resources: notification_config: |- - (Optional) - A nested object resource + A nested object resource. Structure is documented below. notification_config.pubsub_topic: |- - @@ -98045,7 +108432,7 @@ resources: notification_config: |- - (Optional, Deprecated) - A nested object resource + A nested object resource. Structure is documented below. 
notification_config.pubsub_topic: |- - @@ -98288,7 +108675,7 @@ resources: notification_config: |- - (Optional, Deprecated) - A nested object resource + A nested object resource. Structure is documented below. notification_config.pubsub_topic: |- - @@ -98325,7 +108712,7 @@ resources: parser_config: |- - (Optional) - A nested object resource + A nested object resource. Structure is documented below. parser_config.allow_null_header: |- - @@ -98395,6 +108782,534 @@ resources: google_healthcare_hl7_v2_store_iam_binding can be used per role. Note that custom roles must be of the format [projects|organizations]/{parent-name}/roles/{role-name}. importStatements: [] + google_healthcare_pipeline_job: + subCategory: Cloud Healthcare + description: PipelineJobs are Long Running Operations on Healthcare API to Map or Reconcile incoming data into FHIR format + name: google_healthcare_pipeline_job + title: "" + examples: + - name: example-pipeline + manifest: |- + { + "dataset": "${google_healthcare_dataset.dataset.id}", + "disable_lineage": true, + "location": "us-central1", + "name": "example_pipeline_job", + "reconciliation_pipeline_job": [ + { + "fhir_store_destination": "${google_healthcare_dataset.dataset.id}/fhirStores/${google_healthcare_fhir_store.fhirstore.name}", + "matching_uri_prefix": "gs://${google_storage_bucket.bucket.name}", + "merge_config": [ + { + "description": "sample description for reconciliation rules", + "whistle_config_source": [ + { + "import_uri_prefix": "gs://${google_storage_bucket.bucket.name}", + "uri": "gs://${google_storage_bucket.bucket.name}/${google_storage_bucket_object.merge_file.name}" + } + ] + } + ] + } + ] + } + references: + dataset: google_healthcare_dataset.dataset.id + dependencies: + google_healthcare_dataset.dataset: |- + { + "location": "us-central1", + "name": "example_dataset" + } + google_healthcare_fhir_store.fhirstore: |- + { + "dataset": "${google_healthcare_dataset.dataset.id}", + "disable_referential_integrity": true, + 
"enable_update_create": true, + "name": "fhir_store", + "version": "R4" + } + google_storage_bucket.bucket: |- + { + "location": "us-central1", + "name": "example_bucket_name", + "uniform_bucket_level_access": true + } + google_storage_bucket_iam_member.hsa: |- + { + "bucket": "${google_storage_bucket.bucket.name}", + "member": "serviceAccount:service-${data.google_project.project.number}@gcp-sa-healthcare.iam.gserviceaccount.com", + "role": "roles/storage.objectUser" + } + google_storage_bucket_object.merge_file: |- + { + "bucket": "${google_storage_bucket.bucket.name}", + "content": " ", + "name": "merge.wstl" + } + - name: example-pipeline + manifest: |- + { + "backfill_pipeline_job": [ + { + "mapping_pipeline_job": "${google_healthcare_dataset.dataset.id}/pipelinejobs/example_mapping_pipeline" + } + ], + "dataset": "${google_healthcare_dataset.dataset.id}", + "location": "us-central1", + "name": "example_backfill_pipeline" + } + references: + dataset: google_healthcare_dataset.dataset.id + dependencies: + google_healthcare_dataset.dataset: |- + { + "location": "us-central1", + "name": "example_dataset" + } + - name: example-mapping-pipeline + manifest: |- + { + "dataset": "${google_healthcare_dataset.dataset.id}", + "disable_lineage": true, + "labels": { + "example_label_key": "example_label_value" + }, + "location": "us-central1", + "mapping_pipeline_job": [ + { + "fhir_store_destination": "${google_healthcare_dataset.dataset.id}/fhirStores/${google_healthcare_fhir_store.dest_fhirstore.name}", + "fhir_streaming_source": [ + { + "description": "example description for streaming fhirstore", + "fhir_store": "${google_healthcare_dataset.dataset.id}/fhirStores/${google_healthcare_fhir_store.source_fhirstore.name}" + } + ], + "mapping_config": [ + { + "description": "example description for mapping configuration", + "whistle_config_source": [ + { + "import_uri_prefix": "gs://${google_storage_bucket.bucket.name}", + "uri": 
"gs://${google_storage_bucket.bucket.name}/${google_storage_bucket_object.mapping_file.name}" + } + ] + } + ] + } + ], + "name": "example_mapping_pipeline_job" + } + references: + dataset: google_healthcare_dataset.dataset.id + dependencies: + google_healthcare_dataset.dataset: |- + { + "location": "us-central1", + "name": "example_dataset" + } + google_healthcare_fhir_store.dest_fhirstore: |- + { + "dataset": "${google_healthcare_dataset.dataset.id}", + "disable_referential_integrity": true, + "enable_update_create": true, + "name": "dest_fhir_store", + "version": "R4" + } + google_healthcare_fhir_store.source_fhirstore: |- + { + "dataset": "${google_healthcare_dataset.dataset.id}", + "disable_referential_integrity": true, + "enable_update_create": true, + "name": "source_fhir_store", + "version": "R4" + } + google_storage_bucket.bucket: |- + { + "location": "us-central1", + "name": "example_bucket_name", + "uniform_bucket_level_access": true + } + google_storage_bucket_iam_member.hsa: |- + { + "bucket": "${google_storage_bucket.bucket.name}", + "member": "serviceAccount:service-${data.google_project.project.number}@gcp-sa-healthcare.iam.gserviceaccount.com", + "role": "roles/storage.objectUser" + } + google_storage_bucket_object.mapping_file: |- + { + "bucket": "${google_storage_bucket.bucket.name}", + "content": " ", + "name": "mapping.wstl" + } + - name: recon + manifest: |- + { + "dataset": "${google_healthcare_dataset.dataset.id}", + "disable_lineage": true, + "location": "us-central1", + "name": "example_recon_pipeline_job", + "reconciliation_pipeline_job": [ + { + "fhir_store_destination": "${google_healthcare_dataset.dataset.id}/fhirStores/${google_healthcare_fhir_store.dest_fhirstore.name}", + "matching_uri_prefix": "gs://${google_storage_bucket.bucket.name}", + "merge_config": [ + { + "description": "sample description for reconciliation rules", + "whistle_config_source": [ + { + "import_uri_prefix": "gs://${google_storage_bucket.bucket.name}", + "uri": 
"gs://${google_storage_bucket.bucket.name}/${google_storage_bucket_object.merge_file.name}" + } + ] + } + ] + } + ] + } + references: + dataset: google_healthcare_dataset.dataset.id + dependencies: + google_healthcare_dataset.dataset: |- + { + "location": "us-central1", + "name": "example_dataset" + } + google_healthcare_fhir_store.dest_fhirstore: |- + { + "dataset": "${google_healthcare_dataset.dataset.id}", + "disable_referential_integrity": true, + "enable_update_create": true, + "name": "dest_fhir_store", + "version": "R4" + } + google_healthcare_fhir_store.source_fhirstore: |- + { + "dataset": "${google_healthcare_dataset.dataset.id}", + "disable_referential_integrity": true, + "enable_update_create": true, + "name": "source_fhir_store", + "version": "R4" + } + google_storage_bucket.bucket: |- + { + "location": "us-central1", + "name": "example_bucket_name", + "uniform_bucket_level_access": true + } + google_storage_bucket_iam_member.hsa: |- + { + "bucket": "${google_storage_bucket.bucket.name}", + "member": "serviceAccount:service-${data.google_project.project.number}@gcp-sa-healthcare.iam.gserviceaccount.com", + "role": "roles/storage.objectUser" + } + google_storage_bucket_object.mapping_file: |- + { + "bucket": "${google_storage_bucket.bucket.name}", + "content": " ", + "name": "mapping.wstl" + } + google_storage_bucket_object.merge_file: |- + { + "bucket": "${google_storage_bucket.bucket.name}", + "content": " ", + "name": "merge.wstl" + } + - name: example-mapping-pipeline + manifest: |- + { + "dataset": "${google_healthcare_dataset.dataset.id}", + "depends_on": [ + "${google_healthcare_pipeline_job.recon}" + ], + "disable_lineage": true, + "labels": { + "example_label_key": "example_label_value" + }, + "location": "us-central1", + "mapping_pipeline_job": [ + { + "fhir_streaming_source": [ + { + "description": "example description for streaming fhirstore", + "fhir_store": 
"${google_healthcare_dataset.dataset.id}/fhirStores/${google_healthcare_fhir_store.source_fhirstore.name}" + } + ], + "mapping_config": [ + { + "description": "example description for mapping configuration", + "whistle_config_source": [ + { + "import_uri_prefix": "gs://${google_storage_bucket.bucket.name}", + "uri": "gs://${google_storage_bucket.bucket.name}/${google_storage_bucket_object.mapping_file.name}" + } + ] + } + ], + "reconciliation_destination": true + } + ], + "name": "example_mapping_pipeline_job" + } + references: + dataset: google_healthcare_dataset.dataset.id + dependencies: + google_healthcare_dataset.dataset: |- + { + "location": "us-central1", + "name": "example_dataset" + } + google_healthcare_fhir_store.dest_fhirstore: |- + { + "dataset": "${google_healthcare_dataset.dataset.id}", + "disable_referential_integrity": true, + "enable_update_create": true, + "name": "dest_fhir_store", + "version": "R4" + } + google_healthcare_fhir_store.source_fhirstore: |- + { + "dataset": "${google_healthcare_dataset.dataset.id}", + "disable_referential_integrity": true, + "enable_update_create": true, + "name": "source_fhir_store", + "version": "R4" + } + google_storage_bucket.bucket: |- + { + "location": "us-central1", + "name": "example_bucket_name", + "uniform_bucket_level_access": true + } + google_storage_bucket_iam_member.hsa: |- + { + "bucket": "${google_storage_bucket.bucket.name}", + "member": "serviceAccount:service-${data.google_project.project.number}@gcp-sa-healthcare.iam.gserviceaccount.com", + "role": "roles/storage.objectUser" + } + google_storage_bucket_object.mapping_file: |- + { + "bucket": "${google_storage_bucket.bucket.name}", + "content": " ", + "name": "mapping.wstl" + } + google_storage_bucket_object.merge_file: |- + { + "bucket": "${google_storage_bucket.bucket.name}", + "content": " ", + "name": "merge.wstl" + } + argumentDocs: + backfill_pipeline_job: |- + - + (Optional) + Specifies the backfill configuration. 
+ Structure is documented below. + backfill_pipeline_job.mapping_pipeline_job: |- + - + (Optional) + Specifies the mapping pipeline job to backfill, the name format + should follow: projects/{projectId}/locations/{locationId}/datasets/{datasetId}/pipelineJobs/{pipelineJobId}. + create: '- Default is 20 minutes.' + dataset: |- + - + (Required) + Healthcare Dataset under which the Pipeline Job is to run + delete: '- Default is 20 minutes.' + disable_lineage: |- + - + (Optional) + If true, disables writing lineage for the pipeline. + effective_labels: for all of the labels present on the resource. + fhir_streaming_source.description: |- + - + (Optional) + Describes the streaming FHIR data source. + fhir_streaming_source.fhir_store: |- + - + (Required) + The path to the FHIR store in the format projects/{projectId}/locations/{locationId}/datasets/{datasetId}/fhirStores/{fhirStoreId}. + id: '- an identifier for the resource with format {{dataset}}/pipelineJobs/{{name}}' + labels: |- + - + (Optional) + User-supplied key-value pairs used to organize Pipeline Jobs. + Label keys must be between 1 and 63 characters long, have a UTF-8 encoding of + maximum 128 bytes, and must conform to the following PCRE regular expression: + [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}-]{0,62} + Label values are optional, must be between 1 and 63 characters long, have a + UTF-8 encoding of maximum 128 bytes, and must conform to the following PCRE + regular expression: [\p{Ll}\p{Lo}\p{N}-]{0,63} + No more than 64 labels can be associated with a given pipeline. + An object containing a list of "key": value pairs. + Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. + location: |- + - + (Required) + Location where the Pipeline Job is to run + mapping_config.description: |- + - + (Optional) + Describes the mapping configuration. + mapping_config.whistle_config_source: |- + - + (Optional) + Specifies the path to the mapping configuration for harmonization pipeline. 
+ Structure is documented below. + mapping_pipeline_job: |- + - + (Optional) + Specifies mapping configuration. + Structure is documented below. + mapping_pipeline_job.fhir_store_destination: |- + - + (Optional) + If set, the mapping pipeline will write snapshots to this + FHIR store without assigning stable IDs. You must + grant your pipeline project's Cloud Healthcare Service + Agent serviceaccount healthcare.fhirResources.executeBundle + and healthcare.fhirResources.create permissions on the + destination store. The destination store must set + [disableReferentialIntegrity][FhirStore.disable_referential_integrity] + to true. The destination store must use FHIR version R4. + Format: project/{projectID}/locations/{locationID}/datasets/{datasetName}/fhirStores/{fhirStoreID}. + mapping_pipeline_job.fhir_streaming_source: |- + - + (Optional) + A streaming FHIR data source. + Structure is documented below. + mapping_pipeline_job.mapping_config: |- + - + (Required) + The location of the mapping configuration. + Structure is documented below. + mapping_pipeline_job.reconciliation_destination: |- + - + (Optional) + If set to true, a mapping pipeline will send output snapshots + to the reconciliation pipeline in its dataset. A reconciliation + pipeline must exist in this dataset before a mapping pipeline + with a reconciliation destination can be created. + merge_config.description: |- + - + (Optional) + Describes the mapping configuration. + merge_config.whistle_config_source: |- + - + (Required) + Specifies the path to the mapping configuration for harmonization pipeline. + Structure is documented below. + name: |- + - + (Required) + Specifies the name of the pipeline job. This field is user-assigned. + reconciliation_pipeline_job: |- + - + (Optional) + Specifies reconciliation configuration. + Structure is documented below. 
+ reconciliation_pipeline_job.fhir_store_destination: |- + - + (Optional) + The harmonized FHIR store to write harmonized FHIR resources to, + in the format of: project/{projectID}/locations/{locationID}/datasets/{datasetName}/fhirStores/{id} + reconciliation_pipeline_job.matching_uri_prefix: |- + - + (Required) + Specifies the top level directory of the matching configs used + in all mapping pipelines, which extract properties for resources + to be matched on. + Example: gs://{bucket-id}/{path/to/matching/configs} + reconciliation_pipeline_job.merge_config: |- + - + (Required) + Specifies the location of the reconciliation configuration. + Structure is documented below. + self_link: |- + - + The fully qualified name of this dataset + terraform_labels: |- + - + The combination of labels configured directly on the resource + and default labels configured on the provider. + update: '- Default is 20 minutes.' + whistle_config_source.import_uri_prefix: |- + - + (Required) + Directory path where all the Whistle files are located. + Example: gs://{bucket-id}/{path/to/import-root/dir} + whistle_config_source.uri: |- + - + (Required) + Main configuration file which has the entrypoint or the root function. + Example: gs://{bucket-id}/{path/to/import-root/dir}/entrypoint-file-name.wstl. + importStatements: [] + google_healthcare_workspace: + subCategory: Cloud Healthcare + description: A Data Mapper workspace is used to configure Data Mapper access, permissions and data sources for mapping clinical patient data to the FHIR standard. 
+ name: google_healthcare_workspace + title: "" + examples: + - name: default + manifest: |- + { + "dataset": "${google_healthcare_dataset.dataset.id}", + "labels": { + "label1": "labelvalue1" + }, + "name": "example-dm-workspace", + "settings": [ + { + "data_project_ids": [ + "example-data-source-project-id" + ] + } + ] + } + references: + dataset: google_healthcare_dataset.dataset.id + dependencies: + google_healthcare_dataset.dataset: |- + { + "location": "us-central1", + "name": "example-dataset" + } + argumentDocs: + create: '- Default is 20 minutes.' + dataset: |- + - + (Required) + Identifies the dataset addressed by this request. Must be in the format + 'projects/{project}/locations/{location}/datasets/{dataset}' + delete: '- Default is 20 minutes.' + effective_labels: |- + - + All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services. + id: '- an identifier for the resource with format {{dataset}}/dataMapperWorkspaces/{{name}}' + name: |- + - + (Required) + The name of the workspace, in the format 'projects/{projectId}/locations/{location}/datasets/{datasetId}/dataMapperWorkspaces/{workspaceId}' + settings: |- + - + (Required) + Settings associated with this workspace. + Structure is documented below. + settings.data_project_ids: |- + - + (Required) + Project IDs for data projects hosted in a workspace. + settings.effective_labels: for all of the labels present on the resource. + settings.labels: |- + - + (Optional) + The user labels. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" } + terraform_labels: |- + - + The combination of labels configured directly on the resource + and default labels configured on the provider. + update: '- Default is 20 minutes.' 
+ importStatements: [] google_iam_access_boundary_policy: subCategory: Cloud IAM description: Represents a collection of access boundary policies to apply to a given resource. @@ -98465,6 +109380,7 @@ resources: google_project.project: |- { "billing_account": "000000-0000000-0000000-000000", + "deletion_policy": "DELETE", "name": "my-project", "org_id": "123456789", "project_id": "my-project" @@ -98596,6 +109512,7 @@ resources: google_project.project: |- { "billing_account": "000000-0000000-0000000-000000", + "deletion_policy": "DELETE", "name": "my-project", "org_id": "123456789", "project_id": "my-project" @@ -98685,6 +109602,540 @@ resources: The description of the rule. update: '- Default is 20 minutes.' importStatements: [] + google_iam_folders_policy_binding: + subCategory: Cloud IAM + description: A policy binding to a folder + name: google_iam_folders_policy_binding + title: "" + examples: + - name: my-folder-binding + manifest: |- + { + "depends_on": [ + "${time_sleep.wait_120s}" + ], + "display_name": "test folder binding", + "folder": "${google_folder.folder.folder_id}", + "location": "global", + "policy": "organizations/123456789/locations/global/principalAccessBoundaryPolicies/${google_iam_principal_access_boundary_policy.pab_policy.principal_access_boundary_policy_id}", + "policy_binding_id": "test-folder-binding", + "policy_kind": "PRINCIPAL_ACCESS_BOUNDARY", + "target": [ + { + "principal_set": "//cloudresourcemanager.googleapis.com/folders/${google_folder.folder.folder_id}" + } + ] + } + references: + folder: google_folder.folder.folder_id + dependencies: + google_folder.folder: |- + { + "deletion_protection": false, + "display_name": "test folder", + "parent": "organizations/123456789" + } + google_iam_principal_access_boundary_policy.pab_policy: |- + { + "display_name": "test folder binding", + "location": "global", + "organization": "123456789", + "principal_access_boundary_policy_id": "my-pab-policy" + } + time_sleep.wait_120s: |- + { + 
"create_duration": "120s", + "depends_on": [ + "${google_folder.folder}" + ] + } + argumentDocs: + condition.description: |- + - + (Optional) + Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. + condition.expression: |- + - + (Optional) + Textual representation of an expression in Common Expression Language syntax. + condition.location: |- + - + (Optional) + Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file. + condition.title: |- + - + (Optional) + Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression. + create: '- Default is 20 minutes.' + create_time: |- + - + Output only. The time when the policy binding was created. + delete: '- Default is 20 minutes.' + effective_annotations: |- + - + All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services. + etag: |- + - + Optional. The etag for the policy binding. If this is provided on update, it must match the server's etag. + folder: |- + - + (Required) + The parent folder for the PolicyBinding. + id: '- an identifier for the resource with format folders/{{folder}}/locations/{{location}}/policyBindings/{{policy_binding_id}}' + location: |- + - + (Required) + The location of the PolicyBinding. + name: |- + - + The name of the policy binding in the format {binding_parent/locations/{location}/policyBindings/{policy_binding_id} + policy: |- + - + (Required) + Required. Immutable. The resource name of the policy to be bound. The binding parent and policy must belong to the same Organization (or Project). + policy_binding_id: |- + - + (Required) + The Policy Binding ID. + policy_uid: |- + - + Output only. The globally unique ID of the policy to be bound. 
+ target: |- + - + (Required) + Target is the full resource name of the resource to which the policy will be bound. Immutable once set. + Structure is documented below. + target.annotations: |- + - + (Optional) + Optional. User defined annotations. See https://google.aip.dev/148#annotations for more details such as format and size limitations + target.condition: |- + - + (Optional) + Represents a textual expression in the Common Expression Language + (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of + CEL are documented at https://github.com/google/cel-spec. + Example (Comparison): + title: "Summary size limit" + description: "Determines if a summary is less than 100 chars" + expression: "document.summary.size() < 100" + Example + (Equality): + title: "Requestor is owner" + description: "Determines if requestor is the document owner" + expression: "document.owner == request.auth.claims.email" Example + (Logic): + title: "Public documents" + description: "Determine whether the document should be publicly visible" + expression: "document.type != 'private' && document.type != 'internal'" + Example (Data Manipulation): + title: "Notification string" + description: "Create a notification string with a timestamp." + expression: "'New message received at ' + string(document.create_time)" + The exact variables and functions that may be referenced within an expression are + determined by the service that evaluates it. See the service documentation for + additional information. + Structure is documented below. + target.display_name: |- + - + (Optional) + Optional. The description of the policy binding. Must be less than or equal to 63 characters. + target.effective_annotations: for all of the annotations present on the resource. + target.policy_kind: |- + - + (Optional) + Immutable. The kind of the policy to attach in this binding. 
This + field must be one of the following: - Left empty (will be automatically set + to the policy kind) - The input policy kind Possible values: POLICY_KIND_UNSPECIFIED PRINCIPAL_ACCESS_BOUNDARY ACCESS + target.principal_set: |- + - + (Optional) + Required. Immutable. The resource name of the policy to be bound. + The binding parent and policy must belong to the same Organization (or Project). + uid: |- + - + Output only. The globally unique ID of the policy binding. Assigned when the policy binding is created. + update: '- Default is 20 minutes.' + update_time: |- + - + Output only. The time when the policy binding was most recently updated. + importStatements: [] + google_iam_organizations_policy_binding: + subCategory: Cloud IAM + description: A policy binding to an organizations + name: google_iam_organizations_policy_binding + title: "" + examples: + - name: my-org-binding + manifest: |- + { + "display_name": "test org binding", + "location": "global", + "organization": "123456789", + "policy": "organizations/123456789/locations/global/principalAccessBoundaryPolicies/${google_iam_principal_access_boundary_policy.pab_policy.principal_access_boundary_policy_id}", + "policy_binding_id": "test-org-binding", + "policy_kind": "PRINCIPAL_ACCESS_BOUNDARY", + "target": [ + { + "principal_set": "//cloudresourcemanager.googleapis.com/organizations/123456789" + } + ] + } + dependencies: + google_iam_principal_access_boundary_policy.pab_policy: |- + { + "display_name": "test org binding", + "location": "global", + "organization": "123456789", + "principal_access_boundary_policy_id": "my-pab-policy" + } + argumentDocs: + condition.description: |- + - + (Optional) + Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. + condition.expression: |- + - + (Optional) + Textual representation of an expression in Common Expression Language syntax. + condition.location: |- + - + (Optional) + Optional. 
String indicating the location of the expression for error reporting, e.g. a file name and a position in the file. + condition.title: |- + - + (Optional) + Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression. + create: '- Default is 20 minutes.' + create_time: |- + - + Output only. The time when the policy binding was created. + delete: '- Default is 20 minutes.' + effective_annotations: |- + - + All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services. + etag: |- + - + Optional. The etag for the policy binding. If this is provided on update, it must match the server's etag. + id: '- an identifier for the resource with format organizations/{{organization}}/locations/{{location}}/policyBindings/{{policy_binding_id}}' + location: |- + - + (Required) + The location of the Policy Binding + name: |- + - + The name of the policy binding in the format {binding_parent/locations/{location}/policyBindings/{policy_binding_id} + organization: |- + - + (Required) + The parent organization of the Policy Binding. + policy: |- + - + (Required) + Required. Immutable. The resource name of the policy to be bound. The binding parent and policy must belong to the same Organization (or Project). + policy_binding_id: |- + - + (Required) + The Policy Binding ID. + policy_uid: |- + - + Output only. The globally unique ID of the policy to be bound. + target: |- + - + (Required) + Target is the full resource name of the resource to which the policy will be bound. Immutable once set. + Structure is documented below. + target.annotations: |- + - + (Optional) + Optional. User defined annotations. 
See https://google.aip.dev/148#annotations for more details such as format and size limitations + target.condition: |- + - + (Optional) + Represents a textual expression in the Common Expression Language + (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of + CEL are documented at https://github.com/google/cel-spec. + Example (Comparison): + title: "Summary size limit" + description: "Determines if a summary is less than 100 chars" + expression: "document.summary.size() < 100" + Example + (Equality): + title: "Requestor is owner" + description: "Determines if requestor is the document owner" + expression: "document.owner == request.auth.claims.email" Example + (Logic): + title: "Public documents" + description: "Determine whether the document should be publicly visible" + expression: "document.type != 'private' && document.type != 'internal'" + Example (Data Manipulation): + title: "Notification string" + description: "Create a notification string with a timestamp." + expression: "'New message received at ' + string(document.create_time)" + The exact variables and functions that may be referenced within an expression are + determined by the service that evaluates it. See the service documentation for + additional information. + Structure is documented below. + target.display_name: |- + - + (Optional) + Optional. The description of the policy binding. Must be less than or equal to 63 characters. + target.effective_annotations: for all of the annotations present on the resource. + target.policy_kind: |- + - + (Optional) + Immutable. The kind of the policy to attach in this binding. This + field must be one of the following: - Left empty (will be automatically set + to the policy kind) - The input policy kind Possible values: POLICY_KIND_UNSPECIFIED PRINCIPAL_ACCESS_BOUNDARY ACCESS + target.principal_set: |- + - + (Optional) + Required. Immutable. The resource name of the policy to be bound. 
+ The binding parent and policy must belong to the same Organization (or Project). + uid: |- + - + Output only. The globally unique ID of the policy binding. Assigned when the policy binding is created. + update: '- Default is 20 minutes.' + update_time: |- + - + Output only. The time when the policy binding was most recently updated. + importStatements: [] + google_iam_principal_access_boundary_policy: + subCategory: Cloud IAM + description: An IAM Principal Access Boundary Policy resource + name: google_iam_principal_access_boundary_policy + title: "" + examples: + - name: my-pab-policy + manifest: |- + { + "display_name": "test pab policy", + "location": "global", + "organization": "123456789", + "principal_access_boundary_policy_id": "test-pab-policy" + } + argumentDocs: + //cloudresourcemanager.googleapis.com/folders/123: . + //cloudresourcemanager.googleapis.com/organizations/123: . + //cloudresourcemanager.googleapis.com/projects/123: or //cloudresourcemanager.googleapis.com/projects/my-project-id. + annotations: |- + - + (Optional) + User defined annotations. See https://google.aip.dev/148#annotations + for more details such as format and size limitations + create: '- Default is 20 minutes.' + create_time: |- + - + Output only. The time when the principal access boundary policy was created. + delete: '- Default is 20 minutes.' + details: |- + - + (Optional) + Principal access boundary policy details + Structure is documented below. + details.enforcement_version: |- + - + (Optional) + The version number that indicates which Google Cloud services + are included in the enforcement (e.g. "latest", "1", ...). If empty, the + PAB policy version will be set to the current latest version, and this version + won't get updated when new versions are released. + details.rules: |- + - + (Required) + A list of principal access boundary policy rules. The number of rules in a policy is limited to 500. + Structure is documented below. 
+ display_name: |- + - + (Optional) + The description of the principal access boundary policy. Must be less than or equal to 63 characters. + effective_annotations: for all of the annotations present on the resource. + etag: |- + - + The etag for the principal access boundary. If this is provided on update, it must match the server's etag. + id: '- an identifier for the resource with format organizations/{{organization}}/locations/{{location}}/principalAccessBoundaryPolicies/{{principal_access_boundary_policy_id}}' + location: |- + - + (Required) + The location the principal access boundary policy is in. + name: |- + - + Identifier. The resource name of the principal access boundary policy. The following format is supported: + organizations/{organization_id}/locations/{location}/principalAccessBoundaryPolicies/{policy_id} + organization: |- + - + (Required) + The parent organization of the principal access boundary policy. + principal_access_boundary_policy_id: |- + - + (Required) + The ID to use to create the principal access boundary policy. + This value must start with a lowercase letter followed by up to 62 lowercase letters, numbers, hyphens, or dots. Pattern, /a-z{2,62}/. + rules.description: |- + - + (Optional) + The description of the principal access boundary policy rule. Must be less than or equal to 256 characters. + rules.effect: |- + - + (Required) + The access relationship of principals to the resources in this rule. + Possible values: ALLOW + rules.resources: |- + - + (Required) + A list of Cloud Resource Manager resources. The resource + and all the descendants are included. The number of resources in a policy + is limited to 500 across all rules. + The following resource types are supported: + uid: |- + - + Output only. The globally unique ID of the principal access boundary policy. + update: '- Default is 20 minutes.' + update_time: |- + - + Output only. The time when the principal access boundary policy was most recently updated. 
+ importStatements: [] + google_iam_projects_policy_binding: + subCategory: Cloud IAM + description: A policy binding to a Project + name: google_iam_projects_policy_binding + title: "" + examples: + - name: my-project-binding + manifest: |- + { + "display_name": "test project binding", + "location": "global", + "policy": "organizations/123456789/locations/global/principalAccessBoundaryPolicies/${google_iam_principal_access_boundary_policy.pab_policy.principal_access_boundary_policy_id}", + "policy_binding_id": "test-project-binding", + "policy_kind": "PRINCIPAL_ACCESS_BOUNDARY", + "project": "${data.google_project.project.project_id}", + "target": [ + { + "principal_set": "//cloudresourcemanager.googleapis.com/projects/${data.google_project.project.project_id}" + } + ] + } + references: + project: data.google_project.project.project_id + dependencies: + google_iam_principal_access_boundary_policy.pab_policy: |- + { + "display_name": "test project binding", + "location": "global", + "organization": "123456789", + "principal_access_boundary_policy_id": "my-pab-policy" + } + argumentDocs: + condition.description: |- + - + (Optional) + Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. + condition.expression: |- + - + (Optional) + Textual representation of an expression in Common Expression Language syntax. + condition.location: |- + - + (Optional) + Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file. + condition.title: |- + - + (Optional) + Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression. + create: '- Default is 20 minutes.' + create_time: |- + - + Output only. The time when the policy binding was created. + delete: '- Default is 20 minutes.' 
+ effective_annotations: |- + - + All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services. + etag: |- + - + Optional. The etag for the policy binding. If this is provided on update, it must match the server's etag. + id: '- an identifier for the resource with format projects/{{project}}/locations/{{location}}/policyBindings/{{policy_binding_id}}' + location: |- + - + (Required) + The location of the Policy Binding + name: |- + - + The name of the policy binding in the format {binding_parent/locations/{location}/policyBindings/{policy_binding_id} + policy: |- + - + (Required) + Required. Immutable. The resource name of the policy to be bound. The binding parent and policy must belong to the same Organization (or Project). + policy_binding_id: |- + - + (Required) + The Policy Binding ID. + policy_uid: |- + - + Output only. The globally unique ID of the policy to be bound. + target: |- + - + (Required) + Target is the full resource name of the resource to which the policy will be bound. Immutable once set. + Structure is documented below. + target.annotations: |- + - + (Optional) + Optional. User defined annotations. See https://google.aip.dev/148#annotations for more details such as format and size limitations + target.condition: |- + - + (Optional) + Represents a textual expression in the Common Expression Language + (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of + CEL are documented at https://github.com/google/cel-spec. 
+ Example (Comparison): + title: "Summary size limit" + description: "Determines if a summary is less than 100 chars" + expression: "document.summary.size() < 100" + Example + (Equality): + title: "Requestor is owner" + description: "Determines if requestor is the document owner" + expression: "document.owner == request.auth.claims.email" Example + (Logic): + title: "Public documents" + description: "Determine whether the document should be publicly visible" + expression: "document.type != 'private' && document.type != 'internal'" + Example (Data Manipulation): + title: "Notification string" + description: "Create a notification string with a timestamp." + expression: "'New message received at ' + string(document.create_time)" + The exact variables and functions that may be referenced within an expression are + determined by the service that evaluates it. See the service documentation for + additional information. + Structure is documented below. + target.display_name: |- + - + (Optional) + Optional. The description of the policy binding. Must be less than or equal to 63 characters. + target.effective_annotations: for all of the annotations present on the resource. + target.policy_kind: |- + - + (Optional) + Immutable. The kind of the policy to attach in this binding. This + field must be one of the following: - Left empty (will be automatically set + to the policy kind) - The input policy kind Possible values: POLICY_KIND_UNSPECIFIED PRINCIPAL_ACCESS_BOUNDARY ACCESS + target.principal_set: |- + - + (Optional) + Required. Immutable. The resource name of the policy to be bound. + The binding parent and policy must belong to the same Organization (or Project). + target.project: |- + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + uid: |- + - + Output only. The globally unique ID of the policy binding. Assigned when the policy binding is created. + update: '- Default is 20 minutes.' 
+ update_time: |- + - + Output only. The time when the policy binding was most recently updated. + importStatements: [] google_iam_workforce_pool: subCategory: Cloud IAM description: Represents a collection of external workforces. @@ -99386,6 +110837,34 @@ resources: { "workload_identity_pool_id": "example-pool" } + - name: example + manifest: |- + { + "attribute_condition": " assertion.repository_owner_id == \"123456789\" \u0026\u0026\n attribute.repository == \"gh-org/gh-repo\" \u0026\u0026\n assertion.ref == \"refs/heads/main\" \u0026\u0026\n assertion.ref_type == \"branch\"\n", + "attribute_mapping": { + "attribute.actor": "assertion.actor", + "attribute.aud": "assertion.aud", + "attribute.repository": "assertion.repository", + "google.subject": "assertion.sub" + }, + "description": "GitHub Actions identity pool provider for automated test", + "disabled": true, + "display_name": "Name of provider", + "oidc": [ + { + "issuer_uri": "https://token.actions.githubusercontent.com" + } + ], + "workload_identity_pool_id": "${google_iam_workload_identity_pool.pool.workload_identity_pool_id}", + "workload_identity_pool_provider_id": "example-prvdr" + } + references: + workload_identity_pool_id: google_iam_workload_identity_pool.pool.workload_identity_pool_id + dependencies: + google_iam_workload_identity_pool.pool: |- + { + "workload_identity_pool_id": "example-pool" + } - name: example manifest: |- { @@ -99519,6 +110998,72 @@ resources: { "workload_identity_pool_id": "example-pool" } + - name: example + manifest: |- + { + "attribute_mapping": { + "google.subject": "assertion.subject.dn.cn" + }, + "workload_identity_pool_id": "${google_iam_workload_identity_pool.pool.workload_identity_pool_id}", + "workload_identity_pool_provider_id": "example-prvdr", + "x509": [ + { + "trust_store": [ + { + "trust_anchors": [ + { + "pem_certificate": "${file(\"test-fixtures/trust_anchor.pem\")}" + } + ] + } + ] + } + ] + } + references: + workload_identity_pool_id: 
google_iam_workload_identity_pool.pool.workload_identity_pool_id + dependencies: + google_iam_workload_identity_pool.pool: |- + { + "workload_identity_pool_id": "example-pool" + } + - name: example + manifest: |- + { + "attribute_mapping": { + "google.subject": "assertion.subject.dn.cn" + }, + "description": "X.509 identity pool provider for automated test", + "disabled": true, + "display_name": "Name of provider", + "workload_identity_pool_id": "${google_iam_workload_identity_pool.pool.workload_identity_pool_id}", + "workload_identity_pool_provider_id": "example-prvdr", + "x509": [ + { + "trust_store": [ + { + "intermediate_cas": [ + { + "pem_certificate": "${file(\"test-fixtures/intermediate_ca.pem\")}" + } + ], + "trust_anchors": [ + { + "pem_certificate": "${file(\"test-fixtures/trust_anchor.pem\")}" + } + ] + } + ] + } + ] + } + references: + workload_identity_pool_id: google_iam_workload_identity_pool.pool.workload_identity_pool_id + dependencies: + google_iam_workload_identity_pool.pool: |- + { + "workload_identity_pool_id": "example-pool" + } argumentDocs: assertion: ': JSON representing the authentication credential issued by the provider.' attribute: |- @@ -99593,6 +111138,11 @@ resources: in IAM bindings. This is also the subject that appears in Cloud Logging logs. Cannot exceed 127 characters. id: '- an identifier for the resource with format projects/{{project}}/locations/global/workloadIdentityPools/{{workload_identity_pool_id}}/providers/{{workload_identity_pool_provider_id}}' + intermediate_cas.pem_certificate: |- + - + (Optional) + PEM certificate of the PKI used for validation. Must only contain one + ca certificate(either root or intermediate cert). name: |- - The resource name of the provider as @@ -99640,6 +111190,11 @@ resources: state: |- - The state of the provider. + trust_anchors.pem_certificate: |- + - + (Optional) + PEM certificate of the PKI used for validation. Must only contain one + ca certificate(either root or intermediate cert). 
update: '- Default is 20 minutes.' workload_identity_pool_id: |- - @@ -99653,6 +111208,35 @@ resources: The ID for the provider, which becomes the final component of the resource name. This value must be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix gcp- is reserved for use by Google, and may not be specified. + x509: |- + - + (Optional) + An X.509-type identity provider represents a CA. It is trusted to assert a + client identity if the client has a certificate that chains up to this CA. + Structure is documented below. + x509.trust_store: |- + - + (Required) + A Trust store, use this trust store as a wrapper to config the trust + anchor and optional intermediate cas to help build the trust chain for + the incoming end entity certificate. Follow the x509 guidelines to + define those PEM encoded certs. Only 1 trust store is currently + supported. + Structure is documented below. + x509.trust_store.intermediate_cas: |- + - + (Optional) + Set of intermediate CA certificates used for building the trust chain to + trust anchor. + IMPORTANT: Intermediate CAs are only supported when configuring x509 federation. + Structure is documented below. + x509.trust_store.trust_anchors: |- + - + (Required) + List of Trust Anchors to be used while performing validation + against a given TrustStore. The incoming end entity's certificate + must be chained up to one of the trust anchors here. + Structure is documented below. importStatements: [] google_iap_app_engine_service_iam_policy: subCategory: Identity-Aware Proxy @@ -99781,6 +111365,7 @@ resources: dependencies: google_project.project: |- { + "deletion_policy": "DELETE", "name": "my-project", "org_id": "123456789", "project_id": "my-project" @@ -99842,6 +111427,7 @@ resources: } google_project.project: |- { + "deletion_policy": "DELETE", "name": "my-project", "org_id": "123456789", "project_id": "my-project" @@ -99873,6 +111459,329 @@ resources: Output only. Client secret of the OAuth client. 
Note: This property is sensitive and will not be displayed in the plan. importStatements: [] + google_iap_settings: + subCategory: Identity-Aware Proxy + description: IAP settings - manage IAP settings + name: google_iap_settings + title: "" + examples: + - name: iap_settings + manifest: |- + { + "access_settings": [ + { + "allowed_domains_settings": [ + { + "domains": [ + "test.abc.com" + ], + "enable": true + } + ], + "cors_settings": [ + { + "allow_http_options": true + } + ], + "gcip_settings": [ + { + "login_page_uri": "https://test.com/?apiKey=abc" + } + ], + "identity_sources": [ + "WORKFORCE_IDENTITY_FEDERATION" + ], + "oauth_settings": [ + { + "login_hint": "test" + } + ], + "reauth_settings": [ + { + "max_age": "305s", + "method": "SECURE_KEY", + "policy_type": "MINIMUM" + } + ], + "workforce_identity_settings": [ + { + "oauth2": [ + { + "client_id": "test-client-id", + "client_secret": "test-client-secret" + } + ], + "workforce_pools": [ + "wif-pool" + ] + } + ] + } + ], + "application_settings": [ + { + "access_denied_page_settings": [ + { + "access_denied_page_uri": "test-uri", + "generate_troubleshooting_uri": true, + "remediation_token_generation_enabled": false + } + ], + "attribute_propagation_settings": [ + { + "enable": false, + "expression": "attributes.saml_attributes.filter(attribute, attribute.name in [\"test1\", \"test2\"])", + "output_credentials": [ + "HEADER" + ] + } + ], + "cookie_domain": "test.abc.com", + "csm_settings": [ + { + "rctoken_aud": "test-aud-set" + } + ] + } + ], + "name": "projects/${data.google_project.project.number}/iap_web/compute-us-central1/services/${google_compute_region_backend_service.default.name}" + } + dependencies: + google_compute_health_check.default: |- + { + "check_interval_sec": 1, + "name": "iap-bs-health-check", + "tcp_health_check": [ + { + "port": "80" + } + ], + "timeout_sec": 1 + } + google_compute_region_backend_service.default: |- + { + "connection_draining_timeout_sec": 10, + "health_checks": [ 
+ "${google_compute_health_check.default.id}" + ], + "name": "iap-settings-tf", + "region": "us-central1", + "session_affinity": "CLIENT_IP" + } + argumentDocs: + DEFAULT: |- + : This policy acts as a default if no other reauth policy is set. + Possible values are: MINIMUM, DEFAULT. + ENROLLED_SECOND_FACTORS: |- + : User can use any enabled 2nd factor. + Possible values are: LOGIN, SECURE_KEY, ENROLLED_SECOND_FACTORS. + HEADER: ': Propagate attributes in the headers with "x-goog-iap-attr-" prefix.' + JWT: |- + : Propagate attributes in the JWT of the form: + "additional_claims": { "my_attribute": ["value1", "value2"] } + LOGIN: ': Prompts the user to log in again.' + MINIMUM: |- + : This policy acts as a minimum to other policies, lower in the hierarchy. + Effective policy may only be the same or stricter. + RCTOKEN: |- + : Propagate attributes in the RCToken of the form: " + additional_claims": { "my_attribute": ["value1", "value2"] } + Each value may be one of: HEADER, JWT, RCTOKEN. + SECURE_KEY: ': User must use their secure key 2nd factor device.' + WORKFORCE_IDENTITY_FEDERATION: |- + : Use external identities set up on Google Cloud Workforce + Identity Federation. + Each value may be one of: WORKFORCE_IDENTITY_FEDERATION. + access_denied_page_settings.access_denied_page_uri: |- + - + (Optional) + The URI to be redirected to when access is denied. + access_denied_page_settings.generate_troubleshooting_uri: |- + - + (Optional) + Whether to generate a troubleshooting URL on access denied events to this application. + access_denied_page_settings.remediation_token_generation_enabled: |- + - + (Optional) + Whether to generate remediation token on access denied events to this application. + access_settings: |- + - + (Optional) + Top level wrapper for all access related setting in IAP. + Structure is documented below. + access_settings.allowed_domains_settings: |- + - + (Optional) + Settings to configure and enable allowed domains. + Structure is documented below. 
+ access_settings.cors_settings: |- + - + (Optional) + Configuration to allow cross-origin requests via IAP. + Structure is documented below. + access_settings.gcip_settings: |- + - + (Optional) + GCIP claims and endpoint configurations for 3p identity providers. + access_settings.identity_sources: |- + - + (Optional) + Identity sources that IAP can use to authenticate the end user. Only one identity source + can be configured. The possible values are: + access_settings.oauth_settings: |- + - + (Optional) + Settings to configure IAP's OAuth behavior. + Structure is documented below. + access_settings.reauth_settings: |- + - + (Optional) + Settings to configure reauthentication policies in IAP. + Structure is documented below. + access_settings.workforce_identity_settings: |- + - + (Optional) + Settings to configure the workforce identity federation, including workforce pools + and OAuth 2.0 settings. + Structure is documented below. + allowed_domains_settings.domains: |- + - + (Optional) + List of trusted domains. + allowed_domains_settings.enable: |- + - + (Optional) + Configuration for customers to opt in for the feature. + application_settings: |- + - + (Optional) + Top level wrapper for all application related settings in IAP. + Structure is documented below. + application_settings.access_denied_page_settings: |- + - + (Optional) + Customization for Access Denied page. IAP allows customers to define a custom URI + to use as the error page when access is denied to users. If IAP prevents access + to this page, the default IAP error page will be displayed instead. + Structure is documented below. + application_settings.attribute_propagation_settings: |- + - + (Optional) + Settings to configure attribute propagation. + Structure is documented below. + application_settings.cookie_domain: |- + - + (Optional) + The Domain value to set for cookies generated by IAP. This value is not validated by the API, + but will be ignored at runtime if invalid. 
+ application_settings.csm_settings: |- + - + (Optional) + Settings to configure IAP's behavior for a service mesh. + Structure is documented below. + attribute_propagation_settings.enable: |- + - + (Optional) + Whether the provided attribute propagation settings should be evaluated on user requests. + If set to true, attributes returned from the expression will be propagated in the set output credentials. + attribute_propagation_settings.expression: |- + - + (Optional) + Raw string CEL expression. Must return a list of attributes. A maximum of 45 attributes can + be selected. Expressions can select different attribute types from attributes: + attributes.saml_attributes, attributes.iap_attributes. + attribute_propagation_settings.output_credentials: |- + - + (Optional) + Which output credentials attributes selected by the CEL expression should be propagated in. + All attributes will be fully duplicated in each selected output credential. + Possible values are: + cors_settings.allow_http_options: |- + - + (Optional) + Configuration to allow HTTP OPTIONS calls to skip authorization. + If undefined, IAP will not apply any special logic to OPTIONS requests. + create: '- Default is 20 minutes.' + csm_settings.rctoken_aud: |- + - + (Optional) + Audience claim set in the generated RCToken. This value is not validated by IAP. + delete: '- Default is 20 minutes.' + gcip_settings.login_page_uri: |- + - + (Optional) + Login page URI associated with the GCIP tenants. Typically, all resources within + the same project share the same login page, though it could be overridden at the + sub resource level. + gcip_settings.tenant_ids: |- + - + (Optional) + GCIP tenant ids that are linked to the IAP resource. tenantIds could be a string + beginning with a number character to indicate authenticating with GCIP tenant flow, + or in the format of _ to indicate authenticating with GCIP agent flow. 
If agent flow + is used, tenantIds should only contain one single element, while for tenant flow, + tenantIds can contain multiple elements. + id: '- an identifier for the resource with format {{name}}/iapSettings' + name: |- + - + (Required) + The resource name of the IAP protected resource. Name can have below resources: + oauth_settings.login_hint: |- + - + (Optional) + Domain hint to send as hd=? parameter in OAuth request flow. + Enables redirect to primary IDP by skipping Google's login screen. + (https://developers.google.com/identity/protocols/OpenIDConnect#hd-param) + Note: IAP does not verify that the id token's hd claim matches this value + since access behavior is managed by IAM policies. + oauth_settings.programmatic_clients: |- + - + (Optional) + List of client ids allowed to use IAP programmatically. + oauth2.client_id: |- + - + (Optional) + The OAuth 2.0 client ID registered in the workforce identity + federation OAuth 2.0 Server. + oauth2.client_secret: |- + - + (Optional) + Input only. The OAuth 2.0 client secret created while registering + the client ID. + Note: This property is sensitive and will not be displayed in the plan. + oauth2.client_secret_sha256: |- + - + (Output) + Output only. SHA256 hash value for the client secret. This field + is returned by IAP when the settings are retrieved. + reauth_settings.max_age: |- + - + (Required) + Reauth session lifetime, how long before a user has to reauthenticate again. + A duration in seconds with up to nine fractional digits, ending with 's'. + Example: "3.5s". + reauth_settings.method: |- + - + (Required) + Reauth method requested. The possible values are: + reauth_settings.policy_type: |- + - + (Required) + How IAP determines the effective policy in cases of hierarchical policies. + Policies are merged from higher in the hierarchy to lower in the hierarchy. + The possible values are: + update: '- Default is 20 minutes.' 
+ workforce_identity_settings.oauth2: |- + - + (Optional) + OAuth 2.0 settings for IAP to perform OIDC flow with workforce identity + federation services. + Structure is documented below. + workforce_identity_settings.workforce_pools: |- + - + (Optional) + The workforce pool resources. Only one workforce pool is accepted. + importStatements: [] google_iap_tunnel_dest_group: subCategory: Identity-Aware Proxy description: Tunnel destination groups represent resources that have the same tunnel access restrictions. @@ -100388,7 +112297,7 @@ resources: { "quota": 1000, "quota_duration": "7200s", - "start_time": "" + "start_time": "2014-10-02T15:01:23Z" } ] } @@ -100436,6 +112345,7 @@ resources: google_project.default: |- { "billing_account": "000000-0000000-0000000-000000", + "deletion_policy": "DELETE", "labels": { "firebase": "enabled" }, @@ -100638,12 +112548,12 @@ resources: quota.sign_up_quota_config: |- - (Optional) - Quota for the Signup endpoint, if overwritten. Signup quota is measured in sign ups per project per hour per IP. + Quota for the Signup endpoint, if overwritten. Signup quota is measured in sign ups per project per hour per IP. None of quota, startTime, or quotaDuration can be skipped. Structure is documented below. quota.sign_up_quota_config.quota: |- - (Optional) - A sign up APIs quota that customers can override temporarily. + A sign up APIs quota that customers can override temporarily. Value can be in between 1 and 1000. quota.sign_up_quota_config.quota_duration: |- - (Optional) @@ -100904,123 +112814,6 @@ resources: If it is not provided, the provider project is used. update: '- Default is 20 minutes.' importStatements: [] - google_identity_platform_project_default_config: - subCategory: Identity Platform - description: There is no persistent data associated with this resource. 
- name: google_identity_platform_project_default_config - title: "" - examples: - - name: default - manifest: |- - { - "sign_in": [ - { - "allow_duplicate_emails": true, - "anonymous": [ - { - "enabled": true - } - ], - "email": [ - { - "enabled": true, - "password_required": false - } - ], - "phone_number": [ - { - "enabled": true, - "test_phone_numbers": { - "+11231231234": "000000" - } - } - ] - } - ] - } - argumentDocs: - anonymous.enabled: |- - - - (Required) - Whether anonymous user auth is enabled for the project or not. - create: '- Default is 20 minutes.' - delete: '- Default is 20 minutes.' - email.enabled: |- - - - (Optional) - Whether email auth is enabled for the project or not. - email.password_required: |- - - - (Optional) - Whether a password is required for email auth or not. If true, both an email and - password must be provided to sign in. If false, a user may sign in via either - email/password or email link. - hash_config.algorithm: |- - - - (Output) - Different password hash algorithms used in Identity Toolkit. - hash_config.memory_cost: |- - - - (Output) - Memory cost for hash calculation. Used by scrypt and other similar password derivation algorithms. See https://tools.ietf.org/html/rfc7914 for explanation of field. - hash_config.rounds: |- - - - (Output) - How many rounds for hash calculation. Used by scrypt and other similar password derivation algorithms. - hash_config.salt_separator: |- - - - (Output) - Non-printable character to be inserted between the salt and plain text password in base64. - hash_config.signer_key: |- - - - (Output) - Signer key in base64. - id: '- an identifier for the resource with format {{project}}' - name: |- - - - The name of the Config resource. Example: "projects/my-awesome-project/config" - phone_number.enabled: |- - - - (Optional) - Whether phone number auth is enabled for the project or not. - phone_number.test_phone_numbers: |- - - - (Optional) - A map of that can be used for phone auth testing. 
- project: |- - - (Optional) The ID of the project in which the resource belongs. - If it is not provided, the provider project is used. - sign_in: |- - - - (Optional) - Configuration related to local sign in methods. - Structure is documented below. - sign_in.allow_duplicate_emails: |- - - - (Optional) - Whether to allow more than one account to have the same email. - sign_in.anonymous: |- - - - (Optional) - Configuration options related to authenticating an anonymous user. - Structure is documented below. - sign_in.email: |- - - - (Optional) - Configuration options related to authenticating a user by their email address. - Structure is documented below. - sign_in.hash_config: |- - - - (Output) - Output only. Hash config information. - Structure is documented below. - sign_in.phone_number: |- - - - (Optional) - Configuration options related to authenticated a user by their phone number. - Structure is documented below. - update: '- Default is 20 minutes.' - importStatements: [] google_identity_platform_tenant: subCategory: Identity Platform description: Tenant configuration in a multi-tenant project. @@ -101638,7 +113431,7 @@ resources: additional_variable.encryption_key_value: |- - (Optional) - Encription key value of configVariable. + Encryption key value of configVariable. Structure is documented below. additional_variable.integer_value: |- - @@ -101675,7 +113468,7 @@ resources: - (Required) authType of the Connection - Possible values are: USER_PASSWORD, OAUTH2_JWT_BEARER, OAUTH2_CLIENT_CREDENTIALS, SSH_PUBLIC_KEY, OAUTH2_AUTH_CODE_FLOW. + Possible values are: AUTH_TYPE_UNSPECIFIED, USER_PASSWORD, OAUTH2_JWT_BEARER, OAUTH2_CLIENT_CREDENTIALS, SSH_PUBLIC_KEY, OAUTH2_AUTH_CODE_FLOW. auth_config.oauth2_auth_code_flow: |- - (Optional) @@ -101735,7 +113528,7 @@ resources: config_variable.encryption_key_value: |- - (Optional) - Encription key value of configVariable. + Encryption key value of configVariable. Structure is documented below. 
config_variable.integer_value: |- - @@ -101780,7 +113573,7 @@ resources: description: |- - (Optional) - An arbitrary description for the Conection. + An arbitrary description for the Connection. destination.host: |- - (Optional) @@ -101817,7 +113610,7 @@ resources: encryption_key_value.type: |- - (Required) - Type of Encription Key + Type of Encryption Key Possible values are: GOOGLE_MANAGED, CUSTOMER_MANAGED. eventing_config: |- - @@ -102239,6 +114032,7 @@ resources: google_project.target_project: |- { "billing_account": "000000-0000000-0000000-000000", + "deletion_policy": "DELETE", "name": "tf-test", "org_id": "123456789", "project_id": "tf-test" @@ -102622,11 +114416,11 @@ resources: { "cloud_kms_config": [ { - "key": "${google_kms_crypto_key.cryptokey.id}", - "key_version": "${google_kms_crypto_key_version.test_key.id}", + "key": "${basename(google_kms_crypto_key.cryptokey.id)}", + "key_version": "${basename(google_kms_crypto_key_version.test_key.id)}", "kms_location": "us-east1", "kms_project_id": "${data.google_project.test_project.project_id}", - "kms_ring": "${google_kms_key_ring.keyring.id}" + "kms_ring": "${basename(google_kms_key_ring.keyring.id)}" } ], "create_sample_integrations": true, @@ -102634,10 +114428,7 @@ resources: "run_as_service_account": "${google_service_account.service_account.email}" } references: - cloud_kms_config.key: google_kms_crypto_key.cryptokey.id - cloud_kms_config.key_version: google_kms_crypto_key_version.test_key.id cloud_kms_config.kms_project_id: data.google_project.test_project.project_id - cloud_kms_config.kms_ring: google_kms_key_ring.keyring.id run_as_service_account: google_service_account.service_account.email dependencies: google_kms_crypto_key.cryptokey: |- @@ -102701,10 +114492,6 @@ resources: - (Optional) Indicates if sample integrations should be created along with provisioning. - create_sample_workflows: |- - - - (Optional, Deprecated) - Indicates if sample workflow should be created along with provisioning. 
delete: '- Default is 20 minutes.' id: '- an identifier for the resource with format projects/{{project}}/locations/{{location}}/clients' location: |- @@ -102714,10 +114501,6 @@ resources: project: |- - (Optional) The ID of the project in which the resource belongs. If it is not provided, the provider project is used. - provision_gmek: |- - - - (Optional, Deprecated) - Indicates provision with GMEK or CMEK. run_as_service_account: |- - (Optional) @@ -102734,16 +114517,17 @@ resources: "depends_on": [ "${time_sleep.wait_srv_acc_permissions}" ], - "folder": "${google_folder.autokms_folder.folder_id}", + "folder": "${google_folder.autokms_folder.id}", "key_project": "projects/${google_project.key_project.project_id}", "provider": "${google-beta}" } references: - folder: google_folder.autokms_folder.folder_id + folder: google_folder.autokms_folder.id provider: google-beta dependencies: google_folder.autokms_folder: |- { + "deletion_protection": false, "display_name": "my-folder", "parent": "organizations/123456789", "provider": "${google-beta}" @@ -102751,6 +114535,7 @@ resources: google_project.key_project: |- { "billing_account": "000000-0000000-0000000-000000", + "deletion_policy": "DELETE", "depends_on": [ "${google_folder.autokms_folder}" ], @@ -102789,6 +114574,13 @@ resources: "provider": "${google-beta}", "service": "cloudkms.googleapis.com" } + time_sleep.wait_autokey_propagation: |- + { + "create_duration": "30s", + "depends_on": [ + "${google_kms_autokey_config.example-autokeyconfig}" + ] + } time_sleep.wait_enable_service_api: |- { "create_duration": "30s", @@ -103336,7 +115128,7 @@ resources: "${time_sleep.wait_autokey_config}" ], "location": "global", - "name": "example-key-handle", + "name": "tf-test-key-handle", "project": "${google_project.resource_project.project_id}", "provider": "${google-beta}", "resource_type_selector": "storage.googleapis.com/Bucket" @@ -103347,7 +115139,8 @@ resources: dependencies: google_folder.autokms_folder: |- { - 
"display_name": "folder-example", + "deletion_protection": false, + "display_name": "my-folder", "parent": "organizations/123456789", "provider": "${google-beta}" } @@ -103363,6 +115156,7 @@ resources: google_project.key_project: |- { "billing_account": "000000-0000000-0000000-000000", + "deletion_policy": "DELETE", "depends_on": [ "${google_folder.autokms_folder}" ], @@ -103374,12 +115168,13 @@ resources: google_project.resource_project: |- { "billing_account": "000000-0000000-0000000-000000", + "deletion_policy": "DELETE", "depends_on": [ "${google_folder.autokms_folder}" ], "folder_id": "${google_folder.autokms_folder.folder_id}", - "name": "resources", - "project_id": "resources", + "name": "res-proj", + "project_id": "res-proj", "provider": "${google-beta}" } google_project_iam_member.autokey_project_admin: |- @@ -103937,6 +115732,7 @@ resources: dependencies: google_folder.my_folder: |- { + "deletion_protection": false, "display_name": "folder-name", "parent": "organizations/123456789" } @@ -104142,6 +115938,57 @@ resources: (Optional) The parent of the linked dataset. importStatements: [] + google_logging_log_scope: + subCategory: Cloud (Stackdriver) Logging + description: Describes a group of resources to read log entries from + name: google_logging_log_scope + title: "" + examples: + - name: logging_log_scope + manifest: |- + { + "description": "A log scope configured with Terraform", + "location": "global", + "name": "projects/my-project-name/locations/global/logScopes/my-log-scope", + "parent": "projects/my-project-name", + "resource_names": [ + "projects/my-project-name", + "projects/my-project-name/locations/global/buckets/_Default/views/view1", + "projects/my-project-name/locations/global/buckets/_Default/views/view2" + ] + } + argumentDocs: + create: '- Default is 20 minutes.' + create_time: |- + - + Output only. The creation timestamp of the log scopes. + delete: '- Default is 20 minutes.' 
+ description: |-
+ -
+ (Optional)
+ Describes this log scope.
+ id: '- an identifier for the resource with format {{parent}}/locations/{{location}}/logScopes/{{name}}'
+ location: |-
+ -
+ (Optional)
+ The location of the resource. The only supported location is global so far.
+ name: |-
+ -
+ (Required)
+ The resource name of the log scope. For example: `projects/my-project/locations/global/logScopes/my-log-scope`
+ parent: |-
+ -
+ (Optional)
+ The parent of the resource.
+ resource_names: |-
+ -
+ (Required)
+ Names of one or more parent resources : * `projects/[PROJECT_ID]` May alternatively be one or more views : * `projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` A log scope can include a maximum of 50 projects and a maximum of 100 resources in total.
+ update: '- Default is 20 minutes.'
+ update_time: |-
+ -
+ Output only. The last update timestamp of the log scopes.
+ importStatements: []
google_logging_log_view:
subCategory: Cloud (Stackdriver) Logging
description: Describes a view over log entries in a bucket.
@@ -105026,6 +116873,7 @@ resources: - name: looker-instance manifest: |- { + "deletion_policy": "DEFAULT", "name": "my-instance", "oauth_config": [ { @@ -105096,6 +116944,21 @@ resources: "public_ip_enabled": true, "region": "us-central1" } + - name: looker-instance + manifest: |- + { + "fips_enabled": true, + "name": "my-instance-fips", + "oauth_config": [ + { + "client_id": "my-client-id", + "client_secret": "my-client-secret" + } + ], + "platform_edition": "LOOKER_CORE_ENTERPRISE_ANNUAL", + "public_ip_enabled": true, + "region": "us-central1" + } - name: looker-instance manifest: |- { @@ -105214,16 +117077,48 @@ resources: "platform_edition": "LOOKER_CORE_STANDARD_ANNUAL", "region": "us-central1" } + - name: looker-instance + manifest: |- + { + "name": "my-instance", + "oauth_config": [ + { + "client_id": "my-client-id", + "client_secret": "my-client-secret" + } + ], + "platform_edition": "LOOKER_CORE_ENTERPRISE_ANNUAL", + "private_ip_enabled": false, + "psc_config": [ + { + "allowed_vpcs": [ + "projects/test-project/global/networks/test" + ] + } + ], + "psc_enabled": true, + "public_ip_enabled": false, + "region": "us-central1" + } + - name: looker-instance + manifest: |- + { + "deletion_policy": "FORCE", + "name": "my-instance", + "oauth_config": [ + { + "client_id": "my-client-id", + "client_secret": "my-client-secret" + } + ], + "platform_edition": "LOOKER_CORE_STANDARD_ANNUAL", + "region": "us-central1" + } argumentDocs: LOOKER_CORE_TRIAL: |- . - Possible values are: LOOKER_CORE_TRIAL, LOOKER_CORE_STANDARD, LOOKER_CORE_STANDARD_ANNUAL, LOOKER_CORE_ENTERPRISE_ANNUAL, LOOKER_CORE_EMBED_ANNUAL. + Possible values are: LOOKER_CORE_TRIAL, LOOKER_CORE_STANDARD, LOOKER_CORE_STANDARD_ANNUAL, LOOKER_CORE_ENTERPRISE_ANNUAL, LOOKER_CORE_EMBED_ANNUAL, LOOKER_CORE_NONPROD_STANDARD_ANNUAL, LOOKER_CORE_NONPROD_ENTERPRISE_ANNUAL, LOOKER_CORE_NONPROD_EMBED_ANNUAL. MONDAY: ', TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY.' 
- admin_settings: |- - - - (Optional) - Looker instance Admin settings. - Structure is documented below. admin_settings.allowed_email_domains: |- - (Optional) @@ -105232,22 +117127,11 @@ resources: Updating this list will restart the instance. Updating the allowed email domains from terraform means the value provided will be considered as the entire list and not an amendment to the existing list of allowed email domains. - consumer_network: |- - - - (Optional) - Network name in the consumer project in the format of: projects/{project}/global/networks/{network} - Note that the consumer network may be in a different GCP project than the consumer - project that is hosting the Looker Instance. create: '- Default is 90 minutes.' create_time: |- - The time the instance was created in RFC3339 UTC "Zulu" format, accurate to nanoseconds. - custom_domain: |- - - - (Optional) - Custom domain settings for a Looker instance. - Structure is documented below. custom_domain.domain: |- - (Optional) @@ -105257,13 +117141,6 @@ resources: (Output) Status of the custom domain. delete: '- Default is 90 minutes.' - deny_maintenance_period: |- - - - (Optional) - Maintenance denial period for this instance. - You must allow at least 14 days of maintenance availability - between any two deny maintenance periods. - Structure is documented below. deny_maintenance_period.end_date: |- - (Required) @@ -105282,11 +117159,6 @@ resources: egress_public_ip: |- - Public Egress IP (IPv4). - encryption_config: |- - - - (Optional) - Looker instance encryption settings. - Structure is documented below. encryption_config.kms_key_name: |- - (Optional) @@ -105327,14 +117199,6 @@ resources: looker_version: |- - The Looker version that the instance is using. - maintenance_window: |- - - - (Optional) - Maintenance window for an instance. - Maintenance of your instance takes place once a month, and will require - your instance to be restarted during updates, which will temporarily - disrupt service. 
- Structure is documented below. maintenance_window.day_of_week: |- - (Required) @@ -105350,9 +117214,14 @@ resources: The ID of the instance or a fully qualified identifier for the instance. oauth_config: |- - - (Optional) + (Required) Looker Instance OAuth login settings. Structure is documented below. + oauth_config.admin_settings: |- + - + (Optional) + Looker instance Admin settings. + Structure is documented below. oauth_config.client_id: |- - (Required) @@ -105361,30 +117230,115 @@ resources: - (Required) The client secret for the Oauth config. - platform_edition: |- + oauth_config.consumer_network: |- + - + (Optional) + Network name in the consumer project in the format of: projects/{project}/global/networks/{network} + Note that the consumer network may be in a different GCP project than the consumer + project that is hosting the Looker Instance. + oauth_config.custom_domain: |- + - + (Optional) + Custom domain settings for a Looker instance. + Structure is documented below. + oauth_config.deletion_policy: |- + - (Optional) Policy to determine if the cluster should be deleted forcefully. + If setting deletion_policy = "FORCE", the Looker instance will be deleted regardless + of its nested resources. If set to "DEFAULT", Looker instances that still have + nested resources will return an error. Possible values: DEFAULT, FORCE + oauth_config.deny_maintenance_period: |- + - + (Optional) + Maintenance denial period for this instance. + You must allow at least 14 days of maintenance availability + between any two deny maintenance periods. + Structure is documented below. + oauth_config.encryption_config: |- + - + (Optional) + Looker instance encryption settings. + Structure is documented below. + oauth_config.fips_enabled: |- + - + (Optional) + FIPS 140-2 Encryption enablement for Looker (Google Cloud Core). + oauth_config.maintenance_window: |- + - + (Optional) + Maintenance window for an instance. 
+ Maintenance of your instance takes place once a month, and will require
+ your instance to be restarted during updates, which will temporarily
+ disrupt service.
+ Structure is documented below.
+ oauth_config.platform_edition: |-
-
(Optional)
Platform editions for a Looker instance. Each edition maps to a set of instance features, like its size. Must be one of these values:
- private_ip_enabled: |-
+ oauth_config.private_ip_enabled: |-
-
(Optional)
Whether private IP is enabled on the Looker instance.
- project: |-
+ oauth_config.project: |-
-
(Optional) The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
- public_ip_enabled: |-
+ oauth_config.psc_config: |-
+ -
+ (Optional)
+ Information for Private Service Connect (PSC) setup for a Looker instance.
+ Structure is documented below.
+ oauth_config.psc_enabled: |-
+ -
+ (Optional)
+ Whether Private Service Connect (PSC) is enabled on the Looker instance
+ oauth_config.public_ip_enabled: |-
-
(Optional)
Whether public IP is enabled on the Looker instance.
- region: |-
+ oauth_config.region: |-
-
(Optional)
The name of the Looker region of the instance.
- reserved_range: |-
+ oauth_config.reserved_range: |-
-
(Optional)
Name of a reserved IP address range within the consumer network, to be
used for private service access connection. User may or may not specify
this in a request.
+ oauth_config.user_metadata: |-
+ -
+ (Optional)
+ Metadata about users for a Looker instance.
+ These settings are only available when platform edition LOOKER_CORE_STANDARD is set.
+ There are ten Standard and two Developer users included in the cost of the product.
+ You can allocate additional Standard, Viewer, and Developer users for this instance.
+ It is an optional step and can be modified later.
+ With the Standard edition of Looker (Google Cloud core), you can provision up to 50
+ total users, distributed across Viewer, Standard, and Developer.
+ Structure is documented below.
+ psc_config.allowed_vpcs: |- + - + (Optional) + List of VPCs that are allowed ingress into the Looker instance. + psc_config.looker_service_attachment_uri: |- + - + (Output) + URI of the Looker service attachment. + psc_config.service_attachments: |- + - + (Optional) + List of egress service attachment configurations. + Structure is documented below. + service_attachments.connection_status: |- + - + (Output) + Status of the service attachment connection. + service_attachments.local_fqdn: |- + - + (Optional) + Fully qualified domain name that will be used in the private DNS record created for the service attachment. + service_attachments.target_service_attachment_uri: |- + - + (Optional) + URI of the service attachment to connect to. start_date.day: |- - (Optional) @@ -105437,17 +117391,6 @@ resources: - The time the instance was updated in RFC3339 UTC "Zulu" format, accurate to nanoseconds. - user_metadata: |- - - - (Optional) - Metadata about users for a Looker instance. - These settings are only available when platform edition LOOKER_CORE_STANDARD is set. - There are ten Standard and two Developer users included in the cost of the product. - You can allocate additional Standard, Viewer, and Developer users for this instance. - It is an optional step and can be modified later. - With the Standard edition of Looker (Google Cloud core), you can provision up to 50 - total users, distributed across Viewer, Standard, and Developer. - Structure is documented below. 
user_metadata.additional_developer_user_count: |- - (Optional) @@ -105494,15 +117437,12 @@ resources: "key": "value" }, "location": "us-central1", - "provider": "${google-beta}", "rebalance_config": [ { "mode": "NO_REBALANCE" } ] } - references: - provider: google-beta - name: example manifest: |- { @@ -105663,13 +117603,11 @@ resources: }, "location": "us-central1", "partition_count": 2, - "provider": "${google-beta}", "replication_factor": 3, "topic_id": "my-topic" } references: cluster: google_managed_kafka_cluster.cluster.cluster_id - provider: google-beta dependencies: google_managed_kafka_cluster.cluster: |- { @@ -105693,8 +117631,7 @@ resources: ] } ], - "location": "us-central1", - "provider": "${google-beta}" + "location": "us-central1" } argumentDocs: cluster: |- @@ -106003,6 +117940,477 @@ resources: Required. Start time of the window in UTC time. Structure is documented below. importStatements: [] + google_memorystore_instance: + subCategory: Memorystore + description: A Google Cloud Memorystore instance. 
+ name: google_memorystore_instance + title: "" + examples: + - name: instance-basic + manifest: |- + { + "deletion_protection_enabled": false, + "depends_on": [ + "${google_network_connectivity_service_connection_policy.default}" + ], + "desired_psc_auto_connections": [ + { + "network": "${google_compute_network.producer_net.id}", + "project_id": "${data.google_project.project.project_id}" + } + ], + "instance_id": "basic-instance", + "lifecycle": [ + { + "prevent_destroy": "true" + } + ], + "location": "us-central1", + "shard_count": 3 + } + references: + desired_psc_auto_connections.network: google_compute_network.producer_net.id + desired_psc_auto_connections.project_id: data.google_project.project.project_id + dependencies: + google_compute_network.producer_net: |- + { + "auto_create_subnetworks": false, + "name": "my-network" + } + google_compute_subnetwork.producer_subnet: |- + { + "ip_cidr_range": "10.0.0.248/29", + "name": "my-subnet", + "network": "${google_compute_network.producer_net.id}", + "region": "us-central1" + } + google_network_connectivity_service_connection_policy.default: |- + { + "description": "my basic service connection policy", + "location": "us-central1", + "name": "my-policy", + "network": "${google_compute_network.producer_net.id}", + "psc_config": [ + { + "subnetworks": [ + "${google_compute_subnetwork.producer_subnet.id}" + ] + } + ], + "service_class": "gcp-memorystore" + } + - name: instance-full + manifest: |- + { + "authorization_mode": "AUTH_DISABLED", + "deletion_protection_enabled": false, + "depends_on": [ + "${google_network_connectivity_service_connection_policy.default}" + ], + "desired_psc_auto_connections": [ + { + "network": "${google_compute_network.producer_net.id}", + "project_id": "${data.google_project.project.project_id}" + } + ], + "engine_configs": { + "maxmemory-policy": "volatile-ttl" + }, + "engine_version": "VALKEY_7_2", + "instance_id": "full-instance", + "labels": { + "abc": "xyz" + }, + "lifecycle": [ + 
{ + "prevent_destroy": "true" + } + ], + "location": "us-central1", + "mode": "CLUSTER", + "node_type": "SHARED_CORE_NANO", + "persistence_config": [ + { + "mode": "RDB", + "rdb_config": [ + { + "rdb_snapshot_period": "ONE_HOUR", + "rdb_snapshot_start_time": "2024-10-02T15:01:23Z" + } + ] + } + ], + "replica_count": 2, + "shard_count": 3, + "transit_encryption_mode": "TRANSIT_ENCRYPTION_DISABLED", + "zone_distribution_config": [ + { + "mode": "SINGLE_ZONE", + "zone": "us-central1-b" + } + ] + } + references: + desired_psc_auto_connections.network: google_compute_network.producer_net.id + desired_psc_auto_connections.project_id: data.google_project.project.project_id + dependencies: + google_compute_network.producer_net: |- + { + "auto_create_subnetworks": false, + "name": "my-network" + } + google_compute_subnetwork.producer_subnet: |- + { + "ip_cidr_range": "10.0.0.248/29", + "name": "my-subnet", + "network": "${google_compute_network.producer_net.id}", + "region": "us-central1" + } + google_network_connectivity_service_connection_policy.default: |- + { + "description": "my basic service connection policy", + "location": "us-central1", + "name": "my-policy", + "network": "${google_compute_network.producer_net.id}", + "psc_config": [ + { + "subnetworks": [ + "${google_compute_subnetwork.producer_subnet.id}" + ] + } + ], + "service_class": "gcp-memorystore" + } + - name: instance-persistence-aof + manifest: |- + { + "deletion_protection_enabled": false, + "depends_on": [ + "${google_network_connectivity_service_connection_policy.default}" + ], + "desired_psc_auto_connections": [ + { + "network": "${google_compute_network.producer_net.id}", + "project_id": "${data.google_project.project.project_id}" + } + ], + "instance_id": "aof-instance", + "lifecycle": [ + { + "prevent_destroy": "true" + } + ], + "location": "us-central1", + "persistence_config": [ + { + "aof_config": [ + { + "append_fsync": "EVERY_SEC" + } + ], + "mode": "AOF" + } + ], + "shard_count": 3 + } + 
references: + desired_psc_auto_connections.network: google_compute_network.producer_net.id + desired_psc_auto_connections.project_id: data.google_project.project.project_id + dependencies: + google_compute_network.producer_net: |- + { + "auto_create_subnetworks": false, + "name": "my-network" + } + google_compute_subnetwork.producer_subnet: |- + { + "ip_cidr_range": "10.0.0.248/29", + "name": "my-subnet", + "network": "${google_compute_network.producer_net.id}", + "region": "us-central1" + } + google_network_connectivity_service_connection_policy.default: |- + { + "description": "my basic service connection policy", + "location": "us-central1", + "name": "my-policy", + "network": "${google_compute_network.producer_net.id}", + "psc_config": [ + { + "subnetworks": [ + "${google_compute_subnetwork.producer_subnet.id}" + ] + } + ], + "service_class": "gcp-memorystore" + } + argumentDocs: + aof_config.append_fsync: |- + - + (Optional) + Optional. The fsync mode. + Possible values: + NEVER + EVERY_SEC + ALWAYS + authorization_mode: |- + - + (Optional) + Optional. Immutable. Authorization mode of the instance. Possible values: + AUTH_DISABLED + IAM_AUTH + create: '- Default is 60 minutes.' + create_time: |- + - + Output only. Creation timestamp of the instance. + delete: '- Default is 30 minutes.' + deletion_protection_enabled: |- + - + (Optional) + Optional. If set to true deletion of the instance will fail. + desired_psc_auto_connections: '- (Optional) Required. Immutable. User inputs for the auto-created PSC connections.' + discovery_endpoints: |- + - + Output only. Endpoints clients can connect to the instance through. Currently only one + discovery endpoint is supported. + Structure is documented below. + discovery_endpoints.address: |- + - + (Output) + Output only. IP address of the exposed endpoint clients connect to. + discovery_endpoints.network: |- + - + (Output) + Output only. 
The network where the IP address of the discovery endpoint will be + reserved, in the form of + projects/{network_project}/global/networks/{network_id}. + discovery_endpoints.port: |- + - + (Output) + Output only. The port number of the exposed endpoint. + effective_labels: |- + - + All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services. + endpoints: |- + - + Endpoints for the instance. + engine_configs: |- + - + (Optional) + Optional. User-provided engine configurations for the instance. + engine_version: |- + - + (Optional) + Optional. Immutable. Engine version of the instance. + id: '- an identifier for the resource with format projects/{{project}}/locations/{{location}}/instances/{{instance_id}}' + instance_id: |- + - + (Required) + Required. The ID to use for the instance, which will become the final component of + the instance's resource name. + This value is subject to the following restrictions: + labels: |- + - + (Optional) + Optional. Labels to represent user-provided metadata. + Note: This field is non-authoritative, and will only manage the labels present in your configuration. + Please refer to the field effective_labels for all of the labels present on the resource. + location: |- + - + (Required) + Resource ID segment making up resource name. It identifies the resource within its parent collection as described in https://google.aip.dev/122. See documentation for resource type memorystore.googleapis.com/CertificateAuthority. + mode: |- + - + (Optional) + Optional. Standalone or cluster. + Possible values: + CLUSTER + STANDALONE + Possible values are: CLUSTER, STANDALONE. + name: |- + - + Identifier. Unique name of the instance. + Format: projects/{project}/locations/{location}/instances/{instance} + node_config: |- + - + Represents configuration for nodes of the instance. + Structure is documented below. + node_config.size_gb: |- + - + (Output) + Output only. 
Memory size in GB of the node. + node_type: |- + - + (Optional) + Optional. Immutable. Machine type for individual nodes of the instance. + Possible values: + SHARED_CORE_NANO + HIGHMEM_MEDIUM + HIGHMEM_XLARGE + STANDARD_SMALL + persistence_config: |- + - + (Optional) + Represents persistence configuration for a instance. + Structure is documented below. + persistence_config.aof_config: |- + - + (Optional) + Configuration for AOF based persistence. + Structure is documented below. + persistence_config.mode: |- + - + (Optional) + Optional. Current persistence mode. + Possible values: + DISABLED + RDB + AOF + Possible values are: DISABLED, RDB, AOF. + persistence_config.rdb_config: |- + - + (Optional) + Configuration for RDB based persistence. + Structure is documented below. + project: |- + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + psc_auto_connections: |- + - + Output only. User inputs and resource details of the auto-created PSC connections. + Structure is documented below. + psc_auto_connections.connection_type: |- + - + (Output) + Output Only. Type of a PSC Connection. + Possible values: + CONNECTION_TYPE_DISCOVERY + CONNECTION_TYPE_PRIMARY + CONNECTION_TYPE_READER + psc_auto_connections.forwarding_rule: |- + - + (Output) + Output only. The URI of the consumer side forwarding rule. + Format: + projects/{project}/regions/{region}/forwardingRules/{forwarding_rule} + psc_auto_connections.ip_address: |- + - + (Output) + Output only. The IP allocated on the consumer network for the PSC forwarding rule. + psc_auto_connections.network: |- + - + (Output) + Output only. The consumer network where the IP address resides, in the form of + projects/{project_id}/global/networks/{network_id}. + psc_auto_connections.port: |- + - + (Output) + Output only. Ports of the exposed endpoint. + psc_auto_connections.project_id: |- + - + (Output) + Output only. 
The consumer project_id where the forwarding rule is created from. + psc_auto_connections.psc_connection_id: |- + - + (Output) + Output only. The PSC connection id of the forwarding rule connected to the + service attachment. + psc_auto_connections.psc_connection_status: |- + - + (Output) + Output Only. The status of the PSC connection: whether a connection exists and ACTIVE or it no longer exists. + Possible values: + ACTIVE + NOT_FOUND + psc_auto_connections.service_attachment: |- + - + (Output) + Output only. The service attachment which is the target of the PSC connection, in the form of projects/{project-id}/regions/{region}/serviceAttachments/{service-attachment-id}. + rdb_config.rdb_snapshot_period: |- + - + (Optional) + Optional. Period between RDB snapshots. + Possible values: + ONE_HOUR + SIX_HOURS + TWELVE_HOURS + TWENTY_FOUR_HOURS + rdb_config.rdb_snapshot_start_time: |- + - + (Optional) + Optional. Time that the first snapshot was/will be attempted, and to which future + snapshots will be aligned. If not provided, the current time will be + used. + replica_count: |- + - + (Optional) + Optional. Number of replica nodes per shard. If omitted the default is 0 replicas. + shard_count: |- + - + (Required) + Required. Number of shards for the instance. + state: |- + - + Output only. Current state of the instance. + Possible values: + CREATING + ACTIVE + UPDATING + DELETING + state_info: |- + - + Additional information about the state of the instance. + Structure is documented below. + state_info.update_info: |- + - + (Output) + Represents information about instance with state UPDATING. + Structure is documented below. + state_info.update_info.target_replica_count: |- + - + (Output) + Output only. Target number of replica nodes per shard for the instance. + state_info.update_info.target_shard_count: |- + - + (Output) + Output only. Target number of shards for the instance. 
+ terraform_labels: |- + - + The combination of labels configured directly on the resource + and default labels configured on the provider. + transit_encryption_mode: |- + - + (Optional) + Optional. Immutable. In-transit encryption mode of the instance. + Possible values: + TRANSIT_ENCRYPTION_DISABLED + SERVER_AUTHENTICATION + uid: |- + - + Output only. System assigned, unique identifier for the instance. + update: '- Default is 120 minutes.' + update_time: |- + - + Output only. Latest update timestamp of the instance. + zone_distribution_config: |- + - + (Optional) + Zone distribution configuration for allocation of instance resources. + Structure is documented below. + zone_distribution_config.mode: |- + - + (Optional) + Optional. Current zone distribution mode. Defaults to MULTI_ZONE. + Possible values: + MULTI_ZONE + SINGLE_ZONE + Possible values are: MULTI_ZONE, SINGLE_ZONE. + zone_distribution_config.zone: |- + - + (Optional) + Optional. Defines zone where all resources will be allocated with SINGLE_ZONE mode. + Ignored for MULTI_ZONE mode. + importStatements: [] google_migration_center_group: subCategory: Migration Center description: A resource that represents an asset group. @@ -106575,6 +118983,11 @@ resources: Control over how the notification channels in notification_channels are notified when this alert fires, on a per-channel basis. Structure is documented below. + alert_strategy.notification_prompts: |- + - + (Optional) + Control when notifications will be sent out. + Each value may be one of: NOTIFICATION_PROMPT_UNSPECIFIED, OPENED, CLOSED. alert_strategy.notification_rate_limit: |- - (Optional) @@ -106705,6 +119118,15 @@ resources: (Optional) Control over how this alert policy's notification channels are notified. Structure is documented below. + condition_prometheus_query_language.disable_metric_validation: |- + - + (Optional) + Whether to disable metric existence validation for this condition. 
+ This allows alerting policies to be defined on metrics that do not yet + exist, improving advanced customer workflows such as configuring + alerting policies using Terraform. + Users with the monitoring.alertPolicyViewer role are able to see the + name of the non-existent metric in the alerting policy condition. condition_prometheus_query_language.documentation: |- - (Optional) @@ -107486,6 +119908,7 @@ resources: dependencies: google_project.basic: |- { + "deletion_policy": "DELETE", "name": "m-id-display", "org_id": "123456789", "project_id": "m-id" @@ -108770,7 +121193,7 @@ resources: kdc_hostname: |- - (Optional) - Hostname of the Active Directory server used as Kerberos Key Distribution Center. Only requried for volumes using kerberized NFSv4.1 + Hostname of the Active Directory server used as Kerberos Key Distribution Center. Only required for volumes using kerberized NFSv4.1 kdc_ip: |- - (Optional) @@ -109038,7 +121461,7 @@ resources: "labels": { "creator": "testuser" }, - "location": "us-central1", + "location": "us-west1", "name": "test-backup-vault" } argumentDocs: @@ -109086,24 +121509,11 @@ resources: - name: kmsConfig manifest: |- { - "crypto_key_name": "${google_kms_crypto_key.crypto_key.id}", + "crypto_key_name": "crypto-name", "description": "this is a test description", "location": "us-central1", "name": "kms-test" } - references: - crypto_key_name: google_kms_crypto_key.crypto_key.id - dependencies: - google_kms_crypto_key.crypto_key: |- - { - "key_ring": "${google_kms_key_ring.keyring.id}", - "name": "crypto-name" - } - google_kms_key_ring.keyring: |- - { - "location": "us-central1", - "name": "key-ring" - } argumentDocs: create: '- Default is 20 minutes.' crypto_key_name: |- @@ -109196,11 +121606,16 @@ resources: (Optional) Specifies the Active Directory policy to be used. Format: projects/{{project}}/locations/{{location}}/activeDirectories/{{name}}. The policy needs to be in the same location as the storage pool. 
+ allow_auto_tiering: |- + - + (Optional) + Optional. True if the storage pool supports Auto Tiering enabled volumes. Default is false. + Auto-tiering can be enabled after storage pool creation but it can't be disabled once enabled. capacity_gib: |- - (Required) Capacity of the storage pool (in GiB). - create: '- Default is 20 minutes.' + create: '- Default is 45 minutes.' delete: '- Default is 20 minutes.' description: |- - @@ -109242,7 +121657,7 @@ resources: If it is not provided, the provider project is used. replica_zone: |- - - (Optional, Beta) + (Optional) Specifies the replica zone for regional Flex pools. zone and replica_zone values can be swapped to initiate a zone switch. service_level: |- @@ -109263,7 +121678,7 @@ resources: Number of volume in the storage pool. zone: |- - - (Optional, Beta) + (Optional) Specifies the active zone for regional Flex pools. zone and replica_zone values can be swapped to initiate a zone switch. If you want to create a zonal Flex pool, specify a zone name for location and omit zone. @@ -109324,6 +121739,9 @@ resources: - (Required) Capacity of the volume (in GiB). + cold_tier_size_gib: |- + - + Output only. Size of the volume cold tier data in GiB. create: '- Default is 20 minutes.' create_time: |- - @@ -109345,6 +121763,7 @@ resources: - (Optional) Policy to determine if the volume should be deleted forcefully. Volumes may have nested snapshot resources. Deleting such a volume will fail. Setting this parameter to FORCE will delete volumes including nested snapshots. + Possible values: DEFAULT, FORCE. description: |- - (Optional) @@ -109371,7 +121790,7 @@ resources: export_policy.rules.allowed_clients: |- - (Optional) - Defines the client ingress specification (allowed clients) as a comma seperated list with IPv4 CIDRs or IPv4 host addresses. + Defines the client ingress specification (allowed clients) as a comma separated list with IPv4 CIDRs or IPv4 host addresses. 
export_policy.rules.has_root_access: |- - (Optional) @@ -109431,6 +121850,10 @@ resources: - (Optional) Labels as key value pairs. Example: { "owner": "Bob", "department": "finance", "purpose": "testing" }. + large_capacity: |- + - + (Optional) + Optional. Flag indicating if the volume will be a large capacity volume or a regular volume. ldap_enabled: |- - Flag indicating if the volume is NFS LDAP enabled or not. Inherited from storage pool. @@ -109476,6 +121899,11 @@ resources: - (Output) Protocol to mount with. + multiple_endpoints: |- + - + (Optional) + Optional. Flag indicating if the volume will have an IP address per node for volumes supporting multiple IP endpoints. + Only the volume with largeCapacity will be allowed to have multiple endpoints. name: |- - (Required) @@ -109496,7 +121924,6 @@ resources: Name of the Private Service Access allocated range. Inherited from storage pool. replica_zone: |- - - (Beta) Specifies the replica zone for regional volume. restore_parameters: |- - @@ -109528,7 +121955,7 @@ resources: Possible values are: NTFS, UNIX. service_level: |- - - Service level of the volume. Inherited from storage pool. Supported values are : PREMIUM, EXTERME, STANDARD, FLEX. + Service level of the volume. Inherited from storage pool. Supported values are : PREMIUM, EXTREME, STANDARD, FLEX. share_name: |- - (Required) @@ -109582,11 +122009,27 @@ resources: storage_pool: |- - (Required) - Name of the storage pool to create the volume in. Pool needs enough spare capacity to accomodate the volume. + Name of the storage pool to create the volume in. Pool needs enough spare capacity to accommodate the volume. terraform_labels: |- - The combination of labels configured directly on the resource and default labels configured on the provider. + tiering_policy: |- + - + (Optional) + Tiering policy for the volume. + Structure is documented below. + tiering_policy.cooling_threshold_days: |- + - + (Optional) + Optional. 
Time in days to mark the volume's data block as cold and make it eligible for tiering, can be range from 7-183. + Default is 31. + tiering_policy.tier_action: |- + - + (Optional) + Optional. Flag indicating if the volume has tiering policy enable/pause. Default is PAUSED. + Default value is PAUSED. + Possible values are: ENABLED, PAUSED. unix_permissions: |- - (Optional) @@ -109613,7 +122056,6 @@ resources: The maximum number of snapshots to keep for the weekly schedule. zone: |- - - (Beta) Specifies the active zone for regional volume. importStatements: [] google_netapp_volume_replication: @@ -109902,6 +122344,100 @@ resources: (Required) The name of the volume to create the snapshot in. importStatements: [] + google_network_connectivity_group: + subCategory: Network Connectivity + description: The NetworkConnectivity Group resource + name: google_network_connectivity_group + title: "" + examples: + - name: primary + manifest: |- + { + "auto_accept": [ + { + "auto_accept_projects": [ + "foo", + "bar" + ] + } + ], + "description": "A sample hub group", + "hub": "${google_network_connectivity_hub.basic_hub.id}", + "labels": { + "label-one": "value-one" + }, + "name": "default" + } + references: + hub: google_network_connectivity_hub.basic_hub.id + dependencies: + google_network_connectivity_hub.basic_hub: |- + { + "description": "A sample hub", + "labels": { + "label-one": "value-one" + }, + "name": "network-connectivity-hub1" + } + argumentDocs: + auto_accept: |- + - + (Optional) + Optional. The auto-accept setting for this group. + Structure is documented below. + auto_accept.auto_accept_projects: |- + - + (Required) + A list of project ids or project numbers for which you want to enable auto-accept. The auto-accept setting is applied to spokes being created or updated in these projects. + create: '- Default is 20 minutes.' + create_time: |- + - + Output only. The time the hub was created. + delete: '- Default is 20 minutes.' 
+ description: |- + - + (Optional) + An optional description of the group. + effective_labels: |- + - + All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services. + hub: |- + - + (Required) + The name of the hub. Hub names must be unique. They use the following form: projects/{projectNumber}/locations/global/hubs/{hubId} + id: '- an identifier for the resource with format projects/{{project}}/locations/global/hubs/{{hub}}/groups/{{name}}' + labels: |- + - + (Optional) + Optional labels in key:value format. For more information about labels, see Requirements for labels. + Note: This field is non-authoritative, and will only manage the labels present in your configuration. + Please refer to the field effective_labels for all of the labels present on the resource. + name: |- + - + (Required) + The name of the group. Group names must be unique. + Possible values are: default, center, edge. + project: |- + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + route_table: |- + - + Output only. The name of the route table that corresponds to this group. They use the following form: projects/{projectNumber}/locations/global/hubs/{hubId}/routeTables/{route_table_id} + state: |- + - + Output only. The current lifecycle state of this hub. + terraform_labels: |- + - + The combination of labels configured directly on the resource + and default labels configured on the provider. + uid: |- + - + Output only. The Google-generated UUID for the group. This value is unique across all group resources. If a group is deleted and another with the same name is created, the new route table is assigned a different uniqueId. + update: '- Default is 20 minutes.' + update_time: |- + - + Output only. The time the hub was last updated. 
+ importStatements: [] google_network_connectivity_hub: subCategory: Network Connectivity description: The NetworkConnectivity Hub resource @@ -109924,6 +122460,25 @@ resources: "export_psc": true, "name": "basic" } + - name: primary + manifest: |- + { + "description": "A sample mesh hub", + "labels": { + "label-one": "value-one" + }, + "name": "mesh" + } + - name: primary + manifest: |- + { + "description": "A sample star hub", + "labels": { + "label-one": "value-one" + }, + "name": "star", + "preset_topology": "STAR" + } argumentDocs: create: '- Default is 20 minutes.' create_time: |- @@ -109952,6 +122507,11 @@ resources: - (Required) Immutable. The name of the hub. Hub names must be unique. They use the following form: projects/{project_number}/locations/global/hubs/{hub_id} + preset_topology: |- + - + (Optional) + Optional. The topology implemented in this hub. Currently, this field is only used when policyMode = PRESET. The available preset topologies are MESH and STAR. If presetTopology is unspecified and policyMode = PRESET, the presetTopology defaults to MESH. When policyMode = CUSTOM, the presetTopology is set to PRESET_TOPOLOGY_UNSPECIFIED. + Possible values are: MESH, STAR. project: |- - (Optional) The ID of the project in which the resource belongs. If it is not provided, the provider project is used. 
@@ -110076,6 +122636,38 @@ resources: "network": "${google_compute_network.default.id}", "region": "us-central1" } + - name: default + manifest: |- + { + "description": "Test internal range", + "ip_cidr_range": "10.1.0.0/16", + "migration": [ + { + "source": "${google_compute_subnetwork.source.self_link}", + "target": "projects/${data.google_project.target_project.project_id}/regions/us-central1/subnetworks/target-subnet" + } + ], + "name": "migration", + "network": "${google_compute_network.default.self_link}", + "peering": "FOR_SELF", + "usage": "FOR_MIGRATION" + } + references: + migration.source: google_compute_subnetwork.source.self_link + network: google_compute_network.default.self_link + dependencies: + google_compute_network.default: |- + { + "auto_create_subnetworks": false, + "name": "internal-ranges" + } + google_compute_subnetwork.source: |- + { + "ip_cidr_range": "10.1.0.0/16", + "name": "source-subnet", + "network": "${google_compute_network.default.name}", + "region": "us-central1" + } argumentDocs: create: '- Default is 30 minutes.' delete: '- Default is 30 minutes.' @@ -110093,6 +122685,25 @@ resources: - (Optional) User-defined labels. + migration: |- + - + (Optional) + Specification for migration with source and target resource names. + Structure is documented below. + migration.source: |- + - + (Required) + Resource path as an URI of the source resource, for example a subnet. + The project for the source resource should match the project for the + InternalRange. + An example /projects/{project}/regions/{region}/subnetworks/{subnet} + migration.target: |- + - + (Required) + Resource path of the target resource. The target project can be + different, as in the cases when migrating to peer networks. The resource + may not exist yet. + For example /projects/{project}/regions/{region}/subnetworks/{subnet} name: |- - (Required) @@ -110133,7 +122744,7 @@ resources: - (Required) The type of usage set for this InternalRange. 
- Possible values are: FOR_VPC, EXTERNAL_TO_VPC. + Possible values are: FOR_VPC, EXTERNAL_TO_VPC, FOR_MIGRATION. users: |- - Output only. The list of resources that refer to this internal range. @@ -110634,6 +123245,10 @@ resources: "198.51.100.0/24", "10.10.0.0/16" ], + "include_export_ranges": [ + "198.51.100.0/23", + "10.0.0.0/8" + ], "uri": "${google_compute_network.network.self_link}" } ], @@ -110657,6 +123272,55 @@ resources: }, "name": "hub1" } + - name: primary + manifest: |- + { + "description": "A sample spoke with a linked VPC", + "group": "${google_network_connectivity_group.default_group.id}", + "hub": "${google_network_connectivity_hub.basic_hub.id}", + "labels": { + "label-one": "value-one" + }, + "linked_vpc_network": [ + { + "exclude_export_ranges": [ + "198.51.100.0/24", + "10.10.0.0/16" + ], + "include_export_ranges": [ + "198.51.100.0/23", + "10.0.0.0/8" + ], + "uri": "${google_compute_network.network.self_link}" + } + ], + "location": "global", + "name": "group-spoke1" + } + references: + group: google_network_connectivity_group.default_group.id + hub: google_network_connectivity_hub.basic_hub.id + linked_vpc_network.uri: google_compute_network.network.self_link + dependencies: + google_compute_network.network: |- + { + "auto_create_subnetworks": false, + "name": "net-spoke" + } + google_network_connectivity_group.default_group: |- + { + "description": "A sample hub group", + "hub": "${google_network_connectivity_hub.basic_hub.id}", + "name": "default" + } + google_network_connectivity_hub.basic_hub: |- + { + "description": "A sample hub", + "labels": { + "label-two": "value-one" + }, + "name": "hub1-spoke" + } - name: primary manifest: |- { @@ -110667,6 +123331,9 @@ resources: }, "linked_router_appliance_instances": [ { + "include_import_ranges": [ + "ALL_IPV4_RANGES" + ], "instances": [ { "ip_address": "10.0.0.2", @@ -110730,6 +123397,468 @@ resources: }, "name": "tf-test-hub" } + - name: tunnel1 + manifest: |- + { + "description": "A sample 
spoke with a linked VPN Tunnel", + "hub": "${google_network_connectivity_hub.basic_hub.id}", + "labels": { + "label-one": "value-one" + }, + "linked_vpn_tunnels": [ + { + "include_import_ranges": [ + "ALL_IPV4_RANGES" + ], + "site_to_site_data_transfer": true, + "uris": [ + "${google_compute_vpn_tunnel.tunnel1.self_link}" + ] + } + ], + "location": "us-central1", + "name": "vpn-tunnel-1-spoke" + } + references: + hub: google_network_connectivity_hub.basic_hub.id + linked_vpn_tunnels.uris: google_compute_vpn_tunnel.tunnel1.self_link + dependencies: + google_compute_external_vpn_gateway.external_vpn_gw: |- + { + "description": "An externally managed VPN gateway", + "interface": [ + { + "id": 0, + "ip_address": "8.8.8.8" + } + ], + "name": "external-vpn-gateway", + "redundancy_type": "SINGLE_IP_INTERNALLY_REDUNDANT" + } + google_compute_ha_vpn_gateway.gateway: |- + { + "name": "vpn-gateway", + "network": "${google_compute_network.network.id}" + } + google_compute_network.network: |- + { + "auto_create_subnetworks": false, + "name": "basic-network" + } + google_compute_router.router: |- + { + "bgp": [ + { + "asn": 64514 + } + ], + "name": "external-vpn-gateway", + "network": "${google_compute_network.network.name}", + "region": "us-central1" + } + google_compute_router_interface.router_interface1: |- + { + "ip_range": "169.254.0.1/30", + "name": "router-interface1", + "region": "us-central1", + "router": "${google_compute_router.router.name}", + "vpn_tunnel": "${google_compute_vpn_tunnel.tunnel1.name}" + } + google_compute_router_interface.router_interface2: |- + { + "ip_range": "169.254.1.1/30", + "name": "router-interface2", + "region": "us-central1", + "router": "${google_compute_router.router.name}", + "vpn_tunnel": "${google_compute_vpn_tunnel.tunnel2.name}" + } + google_compute_router_peer.router_peer1: |- + { + "advertised_route_priority": 100, + "interface": "${google_compute_router_interface.router_interface1.name}", + "name": "router-peer1", + "peer_asn": 
64515, + "peer_ip_address": "169.254.0.2", + "region": "us-central1", + "router": "${google_compute_router.router.name}" + } + google_compute_router_peer.router_peer2: |- + { + "advertised_route_priority": 100, + "interface": "${google_compute_router_interface.router_interface2.name}", + "name": "router-peer2", + "peer_asn": 64515, + "peer_ip_address": "169.254.1.2", + "region": "us-central1", + "router": "${google_compute_router.router.name}" + } + google_compute_subnetwork.subnetwork: |- + { + "ip_cidr_range": "10.0.0.0/28", + "name": "basic-subnetwork", + "network": "${google_compute_network.network.self_link}", + "region": "us-central1" + } + google_compute_vpn_tunnel.tunnel1: |- + { + "name": "tunnel1", + "peer_external_gateway": "${google_compute_external_vpn_gateway.external_vpn_gw.id}", + "peer_external_gateway_interface": 0, + "region": "us-central1", + "router": "${google_compute_router.router.id}", + "shared_secret": "a secret message", + "vpn_gateway": "${google_compute_ha_vpn_gateway.gateway.id}", + "vpn_gateway_interface": 0 + } + google_compute_vpn_tunnel.tunnel2: |- + { + "name": "tunnel2", + "peer_external_gateway": "${google_compute_external_vpn_gateway.external_vpn_gw.id}", + "peer_external_gateway_interface": 0, + "region": "us-central1", + "router": " ${google_compute_router.router.id}", + "shared_secret": "a secret message", + "vpn_gateway": "${google_compute_ha_vpn_gateway.gateway.id}", + "vpn_gateway_interface": 1 + } + google_network_connectivity_hub.basic_hub: |- + { + "description": "A sample hub", + "labels": { + "label-two": "value-one" + }, + "name": "basic-hub1" + } + - name: tunnel2 + manifest: |- + { + "description": "A sample spoke with a linked VPN Tunnel", + "hub": "${google_network_connectivity_hub.basic_hub.id}", + "labels": { + "label-one": "value-one" + }, + "linked_vpn_tunnels": [ + { + "include_import_ranges": [ + "ALL_IPV4_RANGES" + ], + "site_to_site_data_transfer": true, + "uris": [ + 
"${google_compute_vpn_tunnel.tunnel2.self_link}" + ] + } + ], + "location": "us-central1", + "name": "vpn-tunnel-2-spoke" + } + references: + hub: google_network_connectivity_hub.basic_hub.id + linked_vpn_tunnels.uris: google_compute_vpn_tunnel.tunnel2.self_link + dependencies: + google_compute_external_vpn_gateway.external_vpn_gw: |- + { + "description": "An externally managed VPN gateway", + "interface": [ + { + "id": 0, + "ip_address": "8.8.8.8" + } + ], + "name": "external-vpn-gateway", + "redundancy_type": "SINGLE_IP_INTERNALLY_REDUNDANT" + } + google_compute_ha_vpn_gateway.gateway: |- + { + "name": "vpn-gateway", + "network": "${google_compute_network.network.id}" + } + google_compute_network.network: |- + { + "auto_create_subnetworks": false, + "name": "basic-network" + } + google_compute_router.router: |- + { + "bgp": [ + { + "asn": 64514 + } + ], + "name": "external-vpn-gateway", + "network": "${google_compute_network.network.name}", + "region": "us-central1" + } + google_compute_router_interface.router_interface1: |- + { + "ip_range": "169.254.0.1/30", + "name": "router-interface1", + "region": "us-central1", + "router": "${google_compute_router.router.name}", + "vpn_tunnel": "${google_compute_vpn_tunnel.tunnel1.name}" + } + google_compute_router_interface.router_interface2: |- + { + "ip_range": "169.254.1.1/30", + "name": "router-interface2", + "region": "us-central1", + "router": "${google_compute_router.router.name}", + "vpn_tunnel": "${google_compute_vpn_tunnel.tunnel2.name}" + } + google_compute_router_peer.router_peer1: |- + { + "advertised_route_priority": 100, + "interface": "${google_compute_router_interface.router_interface1.name}", + "name": "router-peer1", + "peer_asn": 64515, + "peer_ip_address": "169.254.0.2", + "region": "us-central1", + "router": "${google_compute_router.router.name}" + } + google_compute_router_peer.router_peer2: |- + { + "advertised_route_priority": 100, + "interface": 
"${google_compute_router_interface.router_interface2.name}", + "name": "router-peer2", + "peer_asn": 64515, + "peer_ip_address": "169.254.1.2", + "region": "us-central1", + "router": "${google_compute_router.router.name}" + } + google_compute_subnetwork.subnetwork: |- + { + "ip_cidr_range": "10.0.0.0/28", + "name": "basic-subnetwork", + "network": "${google_compute_network.network.self_link}", + "region": "us-central1" + } + google_compute_vpn_tunnel.tunnel1: |- + { + "name": "tunnel1", + "peer_external_gateway": "${google_compute_external_vpn_gateway.external_vpn_gw.id}", + "peer_external_gateway_interface": 0, + "region": "us-central1", + "router": "${google_compute_router.router.id}", + "shared_secret": "a secret message", + "vpn_gateway": "${google_compute_ha_vpn_gateway.gateway.id}", + "vpn_gateway_interface": 0 + } + google_compute_vpn_tunnel.tunnel2: |- + { + "name": "tunnel2", + "peer_external_gateway": "${google_compute_external_vpn_gateway.external_vpn_gw.id}", + "peer_external_gateway_interface": 0, + "region": "us-central1", + "router": " ${google_compute_router.router.id}", + "shared_secret": "a secret message", + "vpn_gateway": "${google_compute_ha_vpn_gateway.gateway.id}", + "vpn_gateway_interface": 1 + } + google_network_connectivity_hub.basic_hub: |- + { + "description": "A sample hub", + "labels": { + "label-two": "value-one" + }, + "name": "basic-hub1" + } + - name: primary + manifest: |- + { + "description": "A sample spoke with a linked Interconnect Attachment", + "hub": "${google_network_connectivity_hub.basic_hub.id}", + "labels": { + "label-one": "value-one" + }, + "linked_interconnect_attachments": [ + { + "include_import_ranges": [ + "ALL_IPV4_RANGES" + ], + "site_to_site_data_transfer": true, + "uris": [ + "${google_compute_interconnect_attachment.interconnect-attachment.self_link}" + ] + } + ], + "location": "us-central1", + "name": "interconnect-attachment-spoke" + } + references: + hub: google_network_connectivity_hub.basic_hub.id + 
linked_interconnect_attachments.uris: google_compute_interconnect_attachment.interconnect-attachment.self_link + dependencies: + google_compute_interconnect_attachment.interconnect-attachment: |- + { + "edge_availability_domain": "AVAILABILITY_DOMAIN_1", + "mtu": 1500, + "name": "partner-interconnect1", + "region": "us-central1", + "router": "${google_compute_router.router.id}", + "type": "PARTNER" + } + google_compute_network.network: |- + { + "auto_create_subnetworks": false, + "name": "basic-network" + } + google_compute_router.router: |- + { + "bgp": [ + { + "asn": 16550 + } + ], + "name": "external-vpn-gateway", + "network": "${google_compute_network.network.name}", + "region": "us-central1" + } + google_network_connectivity_hub.basic_hub: |- + { + "description": "A sample hub", + "labels": { + "label-two": "value-one" + }, + "name": "basic-hub1" + } + - name: linked_vpc_spoke + manifest: |- + { + "hub": "${google_network_connectivity_hub.basic_hub.id}", + "linked_vpc_network": [ + { + "uri": "${google_compute_network.network.self_link}" + } + ], + "location": "global", + "name": "vpc-spoke" + } + references: + hub: google_network_connectivity_hub.basic_hub.id + linked_vpc_network.uri: google_compute_network.network.self_link + dependencies: + google_compute_global_address.address: |- + { + "address_type": "INTERNAL", + "name": "test-address", + "network": "${google_compute_network.network.id}", + "prefix_length": 16, + "purpose": "VPC_PEERING" + } + google_compute_network.network: |- + { + "auto_create_subnetworks": false, + "name": "net-spoke" + } + google_network_connectivity_hub.basic_hub: |- + { + "name": "hub-basic" + } + google_service_networking_connection.peering: |- + { + "network": "${google_compute_network.network.id}", + "reserved_peering_ranges": [ + "${google_compute_global_address.address.name}" + ], + "service": "servicenetworking.googleapis.com" + } + - name: primary + manifest: |- + { + "depends_on": [ + 
"${google_network_connectivity_spoke.linked_vpc_spoke}" + ], + "description": "A sample spoke with a linked router appliance instance", + "hub": "${google_network_connectivity_hub.basic_hub.id}", + "labels": { + "label-one": "value-one" + }, + "linked_producer_vpc_network": [ + { + "exclude_export_ranges": [ + "198.51.100.0/24", + "10.10.0.0/16" + ], + "network": "${google_compute_network.network.name}", + "peering": "${google_service_networking_connection.peering.peering}" + } + ], + "location": "global", + "name": "producer-spoke" + } + references: + hub: google_network_connectivity_hub.basic_hub.id + linked_producer_vpc_network.network: google_compute_network.network.name + linked_producer_vpc_network.peering: google_service_networking_connection.peering.peering + dependencies: + google_compute_global_address.address: |- + { + "address_type": "INTERNAL", + "name": "test-address", + "network": "${google_compute_network.network.id}", + "prefix_length": 16, + "purpose": "VPC_PEERING" + } + google_compute_network.network: |- + { + "auto_create_subnetworks": false, + "name": "net-spoke" + } + google_network_connectivity_hub.basic_hub: |- + { + "name": "hub-basic" + } + google_service_networking_connection.peering: |- + { + "network": "${google_compute_network.network.id}", + "reserved_peering_ranges": [ + "${google_compute_global_address.address.name}" + ], + "service": "servicenetworking.googleapis.com" + } + - name: primary + manifest: |- + { + "description": "A sample spoke", + "group": "${google_network_connectivity_group.center_group.id}", + "hub": "${google_network_connectivity_hub.star_hub.id}", + "labels": { + "label-one": "value-one" + }, + "linked_vpc_network": [ + { + "uri": "${google_compute_network.network.self_link}" + } + ], + "location": "global", + "name": "vpc-spoke" + } + references: + group: google_network_connectivity_group.center_group.id + hub: google_network_connectivity_hub.star_hub.id + linked_vpc_network.uri: 
google_compute_network.network.self_link + dependencies: + google_compute_network.network: |- + { + "auto_create_subnetworks": false, + "name": "tf-net" + } + google_network_connectivity_group.center_group: |- + { + "auto_accept": [ + { + "auto_accept_projects": [ + "foo" + ] + } + ], + "hub": "${google_network_connectivity_hub.star_hub.id}", + "name": "center" + } + google_network_connectivity_hub.star_hub: |- + { + "name": "hub-basic", + "preset_topology": "STAR" + } argumentDocs: create: '- Default is 20 minutes.' create_time: |- @@ -110743,6 +123872,10 @@ resources: effective_labels: |- - All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services. + group: |- + - + (Optional) + The name of the group that this spoke is associated with. hub: |- - (Required) @@ -110750,11 +123883,11 @@ resources: id: '- an identifier for the resource with format projects/{{project}}/locations/{{location}}/spokes/{{name}}' instances.ip_address: |- - - (Optional) + (Required) The IP address on the VM to use for peering. instances.virtual_machine: |- - - (Optional) + (Required) The URI of the virtual machine resource labels: |- - @@ -110767,6 +123900,11 @@ resources: (Optional) A collection of VLAN attachment resources. These resources should be redundant attachments that all advertise the same prefixes to Google Cloud. Alternatively, in active/passive configurations, all attachments should be capable of advertising the same prefixes. Structure is documented below. + linked_interconnect_attachments.include_import_ranges: |- + - + (Optional) + IP ranges allowed to be included during import from hub (does not control transit connectivity). + The only allowed value for now is "ALL_IPV4_RANGES". 
linked_interconnect_attachments.site_to_site_data_transfer: |- - (Required) @@ -110775,11 +123913,41 @@ resources: - (Required) The URIs of linked interconnect attachment resources + linked_producer_vpc_network: |- + - + (Optional) + Producer VPC network that is associated with the spoke. + Structure is documented below. + linked_producer_vpc_network.exclude_export_ranges: |- + - + (Optional) + IP ranges encompassing the subnets to be excluded from peering. + linked_producer_vpc_network.include_export_ranges: |- + - + (Optional) + IP ranges allowed to be included from peering. + linked_producer_vpc_network.network: |- + - + (Required) + The URI of the Service Consumer VPC that the Producer VPC is peered with. + linked_producer_vpc_network.peering: |- + - + (Required) + The name of the VPC peering between the Service Consumer VPC and the Producer VPC (defined in the Tenant project) which is added to the NCC hub. This peering must be in ACTIVE state. + linked_producer_vpc_network.producer_network: |- + - + (Output) + The URI of the Producer VPC. linked_router_appliance_instances: |- - (Optional) The URIs of linked Router appliance resources Structure is documented below. + linked_router_appliance_instances.include_import_ranges: |- + - + (Optional) + IP ranges allowed to be included during import from hub (does not control transit connectivity). + The only allowed value for now is "ALL_IPV4_RANGES". linked_router_appliance_instances.instances: |- - (Required) @@ -110798,6 +123966,10 @@ resources: - (Optional) IP ranges encompassing the subnets to be excluded from peering. + linked_vpc_network.include_export_ranges: |- + - + (Optional) + IP ranges allowed to be included from peering. linked_vpc_network.uri: |- - (Required) @@ -110807,6 +123979,11 @@ resources: (Optional) The URIs of linked VPN tunnel resources Structure is documented below. 
+ linked_vpn_tunnels.include_import_ranges: |- + - + (Optional) + IP ranges allowed to be included during import from hub (does not control transit connectivity). + The only allowed value for now is "ALL_IPV4_RANGES". linked_vpn_tunnels.site_to_site_data_transfer: |- - (Required) @@ -111109,6 +124286,292 @@ resources: and default labels configured on the provider. update: '- Default is 20 minutes.' importStatements: [] + google_network_management_vpc_flow_logs_config: + subCategory: NetworkManagement + description: VPC Flow Logs Config is a resource that lets you configure Flow Logs for VPC, Interconnect attachments or VPN Tunnels. + name: google_network_management_vpc_flow_logs_config + title: "" + examples: + - name: interconnect-test + manifest: |- + { + "aggregation_interval": "INTERVAL_5_SEC", + "description": "VPC Flow Logs over a VPN Gateway.", + "flow_sampling": 0.5, + "interconnect_attachment": "projects/${data.google_project.project.number}/regions/us-east4/interconnectAttachments/${google_compute_interconnect_attachment.attachment.name}", + "location": "global", + "metadata": "INCLUDE_ALL_METADATA", + "state": "ENABLED", + "vpc_flow_logs_config_id": "full-interconnect-test-id" + } + dependencies: + google_compute_interconnect_attachment.attachment: |- + { + "edge_availability_domain": "AVAILABILITY_DOMAIN_1", + "mtu": 1500, + "name": "full-interconnect-test-id", + "router": "${google_compute_router.router.id}", + "type": "PARTNER" + } + google_compute_network.network: |- + { + "name": "full-interconnect-test-network" + } + google_compute_router.router: |- + { + "bgp": [ + { + "asn": 16550 + } + ], + "name": "full-interconnect-test-router", + "network": "${google_compute_network.network.name}" + } + - name: interconnect-test + manifest: |- + { + "interconnect_attachment": "projects/${data.google_project.project.number}/regions/us-east4/interconnectAttachments/${google_compute_interconnect_attachment.attachment.name}", + "location": "global", + 
"vpc_flow_logs_config_id": "basic-interconnect-test-id" + } + dependencies: + google_compute_interconnect_attachment.attachment: |- + { + "edge_availability_domain": "AVAILABILITY_DOMAIN_1", + "mtu": 1500, + "name": "basic-interconnect-test-id", + "router": "${google_compute_router.router.id}", + "type": "PARTNER" + } + google_compute_network.network: |- + { + "name": "basic-interconnect-test-network" + } + google_compute_router.router: |- + { + "bgp": [ + { + "asn": 16550 + } + ], + "name": "basic-interconnect-test-router", + "network": "${google_compute_network.network.name}" + } + - name: vpn-test + manifest: |- + { + "location": "global", + "vpc_flow_logs_config_id": "basic-test-id", + "vpn_tunnel": "projects/${data.google_project.project.number}/regions/us-central1/vpnTunnels/${google_compute_vpn_tunnel.tunnel.name}" + } + dependencies: + google_compute_address.vpn_static_ip: |- + { + "name": "basic-test-address" + } + google_compute_forwarding_rule.fr_esp: |- + { + "ip_address": "${google_compute_address.vpn_static_ip.address}", + "ip_protocol": "ESP", + "name": "basic-test-fresp", + "target": "${google_compute_vpn_gateway.target_gateway.id}" + } + google_compute_forwarding_rule.fr_udp500: |- + { + "ip_address": "${google_compute_address.vpn_static_ip.address}", + "ip_protocol": "UDP", + "name": "basic-test-fr500", + "port_range": "500", + "target": "${google_compute_vpn_gateway.target_gateway.id}" + } + google_compute_forwarding_rule.fr_udp4500: |- + { + "ip_address": "${google_compute_address.vpn_static_ip.address}", + "ip_protocol": "UDP", + "name": "basic-test-fr4500", + "port_range": "4500", + "target": "${google_compute_vpn_gateway.target_gateway.id}" + } + google_compute_network.network: |- + { + "name": "basic-test-network" + } + google_compute_route.route: |- + { + "dest_range": "15.0.0.0/24", + "name": "basic-test-route", + "network": "${google_compute_network.network.name}", + "next_hop_vpn_tunnel": "${google_compute_vpn_tunnel.tunnel.id}", + 
"priority": 1000 + } + google_compute_vpn_gateway.target_gateway: |- + { + "name": "basic-test-gateway", + "network": "${google_compute_network.network.id}" + } + google_compute_vpn_tunnel.tunnel: |- + { + "depends_on": [ + "${google_compute_forwarding_rule.fr_esp}", + "${google_compute_forwarding_rule.fr_udp500}", + "${google_compute_forwarding_rule.fr_udp4500}" + ], + "name": "basic-test-tunnel", + "peer_ip": "15.0.0.120", + "shared_secret": "a secret message", + "target_vpn_gateway": "${google_compute_vpn_gateway.target_gateway.id}" + } + - name: vpn-test + manifest: |- + { + "aggregation_interval": "INTERVAL_5_SEC", + "description": "VPC Flow Logs over a VPN Gateway.", + "flow_sampling": 0.5, + "location": "global", + "metadata": "INCLUDE_ALL_METADATA", + "state": "ENABLED", + "vpc_flow_logs_config_id": "full-test-id", + "vpn_tunnel": "projects/${data.google_project.project.number}/regions/us-central1/vpnTunnels/${google_compute_vpn_tunnel.tunnel.name}" + } + dependencies: + google_compute_address.vpn_static_ip: |- + { + "name": "full-test-address" + } + google_compute_forwarding_rule.fr_esp: |- + { + "ip_address": "${google_compute_address.vpn_static_ip.address}", + "ip_protocol": "ESP", + "name": "full-test-fresp", + "target": "${google_compute_vpn_gateway.target_gateway.id}" + } + google_compute_forwarding_rule.fr_udp500: |- + { + "ip_address": "${google_compute_address.vpn_static_ip.address}", + "ip_protocol": "UDP", + "name": "full-test-fr500", + "port_range": "500", + "target": "${google_compute_vpn_gateway.target_gateway.id}" + } + google_compute_forwarding_rule.fr_udp4500: |- + { + "ip_address": "${google_compute_address.vpn_static_ip.address}", + "ip_protocol": "UDP", + "name": "full-test-fr4500", + "port_range": "4500", + "target": "${google_compute_vpn_gateway.target_gateway.id}" + } + google_compute_network.network: |- + { + "name": "full-test-network" + } + google_compute_route.route: |- + { + "dest_range": "15.0.0.0/24", + "name": 
"full-test-route", + "network": "${google_compute_network.network.name}", + "next_hop_vpn_tunnel": "${google_compute_vpn_tunnel.tunnel.id}", + "priority": 1000 + } + google_compute_vpn_gateway.target_gateway: |- + { + "name": "full-test-gateway", + "network": "${google_compute_network.network.id}" + } + google_compute_vpn_tunnel.tunnel: |- + { + "depends_on": [ + "${google_compute_forwarding_rule.fr_esp}", + "${google_compute_forwarding_rule.fr_udp500}", + "${google_compute_forwarding_rule.fr_udp4500}" + ], + "name": "full-test-tunnel", + "peer_ip": "15.0.0.120", + "shared_secret": "a secret message", + "target_vpn_gateway": "${google_compute_vpn_gateway.target_gateway.id}" + } + argumentDocs: + aggregation_interval: |- + - + (Optional) + Optional. The aggregation interval for the logs. Default value is + INTERVAL_5_SEC. Possible values: AGGREGATION_INTERVAL_UNSPECIFIED INTERVAL_5_SEC INTERVAL_30_SEC INTERVAL_1_MIN INTERVAL_5_MIN INTERVAL_10_MIN INTERVAL_15_MIN" + create: '- Default is 20 minutes.' + create_time: |- + - + Output only. The time the config was created. + delete: '- Default is 20 minutes.' + description: |- + - + (Optional) + Optional. The user-supplied description of the VPC Flow Logs configuration. Maximum + of 512 characters. + effective_labels: for all of the labels present on the resource. + filter_expr: |- + - + (Optional) + Optional. Export filter used to define which VPC Flow Logs should be logged. + flow_sampling: |- + - + (Optional) + Optional. The value of the field must be in (0, 1]. The sampling rate + of VPC Flow Logs where 1.0 means all collected logs are reported. Setting the + sampling rate to 0.0 is not allowed. If you want to disable VPC Flow Logs, use + the state field instead. Default value is 1.0. 
+ id: '- an identifier for the resource with format projects/{{project}}/locations/{{location}}/vpcFlowLogsConfigs/{{vpc_flow_logs_config_id}}' + interconnect_attachment: |- + - + (Optional) + Traffic will be logged from the Interconnect Attachment. Format: projects/{project_id}/regions/{region}/interconnectAttachments/{name} + labels: |- + - + (Optional) + Optional. Resource labels to represent user-provided metadata. + location: |- + - + (Required) + Resource ID segment making up resource name. It identifies the resource + within its parent collection as described in https://google.aip.dev/122. See documentation + for resource type networkmanagement.googleapis.com/VpcFlowLogsConfig. + metadata: |- + - + (Optional) + Optional. Configures whether all, none or a subset of metadata fields + should be added to the reported VPC flow logs. Default value is INCLUDE_ALL_METADATA. + Possible values: METADATA_UNSPECIFIED INCLUDE_ALL_METADATA EXCLUDE_ALL_METADATA CUSTOM_METADATA + metadata_fields: |- + - + (Optional) + Optional. Custom metadata fields to include in the reported VPC flow + logs. Can only be specified if "metadata" was set to CUSTOM_METADATA. + name: |- + - + Identifier. Unique name of the configuration using the form: projects/{project_id}/locations/global/vpcFlowLogsConfigs/{vpc_flow_logs_config_id} + project: |- + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + state: |- + - + (Optional) + Optional. The state of the VPC Flow Log configuration. Default value + is ENABLED. When creating a new configuration, it must be enabled. Possible + terraform_labels: |- + - + The combination of labels configured directly on the resource + and default labels configured on the provider. + update: '- Default is 20 minutes.' + update_time: |- + - + Output only. The time the config was updated. + vpc_flow_logs_config_id: |- + - + (Required) + Required. ID of the VpcFlowLogsConfig. 
+ vpn_tunnel: |- + - + (Optional) + Traffic will be logged from the VPN Tunnel. Format: projects/{project_id}/regions/{region}/vpnTunnels/{name} + importStatements: [] google_network_security_address_group: subCategory: Network security description: AddressGroup is a resource that specifies how a collection of IP/DNS used in Firewall Policy. @@ -111453,6 +124916,467 @@ resources: - Time the AuthorizationPolicy was updated in UTC. importStatements: [] + google_network_security_authz_policy: + subCategory: Network security + description: AuthzPolicy is a resource that allows to forward traffic to a callout backend designed to scan the traffic for security purposes. + name: google_network_security_authz_policy + title: "" + examples: + - name: default + manifest: |- + { + "action": "CUSTOM", + "custom_provider": [ + { + "authz_extension": [ + { + "resources": [ + "${google_network_services_authz_extension.default.id}" + ] + } + ] + } + ], + "description": "my description", + "location": "us-west1", + "name": "my-authz-policy", + "project": "my-project-name", + "target": [ + { + "load_balancing_scheme": "INTERNAL_MANAGED", + "resources": [ + "${google_compute_forwarding_rule.default.self_link}" + ] + } + ] + } + references: + custom_provider.authz_extension.resources: google_network_services_authz_extension.default.id + target.resources: google_compute_forwarding_rule.default.self_link + dependencies: + google_compute_address.default: |- + { + "address_type": "INTERNAL", + "name": "l7-ilb-ip-address", + "project": "my-project-name", + "purpose": "GCE_ENDPOINT", + "region": "us-west1", + "subnetwork": "${google_compute_subnetwork.default.id}" + } + google_compute_forwarding_rule.default: |- + { + "depends_on": [ + "${google_compute_subnetwork.proxy_only}" + ], + "ip_address": "${google_compute_address.default.id}", + "ip_protocol": "TCP", + "load_balancing_scheme": "INTERNAL_MANAGED", + "name": "l7-ilb-forwarding-rule", + "network": 
"${google_compute_network.default.id}", + "port_range": "80", + "project": "my-project-name", + "region": "us-west1", + "subnetwork": "${google_compute_subnetwork.default.id}", + "target": "${google_compute_region_target_http_proxy.default.id}" + } + google_compute_network.default: |- + { + "auto_create_subnetworks": false, + "name": "lb-network", + "project": "my-project-name" + } + google_compute_region_backend_service.authz_extension: |- + { + "load_balancing_scheme": "INTERNAL_MANAGED", + "name": "authz-service", + "port_name": "grpc", + "project": "my-project-name", + "protocol": "HTTP2", + "region": "us-west1" + } + google_compute_region_backend_service.url_map: |- + { + "health_checks": [ + "${google_compute_region_health_check.default.id}" + ], + "load_balancing_scheme": "INTERNAL_MANAGED", + "name": "l7-ilb-backend-service", + "project": "my-project-name", + "region": "us-west1" + } + google_compute_region_health_check.default: |- + { + "http_health_check": [ + { + "port_specification": "USE_SERVING_PORT" + } + ], + "name": "l7-ilb-basic-check", + "project": "my-project-name", + "region": "us-west1" + } + google_compute_region_target_http_proxy.default: |- + { + "name": "l7-ilb-proxy", + "project": "my-project-name", + "region": "us-west1", + "url_map": "${google_compute_region_url_map.default.id}" + } + google_compute_region_url_map.default: |- + { + "default_service": "${google_compute_region_backend_service.url_map.id}", + "name": "l7-ilb-map", + "project": "my-project-name", + "region": "us-west1" + } + google_compute_subnetwork.default: |- + { + "ip_cidr_range": "10.1.2.0/24", + "name": "backend-subnet", + "network": "${google_compute_network.default.id}", + "project": "my-project-name", + "region": "us-west1" + } + google_compute_subnetwork.proxy_only: |- + { + "ip_cidr_range": "10.129.0.0/23", + "name": "proxy-only-subnet", + "network": "${google_compute_network.default.id}", + "project": "my-project-name", + "purpose": "REGIONAL_MANAGED_PROXY", + 
"region": "us-west1", + "role": "ACTIVE" + } + google_network_services_authz_extension.default: |- + { + "authority": "ext11.com", + "description": "my description", + "fail_open": false, + "forward_headers": [ + "Authorization" + ], + "load_balancing_scheme": "INTERNAL_MANAGED", + "location": "us-west1", + "name": "my-authz-ext", + "project": "my-project-name", + "service": "${google_compute_region_backend_service.authz_extension.self_link}", + "timeout": "0.1s" + } + argumentDocs: + ALLOW: ', DENY, CUSTOM.' + action: |- + - + (Required) + When the action is CUSTOM, customProvider must be specified. + When the action is ALLOW, only requests matching the policy will be allowed. + When the action is DENY, only requests matching the policy will be denied. + When a request arrives, the policies are evaluated in the following order: + authz_extension.resources: |- + - + (Required) + A list of references to authorization extensions that will be invoked for requests matching this policy. Limited to 1 custom provider. + cloud_iap.enabled: |- + - + (Required) + Enable Cloud IAP at the AuthzPolicy level. + create: '- Default is 30 minutes.' + create_time: |- + - + The timestamp when the resource was created. + custom_provider.authz_extension: |- + - + (Optional) + Delegate authorization decision to user authored Service Extension. Only one of cloudIap or authzExtension can be specified. + Structure is documented below. + custom_provider.cloud_iap: |- + - + (Optional) + Delegates authorization decisions to Cloud IAP. Applicable only for managed load balancers. Enabling Cloud IAP at the AuthzPolicy level is not compatible with Cloud IAP settings in the BackendService. Enabling IAP in both places will result in request failure. Ensure that IAP is enabled in either the AuthzPolicy or the BackendService but not in both places. + Structure is documented below. + delete: '- Default is 30 minutes.' 
+ effective_labels: |- + - + All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services. + from.not_sources: |- + - + (Optional) + Describes the properties of a request's sources. At least one of sources or notSources must be specified. Limited to 5 sources. A match occurs when ANY source (in sources or notSources) matches the request. Within a single source, the match follows AND semantics across fields and OR semantics within a single field, i.e. a match occurs when ANY principal matches AND ANY ipBlocks match. + Structure is documented below. + from.sources: |- + - + (Optional) + Describes the properties of a request's sources. At least one of sources or notSources must be specified. Limited to 5 sources. A match occurs when ANY source (in sources or notSources) matches the request. Within a single source, the match follows AND semantics across fields and OR semantics within a single field, i.e. a match occurs when ANY principal matches AND ANY ipBlocks match. + Structure is documented below. + header_set.headers: |- + - + (Optional) + A list of headers to match against in http header. The match can be one of exact, prefix, suffix, or contains (substring match). The match follows AND semantics which means all the headers must match. Matches are always case sensitive unless the ignoreCase is set. Limited to 5 matches. + Structure is documented below. + header_set.headers.name: |- + - + (Optional) + Specifies the name of the header in the request. + header_set.headers.value: |- + - + (Optional) + Specifies how the header match will be performed. + Structure is documented below. + hosts.contains: |- + - + (Optional) + The input string must have the substring specified here. Note: empty contains match is not allowed, please use regex instead. + Examples: + hosts.exact: |- + - + (Optional) + The input string must match exactly the string specified here. 
+ Examples: + hosts.ignore_case: |- + - + (Optional) + If true, indicates the exact/prefix/suffix/contains matching should be case insensitive. For example, the matcher data will match both input string Data and data if set to true. + hosts.prefix: |- + - + (Optional) + The input string must have the prefix specified here. Note: empty prefix is not allowed, please use regex instead. + Examples: + hosts.suffix: |- + - + (Optional) + The input string must have the suffix specified here. Note: empty prefix is not allowed, please use regex instead. + Examples: + http_rules.from: |- + - + (Optional) + Describes properties of one or more sources of a request. + Structure is documented below. + http_rules.to: |- + - + (Optional) + Describes properties of one or more targets of a request + Structure is documented below. + http_rules.when: |- + - + (Optional) + CEL expression that describes the conditions to be satisfied for the action. The result of the CEL expression is ANDed with the from and to. Refer to the CEL language reference for a list of available attributes. + iam_service_account.contains: |- + - + (Optional) + The input string must have the substring specified here. Note: empty contains match is not allowed, please use regex instead. + Examples: + iam_service_account.exact: |- + - + (Optional) + The input string must match exactly the string specified here. + Examples: + iam_service_account.ignore_case: |- + - + (Optional) + If true, indicates the exact/prefix/suffix/contains matching should be case insensitive. For example, the matcher data will match both input string Data and data if set to true. + iam_service_account.prefix: |- + - + (Optional) + The input string must have the prefix specified here. Note: empty prefix is not allowed, please use regex instead. + Examples: + iam_service_account.suffix: |- + - + (Optional) + The input string must have the suffix specified here. Note: empty prefix is not allowed, please use regex instead. 
+ Examples: + id: '- an identifier for the resource with format projects/{{project}}/locations/{{location}}/authzPolicies/{{name}}' + location: |- + - + (Required) + The location of the resource. + name: |- + - + (Required) + Identifier. Name of the AuthzPolicy resource. + not_sources.principals: |- + - + (Optional) + A list of identities derived from the client's certificate. This field will not match on a request unless mutual TLS is enabled for the Forwarding rule or Gateway. Each identity is a string whose value is matched against the URI SAN, or DNS SAN or the subject field in the client's certificate. The match can be exact, prefix, suffix or a substring match. One of exact, prefix, suffix or contains must be specified. + Limited to 5 principals. + Structure is documented below. + not_sources.resources: |- + - + (Optional) + A list of resources to match against the resource of the source VM of a request. + Limited to 5 resources. + Structure is documented below. + paths.contains: |- + - + (Optional) + The input string must have the substring specified here. Note: empty contains match is not allowed, please use regex instead. + Examples: + paths.exact: |- + - + (Optional) + The input string must match exactly the string specified here. + Examples: + paths.ignore_case: |- + - + (Optional) + If true, indicates the exact/prefix/suffix/contains matching should be case insensitive. For example, the matcher data will match both input string Data and data if set to true. + paths.prefix: |- + - + (Optional) + The input string must have the prefix specified here. Note: empty prefix is not allowed, please use regex instead. + Examples: + paths.suffix: |- + - + (Optional) + The input string must have the suffix specified here. Note: empty prefix is not allowed, please use regex instead. + Examples: + principals.contains: |- + - + (Optional) + The input string must have the substring specified here. Note: empty contains match is not allowed, please use regex instead. 
+ Examples: + principals.exact: |- + - + (Optional) + The input string must match exactly the string specified here. + Examples: + principals.ignore_case: |- + - + (Optional) + If true, indicates the exact/prefix/suffix/contains matching should be case insensitive. For example, the matcher data will match both input string Data and data if set to true. + principals.prefix: |- + - + (Optional) + The input string must have the prefix specified here. Note: empty prefix is not allowed, please use regex instead. + Examples: + principals.suffix: |- + - + (Optional) + The input string must have the suffix specified here. Note: empty prefix is not allowed, please use regex instead. + Examples: + resources.iam_service_account: |- + - + (Optional) + An IAM service account to match against the source service account of the VM sending the request. + Structure is documented below. + resources.tag_value_id_set: |- + - + (Optional) + A list of resource tag value permanent IDs to match against the resource manager tags value associated with the source VM of a request. + Structure is documented below. + sources.principals: |- + - + (Optional) + A list of identities derived from the client's certificate. This field will not match on a request unless mutual TLS is enabled for the Forwarding rule or Gateway. Each identity is a string whose value is matched against the URI SAN, or DNS SAN or the subject field in the client's certificate. The match can be exact, prefix, suffix or a substring match. One of exact, prefix, suffix or contains must be specified. + Limited to 5 principals. + Structure is documented below. + sources.resources: |- + - + (Optional) + A list of resources to match against the resource of the source VM of a request. + Limited to 5 resources. + Structure is documented below. + tag_value_id_set.ids: |- + - + (Optional) + A list of resource tag value permanent IDs to match against the resource manager tags value associated with the source VM of a request. 
The match follows AND semantics which means all the ids must match. + Limited to 5 matches. + target: |- + - + (Required) + Specifies the set of resources to which this policy should be applied to. + Structure is documented below. + target.custom_provider: |- + - + (Optional) + Required if the action is CUSTOM. Allows delegating authorization decisions to Cloud IAP or to Service Extensions. One of cloudIap or authzExtension must be specified. + Structure is documented below. + target.description: |- + - + (Optional) + A human-readable description of the resource. + target.effective_labels: for all of the labels present on the resource. + target.http_rules: |- + - + (Optional) + A list of authorization HTTP rules to match against the incoming request.A policy match occurs when at least one HTTP rule matches the request or when no HTTP rules are specified in the policy. At least one HTTP Rule is required for Allow or Deny Action. + Limited to 5 rules. + Structure is documented below. + target.labels: |- + - + (Optional) + Set of labels associated with the AuthzExtension resource. + target.load_balancing_scheme: |- + - + (Required) + All gateways and forwarding rules referenced by this policy and extensions must share the same load balancing scheme. + For more information, refer to Backend services overview. + Possible values are: INTERNAL_MANAGED, EXTERNAL_MANAGED, INTERNAL_SELF_MANAGED. + target.project: |- + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + target.resources: |- + - + (Optional) + A list of references to the Forwarding Rules on which this policy will be applied. + terraform_labels: |- + - + The combination of labels configured directly on the resource + and default labels configured on the provider. + to.operations: |- + - + (Optional) + Describes properties of one or more targets of a request. At least one of operations or notOperations must be specified. 
Limited to 5 operations. A match occurs when ANY operation (in operations or notOperations) matches. Within an operation, the match follows AND semantics across fields and OR semantics within a field, i.e. a match occurs when ANY path matches AND ANY header matches and ANY method matches. + Structure is documented below. + to.operations.header_set: |- + - + (Optional) + A list of headers to match against in http header. + Structure is documented below. + to.operations.hosts: |- + - + (Optional) + A list of HTTP Hosts to match against. The match can be one of exact, prefix, suffix, or contains (substring match). Matches are always case sensitive unless the ignoreCase is set. + Limited to 5 matches. + Structure is documented below. + to.operations.methods: |- + - + (Optional) + A list of HTTP methods to match against. Each entry must be a valid HTTP method name (GET, PUT, POST, HEAD, PATCH, DELETE, OPTIONS). It only allows exact match and is always case sensitive. + to.operations.paths: |- + - + (Optional) + A list of paths to match against. The match can be one of exact, prefix, suffix, or contains (substring match). Matches are always case sensitive unless the ignoreCase is set. + Limited to 5 matches. + Note that this path match includes the query parameters. For gRPC services, this should be a fully-qualified name of the form /package.service/method. + Structure is documented below. + update: '- Default is 30 minutes.' + update_time: |- + - + The timestamp when the resource was updated. + value.contains: |- + - + (Optional) + The input string must have the substring specified here. Note: empty contains match is not allowed, please use regex instead. + Examples: + value.exact: |- + - + (Optional) + The input string must match exactly the string specified here. + Examples: + value.ignore_case: |- + - + (Optional) + If true, indicates the exact/prefix/suffix/contains matching should be case insensitive. 
For example, the matcher data will match both input string Data and data if set to true. + value.prefix: |- + - + (Optional) + The input string must have the prefix specified here. Note: empty prefix is not allowed, please use regex instead. + Examples: + value.suffix: |- + - + (Optional) + The input string must have the suffix specified here. Note: empty prefix is not allowed, please use regex instead. + Examples: + importStatements: [] google_network_security_client_tls_policy: subCategory: Network security description: ClientTlsPolicy is a resource that specifies how a client should authenticate connections to backends of a service. @@ -111467,11 +125391,8 @@ resources: "foo": "bar" }, "name": "my-client-tls-policy", - "provider": "${google-beta}", "sni": "secure.example.com" } - references: - provider: google-beta - name: default manifest: |- { @@ -111489,7 +125410,6 @@ resources: "foo": "bar" }, "name": "my-client-tls-policy", - "provider": "${google-beta}", "server_validation_ca": [ { "grpc_endpoint": [ @@ -111497,18 +125417,9 @@ resources: "target_uri": "unix:mypath" } ] - }, - { - "grpc_endpoint": [ - { - "target_uri": "unix:mypath1" - } - ] } ] } - references: - provider: google-beta argumentDocs: certificate_provider_instance.plugin_instance: |- - @@ -111934,8 +125845,9 @@ resources: Server-defined URL of this resource. tls_inspection_policy: |- - - (Optional, Beta) + (Optional) Name of a TlsInspectionPolicy resource that defines how TLS inspection is performed for any rule that enables it. + Note: google_network_security_tls_inspection_policy resource is still in Beta therefore it will need to import the provider. update: '- Default is 30 minutes.' update_time: |- - @@ -112058,487 +125970,1389 @@ resources: A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". 
importStatements: [] - google_network_security_security_profile: + google_network_security_intercept_deployment: subCategory: Network security - description: A security profile defines the behavior associated to a profile type. - name: google_network_security_security_profile + description: InterceptDeployment represents the collectors within a Zone and is associated with a deployment group. + name: google_network_security_intercept_deployment title: "" examples: - name: default manifest: |- { - "description": "my description", + "forwarding_rule": "${google_compute_forwarding_rule.forwarding_rule.id}", + "intercept_deployment_group": "${google_network_security_intercept_deployment_group.deployment_group.id}", + "intercept_deployment_id": "example-deployment", "labels": { "foo": "bar" }, - "name": "my-security-profile", - "parent": "organizations/123456789", - "type": "THREAT_PREVENTION" + "location": "us-central1-a", + "provider": "${google-beta}" } - - name: default - manifest: |- - { - "description": "my description", - "name": "my-security-profile", - "parent": "organizations/123456789", - "threat_prevention_profile": [ + references: + forwarding_rule: google_compute_forwarding_rule.forwarding_rule.id + intercept_deployment_group: google_network_security_intercept_deployment_group.deployment_group.id + provider: google-beta + dependencies: + google_compute_forwarding_rule.forwarding_rule: |- { - "severity_overrides": [ - { - "action": "ALLOW", - "severity": "INFORMATIONAL" - }, - { - "action": "DENY", - "severity": "HIGH" - } + "backend_service": "${google_compute_region_backend_service.backend_service.id}", + "ip_protocol": "UDP", + "load_balancing_scheme": "INTERNAL", + "name": "example-fwr", + "network": "${google_compute_network.network.name}", + "ports": [ + 6081 ], - "threat_overrides": [ + "provider": "${google-beta}", + "region": "us-central1", + "subnetwork": "${google_compute_subnetwork.subnetwork.name}" + } + google_compute_network.network: |- + { + 
"auto_create_subnetworks": false, + "name": "example-network", + "provider": "${google-beta}" + } + google_compute_region_backend_service.backend_service: |- + { + "health_checks": [ + "${google_compute_region_health_check.health_check.id}" + ], + "load_balancing_scheme": "INTERNAL", + "name": "example-bs", + "protocol": "UDP", + "provider": "${google-beta}", + "region": "us-central1" + } + google_compute_region_health_check.health_check: |- + { + "http_health_check": [ { - "action": "ALLOW", - "threat_id": "280647" + "port": 80 } - ] + ], + "name": "example-hc", + "provider": "${google-beta}", + "region": "us-central1" + } + google_compute_subnetwork.subnetwork: |- + { + "ip_cidr_range": "10.1.0.0/16", + "name": "example-subnet", + "network": "${google_compute_network.network.name}", + "provider": "${google-beta}", + "region": "us-central1" + } + google_network_security_intercept_deployment_group.deployment_group: |- + { + "intercept_deployment_group_id": "example-dg", + "location": "global", + "network": "${google_compute_network.network.id}", + "provider": "${google-beta}" } - ], - "type": "THREAT_PREVENTION" - } argumentDocs: create: '- Default is 20 minutes.' create_time: |- - - Time the security profile was created in UTC. + Create time stamp delete: '- Default is 20 minutes.' - description: |- + effective_labels: |- - - (Optional) - An optional description of the security profile. The Max length is 512 characters. - effective_labels: for all of the labels present on the resource. - etag: |- + All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services. + forwarding_rule: |- - - This checksum is computed by the server based on the value of other fields, - and may be sent on update and delete requests to ensure the client has an up-to-date - value before proceeding. 
- id: '- an identifier for the resource with format {{parent}}/locations/{{location}}/securityProfiles/{{name}}' + (Required) + Immutable. The regional load balancer which the intercepted traffic should be forwarded + to. Format is: + projects/{project}/regions/{region}/forwardingRules/{forwardingRule} + id: '- an identifier for the resource with format projects/{{project}}/locations/{{location}}/interceptDeployments/{{intercept_deployment_id}}' + intercept_deployment_group: |- + - + (Required) + Immutable. The Intercept Deployment Group that this resource is part of. Format is: + projects/{project}/locations/global/interceptDeploymentGroups/{interceptDeploymentGroup} + intercept_deployment_id: |- + - + (Required) + Id of the requesting object + If auto-generating Id server-side, remove this field and + intercept_deployment_id from the method_signature of Create RPC labels: |- - (Optional) - A map of key/value label pairs to assign to the resource. + Optional. Labels as key value pairs + Note: This field is non-authoritative, and will only manage the labels present in your configuration. + Please refer to the field effective_labels for all of the labels present on the resource. location: |- - - - (Optional) - The location of the security profile. - The default value is global. - name: |- - (Required) - The name of the security profile resource. - parent: |- - - - (Optional) - The name of the parent this security profile belongs to. - Format: organizations/{organization_id}. - self_link: |- + Resource ID segment making up resource name. It identifies the resource within its parent collection as described in https://google.aip.dev/122. See documentation for resource type networksecurity.googleapis.com/InterceptDeployment. + name: |- - - Server-defined URL of this resource. - severity_overrides.action: |- + Identifier. The name of the InterceptDeployment. + project: |- + - (Optional) The ID of the project in which the resource belongs. 
+ If it is not provided, the provider project is used. + reconciling: |- - - (Required) - Threat action override. - Possible values are: ALERT, ALLOW, DEFAULT_ACTION, DENY. - severity_overrides.severity: |- + Whether reconciling is in progress, recommended per + https://google.aip.dev/128. + state: |- - - (Required) - Severity level to match. - Possible values are: CRITICAL, HIGH, INFORMATIONAL, LOW, MEDIUM. + Current state of the deployment. + Possible values: + STATE_UNSPECIFIED + ACTIVE + CREATING + DELETING + OUT_OF_SYNC + DELETE_FAILED terraform_labels: |- - The combination of labels configured directly on the resource and default labels configured on the provider. - threat_overrides.action: |- - - - (Required) - Threat action. - Possible values are: ALERT, ALLOW, DEFAULT_ACTION, DENY. - threat_overrides.threat_id: |- - - - (Required) - Vendor-specific ID of a threat to override. - threat_overrides.type: |- - - - (Output) - Type of threat. - threat_prevention_profile: |- - - - (Optional) - The threat prevention configuration for the security profile. - Structure is documented below. - threat_prevention_profile.severity_overrides: |- - - - (Optional) - The configuration for overriding threats actions by severity match. - Structure is documented below. - threat_prevention_profile.threat_overrides: |- - - - (Optional) - The configuration for overriding threats actions by threat id match. - If a threat is matched both by configuration provided in severity overrides - and threat overrides, the threat overrides action is applied. - Structure is documented below. - type: |- - - - (Required) - The type of security profile. - Possible values are: THREAT_PREVENTION. update: '- Default is 20 minutes.' update_time: |- - - Time the security profile was updated in UTC. 
+ Update time stamp importStatements: [] - google_network_security_security_profile_group: + google_network_security_intercept_deployment_group: subCategory: Network security - description: A security profile group defines a container for security profiles. - name: google_network_security_security_profile_group + description: A Deployment Group represents the collector deployments across different zones within an organization. + name: google_network_security_intercept_deployment_group title: "" examples: - name: default manifest: |- { - "description": "my description", + "intercept_deployment_group_id": "example-dg", "labels": { "foo": "bar" }, - "name": "sec-profile-group", - "parent": "organizations/123456789", - "threat_prevention_profile": "${google_network_security_security_profile.security_profile.id}" + "location": "global", + "network": "${google_compute_network.network.id}", + "provider": "${google-beta}" } references: - threat_prevention_profile: google_network_security_security_profile.security_profile.id + network: google_compute_network.network.id + provider: google-beta dependencies: - google_network_security_security_profile.security_profile: |- + google_compute_network.network: |- { - "location": "global", - "name": "sec-profile", - "parent": "organizations/123456789", - "type": "THREAT_PREVENTION" + "auto_create_subnetworks": false, + "name": "example-network", + "provider": "${google-beta}" } argumentDocs: + connected_endpoint_groups: |- + - + Output only. The list of Intercept Endpoint Groups that are connected to this resource. + Structure is documented below. + connected_endpoint_groups.name: |- + - + (Output) + Output only. A connected intercept endpoint group. create: '- Default is 20 minutes.' create_time: |- - - Time the security profile group was created in UTC. + Output only. [Output only] Create time stamp delete: '- Default is 20 minutes.' - description: |- + effective_labels: |- - - (Optional) - An optional description of the profile. 
The Max length is 512 characters. - effective_labels: for all of the labels present on the resource. - etag: |- + All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services. + id: '- an identifier for the resource with format projects/{{project}}/locations/{{location}}/interceptDeploymentGroups/{{intercept_deployment_group_id}}' + intercept_deployment_group_id: |- - - This checksum is computed by the server based on the value of other fields, - and may be sent on update and delete requests to ensure the client has an up-to-date - value before proceeding. - id: '- an identifier for the resource with format {{parent}}/locations/{{location}}/securityProfileGroups/{{name}}' + (Required) + Required. Id of the requesting object + If auto-generating Id server-side, remove this field and + intercept_deployment_group_id from the method_signature of Create RPC labels: |- - (Optional) - A map of key/value label pairs to assign to the resource. + Optional. Labels as key value pairs + Note: This field is non-authoritative, and will only manage the labels present in your configuration. + Please refer to the field effective_labels for all of the labels present on the resource. location: |- - - (Optional) - The location of the security profile group. - The default value is global. + (Required) + Resource ID segment making up resource name. It identifies the resource within its parent collection as described in https://google.aip.dev/122. See documentation for resource type networksecurity.googleapis.com/InterceptDeploymentGroup. name: |- + - + Output only. Identifier. Then name of the InterceptDeploymentGroup. + network: |- - (Required) - The name of the security profile group resource. - parent: |- + Required. Immutable. The network that is being used for the deployment. Format is: + projects/{project}/global/networks/{network}. 
+ project: |- + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + reconciling: |- - - (Optional) - The name of the parent this security profile group belongs to. - Format: organizations/{organization_id}. + Output only. Whether reconciling is in progress, recommended per + https://google.aip.dev/128. + state: |- + - + Output only. Current state of the deployment group. + Possible values: + STATE_UNSPECIFIED + ACTIVE + CREATING + DELETING terraform_labels: |- - The combination of labels configured directly on the resource and default labels configured on the provider. - threat_prevention_profile: |- - - - (Optional) - Reference to a SecurityProfile with the threat prevention configuration for the SecurityProfileGroup. update: '- Default is 20 minutes.' update_time: |- - - Time the security profile group was updated in UTC. + Output only. [Output only] Update time stamp importStatements: [] - google_network_security_server_tls_policy: + google_network_security_intercept_endpoint_group: subCategory: Network security - description: ClientTlsPolicy is a resource that specifies how a client should authenticate connections to backends of a service. - name: google_network_security_server_tls_policy + description: An intercept endpoint group is a global resource in the consumer account representing the producer’s deployment group. 
+ name: google_network_security_intercept_endpoint_group title: "" examples: - name: default manifest: |- { - "allow_open": "false", - "description": "my description", - "labels": { - "foo": "bar" - }, - "mtls_policy": [ - { - "client_validation_ca": [ - { - "grpc_endpoint": [ - { - "target_uri": "unix:mypath" - } - ] - }, - { - "grpc_endpoint": [ - { - "target_uri": "unix:abc/mypath" - } - ] - }, - { - "certificate_provider_instance": [ - { - "plugin_instance": "google_cloud_private_spiffe" - } - ] - } - ] - } - ], - "name": "my-server-tls-policy", - "provider": "${google-beta}", - "server_certificate": [ - { - "certificate_provider_instance": [ - { - "plugin_instance": "google_cloud_private_spiffe" - } - ] - } - ] - } - references: - provider: google-beta - - name: default - manifest: |- - { - "allow_open": "false", - "description": "my description", + "intercept_deployment_group": "${google_network_security_intercept_deployment_group.deployment_group.id}", + "intercept_endpoint_group_id": "example-eg", "labels": { "foo": "bar" }, "location": "global", - "mtls_policy": [ - { - "client_validation_mode": "ALLOW_INVALID_OR_MISSING_CLIENT_CERT" - } - ], - "name": "my-server-tls-policy", "provider": "${google-beta}" } references: + intercept_deployment_group: google_network_security_intercept_deployment_group.deployment_group.id provider: google-beta - - name: default - manifest: |- - { - "allow_open": "false", - "description": "my description", - "labels": { - "foo": "bar" - }, - "location": "global", - "name": "my-server-tls-policy", - "provider": "${google-beta}", - "server_certificate": [ + dependencies: + google_compute_network.network: |- { - "grpc_endpoint": [ - { - "target_uri": "unix:mypath" - } - ] + "auto_create_subnetworks": false, + "name": "example-network", + "provider": "${google-beta}" } - ] - } - references: - provider: google-beta + google_network_security_intercept_deployment_group.deployment_group: |- + { + "intercept_deployment_group_id": 
"example-dg", + "location": "global", + "network": "${google_compute_network.network.id}", + "provider": "${google-beta}" + } + argumentDocs: + create: '- Default is 20 minutes.' + create_time: |- + - + Create time stamp. + delete: '- Default is 20 minutes.' + effective_labels: |- + - + All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services. + id: '- an identifier for the resource with format projects/{{project}}/locations/{{location}}/interceptEndpointGroups/{{intercept_endpoint_group_id}}' + intercept_deployment_group: |- + - + (Required) + Immutable. The Intercept Deployment Group that this resource is connected to. Format + is: + projects/{project}/locations/global/interceptDeploymentGroups/{interceptDeploymentGroup} + intercept_endpoint_group_id: |- + - + (Required) + ID of the Intercept Endpoint Group. + labels: |- + - + (Optional) + Optional. Labels as key value pairs + Note: This field is non-authoritative, and will only manage the labels present in your configuration. + Please refer to the field effective_labels for all of the labels present on the resource. + location: |- + - + (Required) + The location of the Intercept Endpoint Group, currently restricted to global. + name: |- + - + Identifier. The name of the Intercept Endpoint Group. + project: |- + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + reconciling: |- + - + Whether reconciling is in progress, recommended per + https://google.aip.dev/128. + state: |- + - + Current state of the endpoint group. + Possible values: + STATE_UNSPECIFIED + ACTIVE + CLOSED + CREATING + DELETING + OUT_OF_SYNC + terraform_labels: |- + - + The combination of labels configured directly on the resource + and default labels configured on the provider. + update: '- Default is 20 minutes.' + update_time: |- + - + Update time stamp. 
+ importStatements: [] + google_network_security_intercept_endpoint_group_association: + subCategory: Network security + description: Creates an association between a VPC and an Intercept Endpoint Group in order to intercept traffic in that VPC. + name: google_network_security_intercept_endpoint_group_association + title: "" + examples: - name: default manifest: |- { - "allow_open": "false", - "description": "my description", + "intercept_endpoint_group": "${google_network_security_intercept_endpoint_group.endpoint_group.id}", + "intercept_endpoint_group_association_id": "example-ega", "labels": { "foo": "bar" }, "location": "global", - "mtls_policy": [ - { - "client_validation_mode": "REJECT_INVALID", - "client_validation_trust_config": "projects/${data.google_project.project.number}/locations/global/trustConfigs/${google_certificate_manager_trust_config.default.name}" - } - ], - "name": "my-server-tls-policy", + "network": "${google_compute_network.consumer_network.id}", "provider": "${google-beta}" } references: + intercept_endpoint_group: google_network_security_intercept_endpoint_group.endpoint_group.id + network: google_compute_network.consumer_network.id provider: google-beta dependencies: - google_certificate_manager_trust_config.default: |- + google_compute_network.consumer_network: |- { - "description": "sample trust config description", - "labels": { - "foo": "bar" - }, + "auto_create_subnetworks": false, + "name": "example-cons-network", + "provider": "${google-beta}" + } + google_compute_network.producer_network: |- + { + "auto_create_subnetworks": false, + "name": "example-prod-network", + "provider": "${google-beta}" + } + google_network_security_intercept_deployment_group.deployment_group: |- + { + "intercept_deployment_group_id": "example-dg", "location": "global", - "name": "my-trust-config", - "provider": "${google-beta}", - "trust_stores": [ - { - "intermediate_cas": [ - { - "pem_certificate": "${file(\"test-fixtures/ca_cert.pem\")}" - } - ], - 
"trust_anchors": [ - { - "pem_certificate": "${file(\"test-fixtures/ca_cert.pem\")}" - } - ] - } - ] + "network": "${google_compute_network.producer_network.id}", + "provider": "${google-beta}" + } + google_network_security_intercept_endpoint_group.endpoint_group: |- + { + "intercept_deployment_group": "${google_network_security_intercept_deployment_group.deployment_group.id}", + "intercept_endpoint_group_id": "example-eg", + "location": "global", + "provider": "${google-beta}" } argumentDocs: - allow_open: |- - - - (Optional) - This field applies only for Traffic Director policies. It is must be set to false for external HTTPS load balancer policies. - Determines if server allows plaintext connections. If set to true, server allows plain text connections. By default, it is set to false. This setting is not exclusive of other encryption modes. For example, if allowOpen and mtlsPolicy are set, server allows both plain text and mTLS connections. See documentation of other encryption modes to confirm compatibility. - Consider using it if you wish to upgrade in place your deployment to TLS while having mixed TLS and non-TLS traffic reaching port :80. - certificate_provider_instance.plugin_instance: |- - - - (Required) - Plugin instance name, used to locate and load CertificateProvider instance configuration. Set to "google_cloud_private_spiffe" to use Certificate Authority Service certificate provider instance. - client_validation_ca.certificate_provider_instance: |- - - - (Optional) - Optional if policy is to be used with Traffic Director. For external HTTPS load balancer must be empty. - Defines a mechanism to provision server identity (public and private keys). Cannot be combined with allowOpen as a permissive mode that allows both plain text and TLS is not supported. - Structure is documented below. - client_validation_ca.grpc_endpoint: |- - - - (Optional) - gRPC specific configuration to access the gRPC server to obtain the cert and private key. 
- Structure is documented below. - create: '- Default is 30 minutes.' + create: '- Default is 20 minutes.' create_time: |- - - Time the ServerTlsPolicy was created in UTC. - delete: '- Default is 30 minutes.' - description: |- - - - (Optional) - A free-text description of the resource. Max length 1024 characters. + Create time stamp. + delete: '- Default is 20 minutes.' effective_labels: |- - All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services. - grpc_endpoint.target_uri: |- + id: '- an identifier for the resource with format projects/{{project}}/locations/{{location}}/interceptEndpointGroupAssociations/{{intercept_endpoint_group_association_id}}' + intercept_endpoint_group: |- - (Required) - The target URI of the gRPC endpoint. Only UDS path is supported, and should start with "unix:". - id: '- an identifier for the resource with format projects/{{project}}/locations/{{location}}/serverTlsPolicies/{{name}}' + Immutable. The Intercept Endpoint Group that this resource is connected to. Format + is: + projects/{project}/locations/global/interceptEndpointGroups/{interceptEndpointGroup}. + intercept_endpoint_group_association_id: |- + - + (Optional) + ID of the Intercept Endpoint Group Association. labels: |- - (Optional) - Set of label tags associated with the ServerTlsPolicy resource. + Optional. Labels as key value pairs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource. location: |- - - (Optional) - The location of the server tls policy. - The default value is global. - mtls_policy: |- - - - (Optional) - This field is required if the policy is used with external HTTPS load balancers. This field can be empty for Traffic Director. 
- Defines a mechanism to provision peer validation certificates for peer to peer authentication (Mutual TLS - mTLS). If not specified, client certificate will not be requested. The connection is treated as TLS and not mTLS. If allowOpen and mtlsPolicy are set, server allows both plain text and mTLS connections. - Structure is documented below. - mtls_policy.client_validation_ca: |- + (Required) + The location of the Intercept Endpoint Group Association, currently restricted to global. + locations_details: |- - - (Optional) - Required if the policy is to be used with Traffic Director. For external HTTPS load balancers it must be empty. - Defines the mechanism to obtain the Certificate Authority certificate to validate the client certificate. + The list of locations that are currently supported by the associated Intercept Deployment Group and their state. Structure is documented below. - mtls_policy.client_validation_mode: |- + locations_details.location: |- - - (Optional) - When the client presents an invalid certificate or no certificate to the load balancer, the clientValidationMode specifies how the client connection is handled. - Required if the policy is to be used with the external HTTPS load balancing. For Traffic Director it must be empty. - Possible values are: CLIENT_VALIDATION_MODE_UNSPECIFIED, ALLOW_INVALID_OR_MISSING_CLIENT_CERT, REJECT_INVALID. - mtls_policy.client_validation_trust_config: |- + (Output) + Location supported by the Intercept Deployment Group, for example us-central1-a + locations_details.state: |- - - (Optional) - Reference to the TrustConfig from certificatemanager.googleapis.com namespace. - If specified, the chain validation will be performed against certificates configured in the given TrustConfig. - Allowed only if the policy is to be used with external HTTPS load balancers. + (Output) + The association state in this location. + Possible values: + STATE_UNSPECIFIED + ACTIVE + OUT_OF_SYNC name: |- + - + Identifier. 
The name of the Intercept Endpoint Group Association. + network: |- - (Required) - Name of the ServerTlsPolicy resource. + Immutable. The VPC network associated. Format: + projects/{project}/global/networks/{network}. project: |- - (Optional) The ID of the project in which the resource belongs. If it is not provided, the provider project is used. - server_certificate: |- - - - (Optional) - Defines a mechanism to provision client identity (public and private keys) for peer to peer authentication. The presence of this dictates mTLS. - Structure is documented below. - server_certificate.certificate_provider_instance: |- + reconciling: |- - - (Optional) - Optional if policy is to be used with Traffic Director. For external HTTPS load balancer must be empty. - Defines a mechanism to provision server identity (public and private keys). Cannot be combined with allowOpen as a permissive mode that allows both plain text and TLS is not supported. - Structure is documented below. - server_certificate.grpc_endpoint: |- + Whether reconciling is in progress. + state: |- - - (Optional) - gRPC specific configuration to access the gRPC server to obtain the cert and private key. - Structure is documented below. + Current state of the Intercept Endpoint Group Association. + Possible values: + STATE_UNSPECIFIED + ACTIVE + CREATING + DELETING + CLOSED + OUT_OF_SYNC + DELETE_FAILED terraform_labels: |- - The combination of labels configured directly on the resource and default labels configured on the provider. - update: '- Default is 30 minutes.' + update: '- Default is 20 minutes.' update_time: |- - - Time the ServerTlsPolicy was updated in UTC. + Update time stamp. importStatements: [] - google_network_security_tls_inspection_policy: + google_network_security_mirroring_deployment: subCategory: Network security - description: The TlsInspectionPolicy resource contains references to CA pools in Certificate Authority Service and associated metadata. 
- name: google_network_security_tls_inspection_policy + description: MirroringDeployment represents the collectors within a Zone and is associated with a deployment group. + name: google_network_security_mirroring_deployment title: "" examples: - name: default manifest: |- { - "ca_pool": "${google_privateca_ca_pool.default.id}", - "depends_on": [ - "${google_privateca_ca_pool.default}", - "${google_privateca_certificate_authority.default}", - "${google_privateca_ca_pool_iam_member.tls_inspection_permission}" - ], - "exclude_public_ca_set": false, - "location": "us-central1", - "name": "my-tls-inspection-policy" - } - references: - ca_pool: google_privateca_ca_pool.default.id + "forwarding_rule": "${google_compute_forwarding_rule.forwarding_rule.id}", + "labels": { + "foo": "bar" + }, + "location": "us-central1-a", + "mirroring_deployment_group": "${google_network_security_mirroring_deployment_group.deployment_group.id}", + "mirroring_deployment_id": "example-deployment", + "provider": "${google-beta}" + } + references: + forwarding_rule: google_compute_forwarding_rule.forwarding_rule.id + mirroring_deployment_group: google_network_security_mirroring_deployment_group.deployment_group.id + provider: google-beta + dependencies: + google_compute_forwarding_rule.forwarding_rule: |- + { + "backend_service": "${google_compute_region_backend_service.backend_service.id}", + "ip_protocol": "UDP", + "is_mirroring_collector": true, + "load_balancing_scheme": "INTERNAL", + "name": "example-fwr", + "network": "${google_compute_network.network.name}", + "ports": [ + 6081 + ], + "provider": "${google-beta}", + "region": "us-central1", + "subnetwork": "${google_compute_subnetwork.subnetwork.name}" + } + google_compute_network.network: |- + { + "auto_create_subnetworks": false, + "name": "example-network", + "provider": "${google-beta}" + } + google_compute_region_backend_service.backend_service: |- + { + "health_checks": [ + "${google_compute_region_health_check.health_check.id}" + 
], + "load_balancing_scheme": "INTERNAL", + "name": "example-bs", + "protocol": "UDP", + "provider": "${google-beta}", + "region": "us-central1" + } + google_compute_region_health_check.health_check: |- + { + "http_health_check": [ + { + "port": 80 + } + ], + "name": "example-hc", + "provider": "${google-beta}", + "region": "us-central1" + } + google_compute_subnetwork.subnetwork: |- + { + "ip_cidr_range": "10.1.0.0/16", + "name": "example-subnet", + "network": "${google_compute_network.network.name}", + "provider": "${google-beta}", + "region": "us-central1" + } + google_network_security_mirroring_deployment_group.deployment_group: |- + { + "location": "global", + "mirroring_deployment_group_id": "example-dg", + "network": "${google_compute_network.network.id}", + "provider": "${google-beta}" + } + argumentDocs: + create: '- Default is 20 minutes.' + create_time: |- + - + Output only. [Output only] Create time stamp + delete: '- Default is 20 minutes.' + effective_labels: |- + - + All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services. + forwarding_rule: |- + - + (Required) + Required. Immutable. The regional load balancer which the mirrored traffic should be forwarded + to. Format is: + projects/{project}/regions/{region}/forwardingRules/{forwardingRule} + id: '- an identifier for the resource with format projects/{{project}}/locations/{{location}}/mirroringDeployments/{{mirroring_deployment_id}}' + labels: |- + - + (Optional) + Optional. Labels as key value pairs + Note: This field is non-authoritative, and will only manage the labels present in your configuration. + Please refer to the field effective_labels for all of the labels present on the resource. + location: |- + - + (Required) + Resource ID segment making up resource name. It identifies the resource within its parent collection as described in https://google.aip.dev/122. 
See documentation for resource type networksecurity.googleapis.com/MirroringDeployment. + mirroring_deployment_group: |- + - + (Required) + Required. Immutable. The Mirroring Deployment Group that this resource is part of. Format is: + projects/{project}/locations/global/mirroringDeploymentGroups/{mirroringDeploymentGroup} + mirroring_deployment_id: |- + - + (Required) + Required. Id of the requesting object + If auto-generating Id server-side, remove this field and + mirroring_deployment_id from the method_signature of Create RPC + name: |- + - + Immutable. Identifier. The name of the MirroringDeployment. + project: |- + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + reconciling: |- + - + Output only. Whether reconciling is in progress, recommended per + https://google.aip.dev/128. + state: |- + - + Output only. Current state of the deployment. + Possible values: + STATE_UNSPECIFIED + ACTIVE + CREATING + DELETING + OUT_OF_SYNC + DELETE_FAILED + terraform_labels: |- + - + The combination of labels configured directly on the resource + and default labels configured on the provider. + update: '- Default is 20 minutes.' + update_time: |- + - + Output only. [Output only] Update time stamp + importStatements: [] + google_network_security_mirroring_deployment_group: + subCategory: Network security + description: A Deployment Group represents the collector deployments across different zones within an organization. 
+ name: google_network_security_mirroring_deployment_group + title: "" + examples: + - name: default + manifest: |- + { + "labels": { + "foo": "bar" + }, + "location": "global", + "mirroring_deployment_group_id": "example-dg", + "network": "${google_compute_network.network.id}", + "provider": "${google-beta}" + } + references: + network: google_compute_network.network.id + provider: google-beta + dependencies: + google_compute_network.network: |- + { + "auto_create_subnetworks": false, + "name": "example-network", + "provider": "${google-beta}" + } + argumentDocs: + connected_endpoint_groups: |- + - + Output only. The list of Mirroring Endpoint Groups that are connected to this resource. + Structure is documented below. + connected_endpoint_groups.name: |- + - + (Output) + Output only. A connected mirroring endpoint group. + create: '- Default is 20 minutes.' + create_time: |- + - + Output only. [Output only] Create time stamp + delete: '- Default is 20 minutes.' + effective_labels: |- + - + All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services. + id: '- an identifier for the resource with format projects/{{project}}/locations/{{location}}/mirroringDeploymentGroups/{{mirroring_deployment_group_id}}' + labels: |- + - + (Optional) + Optional. Labels as key value pairs + Note: This field is non-authoritative, and will only manage the labels present in your configuration. + Please refer to the field effective_labels for all of the labels present on the resource. + location: |- + - + (Required) + Resource ID segment making up resource name. It identifies the resource within its parent collection as described in https://google.aip.dev/122. See documentation for resource type networksecurity.googleapis.com/MirroringDeploymentGroup. + mirroring_deployment_group_id: |- + - + (Required) + Required. 
Id of the requesting object + If auto-generating Id server-side, remove this field and + mirroring_deployment_group_id from the method_signature of Create RPC + name: |- + - + Immutable. Identifier. The name of the MirroringDeploymentGroup. + network: |- + - + (Required) + Required. Immutable. The network that is being used for the deployment. Format is: + projects/{project}/global/networks/{network}. + project: |- + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + reconciling: |- + - + Output only. Whether reconciling is in progress, recommended per + https://google.aip.dev/128. + state: |- + - + Output only. Current state of the deployment group. + Possible values: + STATE_UNSPECIFIED + ACTIVE + CREATING + DELETING + terraform_labels: |- + - + The combination of labels configured directly on the resource + and default labels configured on the provider. + update: '- Default is 20 minutes.' + update_time: |- + - + Output only. [Output only] Update time stamp + importStatements: [] + google_network_security_mirroring_endpoint_group: + subCategory: Network security + description: A mirroring endpoint group is a global resource in the consumer account representing the producer’s deployment group. 
+ name: google_network_security_mirroring_endpoint_group + title: "" + examples: + - name: default + manifest: |- + { + "labels": { + "foo": "bar" + }, + "location": "global", + "mirroring_deployment_group": "${google_network_security_mirroring_deployment_group.deployment_group.id}", + "mirroring_endpoint_group_id": "example-eg", + "provider": "${google-beta}" + } + references: + mirroring_deployment_group: google_network_security_mirroring_deployment_group.deployment_group.id + provider: google-beta + dependencies: + google_compute_network.network: |- + { + "auto_create_subnetworks": false, + "name": "example-network", + "provider": "${google-beta}" + } + google_network_security_mirroring_deployment_group.deployment_group: |- + { + "location": "global", + "mirroring_deployment_group_id": "example-dg", + "network": "${google_compute_network.network.id}", + "provider": "${google-beta}" + } + argumentDocs: + create: '- Default is 20 minutes.' + create_time: |- + - + Output only. [Output only] Create time stamp + delete: '- Default is 20 minutes.' + effective_labels: |- + - + All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services. + id: '- an identifier for the resource with format projects/{{project}}/locations/{{location}}/mirroringEndpointGroups/{{mirroring_endpoint_group_id}}' + labels: |- + - + (Optional) + Optional. Labels as key value pairs + Note: This field is non-authoritative, and will only manage the labels present in your configuration. + Please refer to the field effective_labels for all of the labels present on the resource. + location: |- + - + (Required) + Resource ID segment making up resource name. It identifies the resource within its parent collection as described in https://google.aip.dev/122. See documentation for resource type networksecurity.googleapis.com/MirroringEndpointGroup. + mirroring_deployment_group: |- + - + (Required) + Required. Immutable. 
The Mirroring Deployment Group that this resource is connected to. Format + is: + projects/{project}/locations/global/mirroringDeploymentGroups/{mirroringDeploymentGroup} + mirroring_endpoint_group_id: |- + - + (Required) + Required. Id of the requesting object + If auto-generating Id server-side, remove this field and + mirroring_endpoint_group_id from the method_signature of Create RPC + name: |- + - + Immutable. Identifier. The name of the MirroringEndpointGroup. + project: |- + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + reconciling: |- + - + Output only. Whether reconciling is in progress, recommended per + https://google.aip.dev/128. + state: |- + - + Output only. Current state of the endpoint group. + Possible values: + STATE_UNSPECIFIED + ACTIVE + CLOSED + CREATING + DELETING + OUT_OF_SYNC + terraform_labels: |- + - + The combination of labels configured directly on the resource + and default labels configured on the provider. + update: '- Default is 20 minutes.' + update_time: |- + - + Output only. [Output only] Update time stamp + importStatements: [] + google_network_security_mirroring_endpoint_group_association: + subCategory: Network security + description: Creates an association between a VPC and a mirroring endpoint group in order to mirror traffic in that VPC. 
+ name: google_network_security_mirroring_endpoint_group_association + title: "" + examples: + - name: default + manifest: |- + { + "labels": { + "foo": "bar" + }, + "location": "global", + "mirroring_endpoint_group": "${google_network_security_mirroring_endpoint_group.endpoint_group.id}", + "mirroring_endpoint_group_association_id": "example-ega", + "network": "${google_compute_network.consumer_network.id}", + "provider": "${google-beta}" + } + references: + mirroring_endpoint_group: google_network_security_mirroring_endpoint_group.endpoint_group.id + network: google_compute_network.consumer_network.id + provider: google-beta + dependencies: + google_compute_network.consumer_network: |- + { + "auto_create_subnetworks": false, + "name": "example-cons-network", + "provider": "${google-beta}" + } + google_compute_network.producer_network: |- + { + "auto_create_subnetworks": false, + "name": "example-prod-network", + "provider": "${google-beta}" + } + google_network_security_mirroring_deployment_group.deployment_group: |- + { + "location": "global", + "mirroring_deployment_group_id": "example-dg", + "network": "${google_compute_network.producer_network.id}", + "provider": "${google-beta}" + } + google_network_security_mirroring_endpoint_group.endpoint_group: |- + { + "location": "global", + "mirroring_deployment_group": "${google_network_security_mirroring_deployment_group.deployment_group.id}", + "mirroring_endpoint_group_id": "example-eg", + "provider": "${google-beta}" + } + argumentDocs: + create: '- Default is 20 minutes.' + create_time: |- + - + Output only. [Output only] Create time stamp + delete: '- Default is 20 minutes.' + effective_labels: |- + - + All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services. 
+ id: '- an identifier for the resource with format projects/{{project}}/locations/{{location}}/mirroringEndpointGroupAssociations/{{mirroring_endpoint_group_association_id}}' + labels: |- + - + (Optional) + Optional. Labels as key value pairs + Note: This field is non-authoritative, and will only manage the labels present in your configuration. + Please refer to the field effective_labels for all of the labels present on the resource. + location: |- + - + (Required) + Resource ID segment making up resource name. It identifies the resource within its parent collection as described in https://google.aip.dev/122. See documentation for resource type networksecurity.googleapis.com/MirroringEndpointGroupAssociation. + locations_details: |- + - + Output only. The list of locations that this association is in and its details. + Structure is documented below. + locations_details.location: |- + - + (Output) + Output only. The cloud location. + locations_details.state: |- + - + (Output) + Output only. The association state in this location. + Possible values: + STATE_UNSPECIFIED + ACTIVE + OUT_OF_SYNC + mirroring_endpoint_group: |- + - + (Required) + Required. Immutable. The Mirroring Endpoint Group that this resource is connected to. Format + is: + projects/{project}/locations/global/mirroringEndpointGroups/{mirroringEndpointGroup} + mirroring_endpoint_group_association_id: |- + - + (Optional) + Optional. Id of the requesting object + If auto-generating Id server-side, remove this field and + mirroring_endpoint_group_association_id from the method_signature of Create + RPC + name: |- + - + Immutable. Identifier. The name of the MirroringEndpointGroupAssociation. + network: |- + - + (Required) + Required. Immutable. The VPC network associated. Format: + projects/{project}/global/networks/{network}. + project: |- + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + reconciling: |- + - + Output only. 
Whether reconciling is in progress, recommended per + https://google.aip.dev/128. + state: |- + - + Output only. Current state of the endpoint group association. + Possible values: + STATE_UNSPECIFIED + ACTIVE + CREATING + DELETING + CLOSED + OUT_OF_SYNC + DELETE_FAILED + terraform_labels: |- + - + The combination of labels configured directly on the resource + and default labels configured on the provider. + update: '- Default is 20 minutes.' + update_time: |- + - + Output only. [Output only] Update time stamp + importStatements: [] + google_network_security_security_profile: + subCategory: Network security + description: A security profile defines the behavior associated to a profile type. + name: google_network_security_security_profile + title: "" + examples: + - name: default + manifest: |- + { + "description": "my description", + "labels": { + "foo": "bar" + }, + "name": "my-security-profile", + "parent": "organizations/123456789", + "type": "THREAT_PREVENTION" + } + - name: default + manifest: |- + { + "description": "my description", + "name": "my-security-profile", + "parent": "organizations/123456789", + "threat_prevention_profile": [ + { + "severity_overrides": [ + { + "action": "ALLOW", + "severity": "INFORMATIONAL" + }, + { + "action": "DENY", + "severity": "HIGH" + } + ], + "threat_overrides": [ + { + "action": "ALLOW", + "threat_id": "280647" + } + ] + } + ], + "type": "THREAT_PREVENTION" + } + argumentDocs: + create: '- Default is 20 minutes.' + create_time: |- + - + Time the security profile was created in UTC. + delete: '- Default is 20 minutes.' + description: |- + - + (Optional) + An optional description of the security profile. The Max length is 512 characters. + effective_labels: for all of the labels present on the resource. + etag: |- + - + This checksum is computed by the server based on the value of other fields, + and may be sent on update and delete requests to ensure the client has an up-to-date + value before proceeding. 
+ id: '- an identifier for the resource with format {{parent}}/locations/{{location}}/securityProfiles/{{name}}' + labels: |- + - + (Optional) + A map of key/value label pairs to assign to the resource. + location: |- + - + (Optional) + The location of the security profile. + The default value is global. + name: |- + - + (Required) + The name of the security profile resource. + parent: |- + - + (Optional) + The name of the parent this security profile belongs to. + Format: organizations/{organization_id}. + self_link: |- + - + Server-defined URL of this resource. + severity_overrides.action: |- + - + (Required) + Threat action override. + Possible values are: ALERT, ALLOW, DEFAULT_ACTION, DENY. + severity_overrides.severity: |- + - + (Required) + Severity level to match. + Possible values are: CRITICAL, HIGH, INFORMATIONAL, LOW, MEDIUM. + terraform_labels: |- + - + The combination of labels configured directly on the resource + and default labels configured on the provider. + threat_overrides.action: |- + - + (Required) + Threat action. + Possible values are: ALERT, ALLOW, DEFAULT_ACTION, DENY. + threat_overrides.threat_id: |- + - + (Required) + Vendor-specific ID of a threat to override. + threat_overrides.type: |- + - + (Output) + Type of threat. + threat_prevention_profile: |- + - + (Optional) + The threat prevention configuration for the security profile. + Structure is documented below. + threat_prevention_profile.severity_overrides: |- + - + (Optional) + The configuration for overriding threats actions by severity match. + Structure is documented below. + threat_prevention_profile.threat_overrides: |- + - + (Optional) + The configuration for overriding threats actions by threat id match. + If a threat is matched both by configuration provided in severity overrides + and threat overrides, the threat overrides action is applied. + Structure is documented below. + type: |- + - + (Required) + The type of security profile. + Possible values are: THREAT_PREVENTION. 
+ update: '- Default is 20 minutes.' + update_time: |- + - + Time the security profile was updated in UTC. + importStatements: [] + google_network_security_security_profile_group: + subCategory: Network security + description: A security profile group defines a container for security profiles. + name: google_network_security_security_profile_group + title: "" + examples: + - name: default + manifest: |- + { + "description": "my description", + "labels": { + "foo": "bar" + }, + "name": "sec-profile-group", + "parent": "organizations/123456789", + "threat_prevention_profile": "${google_network_security_security_profile.security_profile.id}" + } + references: + threat_prevention_profile: google_network_security_security_profile.security_profile.id + dependencies: + google_network_security_security_profile.security_profile: |- + { + "location": "global", + "name": "sec-profile", + "parent": "organizations/123456789", + "type": "THREAT_PREVENTION" + } + argumentDocs: + create: '- Default is 20 minutes.' + create_time: |- + - + Time the security profile group was created in UTC. + delete: '- Default is 20 minutes.' + description: |- + - + (Optional) + An optional description of the profile. The Max length is 512 characters. + effective_labels: for all of the labels present on the resource. + etag: |- + - + This checksum is computed by the server based on the value of other fields, + and may be sent on update and delete requests to ensure the client has an up-to-date + value before proceeding. + id: '- an identifier for the resource with format {{parent}}/locations/{{location}}/securityProfileGroups/{{name}}' + labels: |- + - + (Optional) + A map of key/value label pairs to assign to the resource. + location: |- + - + (Optional) + The location of the security profile group. + The default value is global. + name: |- + - + (Required) + The name of the security profile group resource. 
+ parent: |- + - + (Optional) + The name of the parent this security profile group belongs to. + Format: organizations/{organization_id}. + terraform_labels: |- + - + The combination of labels configured directly on the resource + and default labels configured on the provider. + threat_prevention_profile: |- + - + (Optional) + Reference to a SecurityProfile with the threat prevention configuration for the SecurityProfileGroup. + update: '- Default is 20 minutes.' + update_time: |- + - + Time the security profile group was updated in UTC. + importStatements: [] + google_network_security_server_tls_policy: + subCategory: Network security + description: ServerTlsPolicy is a resource that specifies how a server should authenticate incoming requests. + name: google_network_security_server_tls_policy + title: "" + examples: + - name: default + manifest: |- + { + "allow_open": "false", + "description": "my description", + "labels": { + "foo": "bar" + }, + "mtls_policy": [ + { + "client_validation_ca": [ + { + "grpc_endpoint": [ + { + "target_uri": "unix:mypath" + } + ] + } + ] + } + ], + "name": "my-server-tls-policy", + "server_certificate": [ + { + "certificate_provider_instance": [ + { + "plugin_instance": "google_cloud_private_spiffe" + } + ] + } + ] + } + - name: default + manifest: |- + { + "allow_open": "false", + "description": "my description", + "labels": { + "foo": "bar" + }, + "location": "global", + "mtls_policy": [ + { + "client_validation_mode": "ALLOW_INVALID_OR_MISSING_CLIENT_CERT" + } + ], + "name": "my-server-tls-policy" + } + - name: default + manifest: |- + { + "allow_open": "false", + "description": "my description", + "labels": { + "foo": "bar" + }, + "location": "global", + "name": "my-server-tls-policy", + "server_certificate": [ + { + "grpc_endpoint": [ + { + "target_uri": "unix:mypath" + } + ] + } + ] + } + - name: default + manifest: |- + { + "allow_open": "false", + "description": "my description", + "labels": { + "foo": "bar" + }, + 
"location": "global", + "mtls_policy": [ + { + "client_validation_mode": "REJECT_INVALID", + "client_validation_trust_config": "projects/${data.google_project.project.number}/locations/global/trustConfigs/${google_certificate_manager_trust_config.default.name}" + } + ], + "name": "my-server-tls-policy" + } + dependencies: + google_certificate_manager_trust_config.default: |- + { + "description": "sample trust config description", + "labels": { + "foo": "bar" + }, + "location": "global", + "name": "my-trust-config", + "trust_stores": [ + { + "intermediate_cas": [ + { + "pem_certificate": "${file(\"test-fixtures/ca_cert.pem\")}" + } + ], + "trust_anchors": [ + { + "pem_certificate": "${file(\"test-fixtures/ca_cert.pem\")}" + } + ] + } + ] + } + argumentDocs: + allow_open: |- + - + (Optional) + This field applies only for Traffic Director policies. It is must be set to false for external HTTPS load balancer policies. + Determines if server allows plaintext connections. If set to true, server allows plain text connections. By default, it is set to false. This setting is not exclusive of other encryption modes. For example, if allowOpen and mtlsPolicy are set, server allows both plain text and mTLS connections. See documentation of other encryption modes to confirm compatibility. + Consider using it if you wish to upgrade in place your deployment to TLS while having mixed TLS and non-TLS traffic reaching port :80. + certificate_provider_instance.plugin_instance: |- + - + (Required) + Plugin instance name, used to locate and load CertificateProvider instance configuration. Set to "google_cloud_private_spiffe" to use Certificate Authority Service certificate provider instance. + client_validation_ca.certificate_provider_instance: |- + - + (Optional) + Optional if policy is to be used with Traffic Director. For external HTTPS load balancer must be empty. + Defines a mechanism to provision server identity (public and private keys). 
Cannot be combined with allowOpen as a permissive mode that allows both plain text and TLS is not supported. + Structure is documented below. + client_validation_ca.grpc_endpoint: |- + - + (Optional) + gRPC specific configuration to access the gRPC server to obtain the cert and private key. + Structure is documented below. + create: '- Default is 30 minutes.' + create_time: |- + - + Time the ServerTlsPolicy was created in UTC. + delete: '- Default is 30 minutes.' + description: |- + - + (Optional) + A free-text description of the resource. Max length 1024 characters. + effective_labels: |- + - + All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services. + grpc_endpoint.target_uri: |- + - + (Required) + The target URI of the gRPC endpoint. Only UDS path is supported, and should start with "unix:". + id: '- an identifier for the resource with format projects/{{project}}/locations/{{location}}/serverTlsPolicies/{{name}}' + labels: |- + - + (Optional) + Set of label tags associated with the ServerTlsPolicy resource. + Note: This field is non-authoritative, and will only manage the labels present in your configuration. + Please refer to the field effective_labels for all of the labels present on the resource. + location: |- + - + (Optional) + The location of the server tls policy. + The default value is global. + mtls_policy: |- + - + (Optional) + This field is required if the policy is used with external HTTPS load balancers. This field can be empty for Traffic Director. + Defines a mechanism to provision peer validation certificates for peer to peer authentication (Mutual TLS - mTLS). If not specified, client certificate will not be requested. The connection is treated as TLS and not mTLS. If allowOpen and mtlsPolicy are set, server allows both plain text and mTLS connections. + Structure is documented below. 
+ mtls_policy.client_validation_ca: |- + - + (Optional) + Required if the policy is to be used with Traffic Director. For external HTTPS load balancers it must be empty. + Defines the mechanism to obtain the Certificate Authority certificate to validate the client certificate. + Structure is documented below. + mtls_policy.client_validation_mode: |- + - + (Optional) + When the client presents an invalid certificate or no certificate to the load balancer, the clientValidationMode specifies how the client connection is handled. + Required if the policy is to be used with the external HTTPS load balancing. For Traffic Director it must be empty. + Possible values are: CLIENT_VALIDATION_MODE_UNSPECIFIED, ALLOW_INVALID_OR_MISSING_CLIENT_CERT, REJECT_INVALID. + mtls_policy.client_validation_trust_config: |- + - + (Optional) + Reference to the TrustConfig from certificatemanager.googleapis.com namespace. + If specified, the chain validation will be performed against certificates configured in the given TrustConfig. + Allowed only if the policy is to be used with external HTTPS load balancers. + name: |- + - + (Required) + Name of the ServerTlsPolicy resource. + project: |- + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + server_certificate: |- + - + (Optional) + Defines a mechanism to provision client identity (public and private keys) for peer to peer authentication. The presence of this dictates mTLS. + Structure is documented below. + server_certificate.certificate_provider_instance: |- + - + (Optional) + Optional if policy is to be used with Traffic Director. For external HTTPS load balancer must be empty. + Defines a mechanism to provision server identity (public and private keys). Cannot be combined with allowOpen as a permissive mode that allows both plain text and TLS is not supported. + Structure is documented below. 
+ server_certificate.grpc_endpoint: |- + - + (Optional) + gRPC specific configuration to access the gRPC server to obtain the cert and private key. + Structure is documented below. + terraform_labels: |- + - + The combination of labels configured directly on the resource + and default labels configured on the provider. + update: '- Default is 30 minutes.' + update_time: |- + - + Time the ServerTlsPolicy was updated in UTC. + importStatements: [] + google_network_security_tls_inspection_policy: + subCategory: Network security + description: The TlsInspectionPolicy resource contains references to CA pools in Certificate Authority Service and associated metadata. + name: google_network_security_tls_inspection_policy + title: "" + examples: + - name: default + manifest: |- + { + "ca_pool": "${google_privateca_ca_pool.default.id}", + "depends_on": [ + "${google_privateca_ca_pool.default}", + "${google_privateca_certificate_authority.default}", + "${google_privateca_ca_pool_iam_member.tls_inspection_permission}" + ], + "exclude_public_ca_set": false, + "location": "us-central1", + "name": "my-tls-inspection-policy" + } + references: + ca_pool: google_privateca_ca_pool.default.id dependencies: google_privateca_ca_pool.default: |- { @@ -112919,6 +127733,116 @@ resources: (Required) FQDNs and URLs. importStatements: [] + google_network_services_authz_extension: + subCategory: Network services + description: AuthzExtension is a resource that allows traffic forwarding to a callout backend service to make an authorization decision. 
+ name: google_network_services_authz_extension + title: "" + examples: + - name: default + manifest: |- + { + "authority": "ext11.com", + "description": "my description", + "fail_open": false, + "forward_headers": [ + "Authorization" + ], + "load_balancing_scheme": "INTERNAL_MANAGED", + "location": "us-west1", + "name": "my-authz-ext", + "project": "my-project-name", + "service": "${google_compute_region_backend_service.default.self_link}", + "timeout": "0.1s" + } + references: + service: google_compute_region_backend_service.default.self_link + dependencies: + google_compute_region_backend_service.default: |- + { + "load_balancing_scheme": "INTERNAL_MANAGED", + "name": "authz-service", + "port_name": "grpc", + "project": "my-project-name", + "protocol": "HTTP2", + "region": "us-west1" + } + argumentDocs: + authority: |- + - + (Required) + The :authority header in the gRPC request sent from Envoy to the extension service. + create: '- Default is 30 minutes.' + create_time: |- + - + The timestamp when the resource was created. + delete: '- Default is 30 minutes.' + description: |- + - + (Optional) + A human-readable description of the resource. + effective_labels: for all of the labels present on the resource. + fail_open: |- + - + (Optional) + Determines how the proxy behaves if the call to the extension fails or times out. + When set to TRUE, request or response processing continues without error. Any subsequent extensions in the extension chain are also executed. When set to FALSE or the default setting of FALSE is used, one of the following happens: + forward_headers: |- + - + (Optional) + List of the HTTP headers to forward to the extension (from the client). If omitted, all headers are sent. Each element is a string indicating the header name. + id: '- an identifier for the resource with format projects/{{project}}/locations/{{location}}/authzExtensions/{{name}}' + labels: |- + - + (Optional) + Set of labels associated with the AuthzExtension resource. 
+ load_balancing_scheme: |- + - + (Required) + All backend services and forwarding rules referenced by this extension must share the same load balancing scheme. + For more information, refer to Backend services overview. + Possible values are: INTERNAL_MANAGED, EXTERNAL_MANAGED. + location: |- + - + (Required) + The location of the resource. + metadata: |- + - + (Optional) + The metadata provided here is included as part of the metadata_context (of type google.protobuf.Struct) in the ProcessingRequest message sent to the extension server. The metadata is available under the namespace com.google.authz_extension.. The following variables are supported in the metadata Struct: + {forwarding_rule_id} - substituted with the forwarding rule's fully qualified resource name. + name: |- + - + (Required) + Identifier. Name of the AuthzExtension resource. + project: |- + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + service: |- + - + (Required) + The reference to the service that runs the extension. + To configure a callout extension, service must be a fully-qualified reference to a backend service in the format: + https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/backendServices/{backendService} or https://www.googleapis.com/compute/v1/projects/{project}/global/backendServices/{backendService}. + terraform_labels: |- + - + The combination of labels configured directly on the resource + and default labels configured on the provider. + timeout: |- + - + (Required) + Specifies the timeout for each individual message on the stream. The timeout must be between 10-10000 milliseconds. + update: '- Default is 30 minutes.' + update_time: |- + - + The timestamp when the resource was updated. + wire_format: |- + - + (Optional) + The format of communication supported by the callout extension. + Default value is EXT_PROC_GRPC. + Possible values are: WIRE_FORMAT_UNSPECIFIED, EXT_PROC_GRPC. 
+ importStatements: [] google_network_services_edge_cache_keyset: subCategory: Network services description: EdgeCacheKeyset represents a collection of public keys used for validating signed requests. @@ -114932,6 +129856,11 @@ resources: project: |- - (Optional) The ID of the project in which the resource belongs. If it is not provided, the provider project is used. + routing_mode: |- + - + (Optional) + The routing mode of the Gateway. This field is configurable only for gateways of type SECURE_WEB_GATEWAY. This field is required for gateways of type SECURE_WEB_GATEWAY. + Possible values are: NEXT_HOP_ROUTING_MODE. scope: |- - (Optional) @@ -118899,6 +133828,1445 @@ resources: [projects|organizations]/{parent-name}/roles/{role-name}. runtime_name: '- (Required) Used to find the parent resource to bind the IAM policy to' importStatements: [] + google_oracle_database_autonomous_database: + subCategory: Oracle Database + description: An AutonomousDatabase resource. + name: google_oracle_database_autonomous_database + title: "" + examples: + - name: myADB + manifest: |- + { + "admin_password": "123Abpassword", + "autonomous_database_id": "my-instance", + "cidr": "10.5.0.0/24", + "database": "mydatabase", + "deletion_protection": "true", + "location": "us-east4", + "network": "${data.google_compute_network.default.id}", + "project": "my-project", + "properties": [ + { + "compute_count": "2", + "data_storage_size_tb": "1", + "db_version": "19c", + "db_workload": "OLTP", + "license_type": "LICENSE_INCLUDED" + } + ] + } + references: + network: data.google_compute_network.default.id + - name: myADB + manifest: |- + { + "admin_password": "123Abpassword", + "autonomous_database_id": "my-instance", + "cidr": "10.5.0.0/24", + "database": "mydatabase", + "deletion_protection": "true", + "display_name": "autonomousDatabase displayname", + "labels": { + "label-one": "value-one" + }, + "location": "us-east4", + "network": "${data.google_compute_network.default.id}", + "project": 
"my-project", + "properties": [ + { + "backup_retention_period_days": "60", + "character_set": "AL32UTF8", + "compute_count": "2", + "customer_contacts": [ + { + "email": "xyz@example.com" + } + ], + "data_storage_size_gb": "48", + "db_edition": "STANDARD_EDITION", + "db_version": "19c", + "db_workload": "OLTP", + "is_auto_scaling_enabled": "true", + "is_storage_auto_scaling_enabled": "false", + "license_type": "BRING_YOUR_OWN_LICENSE", + "maintenance_schedule_type": "REGULAR", + "mtls_connection_required": "false", + "n_character_set": "AL16UTF16", + "operations_insights_state": "NOT_ENABLED", + "private_endpoint_ip": "10.5.0.11", + "private_endpoint_label": "myendpoint" + } + ] + } + references: + network: data.google_compute_network.default.id + argumentDocs: + all_connection_strings.high: |- + - + (Output) + The database service provides the highest level of resources to each SQL + statement. + all_connection_strings.low: |- + - + (Output) + The database service provides the least level of resources to each SQL + statement. + all_connection_strings.medium: |- + - + (Output) + The database service provides a lower level of resources to each SQL + statement. + apex_details.apex_version: |- + - + (Output) + The Oracle APEX Application Development version. + apex_details.ords_version: |- + - + (Output) + The Oracle REST Data Services (ORDS) version. + autonomous_database_id: |- + - + (Required) + The ID of the Autonomous Database to create. This value is restricted + to (^a-z?$) and must be a maximum of 63 + characters in length. The value must start with a letter and end with + a letter or a number. + cidr: |- + - + (Required) + The subnet CIDR range for the Autonmous Database. + connection_strings.all_connection_strings: |- + - + (Output) + A list of all connection strings that can be used to connect to the + Autonomous Database. + Structure is documented below. 
+ connection_strings.dedicated: |- + - + (Output) + The database service provides the least level of resources to each SQL + statement, but supports the most number of concurrent SQL statements. + connection_strings.high: |- + - + (Output) + The database service provides the highest level of resources to each SQL + statement. + connection_strings.low: |- + - + (Output) + The database service provides the least level of resources to each SQL + statement. + connection_strings.medium: |- + - + (Output) + The database service provides a lower level of resources to each SQL + statement. + connection_strings.profiles: |- + - + (Output) + A list of connection string profiles to allow clients to group, filter, and + select values based on the structured metadata. + Structure is documented below. + connection_urls.apex_uri: |- + - + (Output) + Oracle Application Express (APEX) URL. + connection_urls.database_transforms_uri: |- + - + (Output) + The URL of the Database Transforms for the Autonomous Database. + connection_urls.graph_studio_uri: |- + - + (Output) + The URL of the Graph Studio for the Autonomous Database. + connection_urls.machine_learning_notebook_uri: |- + - + (Output) + The URL of the Oracle Machine Learning (OML) Notebook for the Autonomous + Database. + connection_urls.machine_learning_user_management_uri: |- + - + (Output) + The URL of Machine Learning user management the Autonomous Database. + connection_urls.mongo_db_uri: |- + - + (Output) + The URL of the MongoDB API for the Autonomous Database. + connection_urls.ords_uri: |- + - + (Output) + The Oracle REST Data Services (ORDS) URL of the Web Access for the + Autonomous Database. + connection_urls.sql_dev_web_uri: |- + - + (Output) + The URL of the Oracle SQL Developer Web for the Autonomous Database. + create: '- Default is 240 minutes.' + create_time: |- + - + The date and time that the Autonomous Database was created. 
+ customer_contacts.email: |- + - + (Required) + The email address used by Oracle to send notifications regarding databases + and infrastructure. + database: |- + - + (Required) + The name of the Autonomous Database. The database name must be unique in + the project. The name must begin with a letter and can + contain a maximum of 30 alphanumeric characters. + delete: '- Default is 120 minutes.' + effective_labels: |- + - + All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services. + entitlement_id: |- + - + The ID of the subscription entitlement associated with the Autonomous + Database. + id: '- an identifier for the resource with format projects/{{project}}/locations/{{location}}/autonomousDatabases/{{autonomous_database_id}}' + local_standby_db.data_guard_role_changed_time: |- + - + (Output) + The date and time the Autonomous Data Guard role was switched for the + standby Autonomous Database. + local_standby_db.disaster_recovery_role_changed_time: |- + - + (Output) + The date and time the Disaster Recovery role was switched for the standby + Autonomous Database. + local_standby_db.lag_time_duration: |- + - + (Output) + The amount of time, in seconds, that the data of the standby database lags + in comparison to the data of the primary database. + local_standby_db.lifecycle_details: |- + - + (Output) + The additional details about the current lifecycle state of the + Autonomous Database. + local_standby_db.state: |- + - + (Output) + Possible values: + STATE_UNSPECIFIED + PROVISIONING + AVAILABLE + STOPPING + STOPPED + STARTING + TERMINATING + TERMINATED + UNAVAILABLE + RESTORE_IN_PROGRESS + RESTORE_FAILED + BACKUP_IN_PROGRESS + SCALE_IN_PROGRESS + AVAILABLE_NEEDS_ATTENTION + UPDATING + MAINTENANCE_IN_PROGRESS + RESTARTING + RECREATING + ROLE_CHANGE_IN_PROGRESS + UPGRADING + INACCESSIBLE + STANDBY + location: |- + - + (Required) + Resource ID segment making up resource name. 
See documentation for resource type oracledatabase.googleapis.com/AutonomousDatabaseBackup. + name: |- + - + Identifier. The name of the Autonomous Database resource in the following format: + projects/{project}/locations/{region}/autonomousDatabases/{autonomous_database} + network: |- + - + (Required) + The name of the VPC network used by the Autonomous Database. + Format: projects/{project}/global/networks/{network} + profiles.consumer_group: |- + - + (Output) + The current consumer group being used by the connection. + Possible values: + CONSUMER_GROUP_UNSPECIFIED + HIGH + MEDIUM + LOW + TP + TPURGENT + profiles.display_name: |- + - + (Output) + The display name for the database connection. + profiles.host_format: |- + - + (Output) + The host name format being currently used in connection string. + Possible values: + HOST_FORMAT_UNSPECIFIED + FQDN + IP + profiles.is_regional: |- + - + (Output) + This field indicates if the connection string is regional and is only + applicable for cross-region Data Guard. + profiles.protocol: |- + - + (Output) + The protocol being used by the connection. + Possible values: + PROTOCOL_UNSPECIFIED + TCP + TCPS + profiles.session_mode: |- + - + (Output) + The current session mode of the connection. + Possible values: + SESSION_MODE_UNSPECIFIED + DIRECT + INDIRECT + profiles.syntax_format: |- + - + (Output) + The syntax of the connection string. + Possible values: + SYNTAX_FORMAT_UNSPECIFIED + LONG + EZCONNECT + EZCONNECTPLUS + profiles.tls_authentication: |- + - + (Output) + This field indicates the TLS authentication type of the connection. + Possible values: + TLS_AUTHENTICATION_UNSPECIFIED + SERVER + MUTUAL + profiles.value: |- + - + (Output) + The value of the connection string. + properties: |- + - + (Required) + The properties of an Autonomous Database. + Structure is documented below. 
+ properties.actual_used_data_storage_size_tb: |- + - + (Output) + The amount of storage currently being used for user and system data, in + terabytes. + properties.allocated_storage_size_tb: |- + - + (Output) + The amount of storage currently allocated for the database tables and + billed for, rounded up in terabytes. + properties.apex_details: |- + - + (Output) + Oracle APEX Application Development. + https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/datatypes/AutonomousDatabaseApex + Structure is documented below. + properties.are_primary_allowlisted_ips_used: |- + - + (Output) + This field indicates the status of Data Guard and Access control for the + Autonomous Database. The field's value is null if Data Guard is disabled + or Access Control is disabled. The field's value is TRUE if both Data Guard + and Access Control are enabled, and the Autonomous Database is using + primary IP access control list (ACL) for standby. The field's value is + FALSE if both Data Guard and Access Control are enabled, and the Autonomous + Database is using a different IP access control list (ACL) for standby + compared to primary. + properties.autonomous_container_database_id: |- + - + (Output) + The Autonomous Container Database OCID. + properties.available_upgrade_versions: |- + - + (Output) + The list of available Oracle Database upgrade versions for an Autonomous + Database. + properties.backup_retention_period_days: |- + - + (Optional) + The retention period for the Autonomous Database. This field is specified + in days, can range from 1 day to 60 days, and has a default value of + 60 days. + properties.character_set: |- + - + (Optional) + The character set for the Autonomous Database. The default is AL32UTF8. + properties.compute_count: |- + - + (Optional) + The number of compute servers for the Autonomous Database. + properties.connection_strings: |- + - + (Output) + The connection string used to connect to the Autonomous Database. 
+ https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/datatypes/AutonomousDatabaseConnectionStrings + Structure is documented below. + properties.connection_urls: |- + - + (Output) + The URLs for accessing Oracle Application Express (APEX) and SQL Developer + Web with a browser from a Compute instance. + https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/datatypes/AutonomousDatabaseConnectionUrls + Structure is documented below. + properties.customer_contacts: |- + - + (Optional) + The list of customer contacts. + Structure is documented below. + properties.data_safe_state: |- + - + (Output) + The current state of the Data Safe registration for the + Autonomous Database. + Possible values: + DATA_SAFE_STATE_UNSPECIFIED + REGISTERING + REGISTERED + DEREGISTERING + NOT_REGISTERED + FAILED + properties.data_storage_size_gb: |- + - + (Optional) + The size of the data stored in the database, in gigabytes. + properties.data_storage_size_tb: |- + - + (Optional) + The size of the data stored in the database, in terabytes. + properties.database_management_state: |- + - + (Output) + The current state of database management for the Autonomous Database. + Possible values: + DATABASE_MANAGEMENT_STATE_UNSPECIFIED + ENABLING + ENABLED + DISABLING + NOT_ENABLED + FAILED_ENABLING + FAILED_DISABLING + properties.db_edition: |- + - + (Optional) + The edition of the Autonomous Databases. + Possible values: + DATABASE_EDITION_UNSPECIFIED + STANDARD_EDITION + ENTERPRISE_EDITION + properties.db_version: |- + - + (Optional) + The Oracle Database version for the Autonomous Database. + properties.db_workload: |- + - + (Required) + Possible values: + DB_WORKLOAD_UNSPECIFIED + OLTP + DW + AJD + APEX + properties.failed_data_recovery_duration: |- + - + (Output) + This field indicates the number of seconds of data loss during a Data + Guard failover. 
+ properties.is_auto_scaling_enabled: |- + - + (Optional) + This field indicates if auto scaling is enabled for the Autonomous Database + CPU core count. + properties.is_local_data_guard_enabled: |- + - + (Output) + This field indicates whether the Autonomous Database has local (in-region) + Data Guard enabled. + properties.is_storage_auto_scaling_enabled: |- + - + (Optional) + This field indicates if auto scaling is enabled for the Autonomous Database + storage. + properties.license_type: |- + - + (Required) + The license type used for the Autonomous Database. + Possible values: + LICENSE_TYPE_UNSPECIFIED + LICENSE_INCLUDED + BRING_YOUR_OWN_LICENSE + properties.lifecycle_details: |- + - + (Output) + The details of the current lifestyle state of the Autonomous Database. + properties.local_adg_auto_failover_max_data_loss_limit: |- + - + (Output) + This field indicates the maximum data loss limit for an Autonomous + Database, in seconds. + properties.local_disaster_recovery_type: |- + - + (Output) + This field indicates the local disaster recovery (DR) type of an + Autonomous Database. + Possible values: + LOCAL_DISASTER_RECOVERY_TYPE_UNSPECIFIED + ADG + BACKUP_BASED + properties.local_standby_db: |- + - + (Output) + Autonomous Data Guard standby database details. + https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/datatypes/AutonomousDatabaseStandbySummary + Structure is documented below. + properties.maintenance_begin_time: |- + - + (Output) + The date and time when maintenance will begin. + properties.maintenance_end_time: |- + - + (Output) + The date and time when maintenance will end. + properties.maintenance_schedule_type: |- + - + (Optional) + The maintenance schedule of the Autonomous Database. + Possible values: + MAINTENANCE_SCHEDULE_TYPE_UNSPECIFIED + EARLY + REGULAR + properties.memory_per_oracle_compute_unit_gbs: |- + - + (Output) + The amount of memory enabled per ECPU, in gigabytes. 
+ properties.memory_table_gbs: |- + - + (Output) + The memory assigned to in-memory tables in an Autonomous Database. + properties.mtls_connection_required: |- + - + (Optional) + This field specifies if the Autonomous Database requires mTLS connections. + properties.n_character_set: |- + - + (Optional) + The national character set for the Autonomous Database. The default is + AL16UTF16. + properties.next_long_term_backup_time: |- + - + (Output) + The long term backup schedule of the Autonomous Database. + properties.oci_url: |- + - + (Output) + The Oracle Cloud Infrastructure link for the Autonomous Database. + properties.ocid: |- + - + (Output) + OCID of the Autonomous Database. + https://docs.oracle.com/en-us/iaas/Content/General/Concepts/identifiers.htm#Oracle + properties.open_mode: |- + - + (Output) + This field indicates the current mode of the Autonomous Database. + Possible values: + OPEN_MODE_UNSPECIFIED + READ_ONLY + READ_WRITE + properties.operations_insights_state: |- + - + (Optional) + Possible values: + OPERATIONS_INSIGHTS_STATE_UNSPECIFIED + ENABLING + ENABLED + DISABLING + NOT_ENABLED + FAILED_ENABLING + FAILED_DISABLING + properties.peer_db_ids: |- + - + (Output) + The list of OCIDs of standby databases located in Autonomous Data Guard + remote regions that are associated with the source database. + properties.permission_level: |- + - + (Output) + The permission level of the Autonomous Database. + Possible values: + PERMISSION_LEVEL_UNSPECIFIED + RESTRICTED + UNRESTRICTED + properties.private_endpoint: |- + - + (Output) + The private endpoint for the Autonomous Database. + properties.private_endpoint_ip: |- + - + (Optional) + The private endpoint IP address for the Autonomous Database. + properties.private_endpoint_label: |- + - + (Optional) + The private endpoint label for the Autonomous Database. + properties.refreshable_mode: |- + - + (Output) + The refresh mode of the cloned Autonomous Database. 
+ Possible values: + REFRESHABLE_MODE_UNSPECIFIED + AUTOMATIC + MANUAL + properties.refreshable_state: |- + - + (Output) + The refresh State of the clone. + Possible values: + REFRESHABLE_STATE_UNSPECIFIED + REFRESHING + NOT_REFRESHING + properties.role: |- + - + (Output) + The Data Guard role of the Autonomous Database. + Possible values: + ROLE_UNSPECIFIED + PRIMARY + STANDBY + DISABLED_STANDBY + BACKUP_COPY + SNAPSHOT_STANDBY + properties.scheduled_operation_details: |- + - + (Output) + The list and details of the scheduled operations of the Autonomous + Database. + Structure is documented below. + properties.sql_web_developer_url: |- + - + (Output) + The SQL Web Developer URL for the Autonomous Database. + properties.state: |- + - + (Output) + Possible values: + STATE_UNSPECIFIED + PROVISIONING + AVAILABLE + STOPPING + STOPPED + STARTING + TERMINATING + TERMINATED + UNAVAILABLE + RESTORE_IN_PROGRESS + RESTORE_FAILED + BACKUP_IN_PROGRESS + SCALE_IN_PROGRESS + AVAILABLE_NEEDS_ATTENTION + UPDATING + MAINTENANCE_IN_PROGRESS + RESTARTING + RECREATING + ROLE_CHANGE_IN_PROGRESS + UPGRADING + INACCESSIBLE + STANDBY + properties.supported_clone_regions: |- + - + (Output) + The list of available regions that can be used to create a clone for the + Autonomous Database. + properties.total_auto_backup_storage_size_gbs: |- + - + (Output) + The storage space used by automatic backups of Autonomous Database, in + gigabytes. + properties.used_data_storage_size_tbs: |- + - + (Output) + The storage space used by Autonomous Database, in gigabytes. + scheduled_operation_details.day_of_week: |- + - + (Output) + Possible values: + DAY_OF_WEEK_UNSPECIFIED + MONDAY + TUESDAY + WEDNESDAY + THURSDAY + FRIDAY + SATURDAY + SUNDAY + scheduled_operation_details.start_time: |- + - + (Output) + Represents a time of day. The date and time zone are either not significant + or are specified elsewhere. An API may choose to allow leap seconds. 
Related + types are google.type.Date and google.protobuf.Timestamp. + Structure is documented below. + scheduled_operation_details.stop_time: |- + - + (Output) + Represents a time of day. The date and time zone are either not significant + or are specified elsewhere. An API may choose to allow leap seconds. Related + types are google.type.Date and google.protobuf.Timestamp. + Structure is documented below. + start_time.hours: |- + - + (Output) + Hours of day in 24 hour format. Should be from 0 to 23. An API may choose + to allow the value "24:00:00" for scenarios like business closing time. + start_time.minutes: |- + - + (Output) + Minutes of hour of day. Must be from 0 to 59. + start_time.nanos: |- + - + (Output) + Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. + start_time.seconds: |- + - + (Output) + Seconds of minutes of the time. Must normally be from 0 to 59. An API may + allow the value 60 if it allows leap-seconds. + stop_time.admin_password: |- + - + (Optional) + The password for the default ADMIN user. + stop_time.deletion_protection: '- (Optional) Whether or not to allow Terraform to destroy the instance. Unless this field is set to false in Terraform state, a terraform destroy or terraform apply that would delete the instance will fail.' + stop_time.display_name: |- + - + (Optional) + The display name for the Autonomous Database. The name does not have to + be unique within your project. + stop_time.hours: |- + - + (Output) + Hours of day in 24 hour format. Should be from 0 to 23. An API may choose + to allow the value "24:00:00" for scenarios like business closing time. + stop_time.labels: |- + - + (Optional) + The labels or tags associated with the Autonomous Database. + Note: This field is non-authoritative, and will only manage the labels present in your configuration. + Please refer to the field effective_labels for all of the labels present on the resource. + stop_time.minutes: |- + - + (Output) + Minutes of hour of day. 
Must be from 0 to 59. + stop_time.nanos: |- + - + (Output) + Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. + stop_time.project: |- + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + stop_time.seconds: |- + - + (Output) + Seconds of minutes of the time. Must normally be from 0 to 59. An API may + allow the value 60 if it allows leap-seconds. + terraform_labels: |- + - + The combination of labels configured directly on the resource + and default labels configured on the provider. + update: '- Default is 120 minutes.' + importStatements: [] + google_oracle_database_cloud_exadata_infrastructure: + subCategory: Oracle Database + description: A CloudExadataInfrastructure resource. + name: google_oracle_database_cloud_exadata_infrastructure + title: "" + examples: + - name: my-cloud-exadata + manifest: |- + { + "cloud_exadata_infrastructure_id": "my-instance", + "deletion_protection": "true", + "display_name": "my-instance displayname", + "location": "us-east4", + "project": "my-project", + "properties": [ + { + "compute_count": "2", + "shape": "Exadata.X9M", + "storage_count": "3" + } + ] + } + - name: my-cloud-exadata + manifest: |- + { + "cloud_exadata_infrastructure_id": "my-instance", + "deletion_protection": "true", + "display_name": "my-instance displayname", + "gcp_oracle_zone": "us-east4-b-r1", + "labels": { + "label-one": "value-one" + }, + "location": "us-east4", + "project": "my-project", + "properties": [ + { + "compute_count": "2", + "customer_contacts": [ + { + "email": "xyz@example.com" + } + ], + "maintenance_window": [ + { + "custom_action_timeout_mins": "20", + "days_of_week": [ + "SUNDAY" + ], + "hours_of_day": [ + 4 + ], + "is_custom_action_timeout_enabled": "0", + "lead_time_week": "1", + "months": [ + "JANUARY", + "APRIL", + "MAY", + "OCTOBER" + ], + "patching_mode": "ROLLING", + "preference": "CUSTOM_PREFERENCE", + "weeks_of_month": [ + 4 + ] + } + ], + 
"shape": "Exadata.X9M", + "storage_count": "3", + "total_storage_size_gb": "196608" + } + ] + } + argumentDocs: + cloud_exadata_infrastructure_id: |- + - + (Required) + The ID of the Exadata Infrastructure to create. This value is restricted + to (^a-z?$) and must be a maximum of 63 + characters in length. The value must start with a letter and end with + a letter or a number. + create: '- Default is 240 minutes.' + create_time: |- + - + The date and time that the Exadata Infrastructure was created. + customer_contacts.email: |- + - + (Required) + The email address used by Oracle to send notifications regarding databases + and infrastructure. + delete: '- Default is 120 minutes.' + deletion_protection: '- (Optional) Whether or not to allow Terraform to destroy the instance. Unless this field is set to false in Terraform state, a terraform destroy or terraform apply that would delete the instance will fail.' + display_name: |- + - + (Optional) + User friendly name for this resource. + effective_labels: |- + - + All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services. + entitlement_id: |- + - + Entitlement ID of the private offer against which this infrastructure + resource is provisioned. + gcp_oracle_zone: |- + - + (Optional) + GCP location where Oracle Exadata is hosted. + id: '- an identifier for the resource with format projects/{{project}}/locations/{{location}}/cloudExadataInfrastructures/{{cloud_exadata_infrastructure_id}}' + labels: |- + - + (Optional) + Labels or tags associated with the resource. + Note: This field is non-authoritative, and will only manage the labels present in your configuration. + Please refer to the field effective_labels for all of the labels present on the resource. + location: |- + - + (Required) + Resource ID segment making up resource name. See documentation for resource type oracledatabase.googleapis.com/DbServer. 
+ maintenance_window.custom_action_timeout_mins: |- + - + (Optional) + Determines the amount of time the system will wait before the start of each + database server patching operation. Custom action timeout is in minutes and + valid value is between 15 to 120 (inclusive). + maintenance_window.days_of_week: |- + - + (Optional) + Days during the week when maintenance should be performed. + maintenance_window.hours_of_day: |- + - + (Optional) + The window of hours during the day when maintenance should be performed. + The window is a 4 hour slot. Valid values are: + 0 - represents time slot 0:00 - 3:59 UTC + 4 - represents time slot 4:00 - 7:59 UTC + 8 - represents time slot 8:00 - 11:59 UTC + 12 - represents time slot 12:00 - 15:59 UTC + 16 - represents time slot 16:00 - 19:59 UTC + 20 - represents time slot 20:00 - 23:59 UTC + maintenance_window.is_custom_action_timeout_enabled: |- + - + (Optional) + If true, enables the configuration of a custom action timeout (waiting + period) between database server patching operations. + maintenance_window.lead_time_week: |- + - + (Optional) + Lead time window allows user to set a lead time to prepare for a down time. + The lead time is in weeks and valid value is between 1 to 4. + maintenance_window.months: |- + - + (Optional) + Months during the year when maintenance should be performed. + maintenance_window.patching_mode: |- + - + (Optional) + Cloud CloudExadataInfrastructure node patching method, either "ROLLING" + or "NONROLLING". Default value is ROLLING. + Possible values: + PATCHING_MODE_UNSPECIFIED + ROLLING + NON_ROLLING + maintenance_window.preference: |- + - + (Optional) + The maintenance window scheduling preference. + Possible values: + MAINTENANCE_WINDOW_PREFERENCE_UNSPECIFIED + CUSTOM_PREFERENCE + NO_PREFERENCE + maintenance_window.weeks_of_month: |- + - + (Optional) + Weeks during the month when maintenance should be performed. 
Weeks start on + the 1st, 8th, 15th, and 22nd days of the month, and have a duration of 7 + days. Weeks start and end based on calendar dates, not days of the week. + name: |- + - + Identifier. The name of the Exadata Infrastructure resource with the following format: + projects/{project}/locations/{region}/cloudExadataInfrastructures/{cloud_exadata_infrastructure} + project: |- + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + properties: |- + - + (Optional) + Various properties of Exadata Infrastructure. + Structure is documented below. + properties.activated_storage_count: |- + - + (Output) + The requested number of additional storage servers activated for the + Exadata Infrastructure. + properties.additional_storage_count: |- + - + (Output) + The requested number of additional storage servers for the Exadata + Infrastructure. + properties.available_storage_size_gb: |- + - + (Output) + The available storage can be allocated to the Exadata Infrastructure + resource, in gigabytes (GB). + properties.compute_count: |- + - + (Optional) + The number of compute servers for the Exadata Infrastructure. + properties.cpu_count: |- + - + (Output) + The number of enabled CPU cores. + properties.customer_contacts: |- + - + (Optional) + The list of customer contacts. + Structure is documented below. + properties.data_storage_size_tb: |- + - + (Output) + Size, in terabytes, of the DATA disk group. + properties.db_node_storage_size_gb: |- + - + (Output) + The local node storage allocated in GBs. + properties.db_server_version: |- + - + (Output) + The software version of the database servers (dom0) in the Exadata + Infrastructure. + properties.maintenance_window: |- + - + (Optional) + Maintenance window as defined by Oracle. + https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/datatypes/MaintenanceWindow + Structure is documented below. 
+ properties.max_cpu_count: |- + - + (Output) + The total number of CPU cores available. + properties.max_data_storage_tb: |- + - + (Output) + The total available DATA disk group size. + properties.max_db_node_storage_size_gb: |- + - + (Output) + The total local node storage available in GBs. + properties.max_memory_gb: |- + - + (Output) + The total memory available in GBs. + properties.memory_size_gb: |- + - + (Output) + The memory allocated in GBs. + properties.monthly_db_server_version: |- + - + (Output) + The monthly software version of the database servers (dom0) + in the Exadata Infrastructure. Example: 20.1.15 + properties.monthly_storage_server_version: |- + - + (Output) + The monthly software version of the storage servers (cells) + in the Exadata Infrastructure. Example: 20.1.15 + properties.next_maintenance_run_id: |- + - + (Output) + The OCID of the next maintenance run. + properties.next_maintenance_run_time: |- + - + (Output) + The time when the next maintenance run will occur. + properties.next_security_maintenance_run_time: |- + - + (Output) + The time when the next security maintenance run will occur. + properties.oci_url: |- + - + (Output) + Deep link to the OCI console to view this resource. + properties.ocid: |- + - + (Output) + OCID of created infra. + https://docs.oracle.com/en-us/iaas/Content/General/Concepts/identifiers.htm#Oracle + properties.shape: |- + - + (Required) + The shape of the Exadata Infrastructure. The shape determines the + amount of CPU, storage, and memory resources allocated to the instance. + properties.state: |- + - + (Output) + The current lifecycle state of the Exadata Infrastructure. + Possible values: + STATE_UNSPECIFIED + PROVISIONING + AVAILABLE + UPDATING + TERMINATING + TERMINATED + FAILED + MAINTENANCE_IN_PROGRESS + properties.storage_count: |- + - + (Optional) + The number of Cloud Exadata storage servers for the Exadata Infrastructure. 
+ properties.storage_server_version: |- + - + (Output) + The software version of the storage servers (cells) in the Exadata + Infrastructure. + properties.total_storage_size_gb: |- + - + (Optional) + The total storage allocated to the Exadata Infrastructure + resource, in gigabytes (GB). + terraform_labels: |- + - + The combination of labels configured directly on the resource + and default labels configured on the provider. + update: '- Default is 120 minutes.' + importStatements: [] + google_oracle_database_cloud_vm_cluster: + subCategory: Oracle Database + description: A CloudVmCluster resource. + name: google_oracle_database_cloud_vm_cluster + title: "" + examples: + - name: my_vmcluster + manifest: |- + { + "backup_subnet_cidr": "10.6.0.0/24", + "cidr": "10.5.0.0/24", + "cloud_vm_cluster_id": "my-instance", + "deletion_protection": "true", + "display_name": "my-instance displayname", + "exadata_infrastructure": "${google_oracle_database_cloud_exadata_infrastructure.cloudExadataInfrastructures.id}", + "location": "us-east4", + "network": "${data.google_compute_network.default.id}", + "project": "my-project", + "properties": [ + { + "cpu_core_count": "4", + "gi_version": "19.0.0.0", + "hostname_prefix": "hostname1", + "license_type": "LICENSE_INCLUDED", + "ssh_public_keys": [ + "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQCz1X2744t+6vRLmE5u6nHi6/QWh8bQDgHmd+OIxRQIGA/IWUtCs2FnaCNZcqvZkaeyjk5v0lTA/n+9jvO42Ipib53athrfVG8gRt8fzPL66C6ZqHq+6zZophhrCdfJh/0G4x9xJh5gdMprlaCR1P8yAaVvhBQSKGc4SiIkyMNBcHJ5YTtMQMTfxaB4G1sHZ6SDAY9a6Cq/zNjDwfPapWLsiP4mRhE5SSjJX6l6EYbkm0JeLQg+AbJiNEPvrvDp1wtTxzlPJtIivthmLMThFxK7+DkrYFuLvN5AHUdo9KTDLvHtDCvV70r8v0gafsrKkM/OE9Jtzoo0e1N/5K/ZdyFRbAkFT4QSF3nwpbmBWLf2Evg//YyEuxnz4CwPqFST2mucnrCCGCVWp1vnHZ0y30nM35njLOmWdRDFy5l27pKUTwLp02y3UYiiZyP7d3/u5pKiN4vC27VuvzprSdJxWoAvluOiDeRh+/oeQDowxoT/Oop8DzB9uJmjktXw8jyMW2+Rpg+ENQqeNgF1OGlEzypaWiRskEFlkpLb4v/s3ZDYkL1oW0Nv/J8LTjTOTEaYt2Udjoe9x2xWiGnQixhdChWuG+MaoWffzUgx1tsVj/DBXijR5DjkPkrA1GA98zd3q8GKEaAdcDenJjHhNYSd4+rE9pIsnYn7fo5X/tFfcQH1XQ== nobody@google.com" + ] + } + ] + } + references: + exadata_infrastructure: google_oracle_database_cloud_exadata_infrastructure.cloudExadataInfrastructures.id + network: data.google_compute_network.default.id + dependencies: + google_oracle_database_cloud_exadata_infrastructure.cloudExadataInfrastructures: |- + { + "cloud_exadata_infrastructure_id": "my-exadata", + "deletion_protection": "true", + "display_name": "my-exadata displayname", + "location": "us-east4", + "project": "my-project", + "properties": [ + { + "compute_count": "2", + "shape": "Exadata.X9M", + "storage_count": "3" + } + ] + } + - name: my_vmcluster + manifest: |- + { + "backup_subnet_cidr": "10.6.0.0/24", + "cidr": "10.5.0.0/24", + "cloud_vm_cluster_id": "my-instance", + "deletion_protection": "true", + "display_name": "my-instance displayname", + "exadata_infrastructure": "${google_oracle_database_cloud_exadata_infrastructure.cloudExadataInfrastructures.id}", + "labels": { + "label-one": "value-one" + }, + "location": "us-east4", + "network": "${data.google_compute_network.default.id}", + "project": "my-project", + "properties": [ + { + "cluster_name": "pq-ppat4", + "cpu_core_count": "4", + "data_storage_size_tb": 2, + "db_node_storage_size_gb": 120, + "db_server_ocids": [ + 
"${data.google_oracle_database_db_servers.mydbserver.db_servers.0.properties.0.ocid}", + "${data.google_oracle_database_db_servers.mydbserver.db_servers.1.properties.0.ocid}" + ], + "diagnostics_data_collection_options": [ + { + "diagnostics_events_enabled": true, + "health_monitoring_enabled": true, + "incident_logs_enabled": true + } + ], + "disk_redundancy": "HIGH", + "gi_version": "19.0.0.0", + "hostname_prefix": "hostname1", + "license_type": "LICENSE_INCLUDED", + "local_backup_enabled": false, + "memory_size_gb": 60, + "node_count": "2", + "ocpu_count": "4.0", + "sparse_diskgroup_enabled": false, + "ssh_public_keys": [ + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCz1X2744t+6vRLmE5u6nHi6/QWh8bQDgHmd+OIxRQIGA/IWUtCs2FnaCNZcqvZkaeyjk5v0lTA/n+9jvO42Ipib53athrfVG8gRt8fzPL66C6ZqHq+6zZophhrCdfJh/0G4x9xJh5gdMprlaCR1P8yAaVvhBQSKGc4SiIkyMNBcHJ5YTtMQMTfxaB4G1sHZ6SDAY9a6Cq/zNjDwfPapWLsiP4mRhE5SSjJX6l6EYbkm0JeLQg+AbJiNEPvrvDp1wtTxzlPJtIivthmLMThFxK7+DkrYFuLvN5AHUdo9KTDLvHtDCvV70r8v0gafsrKkM/OE9Jtzoo0e1N/5K/ZdyFRbAkFT4QSF3nwpbmBWLf2Evg//YyEuxnz4CwPqFST2mucnrCCGCVWp1vnHZ0y30nM35njLOmWdRDFy5l27pKUTwLp02y3UYiiZyP7d3/u5pKiN4vC27VuvzprSdJxWoAvluOiDeRh+/oeQDowxoT/Oop8DzB9uJmjktXw8jyMW2+Rpg+ENQqeNgF1OGlEzypaWiRskEFlkpLb4v/s3ZDYkL1oW0Nv/J8LTjTOTEaYt2Udjoe9x2xWiGnQixhdChWuG+MaoWffzUgx1tsVj/DBXijR5DjkPkrA1GA98zd3q8GKEaAdcDenJjHhNYSd4+rE9pIsnYn7fo5X/tFfcQH1XQ== nobody@google.com" + ], + "time_zone": [ + { + "id": "UTC" + } + ] + } + ] + } + references: + exadata_infrastructure: google_oracle_database_cloud_exadata_infrastructure.cloudExadataInfrastructures.id + network: data.google_compute_network.default.id + properties.db_server_ocids: data.google_oracle_database_db_servers.mydbserver.db_servers.0.properties.0.ocid + dependencies: + google_oracle_database_cloud_exadata_infrastructure.cloudExadataInfrastructures: |- + { + "cloud_exadata_infrastructure_id": "my-exadata", + "deletion_protection": "true", + "display_name": "my-exadata displayname", + "location": "us-east4", + "project": 
"my-project", + "properties": [ + { + "compute_count": "2", + "shape": "Exadata.X9M", + "storage_count": "3" + } + ] + } + argumentDocs: + backup_subnet_cidr: |- + - + (Required) + CIDR range of the backup subnet. + cidr: |- + - + (Required) + Network settings. CIDR to use for cluster IP allocation. + cloud_vm_cluster_id: |- + - + (Required) + The ID of the VM Cluster to create. This value is restricted + to (^a-z?$) and must be a maximum of 63 + characters in length. The value must start with a letter and end with + a letter or a number. + create: '- Default is 120 minutes.' + create_time: |- + - + The date and time that the VM cluster was created. + delete: '- Default is 60 minutes.' + deletion_protection: '- (Optional) Whether Terraform will be prevented from destroying the cluster. Deleting this cluster via terraform destroy or terraform apply will only succeed if this field is false in the Terraform state.' + diagnostics_data_collection_options.diagnostics_events_enabled: |- + - + (Optional) + Indicates whether diagnostic collection is enabled for the VM cluster + diagnostics_data_collection_options.health_monitoring_enabled: |- + - + (Optional) + Indicates whether health monitoring is enabled for the VM cluster + diagnostics_data_collection_options.incident_logs_enabled: |- + - + (Optional) + Indicates whether incident logs and trace collection are enabled for the VM + cluster + display_name: |- + - + (Optional) + User friendly name for this resource. + effective_labels: |- + - + All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services. + exadata_infrastructure: |- + - + (Required) + The name of the Exadata Infrastructure resource on which VM cluster + resource is created, in the following format: + projects/{project}/locations/{region}/cloudExadataInfrastuctures/{cloud_extradata_infrastructure} + gcp_oracle_zone: |- + - + GCP location where Oracle Exadata is hosted. 
It is same as GCP Oracle zone + of Exadata infrastructure. + id: '- an identifier for the resource with format projects/{{project}}/locations/{{location}}/cloudVmClusters/{{cloud_vm_cluster_id}}' + labels: |- + - + (Optional) + Labels or tags associated with the VM Cluster. + Note: This field is non-authoritative, and will only manage the labels present in your configuration. + Please refer to the field effective_labels for all of the labels present on the resource. + location: |- + - + (Required) + Resource ID segment making up resource name. See documentation for resource type oracledatabase.googleapis.com/DbNode. + name: |- + - + Identifier. The name of the VM Cluster resource with the format: + projects/{project}/locations/{region}/cloudVmClusters/{cloud_vm_cluster} + network: |- + - + (Required) + The name of the VPC network. + Format: projects/{project}/global/networks/{network} + project: |- + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + properties: |- + - + (Optional) + Various properties and settings associated with Exadata VM cluster. + Structure is documented below. + properties.cluster_name: |- + - + (Optional) + OCI Cluster name. + properties.compartment_id: |- + - + (Output) + Compartment ID of cluster. + properties.cpu_core_count: |- + - + (Required) + Number of enabled CPU cores. + properties.data_storage_size_tb: |- + - + (Optional) + The data disk group size to be allocated in TBs. + properties.db_node_storage_size_gb: |- + - + (Optional) + Local storage per VM + properties.db_server_ocids: |- + - + (Optional) + OCID of database servers. + properties.diagnostics_data_collection_options: |- + - + (Optional) + Data collection options for diagnostics. + Structure is documented below. + properties.disk_redundancy: |- + - + (Optional) + The type of redundancy. 
+ Possible values: + DISK_REDUNDANCY_UNSPECIFIED + HIGH + NORMAL + properties.dns_listener_ip: |- + - + (Output) + DNS listener IP. + properties.domain: |- + - + (Output) + Parent DNS domain where SCAN DNS and hosts names are qualified. + ex: ocispdelegated.ocisp10jvnet.oraclevcn.com + properties.gi_version: |- + - + (Optional) + Grid Infrastructure Version. + properties.hostname: |- + - + (Output) + host name without domain. + format: "-" with some suffix. + ex: sp2-yi0xq where "sp2" is the hostname_prefix. + properties.hostname_prefix: |- + - + (Optional) + Prefix for VM cluster host names. + properties.license_type: |- + - + (Required) + License type of VM Cluster. + Possible values: + LICENSE_TYPE_UNSPECIFIED + LICENSE_INCLUDED + BRING_YOUR_OWN_LICENSE + properties.local_backup_enabled: |- + - + (Optional) + Use local backup. + properties.memory_size_gb: |- + - + (Optional) + Memory allocated in GBs. + properties.node_count: |- + - + (Optional) + Number of database servers. + properties.oci_url: |- + - + (Output) + Deep link to the OCI console to view this resource. + properties.ocid: |- + - + (Output) + Oracle Cloud Infrastructure ID of VM Cluster. + properties.ocpu_count: |- + - + (Optional) + OCPU count per VM. Minimum is 0.1. + properties.scan_dns: |- + - + (Output) + SCAN DNS name. + ex: sp2-yi0xq-scan.ocispdelegated.ocisp10jvnet.oraclevcn.com + properties.scan_dns_record_id: |- + - + (Output) + OCID of scan DNS record. + properties.scan_ip_ids: |- + - + (Output) + OCIDs of scan IPs. + properties.scan_listener_port_tcp: |- + - + (Output) + SCAN listener port - TCP + properties.scan_listener_port_tcp_ssl: |- + - + (Output) + SCAN listener port - TLS + properties.shape: |- + - + (Output) + Shape of VM Cluster. + properties.sparse_diskgroup_enabled: |- + - + (Optional) + Use exadata sparse snapshots. + properties.ssh_public_keys: |- + - + (Optional) + SSH public keys to be stored with cluster. + properties.state: |- + - + (Output) + State of the cluster. 
+ Possible values: + STATE_UNSPECIFIED + PROVISIONING + AVAILABLE + UPDATING + TERMINATING + TERMINATED + FAILED + MAINTENANCE_IN_PROGRESS + properties.storage_size_gb: |- + - + (Output) + The storage allocation for the disk group, in gigabytes (GB). + properties.system_version: |- + - + (Output) + Operating system version of the image. + properties.time_zone: |- + - + (Optional) + Represents a time zone from the + IANA Time Zone Database. + Structure is documented below. + terraform_labels: |- + - + The combination of labels configured directly on the resource + and default labels configured on the provider. + time_zone.id: |- + - + (Optional) + IANA Time Zone Database time zone, e.g. "America/New_York". + update: '- Default is 60 minutes.' + importStatements: [] google_org_policy_custom_constraint: subCategory: Organization Policy description: Custom constraints are created by administrators to provide more granular and customizable control over the specific fields that are restricted by your organization policies. 
@@ -119003,8 +135371,8 @@ resources: - name: primary manifest: |- { - "name": "projects/${google_project.basic.name}/policies/iam.disableServiceAccountKeyUpload", - "parent": "projects/${google_project.basic.name}", + "name": "projects/${google_project.basic.project_id}/policies/iam.disableServiceAccountKeyUpload", + "parent": "projects/${google_project.basic.project_id}", "spec": [ { "rules": [ @@ -119018,6 +135386,7 @@ resources: dependencies: google_project.basic: |- { + "deletion_policy": "DELETE", "name": "id", "org_id": "123456789", "project_id": "id" @@ -119043,6 +135412,7 @@ resources: dependencies: google_folder.basic: |- { + "deletion_protection": false, "display_name": "folder", "parent": "organizations/123456789" } @@ -119060,8 +135430,8 @@ resources: - name: primary manifest: |- { - "name": "projects/${google_project.basic.name}/policies/gcp.resourceLocations", - "parent": "projects/${google_project.basic.name}", + "name": "projects/${google_project.basic.project_id}/policies/gcp.resourceLocations", + "parent": "projects/${google_project.basic.project_id}", "spec": [ { "rules": [ @@ -119095,6 +135465,7 @@ resources: dependencies: google_project.basic: |- { + "deletion_policy": "DELETE", "name": "id", "org_id": "123456789", "project_id": "id" @@ -119141,6 +135512,30 @@ resources: "container.googleapis.com/NodePool" ] } + - name: primary + manifest: |- + { + "name": "projects/${google_project.basic.name}/policies/compute.managed.restrictDiskCreation", + "parent": "projects/${google_project.basic.name}", + "spec": [ + { + "rules": [ + { + "enforce": "TRUE", + "parameters": "${jsonencode({\"isSizeLimitCheck\" : true, \"allowedDiskTypes\" : [\"pd-ssd\", \"pd-standard\"]})}" + } + ] + } + ] + } + dependencies: + google_project.basic: |- + { + "deletion_policy": "DELETE", + "name": "id", + "org_id": "123456789", + "project_id": "id" + } argumentDocs: condition.description: |- - @@ -119215,6 +135610,10 @@ resources: - (Optional) If "TRUE", then the Policy is 
enforced. If "FALSE", then any configuration is acceptable. This field can be set only in Policies for boolean constraints. + rules.parameters: |- + - + (Optional) + Optional. Required for Managed Constraints if parameters defined in constraints. Pass parameter values when policy enforcement is enabled. Ensure that parameter value types match those defined in the constraint definition. For example: { "allowedLocations" : ["us-east1", "us-west1"], "allowAll" : true } rules.values: |- - (Optional) @@ -119240,7 +135639,7 @@ resources: spec.rules: |- - (Optional) - Up to 10 PolicyRules are allowed. In Policies for boolean constraints, the following requirements apply: - There must be one and only one PolicyRule where condition is unset. - BooleanPolicyRules with conditions must set enforced to the opposite of the PolicyRule without a condition. - During policy evaluation, PolicyRules with conditions that are true for a target resource take precedence. + In Policies for boolean constraints, the following requirements apply: - There must be one and only one PolicyRule where condition is unset. - BooleanPolicyRules with conditions must set enforced to the opposite of the PolicyRule without a condition. - During policy evaluation, PolicyRules with conditions that are true for a target resource take precedence. Structure is documented below. 
spec.update_time: |- - @@ -119322,6 +135721,7 @@ resources: } google_project.my_project: |- { + "deletion_policy": "DELETE", "name": "My Project", "org_id": "123456789", "project_id": "your-project-id" @@ -121406,6 +137806,7 @@ resources: "depends_on": [ "${google_service_networking_connection.default}" ], + "deployment_type": "SCRATCH", "description": "test instance", "directory_stripe_level": "DIRECTORY_STRIPE_LEVEL_MIN", "file_stripe_level": "FILE_STRIPE_LEVEL_MIN", @@ -121446,6 +137847,49 @@ resources: ], "service": "servicenetworking.googleapis.com" } + - name: instance + manifest: |- + { + "capacity_gib": 12000, + "depends_on": [ + "${google_service_networking_connection.default}" + ], + "deployment_type": "SCRATCH", + "description": "test instance", + "directory_stripe_level": "DIRECTORY_STRIPE_LEVEL_MIN", + "file_stripe_level": "FILE_STRIPE_LEVEL_MIN", + "instance_id": "instance", + "labels": { + "test": "value" + }, + "location": "us-central1-a", + "network": "${google_compute_network.network.name}" + } + references: + network: google_compute_network.network.name + dependencies: + google_compute_global_address.private_ip_alloc: |- + { + "address_type": "INTERNAL", + "name": "address", + "network": "${google_compute_network.network.id}", + "prefix_length": 24, + "purpose": "VPC_PEERING" + } + google_compute_network.network: |- + { + "auto_create_subnetworks": true, + "mtu": 8896, + "name": "network" + } + google_service_networking_connection.default: |- + { + "network": "${google_compute_network.network.id}", + "reserved_peering_ranges": [ + "${google_compute_global_address.private_ip_alloc.name}" + ], + "service": "servicenetworking.googleapis.com" + } argumentDocs: '[a-z0-9_-]{0,63}': . a-z{0,62}: . @@ -121463,8 +137907,16 @@ resources: The time when the instance was created. daos_version: |- - - The version of DAOS software running in the instance + The version of DAOS software running in the instance. delete: '- Default is 20 minutes.' 
+ deployment_type: |- + - + (Optional) + Parallelstore Instance deployment type. + Possible values: + DEPLOYMENT_TYPE_UNSPECIFIED + SCRATCH + PERSISTENT description: |- - (Optional) @@ -121483,9 +137935,9 @@ resources: effective_labels: for all of the labels present on the resource. effective_reserved_ip_range: |- - - Immutable. Contains the id of the allocated IP address range associated with the - private service access connection for example, "test-default" associated - with IP range 10.0.0.0/29. This field is populated by the service and + Immutable. Contains the id of the allocated IP address + range associated with the private service access connection for example, "test-default" + associated with IP range 10.0.0.0/29. This field is populated by the service and contains the value currently used by the service. file_stripe_level: |- - @@ -121506,12 +137958,12 @@ resources: labels: |- - (Optional) - Cloud Labels are a flexible and lightweight mechanism for organizing cloud - resources into groups that reflect a customer's organizational needs and - deployment strategies. Cloud Labels can be used to filter collections of - resources. They can be used to control how resource metrics are aggregated. - And they can be used as arguments to policy management rules (e.g. route, - firewall, load balancing, etc.). + Cloud Labels are a flexible and lightweight mechanism for + organizing cloud resources into groups that reflect a customer's organizational + needs and deployment strategies. Cloud Labels can be used to filter collections + of resources. They can be used to control how resource metrics are aggregated. + And they can be used as arguments to policy management rules (e.g. route, firewall, + load balancing, etc.). location: |- - (Required) @@ -121520,22 +137972,24 @@ resources: - Identifier. 
The resource name of the instance, in the format projects/{project}/locations/{location}/instances/{instance_id} + name + "_" + value: |- + would prove problematic if we were to + allow "_" in a future release. " network: |- - (Optional) - Immutable. The name of the Google Compute Engine - VPC network to which the - instance is connected. + Immutable. The name of the Google Compute Engine VPC network + to which the instance is connected. project: |- - (Optional) The ID of the project in which the resource belongs. If it is not provided, the provider project is used. reserved_ip_range: |- - (Optional) - Immutable. Contains the id of the allocated IP address range associated with the - private service access connection for example, "test-default" associated - with IP range 10.0.0.0/29. If no range id is provided all ranges will be - considered. + Immutable. Contains the id of the allocated IP address range + associated with the private service access connection for example, "test-default" + associated with IP range 10.0.0.0/29. If no range id is provided all ranges will + be considered. state: |- - The instance state. @@ -123320,7 +139774,7 @@ resources: subject_key_id.key_id: |- - (Optional) - The value of the KeyId in lowercase hexidecimal. + The value of the KeyId in lowercase hexadecimal. 
terraform_labels: |- - The combination of labels configured directly on the resource @@ -123412,14 +139866,7 @@ resources: "subject": [ { "common_name": "my-certificate-authority", - "organization": "HashiCorp" - } - ], - "subject_alt_name": [ - { - "dns_names": [ - "hashicorp.com" - ] + "organization": "ACME" } ] } @@ -123428,8 +139875,7 @@ resources: { "ca_options": [ { - "is_ca": true, - "max_issuer_path_length": 10 + "is_ca": true } ], "key_usage": [ @@ -123437,23 +139883,11 @@ resources: "base_key_usage": [ { "cert_sign": true, - "content_commitment": true, - "crl_sign": true, - "data_encipherment": true, - "decipher_only": true, - "digital_signature": true, - "key_agreement": true, - "key_encipherment": false + "crl_sign": true } ], "extended_key_usage": [ - { - "client_auth": false, - "code_signing": true, - "email_protection": true, - "server_auth": true, - "time_stamping": true - } + {} ] } ] @@ -123461,13 +139895,13 @@ resources: ] } ], - "deletion_protection": "true", + "deletion_protection": true, "key_spec": [ { "algorithm": "RSA_PKCS1_4096_SHA256" } ], - "lifetime": "86400s", + "lifetime": "${10 * 365 * 24 * 3600}s", "location": "us-central1", "pool": "ca-pool" } @@ -123482,14 +139916,7 @@ resources: "subject": [ { "common_name": "my-certificate-authority", - "organization": "HashiCorp" - } - ], - "subject_alt_name": [ - { - "dns_names": [ - "hashicorp.com" - ] + "organization": "ACME" } ] } @@ -123510,9 +139937,7 @@ resources: } ], "extended_key_usage": [ - { - "server_auth": false - } + {} ] } ] @@ -123542,14 +139967,7 @@ resources: "subject": [ { "common_name": "my-subordinate-authority", - "organization": "HashiCorp" - } - ], - "subject_alt_name": [ - { - "dns_names": [ - "hashicorp.com" - ] + "organization": "ACME" } ] } @@ -123559,7 +139977,7 @@ resources: "ca_options": [ { "is_ca": true, - "max_issuer_path_length": 0 + "zero_max_issuer_path_length": true } ], "key_usage": [ @@ -123567,23 +139985,11 @@ resources: "base_key_usage": [ { 
"cert_sign": true, - "content_commitment": true, - "crl_sign": true, - "data_encipherment": true, - "decipher_only": true, - "digital_signature": true, - "key_agreement": true, - "key_encipherment": false + "crl_sign": true } ], "extended_key_usage": [ - { - "client_auth": false, - "code_signing": true, - "email_protection": true, - "server_auth": true, - "time_stamping": true - } + {} ] } ] @@ -123591,13 +139997,13 @@ resources: ] } ], - "deletion_protection": "true", + "deletion_protection": true, "key_spec": [ { - "algorithm": "RSA_PKCS1_4096_SHA256" + "algorithm": "RSA_PKCS1_2048_SHA256" } ], - "lifetime": "86400s", + "lifetime": "${5 * 365 * 24 * 3600}s", "location": "us-central1", "pool": "ca-pool", "subordinate_config": [ @@ -123629,8 +140035,7 @@ resources: { "ca_options": [ { - "is_ca": true, - "max_issuer_path_length": 10 + "is_ca": true } ], "key_usage": [ @@ -123642,9 +140047,7 @@ resources: } ], "extended_key_usage": [ - { - "server_auth": false - } + {} ] } ], @@ -123681,7 +140084,7 @@ resources: ] } ], - "deletion_protection": "true", + "deletion_protection": true, "depends_on": [ "${google_kms_crypto_key_iam_member.privateca_sa_keyuser_signerverifier}", "${google_kms_crypto_key_iam_member.privateca_sa_keyuser_viewer}" @@ -123722,14 +140125,7 @@ resources: "subject": [ { "common_name": "my-certificate-authority", - "organization": "HashiCorp" - } - ], - "subject_alt_name": [ - { - "dns_names": [ - "hashicorp.com" - ] + "organization": "ACME" } ] } @@ -123743,8 +140139,7 @@ resources: { "ca_options": [ { - "is_ca": true, - "max_issuer_path_length": 10 + "is_ca": true } ], "key_usage": [ @@ -123752,23 +140147,11 @@ resources: "base_key_usage": [ { "cert_sign": true, - "content_commitment": true, - "crl_sign": true, - "data_encipherment": true, - "decipher_only": true, - "digital_signature": true, - "key_agreement": true, - "key_encipherment": false + "crl_sign": true } ], "extended_key_usage": [ - { - "client_auth": false, - "code_signing": true, - 
"email_protection": true, - "server_auth": true, - "time_stamping": true - } + {} ] } ] @@ -123776,13 +140159,13 @@ resources: ] } ], - "deletion_protection": "true", + "deletion_protection": true, "key_spec": [ { "cloud_kms_key_version": "projects/keys-project/locations/us-central1/keyRings/key-ring/cryptoKeys/crypto-key/cryptoKeyVersions/1" } ], - "lifetime": "86400s", + "lifetime": "${10 * 365 * 24 * 3600}s", "location": "us-central1", "pool": "ca-pool" } @@ -123954,7 +140337,9 @@ resources: When the field is set to true or unset in Terraform state, a terraform apply or terraform destroy that would delete the CertificateAuthority will fail. When the field is set to false, deleting the CertificateAuthority is allowed. - key_spec.desired_state: '- (Optional) Desired state of the CertificateAuthority. Set this field to STAGED to create a STAGED root CA.' + key_spec.desired_state: |- + - (Optional) Desired state of the CertificateAuthority. Set this field to STAGED to create a STAGED root CA. + Possible values: ENABLED, DISABLED, STAGED. key_spec.effective_labels: for all of the labels present on the resource. key_spec.gcs_bucket: |- - @@ -124179,7 +140564,7 @@ resources: subject_key_id.key_id: |- - (Optional) - The value of the KeyId in lowercase hexidecimal. + The value of the KeyId in lowercase hexadecimal. subordinate_config.certificate_authority: |- - (Optional) @@ -124902,6 +141287,16 @@ resources: "display_name": "Department 1", "parent": "organizations/1234567" } + - name: my_project + manifest: |- + { + "name": "My Project", + "org_id": "1234567", + "project_id": "your-project-id", + "tags": { + "1234567/env": "staging" + } + } argumentDocs: auto_create_network: |- - (Optional) Controls whether the 'default' network exists on the project. Defaults @@ -124922,7 +141317,7 @@ resources: - (Optional) The deletion policy for the Project. Setting PREVENT will protect the project against any destroy actions caused by a terraform apply or terraform destroy. 
Setting ABANDON allows the resource to be abandoned rather than deleted, i.e., the Terraform resource can be deleted without deleting the Project via - the Google API. Possible values are: "PREVENT", "ABANDON", "DELETE". Default value is DELETE. + the Google API. Possible values are: "PREVENT", "ABANDON", "DELETE". Default value is PREVENT. effective_labels: |- - All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services. @@ -124947,12 +141342,7 @@ resources: this forces the project to be migrated to the newly specified organization. project_id: '- (Required) The project ID. Changing this forces a new project to be created.' - skip_delete: |- - - (Optional) If true, the Terraform resource can be deleted without - deleting the Project via the Google API. skip_delete is deprecated and will be - removed in 6.0.0. Please use deletion_policy instead. A skip_delete value of false - can be changed to a deletion_policy value of DELETE and a skip_delete value of true - to a deletion_policy value of ABANDON for equivalent behavior. + tags: '- (Optional) A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored when empty. The field is immutable and causes resource replacement when mutated. This field is only set at create time and modifying this field after creation will trigger recreation. To apply tags to an existing resource, see the google_tags_tag_value resource.' terraform_labels: |- - The combination of labels configured directly on the resource and default labels configured on the provider. @@ -125155,6 +141545,28 @@ resources: List of possible stages is here. title: '- (Required) A human-readable title for the role.' 
importStatements: [] + google_project_iam_member_remove: + subCategory: Cloud Platform + description: Ensures that a member:role pairing does not exist in a project's IAM policy. + name: google_project_iam_member_remove + title: "" + examples: + - name: foo + manifest: |- + { + "member": "serviceAccount:${google_project.target_project.number}-compute@developer.gserviceaccount.com", + "project": "${google_project.target_project.project_id}", + "role": "roles/editor" + } + references: + project: google_project.target_project.project_id + argumentDocs: + member: |- + - (Required) The IAM principal that should not have the target role. + Each entry can have one of the following values: + project: '- (Required) The project id of the target project.' + role: '- (Required) The target role that should be removed.' + importStatements: [] google_project_iam_policy: subCategory: Cloud Platform description: Collection of resources to manage IAM policy for a project. @@ -125810,6 +142222,24 @@ resources: { "name": "example-topic" } + - name: example + manifest: |- + { + "ack_deadline_seconds": 20, + "filter": " attributes.foo = \"foo\"\n AND attributes.bar = \"bar\"\n", + "labels": { + "foo": "bar" + }, + "name": "example-subscription", + "topic": "${google_pubsub_topic.example.id}" + } + references: + topic: google_pubsub_topic.example.id + dependencies: + google_pubsub_topic.example: |- + { + "name": "example-topic" + } - name: example manifest: |- { @@ -125842,10 +142272,6 @@ resources: "table": "${google_bigquery_table.test.project}.${google_bigquery_table.test.dataset_id}.${google_bigquery_table.test.table_id}" } ], - "depends_on": [ - "${google_project_iam_member.viewer}", - "${google_project_iam_member.editor}" - ], "name": "example-subscription", "topic": "${google_pubsub_topic.example.id}" } @@ -125863,18 +142289,6 @@ resources: "schema": "[\n {\n \"name\": \"data\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\",\n \"description\": \"The data\"\n }\n]\n", 
"table_id": "example_table" } - google_project_iam_member.editor: |- - { - "member": "serviceAccount:service-${data.google_project.project.number}@gcp-sa-pubsub.iam.gserviceaccount.com", - "project": "${data.google_project.project.project_id}", - "role": "roles/bigquery.dataEditor" - } - google_project_iam_member.viewer: |- - { - "member": "serviceAccount:service-${data.google_project.project.number}@gcp-sa-pubsub.iam.gserviceaccount.com", - "project": "${data.google_project.project.project_id}", - "role": "roles/bigquery.metadataViewer" - } google_pubsub_topic.example: |- { "name": "example-topic" @@ -125888,10 +142302,6 @@ resources: "use_table_schema": true } ], - "depends_on": [ - "${google_project_iam_member.viewer}", - "${google_project_iam_member.editor}" - ], "name": "example-subscription", "topic": "${google_pubsub_topic.example.id}" } @@ -125909,18 +142319,6 @@ resources: "schema": "[\n {\n \"name\": \"data\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\",\n \"description\": \"The data\"\n }\n]\n", "table_id": "example_table" } - google_project_iam_member.editor: |- - { - "member": "serviceAccount:service-${data.google_project.project.number}@gcp-sa-pubsub.iam.gserviceaccount.com", - "project": "${data.google_project.project.project_id}", - "role": "roles/bigquery.dataEditor" - } - google_project_iam_member.viewer: |- - { - "member": "serviceAccount:service-${data.google_project.project.number}@gcp-sa-pubsub.iam.gserviceaccount.com", - "project": "${data.google_project.project.project_id}", - "role": "roles/bigquery.metadataViewer" - } google_pubsub_topic.example: |- { "name": "example-topic" @@ -125936,8 +142334,8 @@ resources: ], "depends_on": [ "${google_service_account.bq_write_service_account}", - "${google_project_iam_member.viewer}", - "${google_project_iam_member.editor}" + "${google_project_iam_member.bigquery_metadata_viewer}", + "${google_project_iam_member.bigquery_data_editor}" ], "name": "example-subscription", "topic": 
"${google_pubsub_topic.example.id}" @@ -125957,13 +142355,13 @@ resources: "schema": "[\n {\n \"name\": \"data\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\",\n \"description\": \"The data\"\n }\n]\n", "table_id": "example_table" } - google_project_iam_member.editor: |- + google_project_iam_member.bigquery_data_editor: |- { "member": "serviceAccount:${google_service_account.bq_write_service_account.email}", "project": "${data.google_project.project.project_id}", "role": "roles/bigquery.dataEditor" } - google_project_iam_member.viewer: |- + google_project_iam_member.bigquery_metadata_viewer: |- { "member": "serviceAccount:${google_service_account.bq_write_service_account.email}", "project": "${data.google_project.project.project_id}", @@ -125988,7 +142386,8 @@ resources: "filename_prefix": "pre-", "filename_suffix": "-", "max_bytes": 1000, - "max_duration": "300s" + "max_duration": "300s", + "max_messages": 1000 } ], "depends_on": [ @@ -126025,6 +142424,7 @@ resources: { "avro_config": [ { + "use_topic_schema": true, "write_metadata": true } ], @@ -126033,7 +142433,8 @@ resources: "filename_prefix": "pre-", "filename_suffix": "-", "max_bytes": 1000, - "max_duration": "300s" + "max_duration": "300s", + "max_messages": 1000 } ], "depends_on": [ @@ -126130,6 +142531,10 @@ resources: for the call to the push endpoint. If the subscriber never acknowledges the message, the Pub/Sub system will eventually redeliver the message. + avro_config.use_topic_schema: |- + - + (Optional) + When true, the output Cloud Storage file will be serialized using the topic schema, if it exists. avro_config.write_metadata: |- - (Optional) @@ -126211,6 +142616,10 @@ resources: The maximum duration that can elapse before a new Cloud Storage file is created. Min 1 minute, max 10 minutes, default 5 minutes. May not exceed the subscription's acknowledgement deadline. A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". 
+ cloud_storage_config.max_messages: |- + - + (Optional) + The maximum messages that can be written to a Cloud Storage file before a new file is created. Min 1000 messages. cloud_storage_config.service_account_email: |- - (Optional) @@ -126308,7 +142717,7 @@ resources: retain_acked_messages is true, then this also configures the retention of acknowledged messages, and thus configures how far back in time a subscriptions.seek can be done. Defaults to 7 days. Cannot be more - than 7 days ("604800s") or less than 10 minutes ("600s"). + than 31 days ("2678400s") or less than 10 minutes ("600s"). A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "600.5s". name: |- @@ -126538,44 +142947,118 @@ resources: ], "name": "example-topic" } + - name: example + manifest: |- + { + "ingestion_data_source_settings": [ + { + "cloud_storage": [ + { + "bucket": "test-bucket", + "match_glob": "foo/**", + "minimum_object_create_time": "2024-01-01T00:00:00Z", + "text_format": [ + { + "delimiter": " " + } + ] + } + ], + "platform_logs_settings": [ + { + "severity": "WARNING" + } + ] + } + ], + "name": "example-topic" + } argumentDocs: - create: '- Default is 20 minutes.' - delete: '- Default is 20 minutes.' - effective_labels: for all of the labels present on the resource. - id: '- an identifier for the resource with format projects/{{project}}/topics/{{name}}' - ingestion_data_source_settings: |- - - - (Optional) - Settings for ingestion from a data source into this topic. - Structure is documented below. - ingestion_data_source_settings.aws_kinesis: |- - - - (Optional) - Settings for ingestion from Amazon Kinesis Data Streams. - Structure is documented below. - ingestion_data_source_settings.aws_kinesis.aws_role_arn: |- + aws_kinesis.aws_role_arn: |- - (Required) AWS role ARN to be used for Federated Identity authentication with Kinesis. Check the Pub/Sub docs for how to set up this role and the required permissions that need to be attached to it. 
- ingestion_data_source_settings.aws_kinesis.consumer_arn: |- + aws_kinesis.consumer_arn: |- - (Required) The Kinesis consumer ARN to used for ingestion in Enhanced Fan-Out mode. The consumer must be already created and ready to be used. - ingestion_data_source_settings.aws_kinesis.gcp_service_account: |- + aws_kinesis.gcp_service_account: |- - (Required) The GCP service account to be used for Federated Identity authentication with Kinesis (via a AssumeRoleWithWebIdentity call for the provided role). The awsRoleArn must be set up with accounts.google.com:sub equals to this service account number. - ingestion_data_source_settings.aws_kinesis.stream_arn: |- + aws_kinesis.stream_arn: |- - (Required) The Kinesis stream ARN to ingest data from. + cloud_storage.avro_format: |- + - + (Optional) + Configuration for reading Cloud Storage data in Avro binary format. The + bytes of each object will be set to the data field of a Pub/Sub message. + cloud_storage.bucket: |- + - + (Required) + Cloud Storage bucket. The bucket name must be without any + prefix like "gs://". See the bucket naming requirements: + https://cloud.google.com/storage/docs/buckets#naming. + cloud_storage.match_glob: |- + - + (Optional) + Glob pattern used to match objects that will be ingested. If unset, all + objects will be ingested. See the supported patterns: + https://cloud.google.com/storage/docs/json_api/v1/objects/list#list-objects-and-prefixes-using-glob + cloud_storage.minimum_object_create_time: |- + - + (Optional) + The timestamp set in RFC3339 text format. If set, only objects with a + larger or equal timestamp will be ingested. Unset by default, meaning + all objects will be ingested. + cloud_storage.pubsub_avro_format: |- + - + (Optional) + Configuration for reading Cloud Storage data written via Cloud Storage + subscriptions(See https://cloud.google.com/pubsub/docs/cloudstorage). The + data and attributes fields of the originally exported Pub/Sub message + will be restored when publishing. 
+ cloud_storage.text_format: |- + - + (Optional) + Configuration for reading Cloud Storage data in text format. Each line of + text as specified by the delimiter will be set to the data field of a + Pub/Sub message. + Structure is documented below. + create: '- Default is 20 minutes.' + delete: '- Default is 20 minutes.' + effective_labels: for all of the labels present on the resource. + id: '- an identifier for the resource with format projects/{{project}}/topics/{{name}}' + ingestion_data_source_settings: |- + - + (Optional) + Settings for ingestion from a data source into this topic. + Structure is documented below. + ingestion_data_source_settings.aws_kinesis: |- + - + (Optional) + Settings for ingestion from Amazon Kinesis Data Streams. + Structure is documented below. + ingestion_data_source_settings.cloud_storage: |- + - + (Optional) + Settings for ingestion from Cloud Storage. + Structure is documented below. + ingestion_data_source_settings.platform_logs_settings: |- + - + (Optional) + Settings for Platform Logs regarding ingestion to Pub/Sub. If unset, + no Platform Logs will be generated.' + Structure is documented below. kms_key_name: |- - (Optional) @@ -126619,6 +143102,13 @@ resources: - (Required) Name of the topic. + platform_logs_settings.severity: |- + - + (Optional) + The minimum severity level of Platform Logs that will be written. If unspecified, + no Platform Logs will be written. + Default value is SEVERITY_UNSPECIFIED. + Possible values are: SEVERITY_UNSPECIFIED, DISABLED, DEBUG, INFO, WARNING, ERROR. project: |- - (Optional) The ID of the project in which the resource belongs. If it is not provided, the provider project is used. @@ -126644,6 +143134,12 @@ resources: - The combination of labels configured directly on the resource and default labels configured on the provider. + text_format.delimiter: |- + - + (Optional) + The delimiter to use when using the 'text' format. 
Each line of text as + specified by the delimiter will be set to the 'data' field of a Pub/Sub + message. When unset, '\n' is used. update: '- Default is 20 minutes.' importStatements: [] google_pubsub_topic_iam_policy: @@ -126928,12 +143424,25 @@ resources: manifest: |- { "authorization_mode": "AUTH_MODE_DISABLED", + "deletion_protection_enabled": true, "depends_on": [ "${google_network_connectivity_service_connection_policy.default}" ], - "lifecycle": [ + "maintenance_policy": [ { - "prevent_destroy": true + "weekly_maintenance_window": [ + { + "day": "MONDAY", + "start_time": [ + { + "hours": 1, + "minutes": 0, + "nanos": 0, + "seconds": 0 + } + ] + } + ] } ], "name": "ha-cluster", @@ -126989,12 +143498,25 @@ resources: - name: cluster-ha-single-zone manifest: |- { + "deletion_protection_enabled": true, "depends_on": [ "${google_network_connectivity_service_connection_policy.default}" ], - "lifecycle": [ + "maintenance_policy": [ { - "prevent_destroy": true + "weekly_maintenance_window": [ + { + "day": "MONDAY", + "start_time": [ + { + "hours": 1, + "minutes": 0, + "nanos": 0, + "seconds": 0 + } + ] + } + ] } ], "name": "ha-cluster-single-zone", @@ -127042,13 +143564,438 @@ resources: ], "service_class": "gcp-memorystore-redis" } + - name: primary_cluster + manifest: |- + { + "authorization_mode": "AUTH_MODE_DISABLED", + "deletion_protection_enabled": true, + "depends_on": [ + "${google_network_connectivity_service_connection_policy.primary_cluster_region_scp}" + ], + "maintenance_policy": [ + { + "weekly_maintenance_window": [ + { + "day": "MONDAY", + "start_time": [ + { + "hours": 1, + "minutes": 0, + "nanos": 0, + "seconds": 0 + } + ] + } + ] + } + ], + "name": "my-primary-cluster", + "node_type": "REDIS_HIGHMEM_MEDIUM", + "persistence_config": [ + { + "mode": "RDB", + "rdb_config": [ + { + "rdb_snapshot_period": "ONE_HOUR", + "rdb_snapshot_start_time": "2024-10-02T15:01:23Z" + } + ] + } + ], + "psc_configs": [ + { + "network": 
"${google_compute_network.producer_net.id}" + } + ], + "redis_configs": { + "maxmemory-policy": "volatile-ttl" + }, + "region": "us-east1", + "replica_count": 1, + "shard_count": 3, + "transit_encryption_mode": "TRANSIT_ENCRYPTION_MODE_DISABLED", + "zone_distribution_config": [ + { + "mode": "MULTI_ZONE" + } + ] + } + references: + psc_configs.network: google_compute_network.producer_net.id + dependencies: + google_compute_network.producer_net: |- + { + "auto_create_subnetworks": false, + "name": "mynetwork" + } + google_compute_subnetwork.primary_cluster_producer_subnet: |- + { + "ip_cidr_range": "10.0.1.0/29", + "name": "mysubnet-primary-cluster", + "network": "${google_compute_network.producer_net.id}", + "region": "us-east1" + } + google_compute_subnetwork.secondary_cluster_producer_subnet: |- + { + "ip_cidr_range": "10.0.2.0/29", + "name": "mysubnet-secondary-cluster", + "network": "${google_compute_network.producer_net.id}", + "region": "europe-west1" + } + google_network_connectivity_service_connection_policy.primary_cluster_region_scp: |- + { + "description": "Primary cluster service connection policy", + "location": "us-east1", + "name": "mypolicy-primary-cluster", + "network": "${google_compute_network.producer_net.id}", + "psc_config": [ + { + "subnetworks": [ + "${google_compute_subnetwork.primary_cluster_producer_subnet.id}" + ] + } + ], + "service_class": "gcp-memorystore-redis" + } + google_network_connectivity_service_connection_policy.secondary_cluster_region_scp: |- + { + "description": "Secondary cluster service connection policy", + "location": "europe-west1", + "name": "mypolicy-secondary-cluster", + "network": "${google_compute_network.producer_net.id}", + "psc_config": [ + { + "subnetworks": [ + "${google_compute_subnetwork.secondary_cluster_producer_subnet.id}" + ] + } + ], + "service_class": "gcp-memorystore-redis" + } + - name: secondary_cluster + manifest: |- + { + "authorization_mode": "AUTH_MODE_DISABLED", + 
"cross_cluster_replication_config": [ + { + "cluster_role": "SECONDARY", + "primary_cluster": [ + { + "cluster": "${google_redis_cluster.primary_cluster.id}" + } + ] + } + ], + "deletion_protection_enabled": true, + "depends_on": [ + "${google_network_connectivity_service_connection_policy.secondary_cluster_region_scp}" + ], + "maintenance_policy": [ + { + "weekly_maintenance_window": [ + { + "day": "WEDNESDAY", + "start_time": [ + { + "hours": 1, + "minutes": 0, + "nanos": 0, + "seconds": 0 + } + ] + } + ] + } + ], + "name": "my-secondary-cluster", + "node_type": "REDIS_HIGHMEM_MEDIUM", + "persistence_config": [ + { + "mode": "RDB", + "rdb_config": [ + { + "rdb_snapshot_period": "ONE_HOUR", + "rdb_snapshot_start_time": "2024-10-02T15:01:23Z" + } + ] + } + ], + "psc_configs": [ + { + "network": "${google_compute_network.producer_net.id}" + } + ], + "redis_configs": { + "maxmemory-policy": "volatile-ttl" + }, + "region": "europe-west1", + "replica_count": 2, + "shard_count": 3, + "transit_encryption_mode": "TRANSIT_ENCRYPTION_MODE_DISABLED", + "zone_distribution_config": [ + { + "mode": "MULTI_ZONE" + } + ] + } + references: + cross_cluster_replication_config.primary_cluster.cluster: google_redis_cluster.primary_cluster.id + psc_configs.network: google_compute_network.producer_net.id + dependencies: + google_compute_network.producer_net: |- + { + "auto_create_subnetworks": false, + "name": "mynetwork" + } + google_compute_subnetwork.primary_cluster_producer_subnet: |- + { + "ip_cidr_range": "10.0.1.0/29", + "name": "mysubnet-primary-cluster", + "network": "${google_compute_network.producer_net.id}", + "region": "us-east1" + } + google_compute_subnetwork.secondary_cluster_producer_subnet: |- + { + "ip_cidr_range": "10.0.2.0/29", + "name": "mysubnet-secondary-cluster", + "network": "${google_compute_network.producer_net.id}", + "region": "europe-west1" + } + google_network_connectivity_service_connection_policy.primary_cluster_region_scp: |- + { + "description": 
"Primary cluster service connection policy", + "location": "us-east1", + "name": "mypolicy-primary-cluster", + "network": "${google_compute_network.producer_net.id}", + "psc_config": [ + { + "subnetworks": [ + "${google_compute_subnetwork.primary_cluster_producer_subnet.id}" + ] + } + ], + "service_class": "gcp-memorystore-redis" + } + google_network_connectivity_service_connection_policy.secondary_cluster_region_scp: |- + { + "description": "Secondary cluster service connection policy", + "location": "europe-west1", + "name": "mypolicy-secondary-cluster", + "network": "${google_compute_network.producer_net.id}", + "psc_config": [ + { + "subnetworks": [ + "${google_compute_subnetwork.secondary_cluster_producer_subnet.id}" + ] + } + ], + "service_class": "gcp-memorystore-redis" + } + - name: cluster-rdb + manifest: |- + { + "authorization_mode": "AUTH_MODE_DISABLED", + "deletion_protection_enabled": true, + "depends_on": [ + "${google_network_connectivity_service_connection_policy.default}" + ], + "maintenance_policy": [ + { + "weekly_maintenance_window": [ + { + "day": "MONDAY", + "start_time": [ + { + "hours": 1, + "minutes": 0, + "nanos": 0, + "seconds": 0 + } + ] + } + ] + } + ], + "name": "rdb-cluster", + "node_type": "REDIS_SHARED_CORE_NANO", + "persistence_config": [ + { + "mode": "RDB", + "rdb_config": [ + { + "rdb_snapshot_period": "ONE_HOUR", + "rdb_snapshot_start_time": "2024-10-02T15:01:23Z" + } + ] + } + ], + "psc_configs": [ + { + "network": "${google_compute_network.producer_net.id}" + } + ], + "redis_configs": { + "maxmemory-policy": "volatile-ttl" + }, + "region": "us-central1", + "replica_count": 0, + "shard_count": 3, + "transit_encryption_mode": "TRANSIT_ENCRYPTION_MODE_DISABLED", + "zone_distribution_config": [ + { + "mode": "MULTI_ZONE" + } + ] + } + references: + psc_configs.network: google_compute_network.producer_net.id + dependencies: + google_compute_network.producer_net: |- + { + "auto_create_subnetworks": false, + "name": "mynetwork" + } 
+ google_compute_subnetwork.producer_subnet: |- + { + "ip_cidr_range": "10.0.0.248/29", + "name": "mysubnet", + "network": "${google_compute_network.producer_net.id}", + "region": "us-central1" + } + google_network_connectivity_service_connection_policy.default: |- + { + "description": "my basic service connection policy", + "location": "us-central1", + "name": "mypolicy", + "network": "${google_compute_network.producer_net.id}", + "psc_config": [ + { + "subnetworks": [ + "${google_compute_subnetwork.producer_subnet.id}" + ] + } + ], + "service_class": "gcp-memorystore-redis" + } + - name: cluster-aof + manifest: |- + { + "authorization_mode": "AUTH_MODE_DISABLED", + "deletion_protection_enabled": true, + "depends_on": [ + "${google_network_connectivity_service_connection_policy.default}" + ], + "maintenance_policy": [ + { + "weekly_maintenance_window": [ + { + "day": "MONDAY", + "start_time": [ + { + "hours": 1, + "minutes": 0, + "nanos": 0, + "seconds": 0 + } + ] + } + ] + } + ], + "name": "aof-cluster", + "node_type": "REDIS_SHARED_CORE_NANO", + "persistence_config": [ + { + "aof_config": [ + { + "append_fsync": "EVERYSEC" + } + ], + "mode": "AOF" + } + ], + "psc_configs": [ + { + "network": "${google_compute_network.producer_net.id}" + } + ], + "redis_configs": { + "maxmemory-policy": "volatile-ttl" + }, + "region": "us-central1", + "replica_count": 0, + "shard_count": 3, + "transit_encryption_mode": "TRANSIT_ENCRYPTION_MODE_DISABLED", + "zone_distribution_config": [ + { + "mode": "MULTI_ZONE" + } + ] + } + references: + psc_configs.network: google_compute_network.producer_net.id + dependencies: + google_compute_network.producer_net: |- + { + "auto_create_subnetworks": false, + "name": "mynetwork" + } + google_compute_subnetwork.producer_subnet: |- + { + "ip_cidr_range": "10.0.0.248/29", + "name": "mysubnet", + "network": "${google_compute_network.producer_net.id}", + "region": "us-central1" + } + google_network_connectivity_service_connection_policy.default: 
|- + { + "description": "my basic service connection policy", + "location": "us-central1", + "name": "mypolicy", + "network": "${google_compute_network.producer_net.id}", + "psc_config": [ + { + "subnetworks": [ + "${google_compute_subnetwork.producer_subnet.id}" + ] + } + ], + "service_class": "gcp-memorystore-redis" + } argumentDocs: + APPEND_FSYNC_UNSPECIFIED: ', NO, EVERYSEC, ALWAYS.' + DAY_OF_WEEK_UNSPECIFIED: ', MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY.' + PERSISTENCE_MODE_UNSPECIFIED: ', DISABLED, RDB, AOF.' + SNAPSHOT_PERIOD_UNSPECIFIED: ', ONE_HOUR, SIX_HOURS, TWELVE_HOURS, TWENTY_FOUR_HOURS.' + aof_config.append_fsync: |- + - + (Optional) + Optional. Available fsync modes. create: '- Default is 60 minutes.' create_time: |- - The timestamp associated with the cluster creation request. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + cross_cluster_replication_config: field to the configuration file to match the latest value in the state. + cross_cluster_replication_config.CLUSTER_ROLE_UNSPECIFIED: ': This is an independent cluster that has never participated in cross cluster replication. It allows both reads and writes.' + cross_cluster_replication_config.NONE: ': This is an independent cluster that previously participated in cross cluster replication(either as a PRIMARY or SECONDARY cluster). It allows both reads and writes.' + cross_cluster_replication_config.PRIMARY: ': This cluster serves as the replication source for secondary clusters that are replicating from it. Any data written to it is automatically replicated to its secondary clusters. It allows both reads and writes.' + cross_cluster_replication_config.SECONDARY: |- + : This cluster replicates data from the primary cluster. It allows only reads. + Possible values are: CLUSTER_ROLE_UNSPECIFIED, NONE, PRIMARY, SECONDARY. 
+ cross_cluster_replication_config.cluster_role: from SECONDARY to PRIMARY. + cross_cluster_replication_config.membership: |- + - + (Output) + An output only view of all the member clusters participating in cross cluster replication. This field is populated for all the member clusters irrespective of their cluster role. + Structure is documented below. + cross_cluster_replication_config.primary_cluster: field. + cross_cluster_replication_config.secondary_clusters: list with the new secondaries. The new secondaries are the current primary and other secondary clusters(if any). + cross_cluster_replication_config.update_time: |- + - + (Output) + The last time cross cluster replication config was updated. delete: '- Default is 30 minutes.' discovery_endpoints: |- - @@ -127071,14 +144018,88 @@ resources: is created and accessed from. Structure is documented below. id: '- an identifier for the resource with format projects/{{project}}/locations/{{region}}/clusters/{{name}}' + maintenance_policy.create_time: |- + - + (Output) + Output only. The time when the policy was created. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. + maintenance_policy.update_time: |- + - + (Output) + Output only. The time when the policy was last updated. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. + maintenance_policy.weekly_maintenance_window: |- + - + (Optional) + Optional. Maintenance window that is applied to resources covered by this policy. + Minimum 1. For the current version, the maximum number + of weekly_window is expected to be one. + Structure is documented below. + maintenance_schedule: |- + - + Upcoming maintenance schedule. + Structure is documented below. + maintenance_schedule.end_time: |- + - + (Output) + Output only. The end time of any upcoming scheduled maintenance for this cluster. 
+ A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. + maintenance_schedule.schedule_deadline_time: |- + - + (Output) + Output only. The deadline that the maintenance schedule start time + can not go beyond, including reschedule. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. + maintenance_schedule.start_time: |- + - + (Output) + Output only. The start time of any upcoming scheduled maintenance for this cluster. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. + membership.primary_cluster: |- + - + (Output) + Details of the primary cluster that is used as the replication source for all the secondary clusters. + Structure is documented below. + membership.secondary_clusters: |- + - + (Output) + List of secondary clusters that are replicating from the primary cluster. + Structure is documented below. name: |- - (Required) Unique name of the resource in this scope including project and location using the form: projects/{projectId}/locations/{locationId}/clusters/{clusterId} + persistence_config.aof_config: |- + - + (Optional) + AOF configuration. This field will be ignored if mode is not AOF. + Structure is documented below. + persistence_config.mode: |- + - + (Optional) + Optional. Controls whether Persistence features are enabled. If not provided, the existing value will be used. + persistence_config.rdb_config: |- + - + (Optional) + RDB configuration. This field will be ignored if mode is not RDB. + Structure is documented below. precise_size_gb: |- - Output only. Redis memory precise size in GB for the entire cluster. + primary_cluster.cluster: |- + - + (Optional) + The full resource path of the primary cluster in the format: projects/{project}/locations/{region}/clusters/{cluster-id} + primary_cluster.uid: |- + - + (Output) + The unique id of the primary cluster. 
psc_config.network: |- - (Optional) @@ -127098,6 +144119,22 @@ resources: Optional. The authorization mode of the Redis cluster. If not provided, auth feature is disabled for the cluster. Default value is AUTH_MODE_DISABLED. Possible values are: AUTH_MODE_UNSPECIFIED, AUTH_MODE_IAM_AUTH, AUTH_MODE_DISABLED. + psc_configs.cross_cluster_replication_config: |- + - + (Optional) + Cross cluster replication config + Structure is documented below. + psc_configs.deletion_protection_enabled: |- + - + (Optional) + Optional. Indicates if the cluster is deletion protected or not. + If the value if set to true, any delete cluster operation will fail. + Default value is true. + psc_configs.maintenance_policy: |- + - + (Optional) + Maintenance policy for a cluster + Structure is documented below. psc_configs.network: |- - (Required) @@ -127110,6 +144147,11 @@ resources: The nodeType for the Redis cluster. If not provided, REDIS_HIGHMEM_MEDIUM will be used as default Possible values are: REDIS_SHARED_CORE_NANO, REDIS_HIGHMEM_MEDIUM, REDIS_HIGHMEM_XLARGE, REDIS_STANDARD_SMALL. + psc_configs.persistence_config: |- + - + (Optional) + Persistence config (RDB, AOF) for the cluster. + Structure is documented below. psc_configs.project: |- - (Optional) The ID of the project in which the resource belongs. If it is not provided, the provider project is used. @@ -127163,6 +144205,24 @@ resources: - (Optional) Output only. The PSC connection id of the forwarding rule connected to the service attachment. + rdb_config.rdb_snapshot_period: |- + - + (Optional) + Optional. Available snapshot periods for scheduling. + rdb_config.rdb_snapshot_start_time: |- + - + (Optional) + The time that the first snapshot was/will be attempted, and to which + future snapshots will be aligned. + If not provided, the current time will be used. 
+ secondary_clusters.cluster: |- + - + (Optional) + The full resource path of the secondary cluster in the format: projects/{project}/locations/{region}/clusters/{cluster-id} + secondary_clusters.uid: |- + - + (Output) + The unique id of the secondary cluster. shard_count: |- - (Required) @@ -127170,6 +144230,24 @@ resources: size_gb: |- - Output only. Redis memory size in GB for the entire cluster. + start_time.hours: |- + - + (Optional) + Hours of day in 24 hour format. Should be from 0 to 23. + An API may choose to allow the value "24:00:00" for scenarios like business closing time. + start_time.minutes: |- + - + (Optional) + Minutes of hour of day. Must be from 0 to 59. + start_time.nanos: |- + - + (Optional) + Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. + start_time.seconds: |- + - + (Optional) + Seconds of minutes of the time. Must normally be from 0 to 59. + An API may allow the value 60 if it allows leap-seconds. state: |- - The current state of this cluster. Can be CREATING, READY, UPDATING, DELETING and SUSPENDED @@ -127180,7 +144258,7 @@ resources: state_info.update_info: |- - (Optional) - A nested object resource + A nested object resource. Structure is documented below. state_info.update_info.target_replica_count: |- - @@ -127190,10 +144268,28 @@ resources: - (Optional) Target number of shards for redis cluster. + terraform apply: to apply the change and wait for it to complete. + terraform plan: . This should reveal a diff for the modified setting. The proposed value in the Terraform plan should align with the updated value applied to the primary cluster in the preceding step. uid: |- - System assigned, unique identifier for the cluster. update: '- Default is 120 minutes.' + weekly_maintenance_window.day: |- + - + (Required) + Required. The day of week that maintenance updates occur. + weekly_maintenance_window.duration: |- + - + (Output) + Output only. Duration of the maintenance window. 
+ The current window is fixed at 1 hour. + A duration in seconds with up to nine fractional digits, + terminated by 's'. Example: "3.5s". + weekly_maintenance_window.start_time: |- + - + (Required) + Required. Start time of the window in UTC time. + Structure is documented below. zone_distribution_config.mode: |- - (Optional) @@ -127413,6 +144509,7 @@ resources: Optional. Indicates whether OSS Redis AUTH is enabled for the instance. If set to "true" AUTH is enabled on the instance. Default value is "false" meaning AUTH is disabled. + auth_string: '- AUTH String set on the instance. This field will only be populated if auth_enabled is true.' authorized_network: |- - (Optional) @@ -127730,6 +144827,7 @@ resources: dependencies: google_project.project: |- { + "deletion_policy": "DELETE", "name": "A very important project!", "project_id": "staging-project" } @@ -127981,6 +145079,7 @@ resources: dependencies: google_folder.folder: |- { + "deletion_protection": false, "display_name": "folder-name", "parent": "organizations/123456789" } @@ -128035,6 +145134,7 @@ resources: dependencies: google_folder.folder: |- { + "deletion_protection": false, "display_name": "folder-name", "parent": "organizations/123456789" } @@ -128255,6 +145355,108 @@ resources: The supported operators are: update: '- Default is 20 minutes.' importStatements: [] + google_scc_folder_scc_big_query_export: + subCategory: Security Command Center (SCC) + description: A Cloud Security Command Center (Cloud SCC) Big Query Export Config. 
+ name: google_scc_folder_scc_big_query_export + title: "" + examples: + - name: custom_big_query_export_config + manifest: |- + { + "big_query_export_id": "my-export", + "dataset": "${google_bigquery_dataset.default.id}", + "description": "Cloud Security Command Center Findings Big Query Export Config", + "filter": "state=\"ACTIVE\" AND NOT mute=\"MUTED\"", + "folder": "${google_folder.folder.folder_id}" + } + references: + dataset: google_bigquery_dataset.default.id + folder: google_folder.folder.folder_id + dependencies: + google_bigquery_dataset.default: |- + { + "dataset_id": "my_dataset_id", + "default_partition_expiration_ms": null, + "default_table_expiration_ms": 3600000, + "description": "This is a test description", + "friendly_name": "test", + "labels": { + "env": "default" + }, + "lifecycle": [ + { + "ignore_changes": [ + "${default_partition_expiration_ms}" + ] + } + ], + "location": "US" + } + google_folder.folder: |- + { + "deletion_protection": false, + "display_name": "folder-name", + "parent": "organizations/123456789" + } + argumentDocs: + big_query_export_id: |- + - + (Required) + This must be unique within the organization. + create: '- Default is 20 minutes.' + create_time: |- + - + The time at which the BigQuery export was created. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + dataset: |- + - + (Required) + The dataset to write findings' updates to. + Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". + BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + delete: '- Default is 20 minutes.' + description: |- + - + (Required) + The description of the export (max of 1024 characters). + filter: |- + - + (Required) + Expression that defines the filter to apply across create/update + events of findings. 
The + expression is a list of zero or more restrictions combined via + logical operators AND and OR. Parentheses are supported, and OR + has higher precedence than AND. + Restrictions have the form and may have + a - character in front of them to indicate negation. The fields + map to those defined in the corresponding resource. + The supported operators are: + folder: |- + - + (Required) + The folder where Cloud Security Command Center Big Query Export + Config lives in. + id: '- an identifier for the resource with format folders/{{folder}}/bigQueryExports/{{big_query_export_id}}' + most_recent_editor: |- + - + Email address of the user who last edited the BigQuery export. + name: |- + - + The resource name of this export, in the format + projects/{{project}}/bigQueryExports/{{big_query_export_id}}. + This field is provided in responses, and is ignored when provided in create requests. + principal: |- + - + The service account that needs permission to create table and upload data to the BigQuery dataset. + update: '- Default is 20 minutes.' + update_time: |- + - + The most recent time at which the BigQuery export was updated. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + importStatements: [] google_scc_management_folder_security_health_analytics_custom_module: subCategory: Security Command Center Management (SCC) description: Represents an instance of a Security Health Analytics custom module, including its full module name, display name, enablement state, and last updated time. 
@@ -128293,6 +145495,7 @@ resources: dependencies: google_folder.folder: |- { + "deletion_protection": false, "display_name": "folder-name", "parent": "organizations/123456789" } @@ -128348,6 +145551,7 @@ resources: dependencies: google_folder.folder: |- { + "deletion_protection": false, "display_name": "folder-name", "parent": "organizations/123456789" } @@ -129340,6 +146544,103 @@ resources: Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression. importStatements: [] + google_scc_organization_scc_big_query_export: + subCategory: Security Command Center (SCC) + description: A Cloud Security Command Center (Cloud SCC) Big Query Export Config. + name: google_scc_organization_scc_big_query_export + title: "" + examples: + - name: custom_big_query_export_config + manifest: |- + { + "big_query_export_id": "my-export", + "dataset": "${google_bigquery_dataset.default.id}", + "description": "Cloud Security Command Center Findings Big Query Export Config", + "filter": "state=\"ACTIVE\" AND NOT mute=\"MUTED\"", + "name": "my-export", + "organization": "123456789" + } + references: + dataset: google_bigquery_dataset.default.id + dependencies: + google_bigquery_dataset.default: |- + { + "dataset_id": "", + "default_partition_expiration_ms": null, + "default_table_expiration_ms": 3600000, + "description": "This is a test description", + "friendly_name": "test", + "labels": { + "env": "default" + }, + "lifecycle": [ + { + "ignore_changes": [ + "${default_partition_expiration_ms}" + ] + } + ], + "location": "US" + } + argumentDocs: + big_query_export_id: |- + - + (Required) + This must be unique within the organization. + create: '- Default is 20 minutes.' + create_time: |- + - + The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export on creation. 
+ A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + dataset: |- + - + (Optional) + The dataset to write findings' updates to. + Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". + BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + delete: '- Default is 20 minutes.' + description: |- + - + (Optional) + The description of the notification config (max of 1024 characters). + filter: |- + - + (Optional) + Expression that defines the filter to apply across create/update + events of findings. The + expression is a list of zero or more restrictions combined via + logical operators AND and OR. Parentheses are supported, and OR + has higher precedence than AND. + Restrictions have the form and may have + a - character in front of them to indicate negation. The fields + map to those defined in the corresponding resource. + The supported operators are: + id: '- an identifier for the resource with format organizations/{{organization}}/bigQueryExports/{{big_query_export_id}}' + most_recent_editor: |- + - + Email address of the user who last edited the BigQuery export. + This field is set by the server and will be ignored if provided on export creation or update. + name: |- + - + The resource name of this export, in the format + organizations/{{organization}}/bigQueryExports/{{big_query_export_id}}. + This field is provided in responses, and is ignored when provided in create requests. + organization: |- + - + (Required) + The organization whose Cloud Security Command Center the Big Query Export + Config lives in. + principal: |- + - + The service account that needs permission to create table and upload data to the BigQuery dataset. + update: '- Default is 20 minutes.' + update_time: |- + - + The most recent time at which the BigQuery export was updated. 
This field is set by the server and will be ignored if provided on export creation or update. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + importStatements: [] google_scc_project_custom_module: subCategory: Security Command Center (SCC) description: Represents an instance of a Security Health Analytics custom module, including its full module name, display name, enablement state, and last updated time. @@ -129624,6 +146925,101 @@ resources: If it is not provided, the provider project is used. update: '- Default is 20 minutes.' importStatements: [] + google_scc_project_scc_big_query_export: + subCategory: Security Command Center (SCC) + description: A Cloud Security Command Center (Cloud SCC) Big Query Export Config. + name: google_scc_project_scc_big_query_export + title: "" + examples: + - name: custom_big_query_export_config + manifest: |- + { + "big_query_export_id": "my-export", + "dataset": "${google_bigquery_dataset.default.id}", + "description": "Cloud Security Command Center Findings Big Query Export Config", + "filter": "state=\"ACTIVE\" AND NOT mute=\"MUTED\"", + "name": "my-export", + "project": "my-project-name" + } + references: + dataset: google_bigquery_dataset.default.id + dependencies: + google_bigquery_dataset.default: |- + { + "dataset_id": "my_dataset_id", + "default_partition_expiration_ms": null, + "default_table_expiration_ms": 3600000, + "description": "This is a test description", + "friendly_name": "test", + "labels": { + "env": "default" + }, + "lifecycle": [ + { + "ignore_changes": [ + "${default_partition_expiration_ms}" + ] + } + ], + "location": "US" + } + argumentDocs: + big_query_export_id: |- + - + (Required) + This must be unique within the organization. + create: '- Default is 20 minutes.' + create_time: |- + - + The time at which the BigQuery export was created. 
This field is set by the server and will be ignored if provided on export creation. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + dataset: |- + - + (Optional) + The dataset to write findings' updates to. + Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". + BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + delete: '- Default is 20 minutes.' + description: |- + - + (Optional) + The description of the notification config (max of 1024 characters). + filter: |- + - + (Optional) + Expression that defines the filter to apply across create/update + events of findings. The + expression is a list of zero or more restrictions combined via + logical operators AND and OR. Parentheses are supported, and OR + has higher precedence than AND. + Restrictions have the form and may have + a - character in front of them to indicate negation. The fields + map to those defined in the corresponding resource. + The supported operators are: + id: '- an identifier for the resource with format projects/{{project}}/bigQueryExports/{{big_query_export_id}}' + most_recent_editor: |- + - + Email address of the user who last edited the BigQuery export. + This field is set by the server and will be ignored if provided on export creation or update. + name: |- + - + The resource name of this export, in the format + projects/{{project}}/bigQueryExports/{{big_query_export_id}}. + This field is provided in responses, and is ignored when provided in create requests. + principal: |- + - + The service account that needs permission to create table and upload data to the BigQuery dataset. + project: |- + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + update: '- Default is 20 minutes.' 
+ update_time: |- + - + The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + importStatements: [] google_scc_source: subCategory: Security Command Center (SCC) description: A Cloud Security Command Center's (Cloud SCC) finding source. @@ -129860,6 +147256,116 @@ resources: Location ID of the parent organization. If not provided, 'global' will be used as the default location. update: '- Default is 20 minutes.' importStatements: [] + google_scc_v2_folder_scc_big_query_export: + subCategory: Security Command Center (SCC)v2 API + description: A Cloud Security Command Center (Cloud SCC) Big Query Export Config. + name: google_scc_v2_folder_scc_big_query_export + title: "" + examples: + - name: custom_big_query_export_config + manifest: |- + { + "big_query_export_id": "my-export", + "dataset": "${google_bigquery_dataset.default.id}", + "description": "Cloud Security Command Center Findings Big Query Export Config", + "filter": "state=\"ACTIVE\" AND NOT mute=\"MUTED\"", + "folder": "${google_folder.folder.folder_id}", + "location": "global" + } + references: + dataset: google_bigquery_dataset.default.id + folder: google_folder.folder.folder_id + dependencies: + google_bigquery_dataset.default: |- + { + "dataset_id": "my_dataset_id", + "default_partition_expiration_ms": null, + "default_table_expiration_ms": 3600000, + "description": "This is a test description", + "friendly_name": "test", + "labels": { + "env": "default" + }, + "lifecycle": [ + { + "ignore_changes": [ + "${default_partition_expiration_ms}" + ] + } + ], + "location": "US" + } + google_folder.folder: |- + { + "deletion_protection": false, + "display_name": "folder-name", + "parent": "organizations/123456789" + } + argumentDocs: 
+ big_query_export_id: |- + - + (Required) + This must be unique within the organization. It must consist of only lowercase letters, + numbers, and hyphens, must start with a letter, must end with either a letter or a number, + and must be 63 characters or less. + create: '- Default is 20 minutes.' + create_time: |- + - + The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export creation. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + dataset: |- + - + (Optional) + The dataset to write findings' updates to. + Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". + BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + delete: '- Default is 20 minutes.' + description: |- + - + (Optional) + The description of the notification config (max of 1024 characters). + filter: |- + - + (Optional) + Expression that defines the filter to apply across create/update + events of findings. The + expression is a list of zero or more restrictions combined via + logical operators AND and OR. Parentheses are supported, and OR + has higher precedence than AND. + Restrictions have the form and may have + a - character in front of them to indicate negation. The fields + map to those defined in the corresponding resource. + The supported operators are: + folder: |- + - + (Required) + The folder where Cloud Security Command Center Big Query Export + Config lives in. + id: '- an identifier for the resource with format folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}' + location: |- + - + (Optional) + The BigQuery export configuration is stored in this location. If not provided, use global as default. + most_recent_editor: |- + - + Email address of the user who last edited the BigQuery export. 
+ This field is set by the server and will be ignored if provided on export creation or update. + name: |- + - + The resource name of this export, in the format + folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}. + This field is provided in responses, and is ignored when provided in create requests. + principal: |- + - + The service account that needs permission to create table and upload data to the BigQuery dataset. + update: '- Default is 20 minutes.' + update_time: |- + - + The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + importStatements: [] google_scc_v2_organization_mute_config: subCategory: Security Command Center (SCC)v2 API description: Mute Findings is a volume management feature in Security Command Center that lets you manually or programmatically hide irrelevant findings, and create filters to automatically silence existing and future findings based on criteria you specify. @@ -130011,6 +147517,109 @@ resources: location Id is provided by organization. If not provided, Use global as default. update: '- Default is 20 minutes.' importStatements: [] + google_scc_v2_organization_scc_big_query_export: + subCategory: Security Command Center (SCC)v2 API + description: A Cloud Security Command Center (Cloud SCC) Big Query Export Config. 
+ name: google_scc_v2_organization_scc_big_query_export + title: "" + examples: + - name: custom_big_query_export_config + manifest: |- + { + "big_query_export_id": "my-export", + "dataset": "${google_bigquery_dataset.default.id}", + "description": "Cloud Security Command Center Findings Big Query Export Config", + "filter": "state=\"ACTIVE\" AND NOT mute=\"MUTED\"", + "location": "global", + "name": "my-export", + "organization": "123456789" + } + references: + dataset: google_bigquery_dataset.default.id + dependencies: + google_bigquery_dataset.default: |- + { + "dataset_id": "", + "default_partition_expiration_ms": null, + "default_table_expiration_ms": 3600000, + "description": "This is a test description", + "friendly_name": "test", + "labels": { + "env": "default" + }, + "lifecycle": [ + { + "ignore_changes": [ + "${default_partition_expiration_ms}" + ] + } + ], + "location": "US" + } + argumentDocs: + big_query_export_id: |- + - + (Required) + This must be unique within the organization. + create: '- Default is 20 minutes.' + create_time: |- + - + The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export on creation. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + dataset: |- + - + (Optional) + The dataset to write findings' updates to. + Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". + BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + delete: '- Default is 20 minutes.' + description: |- + - + (Optional) + The description of the notification config (max of 1024 characters). + filter: |- + - + (Optional) + Expression that defines the filter to apply across create/update + events of findings. The + expression is a list of zero or more restrictions combined via + logical operators AND and OR. 
Parentheses are supported, and OR + has higher precedence than AND. + Restrictions have the form and may have + a - character in front of them to indicate negation. The fields + map to those defined in the corresponding resource. + The supported operators are: + id: '- an identifier for the resource with format organizations/{{organization}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}' + location: |- + - + (Optional) + location Id is provided by organization. If not provided, Use global as default. + most_recent_editor: |- + - + Email address of the user who last edited the BigQuery export. + This field is set by the server and will be ignored if provided on export creation or update. + name: |- + - + (Optional) + The resource name of this export, in the format + organizations/{{organization}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}. + This field is provided in responses, and is ignored when provided in create requests. + organization: |- + - + (Required) + The organization whose Cloud Security Command Center the Big Query Export + Config lives in. + principal: |- + - + The service account that needs permission to create table and upload data to the BigQuery dataset. + update: '- Default is 20 minutes.' + update_time: |- + - + The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + importStatements: [] google_scc_v2_organization_scc_big_query_exports: subCategory: Security Command Center (SCC)v2 API description: A Cloud Security Command Center (Cloud SCC) Big Query Export Config. 
@@ -130021,17 +147630,19 @@ resources: manifest: |- { "big_query_export_id": "my-export", - "dataset": "my-dataset", + "dataset": "${google_bigquery_dataset.default.id}", "description": "Cloud Security Command Center Findings Big Query Export Config", "filter": "state=\"ACTIVE\" AND NOT mute=\"MUTED\"", "location": "global", "name": "my-export", "organization": "123456789" } + references: + dataset: google_bigquery_dataset.default.id dependencies: google_bigquery_dataset.default: |- { - "dataset_id": "my_dataset_id", + "dataset_id": "", "default_partition_expiration_ms": null, "default_table_expiration_ms": 3600000, "description": "This is a test description", @@ -130189,7 +147800,7 @@ resources: "filter": "severity = \"HIGH\"", "location": "global", "mute_config_id": "my-config", - "project": "", + "project": "my-project-name", "type": "STATIC" } argumentDocs: @@ -130323,6 +147934,540 @@ resources: "projects/[project_id]/topics/[topic]". update: '- Default is 20 minutes.' importStatements: [] + google_scc_v2_project_scc_big_query_export: + subCategory: Security Command Center (SCC)v2 API + description: A Cloud Security Command Center (Cloud SCC) Big Query Export Config. 
+ name: google_scc_v2_project_scc_big_query_export + title: "" + examples: + - name: custom_big_query_export_config + manifest: |- + { + "big_query_export_id": "my-export", + "dataset": "${google_bigquery_dataset.default.id}", + "description": "Cloud Security Command Center Findings Big Query Export Config", + "filter": "state=\"ACTIVE\" AND NOT mute=\"MUTED\"", + "location": "global", + "name": "my-export", + "project": "my-project-name" + } + references: + dataset: google_bigquery_dataset.default.id + dependencies: + google_bigquery_dataset.default: |- + { + "dataset_id": "my_dataset_id", + "default_partition_expiration_ms": null, + "default_table_expiration_ms": 3600000, + "description": "This is a test description", + "friendly_name": "test", + "labels": { + "env": "default" + }, + "lifecycle": [ + { + "ignore_changes": [ + "${default_partition_expiration_ms}" + ] + } + ], + "location": "US" + } + argumentDocs: + big_query_export_id: |- + - + (Required) + This must be unique within the organization. + create: '- Default is 20 minutes.' + create_time: |- + - + The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export on creation. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + dataset: |- + - + (Optional) + The dataset to write findings' updates to. + Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". + BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + delete: '- Default is 20 minutes.' + description: |- + - + (Optional) + The description of the notification config (max of 1024 characters). + filter: |- + - + (Optional) + Expression that defines the filter to apply across create/update + events of findings. 
The + expression is a list of zero or more restrictions combined via + logical operators AND and OR. Parentheses are supported, and OR + has higher precedence than AND. + Restrictions have the form and may have + a - character in front of them to indicate negation. The fields + map to those defined in the corresponding resource. + The supported operators are: + id: '- an identifier for the resource with format projects/{{project}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}' + location: |- + - + (Optional) + location Id is provided by organization. If not provided, use global as default. + most_recent_editor: |- + - + Email address of the user who last edited the BigQuery export. + This field is set by the server and will be ignored if provided on export creation or update. + name: |- + - + The resource name of this export, in the format + projects/{{project}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}. + This field is provided in responses, and is ignored when provided in create requests. + principal: |- + - + The service account that needs permission to create table and upload data to the BigQuery dataset. + project: |- + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + update: '- Default is 20 minutes.' + update_time: |- + - + The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + importStatements: [] + google_secret_manager_regional_secret: + subCategory: Secret Manager + description: A Regional Secret is a logical secret whose value and versions can be created and accessed within a region only. 
+ name: google_secret_manager_regional_secret + title: "" + examples: + - name: regional-secret-basic + manifest: |- + { + "annotations": { + "key1": "value1", + "key2": "value2", + "key3": "value3" + }, + "labels": { + "label": "my-label" + }, + "location": "us-central1", + "secret_id": "tf-reg-secret" + } + - name: regional-secret-with-cmek + manifest: |- + { + "customer_managed_encryption": [ + { + "kms_key_name": "kms-key" + } + ], + "depends_on": [ + "${google_kms_crypto_key_iam_member.kms-secret-binding}" + ], + "location": "us-central1", + "secret_id": "tf-reg-secret" + } + dependencies: + google_kms_crypto_key_iam_member.kms-secret-binding: |- + { + "crypto_key_id": "kms-key", + "member": "serviceAccount:service-${data.google_project.project.number}@gcp-sa-secretmanager.iam.gserviceaccount.com", + "role": "roles/cloudkms.cryptoKeyEncrypterDecrypter" + } + - name: regional-secret-with-rotation + manifest: |- + { + "depends_on": [ + "${google_pubsub_topic_iam_member.secrets_manager_access}" + ], + "location": "us-central1", + "rotation": [ + { + "next_rotation_time": "2045-11-30T00:00:00Z", + "rotation_period": "3600s" + } + ], + "secret_id": "tf-reg-secret", + "topics": [ + { + "name": "${google_pubsub_topic.topic.id}" + } + ] + } + references: + topics.name: google_pubsub_topic.topic.id + dependencies: + google_pubsub_topic.topic: |- + { + "name": "tf-topic" + } + google_pubsub_topic_iam_member.secrets_manager_access: |- + { + "member": "serviceAccount:service-${data.google_project.project.number}@gcp-sa-secretmanager.iam.gserviceaccount.com", + "role": "roles/pubsub.publisher", + "topic": "${google_pubsub_topic.topic.name}" + } + - name: regional-secret-with-ttl + manifest: |- + { + "annotations": { + "key1": "value1", + "key2": "value2", + "key3": "value3" + }, + "labels": { + "label": "my-label" + }, + "location": "us-central1", + "secret_id": "tf-reg-secret", + "ttl": "36000s" + } + - name: regional-secret-with-expire-time + manifest: |- + { + 
"annotations": { + "key1": "value1", + "key2": "value2", + "key3": "value3" + }, + "expire_time": "2055-11-30T00:00:00Z", + "labels": { + "label": "my-label" + }, + "location": "us-central1", + "secret_id": "tf-reg-secret" + } + - name: regional-secret-with-version-destroy-ttl + manifest: |- + { + "annotations": { + "key1": "value1", + "key2": "value2", + "key3": "value3" + }, + "labels": { + "label": "my-label" + }, + "location": "us-central1", + "secret_id": "tf-reg-secret", + "version_destroy_ttl": "86400s" + } + argumentDocs: + annotations: |- + - + (Optional) + Custom metadata about the regional secret. + Annotations are distinct from various forms of labels. Annotations exist to allow + client tools to store their own state information without requiring a database. + Annotation keys must be between 1 and 63 characters long, have a UTF-8 encoding of + maximum 128 bytes, begin and end with an alphanumeric character ([a-z0-9A-Z]), and + may have dashes (-), underscores (_), dots (.), and alphanumerics in between these + symbols. + The total size of annotation keys and values must be less than 16KiB. + An object containing a list of "key": value pairs. Example: + { "name": "wrench", "mass": "1.3kg", "count": "3" }. + create: '- Default is 20 minutes.' + create_time: |- + - + The time at which the regional secret was created. + customer_managed_encryption: |- + - + (Optional) + The customer-managed encryption configuration of the regional secret. + Structure is documented below. + customer_managed_encryption.kms_key_name: |- + - + (Required) + The resource name of the Cloud KMS CryptoKey used to encrypt secret payloads. + delete: '- Default is 20 minutes.' + effective_annotations: for all of the annotations present on the resource. + effective_labels: for all of the labels present on the resource. + expire_time: |- + - + (Optional) + Timestamp in UTC when the regional secret is scheduled to expire. 
This is always provided on + output, regardless of what was sent on input. A timestamp in RFC3339 UTC "Zulu" format, with + nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and + "2014-10-02T15:01:23.045123456Z". Only one of expire_time or ttl can be provided. + id: '- an identifier for the resource with format projects/{{project}}/locations/{{location}}/secrets/{{secret_id}}' + labels: |- + - + (Optional) + The labels assigned to this regional secret. + Label keys must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, + and must conform to the following PCRE regular expression: [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}-]{0,62} + Label values must be between 0 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, + and must conform to the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}-]{0,63} + No more than 64 labels can be assigned to a given resource. + An object containing a list of "key": value pairs. Example: + { "name": "wrench", "mass": "1.3kg", "count": "3" }. + location: |- + - + (Required) + The location of the regional secret. eg us-central1 + name: |- + - + The resource name of the regional secret. Format: + projects/{{project}}/locations/{{location}}/secrets/{{secret_id}} + project: |- + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + rotation: |- + - + (Optional) + The rotation time and period for a regional secret. At next_rotation_time, Secret Manager + will send a Pub/Sub notification to the topics configured on the Secret. topics must be + set to configure rotation. + Structure is documented below. + rotation.next_rotation_time: |- + - + (Optional) + Timestamp in UTC at which the Secret is scheduled to rotate. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine + fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". 
+ rotation.rotation_period: |- + - + (Optional) + The Duration between rotation notifications. Must be in seconds and at least 3600s (1h) + and at most 3153600000s (100 years). If rotationPeriod is set, next_rotation_time must + be set. next_rotation_time will be advanced by this period when the service + automatically sends rotation notifications. + secret_id: |- + - + (Required) + This must be unique within the project. + terraform_labels: |- + - + The combination of labels configured directly on the resource + and default labels configured on the provider. + topics: |- + - + (Optional) + A list of up to 10 Pub/Sub topics to which messages are published when control plane + operations are called on the regional secret or its versions. + Structure is documented below. + topics.name: |- + - + (Required) + The resource name of the Pub/Sub topic that will be published to, in the following format: + projects//topics/. For publication to succeed, the Secret Manager Service + Agent service account must have pubsub.publisher permissions on the topic. + ttl: |- + - + (Optional) + The TTL for the regional secret. A duration in seconds with up to nine fractional digits, + terminated by 's'. Example: "3.5s". Only one of ttl or expire_time can be provided. + update: '- Default is 20 minutes.' + version_aliases: |- + - + (Optional) + Mapping from version alias to version name. + A version alias is a string with a maximum length of 63 characters and can contain + uppercase and lowercase letters, numerals, and the hyphen (-) and underscore ('_') + characters. An alias string must start with a letter and cannot be the string + 'latest' or 'NEW'. No more than 50 aliases can be assigned to a given secret. + An object containing a list of "key": value pairs. Example: + { "name": "wrench", "mass": "1.3kg", "count": "3" }. + version_destroy_ttl: |- + - + (Optional) + Secret Version TTL after destruction request. + This is a part of the delayed delete feature on Secret Version. 
+ For secret with versionDestroyTtl>0, version destruction doesn't happen immediately + on calling destroy; instead the version goes to a disabled state and + the actual destruction happens after this TTL expires. It must be at least 24h. + importStatements: [] + google_secret_manager_regional_secret_iam_policy: + subCategory: Secret Manager + description: Collection of resources to manage IAM policy for Secret Manager RegionalSecret + name: google_secret_manager_regional_secret_iam_policy + title: "" + examples: + - name: policy + manifest: |- + { + "location": "${google_secret_manager_regional_secret.regional-secret-basic.location}", + "policy_data": "${data.google_iam_policy.admin.policy_data}", + "project": "${google_secret_manager_regional_secret.regional-secret-basic.project}", + "secret_id": "${google_secret_manager_regional_secret.regional-secret-basic.secret_id}" + } + references: + location: google_secret_manager_regional_secret.regional-secret-basic.location + policy_data: data.google_iam_policy.admin.policy_data + project: google_secret_manager_regional_secret.regional-secret-basic.project + secret_id: google_secret_manager_regional_secret.regional-secret-basic.secret_id + - name: policy + manifest: |- + { + "location": "${google_secret_manager_regional_secret.regional-secret-basic.location}", + "policy_data": "${data.google_iam_policy.admin.policy_data}", + "project": "${google_secret_manager_regional_secret.regional-secret-basic.project}", + "secret_id": "${google_secret_manager_regional_secret.regional-secret-basic.secret_id}" + } + references: + location: google_secret_manager_regional_secret.regional-secret-basic.location + policy_data: data.google_iam_policy.admin.policy_data + project: google_secret_manager_regional_secret.regional-secret-basic.project + secret_id: google_secret_manager_regional_secret.regional-secret-basic.secret_id + argumentDocs: + condition: |- + - (Optional) An IAM Condition for a given binding. 
+ Structure is documented below. + condition.description: '- (Optional) An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.' + condition.expression: '- (Required) Textual representation of an expression in Common Expression Language syntax.' + condition.title: '- (Required) A title for the expression, i.e. a short string describing its purpose.' + etag: '- (Computed) The etag of the IAM policy.' + google_secret_manager_regional_secret_iam_binding: ': Authoritative for a given role. Updates the IAM policy to grant a role to a list of members. Other roles within the IAM policy for the regionalsecret are preserved.' + google_secret_manager_regional_secret_iam_member: ': Non-authoritative. Updates the IAM policy to grant a role to a new member. Other members for the role for the regionalsecret are preserved.' + google_secret_manager_regional_secret_iam_policy: ': Authoritative. Sets the IAM policy for the regionalsecret and replaces any existing policy already attached.' + location: |- + - (Optional) The location of the regional secret. eg us-central1 + Used to find the parent resource to bind the IAM policy to. If not specified, + the value will be parsed from the identifier of the parent resource. If no location is provided in the parent identifier and no + location is specified, it is taken from the provider configuration. + member/members: |- + - (Required) Identities that will be granted the privilege in role. + Each entry can have one of the following values: + policy_data: |- + - (Required only by google_secret_manager_regional_secret_iam_policy) The policy data generated by + a google_iam_policy data source. + project: |- + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the project will be parsed from the identifier of the parent resource. 
If no project is provided in the parent identifier and no project is specified, the provider project is used. + role: |- + - (Required) The role that should be applied. Only one + google_secret_manager_regional_secret_iam_binding can be used per role. Note that custom roles must be of the format + [projects|organizations]/{parent-name}/roles/{role-name}. + importStatements: [] + google_secret_manager_regional_secret_version: + subCategory: Secret Manager + description: A regional secret version resource. + name: google_secret_manager_regional_secret_version + title: "" + examples: + - name: regional_secret_version_basic + manifest: |- + { + "secret": "${google_secret_manager_regional_secret.secret-basic.id}", + "secret_data": "secret-data" + } + references: + secret: google_secret_manager_regional_secret.secret-basic.id + dependencies: + google_secret_manager_regional_secret.secret-basic: |- + { + "location": "us-central1", + "secret_id": "secret-version" + } + - name: regional_secret_version_base64 + manifest: |- + { + "is_secret_data_base64": true, + "secret": "${google_secret_manager_regional_secret.secret-basic.id}", + "secret_data": "${filebase64(\"secret-data.pfx\")}" + } + references: + secret: google_secret_manager_regional_secret.secret-basic.id + dependencies: + google_secret_manager_regional_secret.secret-basic: |- + { + "location": "us-central1", + "secret_id": "secret-version" + } + - name: regional_secret_version_disabled + manifest: |- + { + "enabled": false, + "secret": "${google_secret_manager_regional_secret.secret-basic.id}", + "secret_data": "secret-data" + } + references: + secret: google_secret_manager_regional_secret.secret-basic.id + dependencies: + google_secret_manager_regional_secret.secret-basic: |- + { + "location": "us-central1", + "secret_id": "secret-version" + } + - name: regional_secret_version_deletion_policy + manifest: |- + { + "deletion_policy": "ABANDON", + "secret": "${google_secret_manager_regional_secret.secret-basic.id}", 
+ "secret_data": "secret-data" + } + references: + secret: google_secret_manager_regional_secret.secret-basic.id + dependencies: + google_secret_manager_regional_secret.secret-basic: |- + { + "location": "us-central1", + "secret_id": "secret-version" + } + - name: regional_secret_version_deletion_policy + manifest: |- + { + "deletion_policy": "DISABLE", + "secret": "${google_secret_manager_regional_secret.secret-basic.id}", + "secret_data": "secret-data" + } + references: + secret: google_secret_manager_regional_secret.secret-basic.id + dependencies: + google_secret_manager_regional_secret.secret-basic: |- + { + "location": "us-central1", + "secret_id": "secret-version" + } + argumentDocs: + create: '- Default is 20 minutes.' + create_time: |- + - + The time at which the regional secret version was created. + customer_managed_encryption: |- + - + The customer-managed encryption configuration of the regional secret. + Structure is documented below. + customer_managed_encryption.kms_key_version_name: |- + - + (Output) + The resource name of the Cloud KMS CryptoKey used to encrypt secret payloads. + delete: '- Default is 20 minutes.' + deletion_policy: |- + - (Optional) The deletion policy for the regional secret version. Setting ABANDON allows the resource + to be abandoned rather than deleted. Setting DISABLE allows the resource to be + disabled rather than deleted. Default is DELETE. Possible values are: + destroy_time: |- + - + The time at which the regional secret version was destroyed. Only present if state is DESTROYED. + enabled: |- + - + (Optional) + The current state of the regional secret version. + id: '- an identifier for the resource with format {{name}}' + is_secret_data_base64: '- (Optional) If set to ''true'', the secret data is expected to be base64-encoded string and would be sent as is.' + location: |- + - + Location of Secret Manager regional secret resource. + name: |- + - + The resource name of the regional secret version. 
Format: + projects/{{project}}/locations/{{location}}/secrets/{{secret_id}}/versions/{{version}} + secret: |- + - + (Required) + Secret Manager regional secret resource. + secret_data: |- + - + (Required) + The secret data. Must be no larger than 64KiB. + Note: This property is sensitive and will not be displayed in the plan. + update: '- Default is 20 minutes.' + version: |- + - + The version of the Regional Secret. + importStatements: [] google_secret_manager_secret: subCategory: Secret Manager description: A Secret is a logical secret whose value and versions can be accessed. @@ -130772,6 +148917,150 @@ resources: - The version of the Secret. importStatements: [] + google_secure_source_manager_branch_rule: + subCategory: Secure Source Manager + description: BranchRule is the protection rule to enforce pre-defined rules on designated branches within a repository. + name: google_secure_source_manager_branch_rule + title: "" + examples: + - name: basic + manifest: |- + { + "branch_rule_id": "my-basic-branchrule", + "include_pattern": "main", + "location": "${google_secure_source_manager_repository.repository.location}", + "repository_id": "${google_secure_source_manager_repository.repository.repository_id}" + } + references: + location: google_secure_source_manager_repository.repository.location + repository_id: google_secure_source_manager_repository.repository.repository_id + dependencies: + google_secure_source_manager_instance.instance: |- + { + "instance_id": "my-basic-instance", + "lifecycle": [ + { + "prevent_destroy": "true" + } + ], + "location": "us-central1" + } + google_secure_source_manager_repository.repository: |- + { + "instance": "${google_secure_source_manager_instance.instance.name}", + "lifecycle": [ + { + "prevent_destroy": "true" + } + ], + "location": "${google_secure_source_manager_instance.instance.location}", + "repository_id": "my-basic-repository" + } + - name: default + manifest: |- + { + "allow_stale_reviews": false, + 
"branch_rule_id": "my-initial-branchrule", + "disabled": false, + "include_pattern": "test", + "location": "${google_secure_source_manager_repository.repository.location}", + "minimum_approvals_count": 2, + "minimum_reviews_count": 2, + "repository_id": "${google_secure_source_manager_repository.repository.repository_id}", + "require_comments_resolved": true, + "require_linear_history": true, + "require_pull_request": true + } + references: + location: google_secure_source_manager_repository.repository.location + repository_id: google_secure_source_manager_repository.repository.repository_id + dependencies: + google_secure_source_manager_instance.instance: |- + { + "instance_id": "my-initial-instance", + "lifecycle": [ + { + "prevent_destroy": "true" + } + ], + "location": "us-central1" + } + google_secure_source_manager_repository.repository: |- + { + "instance": "${google_secure_source_manager_instance.instance.name}", + "lifecycle": [ + { + "prevent_destroy": "true" + } + ], + "location": "${google_secure_source_manager_instance.instance.location}", + "repository_id": "my-initial-repository" + } + argumentDocs: + allow_stale_reviews: |- + - + (Optional) + Determines if allow stale reviews or approvals before merging to the branch. + branch_rule_id: |- + - + (Required) + The ID for the BranchRule. + create: '- Default is 20 minutes.' + create_time: |- + - + Time the BranchRule was created in UTC. + delete: '- Default is 20 minutes.' + disabled: |- + - + (Optional) + Determines if the branch rule is disabled or not. + id: '- an identifier for the resource with format projects/{{project}}/locations/{{location}}/repositories/{{repository_id}}/branchRules/{{branch_rule_id}}' + include_pattern: |- + - + (Required) + The BranchRule matches branches based on the specified regular expression. Use .* to match all branches. + location: |- + - + (Required) + The location for the Repository. 
+ minimum_approvals_count: |- + - + (Optional) + The minimum number of approvals required for the branch rule to be matched. + minimum_reviews_count: |- + - + (Optional) + The minimum number of reviews required for the branch rule to be matched. + name: |- + - + The resource name for the BranchRule. + project: |- + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + repository_id: |- + - + (Required) + The ID for the Repository. + require_comments_resolved: |- + - + (Optional) + Determines if require comments resolved before merging to the branch. + require_linear_history: |- + - + (Optional) + Determines if require linear history before merging to the branch. + require_pull_request: |- + - + (Optional) + Determines if the branch rule requires a pull request or not. + uid: |- + - + Unique identifier of the BranchRule. + update: '- Default is 20 minutes.' + update_time: |- + - + Time the BranchRule was updated in UTC. + importStatements: [] google_secure_source_manager_instance: subCategory: Secure Source Manager description: Instances are deployed to an available Google Cloud region and are accessible via their web interface. 
@@ -130785,6 +149074,11 @@ resources: "labels": { "foo": "bar" }, + "lifecycle": [ + { + "prevent_destroy": "true" + } + ], "location": "us-central1" } - name: default @@ -130794,28 +149088,21 @@ resources: "${google_kms_crypto_key_iam_member.crypto_key_binding}" ], "instance_id": "my-instance", - "kms_key": "${google_kms_crypto_key.crypto_key.id}", + "kms_key": "my-key", + "lifecycle": [ + { + "prevent_destroy": "true" + } + ], "location": "us-central1" } - references: - kms_key: google_kms_crypto_key.crypto_key.id dependencies: - google_kms_crypto_key.crypto_key: |- - { - "key_ring": "${google_kms_key_ring.key_ring.id}", - "name": "my-key" - } google_kms_crypto_key_iam_member.crypto_key_binding: |- { - "crypto_key_id": "${google_kms_crypto_key.crypto_key.id}", + "crypto_key_id": "my-key", "member": "serviceAccount:service-${data.google_project.project.number}@gcp-sa-sourcemanager.iam.gserviceaccount.com", "role": "roles/cloudkms.cryptoKeyEncrypterDecrypter" } - google_kms_key_ring.key_ring: |- - { - "location": "us-central1", - "name": "my-keyring" - } - name: default manifest: |- { @@ -130824,6 +149111,11 @@ resources: "${time_sleep.wait_120_seconds}" ], "instance_id": "my-instance", + "lifecycle": [ + { + "prevent_destroy": "true" + } + ], "location": "us-central1", "private_config": [ { @@ -130922,6 +149214,11 @@ resources: "${time_sleep.wait_120_seconds}" ], "instance_id": "my-instance", + "lifecycle": [ + { + "prevent_destroy": "true" + } + ], "location": "us-central1", "private_config": [ { @@ -131131,6 +149428,11 @@ resources: "${time_sleep.wait_120_seconds}" ], "instance_id": "my-instance", + "lifecycle": [ + { + "prevent_destroy": "true" + } + ], "location": "us-central1", "private_config": [ { @@ -131296,6 +149598,22 @@ resources: "${google_privateca_ca_pool_iam_binding.ca_pool_binding}" ] } + - name: default + manifest: |- + { + "instance_id": "my-instance", + "lifecycle": [ + { + "prevent_destroy": "true" + } + ], + "location": "us-central1", + 
"workforce_identity_federation_config": [ + { + "enabled": true + } + ] + } argumentDocs: create: '- Default is 60 minutes.' create_time: |- @@ -131381,6 +149699,16 @@ resources: update_time: |- - Time the Instance was updated in UTC. + workforce_identity_federation_config: |- + - + (Optional) + Configuration for Workforce Identity Federation to support third party identity provider. + If unset, defaults to the Google OIDC IdP. + Structure is documented below. + workforce_identity_federation_config.enabled: |- + - + (Required) + 'Whether Workforce Identity Federation is enabled.' importStatements: [] google_secure_source_manager_instance_iam_policy: subCategory: Secure Source Manager @@ -131438,6 +149766,11 @@ resources: manifest: |- { "instance": "${google_secure_source_manager_instance.instance.name}", + "lifecycle": [ + { + "prevent_destroy": "true" + } + ], "location": "us-central1", "repository_id": "my-repository" } @@ -131447,6 +149780,11 @@ resources: google_secure_source_manager_instance.instance: |- { "instance_id": "my-instance", + "lifecycle": [ + { + "prevent_destroy": "true" + } + ], "location": "us-central1" } - name: default @@ -131464,6 +149802,11 @@ resources: } ], "instance": "${google_secure_source_manager_instance.instance.name}", + "lifecycle": [ + { + "prevent_destroy": "true" + } + ], "location": "us-central1", "repository_id": "my-repository" } @@ -131473,6 +149816,11 @@ resources: google_secure_source_manager_instance.instance: |- { "instance_id": "my-instance", + "lifecycle": [ + { + "prevent_destroy": "true" + } + ], "location": "us-central1" } argumentDocs: @@ -132744,7 +151092,7 @@ resources: importStatements: [] google_service_networking_vpc_service_controls: subCategory: Service Networking - description: Manages the VPC Service Controls configuration for a service networking connection + description: 'Manages the VPC Service Controls configuration for a service networking connection When enabled, Google Cloud makes the following 
route configuration changes in the service producer VPC network:' name: google_service_networking_vpc_service_controls title: "" examples: @@ -132829,6 +151177,7 @@ resources: dependencies: google_project.my_project: |- { + "deletion_policy": "DELETE", "name": "tf-test-project", "org_id": "123456789", "project_id": "quota", @@ -132854,6 +151203,7 @@ resources: dependencies: google_project.my_project: |- { + "deletion_policy": "DELETE", "name": "tf-test-project", "org_id": "123456789", "project_id": "quota", @@ -132879,6 +151229,7 @@ resources: dependencies: google_project.my_project: |- { + "deletion_policy": "DELETE", "name": "tf-test-project", "org_id": "123456789", "project_id": "quota", @@ -132923,6 +151274,130 @@ resources: The service that the metrics belong to, e.g. compute.googleapis.com. update: '- Default is 20 minutes.' importStatements: [] + google_site_verification_owner: + subCategory: Site Verification + description: Manages additional owners on verified web resources. + name: google_site_verification_owner + title: "" + examples: + - name: example + manifest: |- + { + "email": "user@example.com", + "web_resource_id": "${google_site_verification_web_resource.example.id}" + } + references: + web_resource_id: google_site_verification_web_resource.example.id + dependencies: + google_site_verification_web_resource.example: |- + { + "site": [ + { + "identifier": "${data.google_site_verification_token.token.identifier}", + "type": "${data.google_site_verification_token.token.type}" + } + ], + "verification_method": "${data.google_site_verification_token.token.verification_method}" + } + google_storage_bucket.bucket: |- + { + "location": "US", + "name": "example-storage-bucket" + } + google_storage_bucket_object.object: |- + { + "bucket": "${google_storage_bucket.bucket.name}", + "content": "google-site-verification: ${data.google_site_verification_token.token.token}", + "name": "${data.google_site_verification_token.token.token}" + } + 
google_storage_object_access_control.public_rule: |-
+ {
+ "bucket": "${google_storage_bucket.bucket.name}",
+ "entity": "allUsers",
+ "object": "${google_storage_bucket_object.object.name}",
+ "role": "READER"
+ }
+ argumentDocs:
+ create: '- Default is 20 minutes.'
+ delete: '- Default is 20 minutes.'
+ email: |-
+ -
+ (Required)
+ The email of the user to be added as an owner.
+ web_resource_id: |-
+ -
+ (Required)
+ The id of the web resource to which the owner will be added, in the form webResource/,
+ such as webResource/https://www.example.com/
+ importStatements: []
+ google_site_verification_web_resource:
+ subCategory: Site Verification
+ description: A web resource is a website or domain with verified ownership.
+ name: google_site_verification_web_resource
+ title: ""
+ examples:
+ - name: example
+ manifest: |-
+ {
+ "depends_on": [
+ "${google_dns_record_set.example}"
+ ],
+ "site": [
+ {
+ "identifier": "${data.google_site_verification_token.token.identifier}",
+ "type": "${data.google_site_verification_token.token.type}"
+ }
+ ],
+ "verification_method": "${data.google_site_verification_token.token.verification_method}"
+ }
+ references:
+ site.identifier: data.google_site_verification_token.token.identifier
+ site.type: data.google_site_verification_token.token.type
+ verification_method: data.google_site_verification_token.token.verification_method
+ dependencies:
+ google_dns_record_set.example: |-
+ {
+ "managed_zone": "example.com",
+ "name": "www.example.com.",
+ "rrdatas": [
+ "${data.google_site_verification_token.token.token}"
+ ],
+ "ttl": 86400,
+ "type": "TXT"
+ }
+ argumentDocs:
+ create: '- Default is 60 minutes.'
+ delete: '- Default is 20 minutes.'
+ id: '- an identifier for the resource with format webResource/{{web_resource_id}}'
+ owners: |-
+ -
+ The email addresses of all direct, verified owners of this exact property. Indirect owners —
+ for example verified owners of the containing domain—are not included in this list.
+ site: |- + - + (Required) + Container for the address and type of a site for which a verification token will be verified. + Structure is documented below. + site.identifier: |- + - + (Required) + The site identifier. If the type is set to SITE, the identifier is a URL. If the type is + set to INET_DOMAIN, the identifier is a domain name. + site.type: |- + - + (Required) + The type of resource to be verified. + Possible values are: INET_DOMAIN, SITE. + verification_method: |- + - + (Required) + The verification method for the Site Verification system to use to verify + this site or domain. + Possible values are: ANALYTICS, DNS_CNAME, DNS_TXT, FILE, META, TAG_MANAGER. + web_resource_id: |- + - + The string used to identify this web resource. + importStatements: [] google_sourcerepo_repository: subCategory: Cloud Source Repositories description: A repository (or repo) is a Git repository storing versioned source content. @@ -132962,6 +151437,7 @@ resources: argumentDocs: PROTOBUF: ', JSON.' create: '- Default is 20 minutes.' + create_ignore_already_exists: '- (Optional) If set to true, skip repository creation if a repository with the same name already exists.' delete: '- Default is 20 minutes.' id: '- an identifier for the resource with format projects/{{project}}/repos/{{name}}' name: |- @@ -133035,6 +151511,157 @@ resources: google_sourcerepo_repository_iam_binding can be used per role. Note that custom roles must be of the format [projects|organizations]/{parent-name}/roles/{role-name}. importStatements: [] + google_spanner_backup_schedule: + subCategory: Cloud Spanner + description: A backup schedule for a Cloud Spanner Database. 
+ name: google_spanner_backup_schedule + title: "" + examples: + - name: full-backup + manifest: |- + { + "database": "${google_spanner_database.database.name}", + "full_backup_spec": [ + {} + ], + "instance": "${google_spanner_instance.main.name}", + "name": "backup-schedule-id", + "retention_duration": "31620000s", + "spec": [ + { + "cron_spec": [ + { + "text": "0 12 * * *" + } + ] + } + ] + } + references: + database: google_spanner_database.database.name + instance: google_spanner_instance.main.name + dependencies: + google_spanner_database.database: |- + { + "ddl": [ + "CREATE TABLE t1 (t1 INT64 NOT NULL,) PRIMARY KEY(t1)", + "CREATE TABLE t2 (t2 INT64 NOT NULL,) PRIMARY KEY(t2)" + ], + "deletion_protection": true, + "instance": "${google_spanner_instance.main.name}", + "name": "database-id", + "version_retention_period": "3d" + } + google_spanner_instance.main: |- + { + "config": "regional-europe-west1", + "display_name": "main-instance", + "name": "instance-id", + "num_nodes": 1 + } + - name: incremental-backup + manifest: |- + { + "database": "${google_spanner_database.database.name}", + "incremental_backup_spec": [ + {} + ], + "instance": "${google_spanner_instance.main.name}", + "name": "backup-schedule-id", + "retention_duration": "31620000s", + "spec": [ + { + "cron_spec": [ + { + "text": "0 12 * * *" + } + ] + } + ] + } + references: + database: google_spanner_database.database.name + instance: google_spanner_instance.main.name + dependencies: + google_spanner_database.database: |- + { + "ddl": [ + "CREATE TABLE t1 (t1 INT64 NOT NULL,) PRIMARY KEY(t1)", + "CREATE TABLE t2 (t2 INT64 NOT NULL,) PRIMARY KEY(t2)" + ], + "deletion_protection": true, + "instance": "${google_spanner_instance.main.name}", + "name": "database-id", + "version_retention_period": "3d" + } + google_spanner_instance.main: |- + { + "config": "regional-europe-west1", + "display_name": "main-instance", + "edition": "ENTERPRISE", + "name": "instance-id", + "num_nodes": 1 + } + 
argumentDocs:
+ create: '- Default is 20 minutes.'
+ database: |-
+ -
+ (Required)
+ The database to create the backup schedule on.
+ delete: '- Default is 20 minutes.'
+ full_backup_spec: |-
+ -
+ (Optional)
+ The schedule creates only full backups.
+ id: '- an identifier for the resource with format projects/{{project}}/instances/{{instance}}/databases/{{database}}/backupSchedules/{{name}}'
+ incremental_backup_spec: |-
+ -
+ (Optional)
+ The schedule creates incremental backup chains.
+ instance: |-
+ -
+ (Required)
+ The instance to create the database on.
+ name: |-
+ -
+ (Optional)
+ A unique identifier for the backup schedule, which cannot be changed after
+ the backup schedule is created. Values are of the form [a-z][-a-z0-9]*[a-z0-9].
+ project: |-
+ - (Optional) The ID of the project in which the resource belongs.
+ If it is not provided, the provider project is used.
+ retention_duration: |-
+ -
+ (Required)
+ At what relative time in the future, compared to its creation time, the backup should be deleted, e.g. keep backups for 7 days.
+ A duration in seconds with up to nine fractional digits, ending with 's'. Example: '3.5s'.
+ You can set this to a value up to 366 days.
+ spec: |-
+ -
+ (Optional)
+ Defines specifications of the backup schedule.
+ Structure is documented below.
+ spec.cron_spec: |-
+ -
+ (Optional)
+ Cron style schedule specification.
+ Structure is documented below.
+ spec.cron_spec.text: |-
+ -
+ (Optional)
+ Textual representation of the crontab. User can customize the
+ backup frequency and the backup version time using the cron
+ expression. The version time must be in UTC timezone.
+ The backup will contain an externally consistent copy of the
+ database at the version time. Allowed frequencies are 12 hour, 1 day,
+ 1 week and 1 month. Examples of valid cron specifications:
+ 0 2/12 * * * : every 12 hours at (2, 14) hours past midnight in UTC.
+ 0 2,14 * * * : every 12 hours at (2,14) hours past midnight in UTC.
+ 0 2 * * * : once a day at 2 past midnight in UTC. + 0 2 * * 0 : once a week every Sunday at 2 past midnight in UTC. + 0 2 8 * * : once a month on 8th day at 2 past midnight in UTC. + update: '- Default is 20 minutes.' + importStatements: [] google_spanner_database: subCategory: Cloud Spanner description: A Cloud Spanner Database which is hosted on a Spanner instance. @@ -133102,9 +151729,14 @@ resources: Structure is documented below. encryption_config.kms_key_name: |- - - (Required) + (Optional) Fully qualified name of the KMS key to use to encrypt this database. This key must exist in the same location as the Spanner Database. + encryption_config.kms_key_names: |- + - + (Optional) + Fully qualified name of the KMS keys to use to encrypt this database. The keys must exist + in the same locations as the Spanner Database. id: '- an identifier for the resource with format {{instance}}/{{name}}' instance: |- - @@ -133113,8 +151745,8 @@ resources: name: |- - (Required) - A unique identifier for the database, which cannot be changed after - the instance is created. Values are of the form [a-z][-a-z0-9]*[a-z0-9]. + A unique identifier for the database, which cannot be changed after the + instance is created. Values are of the form [a-z][-_a-z0-9]*[a-z0-9]. project: |- - (Optional) The ID of the project in which the resource belongs. If it is not provided, the provider project is used. @@ -133194,6 +151826,7 @@ resources: "config": "regional-us-central1", "default_backup_schedule_type": "AUTOMATIC", "display_name": "Test Spanner Instance", + "edition": "STANDARD", "labels": { "foo": "bar" }, @@ -133245,6 +151878,16 @@ resources: "num_nodes": 2 } argumentDocs: + asymmetric_autoscaling_options.overrides: |- + - + (Required) + A nested object resource. + Structure is documented below. + asymmetric_autoscaling_options.replica_selection: |- + - + (Required) + A nested object resource. + Structure is documented below. 
autoscaling_config: |- - (Optional) @@ -133253,6 +151896,11 @@ resources: OUTPUT_ONLY fields and reflect the current compute capacity allocated to the instance. Structure is documented below. + autoscaling_config.asymmetric_autoscaling_options: |- + - + (Optional) + Asymmetric autoscaling options for specific replicas. + Structure is documented below. autoscaling_config.autoscaling_limits: |- - (Optional) @@ -133325,6 +151973,11 @@ resources: (Required) The descriptive name for this instance as it appears in UIs. Must be unique per project and between 4 and 30 characters in length. + edition: |- + - + (Optional) + The edition selected for this instance. Different editions provide different capabilities at different price points. + Possible values are: EDITION_UNSPECIFIED, STANDARD, ENTERPRISE, ENTERPRISE_PLUS. effective_labels: for all of the labels present on the resource. force_destroy: |- - (Optional) When deleting a spanner instance, this boolean option will delete all backups of this instance. @@ -133347,6 +152000,19 @@ resources: (Optional) The number of nodes allocated to this instance. Exactly one of either node_count or processing_units must be present in terraform. + overrides.autoscaling_limits: |- + - + (Required) + A nested object resource. + Structure is documented below. + overrides.autoscaling_limits.max_nodes: |- + - + (Required) + The maximum number of nodes for this specific replica. + overrides.autoscaling_limits.min_nodes: |- + - + (Required) + The minimum number of nodes for this specific replica. processing_units: |- - (Optional) @@ -133355,6 +152021,10 @@ resources: project: |- - (Optional) The ID of the project in which the resource belongs. If it is not provided, the provider project is used. + replica_selection.location: |- + - + (Required) + The location of the replica to apply asymmetric autoscaling options. state: |- - Instance status: CREATING or READY. 
@@ -133500,7 +152170,7 @@ resources: google_sql_database_instance.instance: |- { "database_version": "MYSQL_8_0", - "deletion_protection": "true", + "deletion_protection": true, "name": "my-database-instance", "region": "us-central1", "settings": [ @@ -133522,7 +152192,7 @@ resources: google_sql_database_instance.instance: |- { "database_version": "POSTGRES_14", - "deletion_protection": "true", + "deletion_protection": true, "name": "my-database-instance", "region": "us-central1", "settings": [ @@ -133764,8 +152434,44 @@ resources: } ] } + - name: main + manifest: |- + { + "database_version": "MYSQL_8_0", + "name": "psc-enabled-main-instance", + "settings": [ + { + "availability_type": "REGIONAL", + "backup_configuration": [ + { + "binary_log_enabled": true, + "enabled": true + } + ], + "ip_configuration": [ + { + "ipv4_enabled": false, + "psc_config": [ + { + "allowed_consumer_projects": [ + "allowed-consumer-project-name" + ], + "psc_auto_connections": [ + { + "consumer_network": "network-name", + "consumer_service_project_id": "project-id" + } + ], + "psc_enabled": true + } + ] + } + ], + "tier": "db-f1-micro" + } + ] + } argumentDocs: - ALLOW_UNENCRYPTED_AND_ENCRYPTED: when require_ssl=false and ENCRYPTED_ONLY otherwise. OUTGOING: address is the source address of connections originating from the instance, if supported. PRIMARY: address is an address that can accept incoming connections. PRIVATE: 'address is an address for an instance which has been configured to use private networking see: Private IP.' @@ -133786,9 +152492,9 @@ resources: database_version: |- - (Required) The MySQL, PostgreSQL or SQL Server version to use. Supported values include MYSQL_5_6, - MYSQL_5_7, MYSQL_8_0, POSTGRES_9_6,POSTGRES_10, POSTGRES_11, - POSTGRES_12, POSTGRES_13, POSTGRES_14, POSTGRES_15, SQLSERVER_2017_STANDARD, - SQLSERVER_2017_ENTERPRISE, SQLSERVER_2017_EXPRESS, SQLSERVER_2017_WEB. 
+ MYSQL_5_7, MYSQL_8_0, MYSQL_8_4, POSTGRES_9_6,POSTGRES_10, POSTGRES_11, + POSTGRES_12, POSTGRES_13, POSTGRES_14, POSTGRES_15, POSTGRES_16, POSTGRES_17, + SQLSERVER_2017_STANDARD, SQLSERVER_2017_ENTERPRISE, SQLSERVER_2017_EXPRESS, SQLSERVER_2017_WEB. SQLSERVER_2019_STANDARD, SQLSERVER_2019_ENTERPRISE, SQLSERVER_2019_EXPRESS, SQLSERVER_2019_WEB. Database Version Policies @@ -133799,7 +152505,7 @@ resources: When the field is set to true or unset in Terraform state, a terraform apply or terraform destroy that would delete the instance will fail. When the field is set to false, deleting the instance is allowed. - dsn_name: '- The DNS name of the instance. See Connect to an instance using Private Service Connect for more details.' + dns_name: '- The DNS name of the instance. See Connect to an instance using Private Service Connect for more details.' encryption_key_name: |- - (Optional) The full path to the encryption key used for the CMEK disk encryption. Setting @@ -133849,10 +152555,11 @@ resources: the provider region will be used instead. replica_configuration: |- - (Optional) The configuration for replication. The - configuration is detailed below. Valid only for MySQL instances. + configuration is detailed below. replica_configuration.ca_certificate: |- - (Optional) PEM representation of the trusted CA's x509 certificate. + replica_configuration.cascadable_replica: '- (Optional) Specifies if the replica is a cascadable replica. If true, instance must be in different region from primary.' replica_configuration.client_certificate: |- - (Optional) PEM representation of the replica's x509 certificate. @@ -133864,7 +152571,8 @@ resources: between connect retries. MySQL's default is 60 seconds. replica_configuration.dump_file_path: |- - (Optional) Path to a SQL file in GCS from which replica - instances are created. Format is gs://bucket/filename. + instances are created. Format is gs://bucket/filename. 
Note, if the master + instance is a source representation instance this field must be present. replica_configuration.failover_target: |- - (Optional) Specifies if the replica is the failover target. If the field is set to true the replica will be designated as a failover replica. @@ -133880,6 +152588,7 @@ resources: replica_configuration.verify_server_certificate: |- - (Optional) True if the master's common name value is checked during the SSL handshake. + replica_names: '- (Optional, Computed) List of replica names. Can be updated.' restore_backup_context: |- - (optional) The context needed to restore the database to a backup run. This field will cause Terraform to trigger the database to restore from the backup run indicated. The configuration is detailed below. @@ -133935,7 +152644,7 @@ resources: configuration starts. settings.backup_configuration.transaction_log_retention_days: '- (Optional) The number of days of transaction logs we retain for point in time restore, from 1-7. For PostgreSQL Enterprise Plus instances, the number of days of retained transaction logs can be set from 1 to 35.' settings.collation: '- (Optional) The name of server instance collation.' - settings.connector_enforcement: '- (Optional) Enables the enforcement of Cloud SQL Auth Proxy or Cloud SQL connectors for all the connections. If enabled, all the direct connections are rejected.' + settings.connector_enforcement: '- (Optional) Control the enforcement of Cloud SQL Auth Proxy or Cloud SQL connectors for all the connections, can be REQUIRED or NOT_REQUIRED. If enabled, all the direct connections are rejected.' settings.data_cache_config.data_cache_enabled: '- (Optional) Whether data cache is enabled for the instance. Defaults to false. Can be used with MYSQL and PostgreSQL only.' settings.deletion_protection_enabled: . settings.deny_maintenance_period.end_date: '- (Required) "deny maintenance period" end date. 
If the year of the end date is empty, the year of the start date also must be empty. In this case, it means the no maintenance interval recurs every year. The date is in format yyyy-m-dd (the month is without leading zeros)i.e., 2020-1-01, or 2020-11-01, or mm-dd, i.e., 11-01' @@ -133955,6 +152664,8 @@ resources: settings.insights_config.record_client_address: '- True if Query Insights will record client address when enabled.' settings.ip_configuration.allocated_ip_range: '- (Optional) The name of the allocated ip range for the private ip CloudSQL instance. For example: "google-managed-services-default". If set, the instance ip will be created in the allocated range. The range name must comply with RFC 1035. Specifically, the name must be 1-63 characters long and match the regular expression a-z?.' settings.ip_configuration.allowed_consumer_projects: '- (Optional) List of consumer projects that are allow-listed for PSC connections to this instance. This instance can be connected to with PSC from any network in these projects. Each consumer project in this list may be represented by a project number (numeric) or by a project id (alphanumeric).' + settings.ip_configuration.consumer_network: '- "The consumer network of this consumer endpoint. This must be a resource path that includes both the host project and the network name. For example, projects/project1/global/networks/network1. The consumer host project of this network might be different from the consumer service project."' + settings.ip_configuration.consumer_service_project_id: '- (Optional) The project ID of consumer service project of this consumer endpoint.' settings.ip_configuration.enable_private_path_for_google_cloud_services: '- (Optional) Whether Google Cloud services such as BigQuery are allowed to access data in this Cloud SQL instance over a private IP connection. SQLSERVER database type is not supported.' 
settings.ip_configuration.expiration_time: |- - (Optional) The RFC 3339 @@ -133970,10 +152681,11 @@ resources: Specifying a network enables private IP. At least ipv4_enabled must be enabled or a private_network must be configured. This setting can be updated, but it cannot be removed after it is set. + settings.ip_configuration.psc_config.psc_auto_connections: subblock - (Optional) A comma-separated list of networks or a comma-separated list of network-project pairs. Each project in this list is represented by a project number (numeric) or by a project ID (alphanumeric). This allows Private Service Connect connections to be created automatically for the specified networks. settings.ip_configuration.psc_enabled: '- (Optional) Whether PSC connectivity is enabled for this instance.' - settings.ip_configuration.require_ssl: '- (Optional, Deprecated) Whether SSL connections over IP are enforced or not. To change this field, also set the corresponding value in ssl_mode. It will be fully deprecated in a future major release. For now, please use ssl_mode with a compatible require_ssl value instead.' - settings.ip_configuration.server_ca_mode: '- (Optional) Specify how the server certificate''s Certificate Authority is hosted. Supported value is GOOGLE_MANAGED_INTERNAL_CA.' - settings.ip_configuration.ssl_mode: '- (Optional) Specify how SSL connection should be enforced in DB connections. This field provides more SSL enforcement options compared to require_ssl. To change this field, also set the correspoding value in require_ssl.' + settings.ip_configuration.server_ca_mode: '- (Optional) Specify how the server certificate''s Certificate Authority is hosted. Supported values are GOOGLE_MANAGED_INTERNAL_CA and GOOGLE_MANAGED_CAS_CA.' + settings.ip_configuration.server_ca_pool: '- (Optional) The resource name of the server CA pool for an instance with CUSTOMER_MANAGED_CAS_CA as the server_ca_mode.' 
+ settings.ip_configuration.ssl_mode: '- (Optional) Specify how SSL connection should be enforced in DB connections. Supported values are ALLOW_UNENCRYPTED_AND_ENCRYPTED, ENCRYPTED_ONLY, and TRUSTED_CLIENT_CERTIFICATE_REQUIRED (not supported for SQL Server). See API reference doc for details.' settings.ip_configuration.value: |- - (Required) A CIDR notation IPv4 or IPv6 address that is allowed to access this instance. Must be set even if other two attributes are not for @@ -134007,7 +152719,7 @@ resources: settings.version: |- - Used to make sure changes to the settings block are atomic. - ssl_mode: field. + terraform plan: outputs "0 to add, 0 to destroy" update: '- Default is 90 minutes.' importStatements: [] google_sql_source_representation_instance: @@ -134198,7 +152910,7 @@ resources: { "database_flags": [ { - "name": "cloudsql_iam_authentication", + "name": "cloudsql.iam_authentication", "value": "on" } ], @@ -134228,7 +152940,7 @@ resources: { "database_flags": [ { - "name": "cloudsql_iam_authentication", + "name": "cloudsql.iam_authentication", "value": "on" } ], @@ -134305,9 +153017,10 @@ resources: type: |- - (Optional) The user type. It determines the method to authenticate the user during login. The default is the database's built-in user type. Flags - include "BUILT_IN", "CLOUD_IAM_USER", and "CLOUD_IAM_SERVICE_ACCOUNT" for both - Postgres and MySQL. - MySQL also includes "CLOUD_IAM_GROUP", "CLOUD_IAM_GROUP_USER" and "CLOUD_IAM_GROUP_SERVICE_ACCOUNT". + include "BUILT_IN", "CLOUD_IAM_USER", "CLOUD_IAM_SERVICE_ACCOUNT", "CLOUD_IAM_GROUP", + "CLOUD_IAM_GROUP_USER" and "CLOUD_IAM_GROUP_SERVICE_ACCOUNT" for + Postgres + and MySQL. update: '- Default is 10 minutes.' 
importStatements: [] google_storage_bucket: @@ -134414,13 +153127,23 @@ resources: "name": "no-public-access-bucket", "public_access_prevention": "enforced" } + - name: auto-expire + manifest: |- + { + "force_destroy": true, + "hierarchical_namespace": { + "enabled": true + }, + "location": "US", + "name": "hns-enabled-bucket" + } argumentDocs: action.storage_class: '- (Required if action type is SetStorageClass) The target Storage Class of objects affected by this Lifecycle Rule. Supported values include: STANDARD, MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, ARCHIVE.' action.type: '- The type of the action of this Lifecycle Rule. Supported values include: Delete, SetStorageClass and AbortIncompleteMultipartUpload.' autoclass: '- (Optional) The bucket''s Autoclass configuration. Structure is documented below.' autoclass.enabled: '- (Required) While set to true, autoclass automatically transitions objects in your bucket to appropriate storage classes based on each object''s access pattern.' autoclass.terminal_storage_class: '- (Optional) The storage class that objects in the bucket eventually transition to if they are not read for a certain length of time. Supported values include: NEARLINE, ARCHIVE.' - condition.age: '- (Optional) Minimum age of an object in days to satisfy this condition. If not supplied alongside another condition and without setting no_age to true, a default age of 0 will be set.' + condition.age: '- (Optional) Minimum age of an object in days to satisfy this condition. Note To set 0 value of age, send_age_if_zero should be set true otherwise 0 value of age field will be ignored.' condition.created_before: '- (Optional) A date in the RFC 3339 format YYYY-MM-DD. This condition is satisfied when an object is created before midnight of the specified date in UTC.' condition.custom_time_before: '- (Optional) A date in the RFC 3339 format YYYY-MM-DD. 
This condition is satisfied when the customTime metadata for the object is set to an earlier date than the date used in this lifecycle condition.' condition.days_since_custom_time: "- (Optional)\tDays since the date set in the customTime metadata for the object. This condition is satisfied when the current date and time is at least the specified number of days after the customTime. Due to a current bug you are unable to set this value to 0 within Terraform. When set to 0 it will be ignored, and your state will treat it as though you supplied no days_since_custom_time condition." @@ -134428,10 +153151,9 @@ resources: condition.matches_prefix: '- (Optional) One or more matching name prefixes to satisfy this condition.' condition.matches_storage_class: '- (Optional) Storage Class of objects to satisfy this condition. Supported values include: STANDARD, MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, ARCHIVE, DURABLE_REDUCED_AVAILABILITY.' condition.matches_suffix: '- (Optional) One or more matching name suffixes to satisfy this condition.' - condition.no_age: '- (Optional, Deprecated) While set true, age value will be omitted from requests. This prevents a default age of 0 from being applied, and if you do not have an age value set, setting this to true is strongly recommended. When unset and other conditions are set to zero values, this can result in a rule that applies your action to all files in the bucket. no_age is deprecated and will be removed in a future major release. Use send_age_if_zero instead.' condition.noncurrent_time_before: '- (Optional) Relevant only for versioned objects. The date in RFC 3339 (e.g. 2017-06-13) when the object became nonconcurrent. Due to a current bug you are unable to set this value to 0 within Terraform. When set to 0 it will be ignored, and your state will treat it as though you supplied no noncurrent_time_before condition.' condition.num_newer_versions: '- (Optional) Relevant only for versioned objects. 
The number of newer versions of an object to satisfy this condition. Due to a current bug you are unable to set this value to 0 within Terraform. When set to 0 it will be ignored and your state will treat it as though you supplied no num_newer_versions condition.' - condition.send_age_if_zero: '- (Optional, Default: true) While set true, age value will be sent in the request even for zero value of the field. This field is only useful and required for setting 0 value to the age field. It can be used alone or together with age attribute. NOTE age attibute with 0 value will be ommitted from the API request if send_age_if_zero field is having false value.' + condition.send_age_if_zero: '- (Optional) While set true, age value will be sent in the request even for zero value of the field. This field is only useful and required for setting 0 value to the age field. It can be used alone or together with age attribute. NOTE age attribute with 0 value will be omitted from the API request if send_age_if_zero field is having false value.' condition.send_days_since_custom_time_if_zero: '- (Optional) While set true, days_since_custom_time value will be sent in the request even for zero value of the field. This field is only useful for setting 0 value to the days_since_custom_time field. It can be used alone or together with days_since_custom_time.' condition.send_days_since_noncurrent_time_if_zero: '- (Optional) While set true, days_since_noncurrent_time value will be sent in the request even for zero value of the field. This field is only useful for setting 0 value to the days_since_noncurrent_time field. It can be used alone or together with days_since_noncurrent_time.' condition.send_num_newer_versions_if_zero: '- (Optional) While set true, num_newer_versions value will be sent in the request even for zero value of the field. This field is only useful for setting 0 value to the num_newer_versions field. It can be used alone or together with num_newer_versions.'
@@ -134455,6 +153177,8 @@ resources: - (Optional, Default: false) When deleting a bucket, this boolean option will delete all contained objects. If you try to delete a bucket that contains objects, Terraform will fail that run. + hierarchical_namespace: '- (Optional, ForceNew) The bucket''s hierarchical namespace policy, which defines the bucket capability to handle folders in logical structure. Structure is documented below. To use this configuration, uniform_bucket_level_access must be enabled on bucket.' + hierarchical_namespace.enabled: '- (Required) Enables hierarchical namespace for the bucket.' labels: '- (Optional) A map of key/value label pairs to assign to the bucket.' lifecycle_rule: '- (Optional) The bucket''s Lifecycle Rules configuration. Multiple blocks of this type are permitted. Structure is documented below.' lifecycle_rule.action: '- (Required) The Lifecycle Rule''s action configuration. A single block of this type is supported. Structure is documented below.' @@ -134792,6 +153516,78 @@ resources: See GCS Object ACL documentation for more details. Omitting the field is the same as providing an empty list. importStatements: [] + google_storage_folder: + subCategory: Cloud Storage + description: A Google Cloud Storage Folder. 
+ name: google_storage_folder + title: "" + examples: + - name: folder + manifest: |- + { + "bucket": "${google_storage_bucket.bucket.name}", + "name": "parent-folder/" + } + references: + bucket: google_storage_bucket.bucket.name + dependencies: + google_storage_bucket.bucket: |- + { + "hierarchical_namespace": [ + { + "enabled": true + } + ], + "location": "EU", + "name": "my-bucket", + "uniform_bucket_level_access": true + } + - name: subfolder + manifest: |- + { + "bucket": "${google_storage_bucket.bucket.name}", + "name": "${google_storage_folder.folder.name}subfolder/" + } + references: + bucket: google_storage_bucket.bucket.name + dependencies: + google_storage_bucket.bucket: |- + { + "hierarchical_namespace": [ + { + "enabled": true + } + ], + "location": "EU", + "name": "my-bucket", + "uniform_bucket_level_access": true + } + argumentDocs: + bucket: |- + - + (Required) + The name of the bucket that contains the folder. + create: '- Default is 20 minutes.' + create_time: |- + - + The timestamp at which this folder was created. + delete: '- Default is 20 minutes.' + force_destroy: '- (Optional) If set to true, items within folder if any will be force destroyed.' + id: '- an identifier for the resource with format {{bucket}}/{{name}}' + metageneration: |- + - + The metadata generation of the folder. + name: |- + - + (Required) + The name of the folder expressed as a path. Must include + trailing '/'. For example, example_dir/example_dir2/, example@#/, a-b/d-f/. + self_link: '- The URI of the created resource.' + update: '- Default is 20 minutes.' + update_time: |- + - + The timestamp at which this folder was most recently updated. + importStatements: [] google_storage_hmac_key: subCategory: Cloud Storage description: The hmacKeys resource represents an HMAC key within Cloud Storage. @@ -135005,7 +153801,7 @@ resources: object_metadata_report_options.storage_filters: |- - (Optional) - A nested object resource + A nested object resource. 
Structure is documented below. start_date.day: |- - @@ -135514,6 +154310,7 @@ resources: gcs_data_sink.path: '- (Optional) Root path to transfer objects. Must be an empty string or full path name that ends with a ''/''. This field is treated as an object prefix. As such, it should generally not begin with a ''/''.' gcs_data_source.bucket_name: '- (Required) Google Cloud Storage bucket name.' gcs_data_source.path: '- (Optional) Root path to transfer objects. Must be an empty string or full path name that ends with a ''/''. This field is treated as an object prefix. As such, it should generally not begin with a ''/''.' + hdfs_data_source.path: '- (Required) Root directory path to the filesystem.' http_data_source.list_url: '- (Required) The URL that points to the file that stores the object list entries. This file must allow public access. Currently, only URLs with HTTP and HTTPS schemes are supported.' last_modification_time: '- When the Transfer Job was last modified.' name: '- (Optional) The name of the Transfer Job. This name must start with "transferJobs/" prefix and end with a letter or a number, and should be no more than 128 characters ( transferJobs/^(?!OPI)[A-Za-z0-9-._~]*[A-Za-z0-9]$ ). For transfers involving PosixFilesystem, this name must start with transferJobs/OPI specifically ( transferJobs/OPI^[A-Za-z0-9-._~]*[A-Za-z0-9]$ ). For all other transfer types, this name must not start with transferJobs/OPI. Default the provider will assign a random unique name with transferJobs/{{name}} format, where name is a numeric value.' @@ -135534,6 +154331,11 @@ resources: project: |- - (Optional) The project in which the resource belongs. If it is not provided, the provider project is used. + replication_spec: '- (Optional) Replication specification. Structure documented below. User should not configure schedule, event_stream with this argument. One of transfer_spec, or replication_spec must be specified.' 
+ replication_spec.gcs_data_sink: '- (Optional) A Google Cloud Storage data sink. Structure documented below.' + replication_spec.gcs_data_source: '- (Optional) A Google Cloud Storage data source. Structure documented below.' + replication_spec.object_conditions: '- (Optional) Only objects that satisfy these object conditions are included in the set of data source and data sink objects. Object conditions based on objects'' last_modification_time do not exclude objects in a data sink. Structure documented below.' + replication_spec.transfer_options: '- (Optional) Characteristics of how to treat files from datasource and sink during job. If the option delete_objects_unique_in_sink is true, object conditions based on objects'' last_modification_time are ignored and do not exclude objects in a data source or a data sink. Structure documented below.' schedule: '- (Optional) Schedule specification defining when the Transfer Job should be scheduled to start, end and what time to run. Structure documented below. Either schedule or event_stream must be set.' schedule.repeat_interval: '- (Optional) Interval between the start of each scheduled transfer. If unspecified, the default value is 24 hours. This value may not be less than 1 hour. A duration in seconds with up to nine fractional digits, terminated by ''s''. Example: "3.5s".' schedule.schedule_end_date: '- (Optional) The last day the recurring transfer will be run. If schedule_end_date is the same as schedule_start_date, the transfer will be executed only once. Structure documented below.' @@ -135553,11 +154355,12 @@ resources: delete_objects_from_source_after_transfer are mutually exclusive. transfer_options.overwrite_objects_already_existing_in_sink: '- (Optional) Whether overwriting objects that already exist in the sink is allowed.' transfer_options.overwrite_when: '- (Optional) When to overwrite objects that already exist in the sink. 
If not set, overwrite behavior is determined by overwrite_objects_already_existing_in_sink. Possible values: ALWAYS, DIFFERENT, NEVER.' - transfer_spec: '- (Required) Transfer specification. Structure documented below.' + transfer_spec: '- (Optional) Transfer specification. Structure documented below. One of transfer_spec, or replication_spec can be specified.' transfer_spec.aws_s3_data_source: '- (Optional) An AWS S3 data source. Structure documented below.' transfer_spec.azure_blob_storage_data_source: '- (Optional) An Azure Blob Storage data source. Structure documented below.' transfer_spec.gcs_data_sink: '- (Optional) A Google Cloud Storage data sink. Structure documented below.' transfer_spec.gcs_data_source: '- (Optional) A Google Cloud Storage data source. Structure documented below.' + transfer_spec.hdfs_data_source: '- (Optional) An HDFS data source. Structure documented below.' transfer_spec.http_data_source: '- (Optional) A HTTP URL data source. Structure documented below.' transfer_spec.object_conditions: '- (Optional) Only objects that satisfy these object conditions are included in the set of data source and data sink objects. Object conditions based on objects'' last_modification_time do not exclude objects in a data sink. Structure documented below.' transfer_spec.posix_data_sink: '- (Optional) A POSIX data sink. Structure documented below.' 
@@ -135577,8 +154380,10 @@ resources: { "location": "us-central1", "parent": "//run.googleapis.com/projects/${data.google_project.project.number}/locations/${google_cloud_run_service.default.location}/services/${google_cloud_run_service.default.name}", - "tag_value": "tagValues/${google_tags_tag_value.value.name}" + "tag_value": "${google_tags_tag_value.value.id}" } + references: + tag_value: google_tags_tag_value.value.id dependencies: google_project.project: |- { @@ -135595,7 +154400,7 @@ resources: google_tags_tag_value.value: |- { "description": "For valuename resources.", - "parent": "tagKeys/${google_tags_tag_key.key.name}", + "parent": "${google_tags_tag_key.key.id}", "short_name": "valuename" } - name: binding @@ -135603,8 +154408,10 @@ resources: { "location": "us-central1-a", "parent": "//compute.googleapis.com/projects/${google_project.project.number}/zones/us-central1-a/instances/${google_compute_instance.instance.instance_id}", - "tag_value": "tagValues/${google_tags_tag_value.value.name}" + "tag_value": "${google_tags_tag_value.value.id}" } + references: + tag_value: google_tags_tag_value.value.id dependencies: google_project.project: |- { @@ -135621,7 +154428,7 @@ resources: google_tags_tag_value.value: |- { "description": "For valuename resources.", - "parent": "tagKeys/${google_tags_tag_key.key.name}", + "parent": "${google_tags_tag_key.key.id}", "short_name": "valuename" } argumentDocs: @@ -135654,11 +154461,14 @@ resources: manifest: |- { "parent": "//cloudresourcemanager.googleapis.com/projects/${google_project.project.number}", - "tag_value": "tagValues/${google_tags_tag_value.value.name}" + "tag_value": "${google_tags_tag_value.value.id}" } + references: + tag_value: google_tags_tag_value.value.id dependencies: google_project.project: |- { + "deletion_policy": "DELETE", "name": "project_id", "org_id": "123456789", "project_id": "project_id" @@ -135672,7 +154482,7 @@ resources: google_tags_tag_value.value: |- { "description": "For valuename 
resources.", - "parent": "tagKeys/${google_tags_tag_key.key.name}", + "parent": "${google_tags_tag_key.key.id}", "short_name": "valuename" } argumentDocs: @@ -135741,7 +154551,7 @@ resources: - (Required) Input only. The user friendly name for a TagKey. The short name should be unique for TagKeys within the same tag namespace. - The short name must be 1-63 characters, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between. + The short name can have a maximum length of 256 characters. The permitted character set for the shortName includes all UTF-8 encoded Unicode characters except single quotes ('), double quotes ("), backslashes (\), and forward slashes (/). update: '- Default is 20 minutes.' update_time: |- - @@ -135790,9 +154600,11 @@ resources: manifest: |- { "description": "For valuename resources.", - "parent": "tagKeys/${google_tags_tag_key.key.name}", + "parent": "${google_tags_tag_key.key.id}", "short_name": "valuename" } + references: + parent: google_tags_tag_key.key.id dependencies: google_tags_tag_key.key: |- { @@ -135826,7 +154638,7 @@ resources: - (Required) Input only. User-assigned short name for TagValue. The short name should be unique for TagValues within the same parent TagKey. - The short name must be 63 characters or less, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between. + The short name can have a maximum length of 256 characters. The permitted character set for the shortName includes all UTF-8 encoded Unicode characters except single quotes ('), double quotes ("), backslashes (\), and forward slashes (/). update: '- Default is 20 minutes.' update_time: |- - @@ -136021,6 +154833,90 @@ resources: (Optional) The GCP location for the TPU. If it is not provided, the provider zone is used. 
importStatements: [] + google_tpu_v2_queued_resource: + subCategory: Cloud TPU v2 + description: A Cloud TPU Queued Resource. + name: google_tpu_v2_queued_resource + title: "" + examples: + - name: qr + manifest: |- + { + "name": "test-qr", + "project": "my-project-name", + "provider": "${google-beta}", + "tpu": [ + { + "node_spec": [ + { + "node": [ + { + "accelerator_type": "v2-8", + "description": "Text description of the TPU.", + "runtime_version": "tpu-vm-tf-2.13.0" + } + ], + "node_id": "test-tpu", + "parent": "projects/my-project-name/locations/us-central1-c" + } + ] + } + ], + "zone": "us-central1-c" + } + references: + provider: google-beta + argumentDocs: + create: '- Default is 20 minutes.' + delete: '- Default is 20 minutes.' + id: '- an identifier for the resource with format projects/{{project}}/locations/{{zone}}/queuedResources/{{name}}' + name: |- + - + (Required) + The immutable name of the Queued Resource. + node.accelerator_type: |- + - + (Optional) + TPU accelerator type for the TPU. If not specified, this defaults to 'v2-8'. + node.description: |- + - + (Optional) + Text description of the TPU. + node.runtime_version: |- + - + (Required) + Runtime version for the TPU. + project: |- + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + tpu: |- + - + (Optional) + Defines a TPU resource. + Structure is documented below. + tpu.node_spec: |- + - + (Optional) + The TPU node(s) being requested. + Structure is documented below. + tpu.node_spec.node: |- + - + (Required) + The node. + Structure is documented below. + tpu.node_spec.node_id: |- + - + (Optional) + Unqualified node identifier used to identify the node in the project once provisioned. + tpu.node_spec.parent: |- + - + (Required) + The parent resource name. + zone: |- + - + (Optional) + The GCP location for the Queued Resource. If it is not provided, the provider zone is used. 
+ importStatements: [] google_tpu_v2_vm: subCategory: Cloud TPU v2 description: A Cloud TPU VM instance. @@ -136069,6 +154965,7 @@ resources: "can_ip_forward": true, "enable_external_ips": true, "network": "${google_compute_network.network.id}", + "queue_count": 32, "subnetwork": "${google_compute_subnetwork.subnet.id}" } ], @@ -136154,8 +155051,7 @@ resources: accelerator_config.type: |- - (Required) - Type of TPU. - Possible values are: V2, V3, V4, V5P. + Type of TPU. Please select one of the allowed types: https://cloud.google.com/tpu/docs/reference/rest/v2/AcceleratorConfig#Type accelerator_type: |- - (Optional) @@ -136246,16 +155142,46 @@ resources: - (Optional) The name of the network for the TPU node. It must be a preexisting Google Compute Engine - network. If both network and subnetwork are specified, the given subnetwork must belong - to the given network. If network is not specified, it will be looked up from the - subnetwork if one is provided, or otherwise use "default". + network. If none is provided, "default" will be used. + network_config.queue_count: |- + - + (Optional) + Specifies networking queue count for TPU VM instance's network interface. network_config.subnetwork: |- - (Optional) The name of the subnetwork for the TPU node. It must be a preexisting Google Compute - Engine subnetwork. If both network and subnetwork are specified, the given subnetwork - must belong to the given network. If subnetwork is not specified, the subnetwork with the - same name as the network will be used. + Engine subnetwork. If none is provided, "default" will be used. + network_configs: |- + - + (Optional) + Repeated network configurations for the TPU node. This field is used to specify multiple + network configs for the TPU node. + Structure is documented below. + network_configs.can_ip_forward: |- + - + (Optional) + Allows the TPU node to send and receive packets with non-matching destination or source + IPs. 
This is required if you plan to use the TPU workers to forward routes. + network_configs.enable_external_ips: |- + - + (Optional) + Indicates that external IP addresses would be associated with the TPU workers. If set to + false, the specified subnetwork or network should have Private Google Access enabled. + network_configs.network: |- + - + (Optional) + The name of the network for the TPU node. It must be a preexisting Google Compute Engine + network. If none is provided, "default" will be used. + network_configs.queue_count: |- + - + (Optional) + Specifies networking queue count for TPU VM instance's network interface. + network_configs.subnetwork: |- + - + (Optional) + The name of the subnetwork for the TPU node. It must be a preexisting Google Compute + Engine subnetwork. If none is provided, "default" will be used. network_endpoints: |- - The network endpoints where TPU workers can be accessed and sent work. It is recommended that @@ -136358,6 +155284,2278 @@ resources: (Optional) The GCP location for the TPU. If it is not provided, the provider zone is used. 
importStatements: [] + google_transcoder_job: + subCategory: Transcoder + description: Transcoding Job Resource + name: google_transcoder_job + title: "" + examples: + - name: default + manifest: |- + { + "labels": { + "label": "key" + }, + "location": "us-central1", + "template_id": "${google_transcoder_job_template.default.name}" + } + references: + template_id: google_transcoder_job_template.default.name + dependencies: + google_storage_bucket.default: |- + { + "force_destroy": true, + "location": "US", + "name": "transcoder-job", + "public_access_prevention": "enforced", + "uniform_bucket_level_access": true + } + google_storage_bucket_object.example_mp4: |- + { + "bucket": "${google_storage_bucket.default.name}", + "name": "example.mp4", + "source": "./test-fixtures/example.mp4" + } + google_transcoder_job_template.default: |- + { + "config": [ + { + "edit_list": [ + { + "inputs": [ + "input0" + ], + "key": "atom0", + "start_time_offset": "0s" + } + ], + "elementary_streams": [ + { + "key": "video-stream0", + "video_stream": [ + { + "h264": [ + { + "bitrate_bps": 550000, + "crf_level": 21, + "entropy_coder": "cabac", + "frame_rate": 60, + "gop_duration": "3s", + "height_pixels": 360, + "pixel_format": "yuv420p", + "preset": "veryfast", + "profile": "high", + "rate_control_mode": "vbr", + "vbv_fullness_bits": 495000, + "vbv_size_bits": 550000, + "width_pixels": 640 + } + ] + } + ] + }, + { + "key": "video-stream1", + "video_stream": [ + { + "h264": [ + { + "bitrate_bps": 550000, + "crf_level": 21, + "entropy_coder": "cabac", + "frame_rate": 60, + "gop_duration": "3s", + "height_pixels": 720, + "pixel_format": "yuv420p", + "preset": "veryfast", + "profile": "high", + "rate_control_mode": "vbr", + "vbv_fullness_bits": 2250000, + "vbv_size_bits": 2500000, + "width_pixels": 1280 + } + ] + } + ] + }, + { + "audio_stream": [ + { + "bitrate_bps": 64000, + "channel_count": 2, + "channel_layout": [ + "fl", + "fr" + ], + "codec": "aac", + "sample_rate_hertz": 48000 + } + 
], + "key": "audio-stream0" + } + ], + "inputs": [ + { + "key": "input0", + "uri": "gs://${google_storage_bucket.default.name}/${google_storage_bucket_object.example_mp4.name}" + } + ], + "mux_streams": [ + { + "container": "mp4", + "elementary_streams": [ + "video-stream0", + "audio-stream0" + ], + "file_name": "sd.mp4", + "key": "sd" + }, + { + "container": "mp4", + "elementary_streams": [ + "video-stream1", + "audio-stream0" + ], + "file_name": "hd.mp4", + "key": "hd" + } + ], + "output": [ + { + "uri": "gs://${google_storage_bucket.default.name}/outputs/" + } + ] + } + ], + "job_template_id": "example-job-template", + "labels": { + "label": "key" + }, + "location": "us-central1" + } + - name: default + manifest: |- + { + "config": [ + { + "ad_breaks": [ + { + "start_time_offset": "3.500s" + } + ], + "edit_list": [ + { + "inputs": [ + "input0" + ], + "key": "atom0", + "start_time_offset": "0s" + } + ], + "elementary_streams": [ + { + "key": "video-stream0", + "video_stream": [ + { + "h264": [ + { + "bitrate_bps": 550000, + "crf_level": 21, + "entropy_coder": "cabac", + "frame_rate": 60, + "gop_duration": "3s", + "height_pixels": 360, + "pixel_format": "yuv420p", + "preset": "veryfast", + "profile": "high", + "rate_control_mode": "vbr", + "vbv_fullness_bits": 495000, + "vbv_size_bits": 550000, + "width_pixels": 640 + } + ] + } + ] + }, + { + "key": "video-stream1", + "video_stream": [ + { + "h264": [ + { + "bitrate_bps": 550000, + "crf_level": 21, + "entropy_coder": "cabac", + "frame_rate": 60, + "gop_duration": "3s", + "height_pixels": 720, + "pixel_format": "yuv420p", + "preset": "veryfast", + "profile": "high", + "rate_control_mode": "vbr", + "vbv_fullness_bits": 2250000, + "vbv_size_bits": 2500000, + "width_pixels": 1280 + } + ] + } + ] + }, + { + "audio_stream": [ + { + "bitrate_bps": 64000, + "channel_count": 2, + "channel_layout": [ + "fl", + "fr" + ], + "codec": "aac", + "sample_rate_hertz": 48000 + } + ], + "key": "audio-stream0" + } + ], + "inputs": [ + 
{ + "key": "input0", + "uri": "gs://${google_storage_bucket.default.name}/${google_storage_bucket_object.example_mp4.name}" + } + ], + "mux_streams": [ + { + "container": "mp4", + "elementary_streams": [ + "video-stream0", + "audio-stream0" + ], + "file_name": "sd.mp4", + "key": "sd" + }, + { + "container": "mp4", + "elementary_streams": [ + "video-stream1", + "audio-stream0" + ], + "file_name": "hd.mp4", + "key": "hd" + } + ], + "output": [ + { + "uri": "gs://${google_storage_bucket.default.name}/outputs/" + } + ], + "pubsub_destination": [ + { + "topic": "${google_pubsub_topic.transcoder_notifications.id}" + } + ] + } + ], + "labels": { + "label": "key" + }, + "location": "us-central1" + } + references: + config.pubsub_destination.topic: google_pubsub_topic.transcoder_notifications.id + dependencies: + google_pubsub_topic.transcoder_notifications: |- + { + "name": "transcoder-notifications" + } + google_storage_bucket.default: |- + { + "force_destroy": true, + "location": "US", + "name": "transcoder-job", + "public_access_prevention": "enforced", + "uniform_bucket_level_access": true + } + google_storage_bucket_object.example_mp4: |- + { + "bucket": "${google_storage_bucket.default.name}", + "name": "example.mp4", + "source": "./test-fixtures/example.mp4" + } + - name: default + manifest: |- + { + "config": [ + { + "elementary_streams": [ + { + "key": "es_video", + "video_stream": [ + { + "h264": [ + { + "bitrate_bps": 1000000, + "frame_rate": 60, + "height_pixels": 600, + "profile": "main", + "width_pixels": 800 + } + ] + } + ] + }, + { + "audio_stream": [ + { + "bitrate_bps": 160000, + "channel_count": 2, + "codec": "aac" + } + ], + "key": "es_audio" + } + ], + "encryptions": [ + { + "aes128": [ + {} + ], + "drm_systems": [ + { + "clearkey": [ + {} + ] + } + ], + "id": "aes-128", + "secret_manager_key_source": [ + { + "secret_version": "${google_secret_manager_secret_version.encryption_key.name}" + } + ] + }, + { + "drm_systems": [ + { + "widevine": [ + {} + ] 
+ } + ], + "id": "cenc", + "mpeg_cenc": [ + { + "scheme": "cenc" + } + ], + "secret_manager_key_source": [ + { + "secret_version": "${google_secret_manager_secret_version.encryption_key.name}" + } + ] + }, + { + "drm_systems": [ + { + "widevine": [ + {} + ] + } + ], + "id": "cbcs", + "mpeg_cenc": [ + { + "scheme": "cbcs" + } + ], + "secret_manager_key_source": [ + { + "secret_version": "${google_secret_manager_secret_version.encryption_key.name}" + } + ] + } + ], + "inputs": [ + { + "key": "input0", + "uri": "gs://${google_storage_bucket.default.name}/${google_storage_bucket_object.example_mp4.name}" + } + ], + "manifests": [ + { + "file_name": "manifest_aes128.m3u8", + "mux_streams": [ + "ts_aes128" + ], + "type": "HLS" + }, + { + "file_name": "manifest_cenc.mpd", + "mux_streams": [ + "fmp4_cenc_video", + "fmp4_cenc_audio" + ], + "type": "DASH" + }, + { + "file_name": "manifest_cbcs.mpd", + "mux_streams": [ + "fmp4_cbcs_video", + "fmp4_cbcs_audio" + ], + "type": "DASH" + } + ], + "mux_streams": [ + { + "container": "ts", + "elementary_streams": [ + "es_video", + "es_audio" + ], + "encryption_id": "aes-128", + "key": "ts_aes128", + "segment_settings": [ + { + "segment_duration": "6s" + } + ] + }, + { + "container": "fmp4", + "elementary_streams": [ + "es_video" + ], + "encryption_id": "cenc", + "key": "fmp4_cenc_video", + "segment_settings": [ + { + "segment_duration": "6s" + } + ] + }, + { + "container": "fmp4", + "elementary_streams": [ + "es_audio" + ], + "encryption_id": "cenc", + "key": "fmp4_cenc_audio", + "segment_settings": [ + { + "segment_duration": "6s" + } + ] + }, + { + "container": "fmp4", + "elementary_streams": [ + "es_video" + ], + "encryption_id": "cbcs", + "key": "fmp4_cbcs_video", + "segment_settings": [ + { + "segment_duration": "6s" + } + ] + }, + { + "container": "fmp4", + "elementary_streams": [ + "es_audio" + ], + "encryption_id": "cbcs", + "key": "fmp4_cbcs_audio", + "segment_settings": [ + { + "segment_duration": "6s" + } + ] + } + ], + 
"output": [ + { + "uri": "gs://${google_storage_bucket.default.name}/outputs/" + } + ] + } + ], + "labels": { + "label": "key" + }, + "location": "us-central1", + "provider": "${google-beta}" + } + references: + config.encryptions.secret_manager_key_source.secret_version: google_secret_manager_secret_version.encryption_key.name + provider: google-beta + dependencies: + google_project_service_identity.transcoder: |- + { + "project": "${data.google_project.project.project_id}", + "provider": "${google-beta}", + "service": "transcoder.googleapis.com" + } + google_secret_manager_secret.encryption_key: |- + { + "provider": "${google-beta}", + "replication": [ + { + "auto": [ + {} + ] + } + ], + "secret_id": "transcoder-encryption-key" + } + google_secret_manager_secret_iam_member.transcoder_encryption_key_accessor: |- + { + "member": "serviceAccount:${google_project_service_identity.transcoder.email}", + "project": "${google_secret_manager_secret.encryption_key.project}", + "provider": "${google-beta}", + "role": "roles/secretmanager.secretAccessor", + "secret_id": "${google_secret_manager_secret.encryption_key.secret_id}" + } + google_secret_manager_secret_version.encryption_key: |- + { + "provider": "${google-beta}", + "secret": "${google_secret_manager_secret.encryption_key.name}", + "secret_data": "4A67F2C1B8E93A4F6D3E7890A1BC23DF" + } + google_storage_bucket.default: |- + { + "force_destroy": true, + "location": "US", + "name": "transcoder-job", + "provider": "${google-beta}", + "public_access_prevention": "enforced", + "uniform_bucket_level_access": true + } + google_storage_bucket_object.example_mp4: |- + { + "bucket": "${google_storage_bucket.default.name}", + "name": "example.mp4", + "provider": "${google-beta}", + "source": "./test-fixtures/example.mp4" + } + - name: default + manifest: |- + { + "config": [ + { + "ad_breaks": [ + { + "start_time_offset": "3.500s" + } + ], + "edit_list": [ + { + "inputs": [ + "input0" + ], + "key": "atom0", + 
"start_time_offset": "0s" + } + ], + "elementary_streams": [ + { + "key": "video-stream0", + "video_stream": [ + { + "h264": [ + { + "bitrate_bps": 550000, + "crf_level": 21, + "entropy_coder": "cabac", + "frame_rate": 60, + "gop_duration": "3s", + "height_pixels": 360, + "pixel_format": "yuv420p", + "preset": "veryfast", + "profile": "high", + "rate_control_mode": "vbr", + "vbv_fullness_bits": 495000, + "vbv_size_bits": 550000, + "width_pixels": 640 + } + ] + } + ] + }, + { + "key": "video-stream1", + "video_stream": [ + { + "h264": [ + { + "bitrate_bps": 550000, + "crf_level": 21, + "entropy_coder": "cabac", + "frame_rate": 60, + "gop_duration": "3s", + "height_pixels": 720, + "pixel_format": "yuv420p", + "preset": "veryfast", + "profile": "high", + "rate_control_mode": "vbr", + "vbv_fullness_bits": 2250000, + "vbv_size_bits": 2500000, + "width_pixels": 1280 + } + ] + } + ] + }, + { + "audio_stream": [ + { + "bitrate_bps": 64000, + "channel_count": 2, + "channel_layout": [ + "fl", + "fr" + ], + "codec": "aac", + "sample_rate_hertz": 48000 + } + ], + "key": "audio-stream0" + } + ], + "inputs": [ + { + "key": "input0", + "uri": "gs://${google_storage_bucket.default.name}/${google_storage_bucket_object.example_mp4.name}" + } + ], + "mux_streams": [ + { + "container": "mp4", + "elementary_streams": [ + "video-stream0", + "audio-stream0" + ], + "file_name": "sd.mp4", + "key": "sd" + }, + { + "container": "mp4", + "elementary_streams": [ + "video-stream1", + "audio-stream0" + ], + "file_name": "hd.mp4", + "key": "hd" + } + ], + "output": [ + { + "uri": "gs://${google_storage_bucket.default.name}/outputs/" + } + ], + "overlays": [ + { + "animations": [ + { + "animation_fade": [ + { + "end_time_offset": "3.500s", + "fade_type": "FADE_IN", + "start_time_offset": "1.500s", + "xy": [ + { + "x": 1, + "y": 0.5 + } + ] + } + ] + } + ], + "image": [ + { + "uri": "gs://${google_storage_bucket.default.name}/${google_storage_bucket_object.overlay_png.name}" + } + ] + } + ] + } + 
], + "labels": { + "label": "key" + }, + "location": "us-central1" + } + dependencies: + google_storage_bucket.default: |- + { + "force_destroy": true, + "location": "US", + "name": "transcoder-job", + "public_access_prevention": "enforced", + "uniform_bucket_level_access": true + } + google_storage_bucket_object.example_mp4: |- + { + "bucket": "${google_storage_bucket.default.name}", + "name": "example.mp4", + "source": "./test-fixtures/example.mp4" + } + google_storage_bucket_object.overlay_png: |- + { + "bucket": "${google_storage_bucket.default.name}", + "name": "overlay.png", + "source": "./test-fixtures/overlay.png" + } + - name: default + manifest: |- + { + "config": [ + { + "ad_breaks": [ + { + "start_time_offset": "3.500s" + } + ], + "edit_list": [ + { + "inputs": [ + "input0" + ], + "key": "atom0", + "start_time_offset": "0s" + } + ], + "elementary_streams": [ + { + "key": "video-stream0", + "video_stream": [ + { + "h264": [ + { + "bitrate_bps": 550000, + "crf_level": 21, + "entropy_coder": "cabac", + "frame_rate": 60, + "gop_duration": "3s", + "height_pixels": 360, + "pixel_format": "yuv420p", + "preset": "veryfast", + "profile": "high", + "rate_control_mode": "vbr", + "vbv_fullness_bits": 495000, + "vbv_size_bits": 550000, + "width_pixels": 640 + } + ] + } + ] + }, + { + "key": "video-stream1", + "video_stream": [ + { + "h264": [ + { + "bitrate_bps": 550000, + "crf_level": 21, + "entropy_coder": "cabac", + "frame_rate": 60, + "gop_duration": "3s", + "height_pixels": 720, + "pixel_format": "yuv420p", + "preset": "veryfast", + "profile": "high", + "rate_control_mode": "vbr", + "vbv_fullness_bits": 2250000, + "vbv_size_bits": 2500000, + "width_pixels": 1280 + } + ] + } + ] + }, + { + "audio_stream": [ + { + "bitrate_bps": 64000, + "channel_count": 2, + "channel_layout": [ + "fl", + "fr" + ], + "codec": "aac", + "sample_rate_hertz": 48000 + } + ], + "key": "audio-stream0" + } + ], + "inputs": [ + { + "key": "input0", + "uri": 
"gs://${google_storage_bucket.default.name}/${google_storage_bucket_object.example_mp4.name}" + } + ], + "manifests": [ + { + "file_name": "manifest.m3u8", + "mux_streams": [ + "media-sd", + "media-hd" + ], + "type": "HLS" + }, + { + "file_name": "manifest.mpd", + "mux_streams": [ + "video-only-sd", + "video-only-hd", + "audio-only" + ], + "type": "DASH" + } + ], + "mux_streams": [ + { + "container": "mp4", + "elementary_streams": [ + "video-stream0", + "audio-stream0" + ], + "file_name": "sd.mp4", + "key": "sd" + }, + { + "container": "mp4", + "elementary_streams": [ + "video-stream1", + "audio-stream0" + ], + "file_name": "hd.mp4", + "key": "hd" + }, + { + "container": "ts", + "elementary_streams": [ + "video-stream0", + "audio-stream0" + ], + "file_name": "media-sd.ts", + "key": "media-sd" + }, + { + "container": "ts", + "elementary_streams": [ + "video-stream1", + "audio-stream0" + ], + "file_name": "media-hd.ts", + "key": "media-hd" + }, + { + "container": "fmp4", + "elementary_streams": [ + "video-stream0" + ], + "file_name": "video-only-sd.m4s", + "key": "video-only-sd" + }, + { + "container": "fmp4", + "elementary_streams": [ + "video-stream1" + ], + "file_name": "video-only-hd.m4s", + "key": "video-only-hd" + }, + { + "container": "fmp4", + "elementary_streams": [ + "audio-stream0" + ], + "file_name": "audio-only.m4s", + "key": "audio-only" + } + ], + "output": [ + { + "uri": "gs://${google_storage_bucket.default.name}/outputs/" + } + ] + } + ], + "labels": { + "label": "key" + }, + "location": "us-central1" + } + dependencies: + google_storage_bucket.default: |- + { + "force_destroy": true, + "location": "US", + "name": "transcoder-job", + "public_access_prevention": "enforced", + "uniform_bucket_level_access": true + } + google_storage_bucket_object.example_mp4: |- + { + "bucket": "${google_storage_bucket.default.name}", + "name": "example.mp4", + "source": "./test-fixtures/example.mp4" + } + argumentDocs: + FADE_IN: ': Fade the overlay object into 
view.' + FADE_OUT: |- + : Fade the overlay object out of view. + Possible values are: FADE_TYPE_UNSPECIFIED, FADE_IN, FADE_OUT. + FADE_TYPE_UNSPECIFIED: ': The fade type is not specified.' + ad_breaks.start_time_offset: |- + - + (Optional) + Start time in seconds for the ad break, relative to the output file timeline + animations.animation_fade: |- + - + (Optional) + Display overlay object with fade animation. + Structure is documented below. + animations.animation_fade.end_time_offset: |- + - + (Optional) + The time to end the fade animation, in seconds. + animations.animation_fade.fade_type: |- + - + (Required) + Required. Type of fade animation: FADE_IN or FADE_OUT. + The possible values are: + animations.animation_fade.start_time_offset: |- + - + (Optional) + The time to start the fade animation, in seconds. + animations.animation_fade.xy: |- + - + (Optional) + Normalized coordinates based on output video resolution. + Structure is documented below. + audio_stream.bitrate_bps: |- + - + (Required) + Audio bitrate in bits per second. + audio_stream.channel_count: |- + - + (Optional) + Number of audio channels. The default is 2. + audio_stream.channel_layout: |- + - + (Optional) + A list of channel names specifying layout of the audio channels. The default is ["fl", "fr"]. + audio_stream.codec: |- + - + (Optional) + The codec for this audio stream. The default is aac. + audio_stream.sample_rate_hertz: |- + - + (Optional) + The audio sample rate in Hertz. The default is 48000. + config: |- + - + (Optional) + The configuration for this template. + Structure is documented below. + config.ad_breaks: |- + - + (Optional) + Ad break. + Structure is documented below. + config.edit_list: |- + - + (Optional) + List of input assets stored in Cloud Storage. + Structure is documented below. + config.elementary_streams: |- + - + (Optional) + List of input assets stored in Cloud Storage. + Structure is documented below. 
+ config.encryptions: |- + - + (Optional) + List of encryption configurations for the content. + Structure is documented below. + config.inputs: |- + - + (Optional) + List of input assets stored in Cloud Storage. + Structure is documented below. + config.manifests: |- + - + (Optional) + Manifest configuration. + Structure is documented below. + config.mux_streams: |- + - + (Optional) + Multiplexing settings for output stream. + Structure is documented below. + config.output: |- + - + (Optional) + Location of output file(s) in a Cloud Storage bucket. + Structure is documented below. + config.overlays: |- + - + (Optional) + List of overlays on the output video, in descending Z-order. + Structure is documented below. + config.pubsub_destination: |- + - + (Optional) + Pub/Sub destination. + Structure is documented below. + create: '- Default is 20 minutes.' + create_time: |- + - + The time the job was created. + delete: '- Default is 20 minutes.' + drm_systems.clearkey: |- + - + (Optional) + Clearkey configuration. + drm_systems.fairplay: |- + - + (Optional) + Fairplay configuration. + drm_systems.playready: |- + - + (Optional) + Playready configuration. + drm_systems.widevine: |- + - + (Optional) + Widevine configuration. + edit_list.inputs: |- + - + (Optional) + List of values identifying files that should be used in this atom. + edit_list.key: |- + - + (Optional) + A unique key for this atom. + edit_list.start_time_offset: |- + - + (Optional) + Start time in seconds for the atom, relative to the input file timeline. The default is 0s. + effective_labels: for all of the labels present on the resource. + elementary_streams.audio_stream: |- + - + (Optional) + Encoding of an audio stream. + Structure is documented below. + elementary_streams.key: |- + - + (Optional) + A unique key for this atom. + elementary_streams.video_stream: |- + - + (Optional) + Encoding of a video stream. + Structure is documented below. 
+ encryptions.aes128: |- + - + (Optional) + Configuration for AES-128 encryption. + encryptions.drm_systems: |- + - + (Optional) + DRM system(s) to use; at least one must be specified. If a DRM system is omitted, it is considered disabled. + Structure is documented below. + encryptions.id: |- + - + (Required) + Identifier for this set of encryption options. + encryptions.mpeg_cenc: |- + - + (Optional) + Configuration for MPEG Common Encryption (MPEG-CENC). + Structure is documented below. + encryptions.sample_aes: |- + - + (Optional) + Configuration for SAMPLE-AES encryption. + encryptions.secret_manager_key_source: |- + - + (Optional) + Configuration for secrets stored in Google Secret Manager. + Structure is documented below. + end_time: |- + - + The time the transcoding finished. + id: '- an identifier for the resource with format {{name}}' + image.uri: |- + - + (Required) + URI of the image in Cloud Storage. For example, gs://bucket/inputs/image.png. + inputs.key: |- + - + (Optional) + A unique key for this input. Must be specified when using advanced mapping and edit lists. + inputs.uri: |- + - + (Optional) + URI of the media. Input files must be at least 5 seconds in duration and stored in Cloud Storage (for example, gs://bucket/inputs/file.mp4). + If empty, the value is populated from Job.input_uri. + labels: |- + - + (Optional) + The labels associated with this job. You can use these to organize and group your jobs. + location: |- + - + (Required) + The location of the transcoding job resource. + manifests.file_name: |- + - + (Optional) + The name of the generated file. The default is manifest. + manifests.mux_streams: |- + - + (Optional) + List of user supplied MuxStream.key values that should appear in this manifest. + manifests.type: |- + - + (Required) + Type of the manifest. + Possible values are: MANIFEST_TYPE_UNSPECIFIED, HLS, DASH. + mpeg_cenc.scheme: |- + - + (Required) + Specify the encryption scheme. 
+ mux_streams.container: |- + - + (Optional) + The container format. The default is mp4. + mux_streams.elementary_streams: |- + - + (Optional) + List of ElementaryStream.key values multiplexed in this stream. + mux_streams.encryption_id: |- + - + (Optional) + Identifier of the encryption configuration to use. + mux_streams.file_name: |- + - + (Optional) + The name of the generated file. + mux_streams.key: |- + - + (Optional) + A unique key for this multiplexed stream. + mux_streams.segment_settings: |- + - + (Optional) + Segment settings for ts, fmp4 and vtt. + Structure is documented below. + name: |- + - + The resource name of the job. + output.uri: |- + - + (Optional) + URI for the output file(s). For example, gs://my-bucket/outputs/. + overlays.animations: |- + - + (Optional) + List of animations. The list should be chronological, without any time overlap. + Structure is documented below. + overlays.image: |- + - + (Optional) + Image overlay. + Structure is documented below. + project: |- + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + pubsub_destination.topic: |- + - + (Optional) + The name of the Pub/Sub topic to publish job completion notification to. For example: projects/{project}/topics/{topic}. + secret_manager_key_source.secret_version: |- + - + (Required) + The name of the Secret Version containing the encryption key in the following format: projects/{project}/secrets/{secret_id}/versions/{version_number}. + segment_settings.segment_duration: |- + - + (Optional) + Duration of the segments in seconds. The default is 6.0s. + start_time: |- + - + The time the transcoding started. + state: |- + - + The current state of the job. + template_id: |- + - + (Optional) + Specify the templateId to use for populating Job.config. + The default is preset/web-hd, which is the only supported preset. 
+ terraform_labels: |- + - + The combination of labels configured directly on the resource + and default labels configured on the provider. + update: '- Default is 20 minutes.' + video_stream.h264: |- + - + (Optional) + H264 codec settings + Structure is documented below. + video_stream.h264.bitrate_bps: |- + - + (Required) + The video bitrate in bits per second. + video_stream.h264.crf_level: |- + - + (Optional) + Target CRF level. The default is 21. + video_stream.h264.entropy_coder: |- + - + (Optional) + The entropy coder to use. The default is cabac. + video_stream.h264.frame_rate: |- + - + (Required) + The target video frame rate in frames per second (FPS). + video_stream.h264.gop_duration: |- + - + (Optional) + Select the GOP size based on the specified duration. The default is 3s. + video_stream.h264.height_pixels: |- + - + (Optional) + The height of the video in pixels. + video_stream.h264.hlg: |- + - + (Optional) + HLG color format setting for H264. + video_stream.h264.pixel_format: |- + - + (Optional) + Pixel format to use. The default is yuv420p. + video_stream.h264.preset: |- + - + (Optional) + Enforces the specified codec preset. The default is veryfast. + video_stream.h264.profile: |- + - + (Optional) + Enforces the specified codec profile. + video_stream.h264.rate_control_mode: |- + - + (Optional) + Specify the mode. The default is vbr. + video_stream.h264.sdr: |- + - + (Optional) + SDR color format setting for H264. + video_stream.h264.vbv_fullness_bits: |- + - + (Optional) + Initial fullness of the Video Buffering Verifier (VBV) buffer in bits. + video_stream.h264.vbv_size_bits: |- + - + (Optional) + Size of the Video Buffering Verifier (VBV) buffer in bits. + video_stream.h264.width_pixels: |- + - + (Optional) + The width of the video in pixels. + xy.x: |- + - + (Optional) + Normalized x coordinate. + xy.y: |- + - + (Optional) + Normalized y coordinate. 
+ importStatements: [] + google_transcoder_job_template: + subCategory: Transcoder + description: Transcoding Job Template Resource + name: google_transcoder_job_template + title: "" + examples: + - name: default + manifest: |- + { + "config": [ + { + "ad_breaks": [ + { + "start_time_offset": "3.500s" + } + ], + "edit_list": [ + { + "inputs": [ + "input0" + ], + "key": "atom0", + "start_time_offset": "0s" + } + ], + "elementary_streams": [ + { + "key": "video-stream0", + "video_stream": [ + { + "h264": [ + { + "bitrate_bps": 550000, + "crf_level": 21, + "entropy_coder": "cabac", + "frame_rate": 60, + "gop_duration": "3s", + "height_pixels": 360, + "pixel_format": "yuv420p", + "preset": "veryfast", + "profile": "high", + "rate_control_mode": "vbr", + "vbv_fullness_bits": 495000, + "vbv_size_bits": 550000, + "width_pixels": 640 + } + ] + } + ] + }, + { + "key": "video-stream1", + "video_stream": [ + { + "h264": [ + { + "bitrate_bps": 550000, + "crf_level": 21, + "entropy_coder": "cabac", + "frame_rate": 60, + "gop_duration": "3s", + "height_pixels": 720, + "pixel_format": "yuv420p", + "preset": "veryfast", + "profile": "high", + "rate_control_mode": "vbr", + "vbv_fullness_bits": 2250000, + "vbv_size_bits": 2500000, + "width_pixels": 1280 + } + ] + } + ] + }, + { + "audio_stream": [ + { + "bitrate_bps": 64000, + "channel_count": 2, + "channel_layout": [ + "fl", + "fr" + ], + "codec": "aac", + "sample_rate_hertz": 48000 + } + ], + "key": "audio-stream0" + } + ], + "inputs": [ + { + "key": "input0" + } + ], + "mux_streams": [ + { + "container": "mp4", + "elementary_streams": [ + "video-stream0", + "audio-stream0" + ], + "file_name": "sd.mp4", + "key": "sd" + }, + { + "container": "mp4", + "elementary_streams": [ + "video-stream1", + "audio-stream0" + ], + "file_name": "hd.mp4", + "key": "hd" + } + ] + } + ], + "job_template_id": "example-job-template", + "labels": { + "label": "key" + }, + "location": "us-central1" + } + - name: default + manifest: |- + { + "config": [ 
+ { + "ad_breaks": [ + { + "start_time_offset": "3.500s" + } + ], + "edit_list": [ + { + "inputs": [ + "input0" + ], + "key": "atom0", + "start_time_offset": "0s" + } + ], + "elementary_streams": [ + { + "key": "video-stream0", + "video_stream": [ + { + "h264": [ + { + "bitrate_bps": 550000, + "crf_level": 21, + "entropy_coder": "cabac", + "frame_rate": 60, + "gop_duration": "3s", + "height_pixels": 360, + "pixel_format": "yuv420p", + "preset": "veryfast", + "profile": "high", + "rate_control_mode": "vbr", + "vbv_fullness_bits": 495000, + "vbv_size_bits": 550000, + "width_pixels": 640 + } + ] + } + ] + }, + { + "key": "video-stream1", + "video_stream": [ + { + "h264": [ + { + "bitrate_bps": 550000, + "crf_level": 21, + "entropy_coder": "cabac", + "frame_rate": 60, + "gop_duration": "3s", + "height_pixels": 720, + "pixel_format": "yuv420p", + "preset": "veryfast", + "profile": "high", + "rate_control_mode": "vbr", + "vbv_fullness_bits": 2250000, + "vbv_size_bits": 2500000, + "width_pixels": 1280 + } + ] + } + ] + }, + { + "audio_stream": [ + { + "bitrate_bps": 64000, + "channel_count": 2, + "channel_layout": [ + "fl", + "fr" + ], + "codec": "aac", + "sample_rate_hertz": 48000 + } + ], + "key": "audio-stream0" + } + ], + "inputs": [ + { + "key": "input0", + "uri": "gs://example/example.mp4" + } + ], + "mux_streams": [ + { + "container": "mp4", + "elementary_streams": [ + "video-stream0", + "audio-stream0" + ], + "file_name": "sd.mp4", + "key": "sd" + }, + { + "container": "mp4", + "elementary_streams": [ + "video-stream1", + "audio-stream0" + ], + "file_name": "hd.mp4", + "key": "hd" + } + ], + "output": [ + { + "uri": "gs://example/outputs/" + } + ], + "overlays": [ + { + "animations": [ + { + "animation_fade": [ + { + "end_time_offset": "3.500s", + "fade_type": "FADE_IN", + "start_time_offset": "1.500s", + "xy": [ + { + "x": 1, + "y": 0.5 + } + ] + } + ] + } + ], + "image": [ + { + "uri": "gs://example/overlay.png" + } + ] + } + ] + } + ], + "job_template_id": 
"example-job-template", + "labels": { + "label": "key" + }, + "location": "us-central1" + } + - name: default + manifest: |- + { + "config": [ + { + "elementary_streams": [ + { + "key": "es_video", + "video_stream": [ + { + "h264": [ + { + "bitrate_bps": 1000000, + "frame_rate": 60, + "height_pixels": 600, + "profile": "main", + "width_pixels": 800 + } + ] + } + ] + }, + { + "audio_stream": [ + { + "bitrate_bps": 160000, + "channel_count": 2, + "codec": "aac" + } + ], + "key": "es_audio" + } + ], + "encryptions": [ + { + "aes128": [ + {} + ], + "drm_systems": [ + { + "clearkey": [ + {} + ] + } + ], + "id": "aes-128", + "secret_manager_key_source": [ + { + "secret_version": "${google_secret_manager_secret_version.encryption_key.name}" + } + ] + }, + { + "drm_systems": [ + { + "widevine": [ + {} + ] + } + ], + "id": "cenc", + "mpeg_cenc": [ + { + "scheme": "cenc" + } + ], + "secret_manager_key_source": [ + { + "secret_version": "${google_secret_manager_secret_version.encryption_key.name}" + } + ] + }, + { + "drm_systems": [ + { + "widevine": [ + {} + ] + } + ], + "id": "cbcs", + "mpeg_cenc": [ + { + "scheme": "cbcs" + } + ], + "secret_manager_key_source": [ + { + "secret_version": "${google_secret_manager_secret_version.encryption_key.name}" + } + ] + } + ], + "manifests": [ + { + "file_name": "manifest_aes128.m3u8", + "mux_streams": [ + "ts_aes128" + ], + "type": "HLS" + }, + { + "file_name": "manifest_cenc.mpd", + "mux_streams": [ + "fmp4_cenc_video", + "fmp4_cenc_audio" + ], + "type": "DASH" + }, + { + "file_name": "manifest_cbcs.mpd", + "mux_streams": [ + "fmp4_cbcs_video", + "fmp4_cbcs_audio" + ], + "type": "DASH" + } + ], + "mux_streams": [ + { + "container": "ts", + "elementary_streams": [ + "es_video", + "es_audio" + ], + "encryption_id": "aes-128", + "key": "ts_aes128", + "segment_settings": [ + { + "segment_duration": "6s" + } + ] + }, + { + "container": "fmp4", + "elementary_streams": [ + "es_video" + ], + "encryption_id": "cenc", + "key": 
"fmp4_cenc_video", + "segment_settings": [ + { + "segment_duration": "6s" + } + ] + }, + { + "container": "fmp4", + "elementary_streams": [ + "es_audio" + ], + "encryption_id": "cenc", + "key": "fmp4_cenc_audio", + "segment_settings": [ + { + "segment_duration": "6s" + } + ] + }, + { + "container": "fmp4", + "elementary_streams": [ + "es_video" + ], + "encryption_id": "cbcs", + "key": "fmp4_cbcs_video", + "segment_settings": [ + { + "segment_duration": "6s" + } + ] + }, + { + "container": "fmp4", + "elementary_streams": [ + "es_audio" + ], + "encryption_id": "cbcs", + "key": "fmp4_cbcs_audio", + "segment_settings": [ + { + "segment_duration": "6s" + } + ] + } + ] + } + ], + "job_template_id": "example-job-template", + "labels": { + "label": "key" + }, + "location": "us-central1" + } + references: + config.encryptions.secret_manager_key_source.secret_version: google_secret_manager_secret_version.encryption_key.name + dependencies: + google_secret_manager_secret.encryption_key: |- + { + "replication": [ + { + "auto": [ + {} + ] + } + ], + "secret_id": "transcoder-encryption-key" + } + google_secret_manager_secret_version.encryption_key: |- + { + "secret": "${google_secret_manager_secret.encryption_key.name}", + "secret_data": "4A67F2C1B8E93A4F6D3E7890A1BC23DF" + } + - name: default + manifest: |- + { + "config": [ + { + "ad_breaks": [ + { + "start_time_offset": "3.500s" + } + ], + "edit_list": [ + { + "inputs": [ + "input0" + ], + "key": "atom0", + "start_time_offset": "0s" + } + ], + "elementary_streams": [ + { + "key": "video-stream0", + "video_stream": [ + { + "h264": [ + { + "bitrate_bps": 550000, + "crf_level": 21, + "entropy_coder": "cabac", + "frame_rate": 60, + "gop_duration": "3s", + "height_pixels": 360, + "pixel_format": "yuv420p", + "preset": "veryfast", + "profile": "high", + "rate_control_mode": "vbr", + "vbv_fullness_bits": 495000, + "vbv_size_bits": 550000, + "width_pixels": 640 + } + ] + } + ] + }, + { + "key": "video-stream1", + "video_stream": [ + 
{ + "h264": [ + { + "bitrate_bps": 550000, + "crf_level": 21, + "entropy_coder": "cabac", + "frame_rate": 60, + "gop_duration": "3s", + "height_pixels": 720, + "pixel_format": "yuv420p", + "preset": "veryfast", + "profile": "high", + "rate_control_mode": "vbr", + "vbv_fullness_bits": 2250000, + "vbv_size_bits": 2500000, + "width_pixels": 1280 + } + ] + } + ] + }, + { + "audio_stream": [ + { + "bitrate_bps": 64000, + "channel_count": 2, + "channel_layout": [ + "fl", + "fr" + ], + "codec": "aac", + "sample_rate_hertz": 48000 + } + ], + "key": "audio-stream0" + } + ], + "inputs": [ + { + "key": "input0", + "uri": "gs://example/example.mp4" + } + ], + "mux_streams": [ + { + "container": "mp4", + "elementary_streams": [ + "video-stream0", + "audio-stream0" + ], + "file_name": "sd.mp4", + "key": "sd" + }, + { + "container": "mp4", + "elementary_streams": [ + "video-stream1", + "audio-stream0" + ], + "file_name": "hd.mp4", + "key": "hd" + } + ], + "output": [ + { + "uri": "gs://example/outputs/" + } + ], + "pubsub_destination": [ + { + "topic": "${google_pubsub_topic.transcoder_notifications.id}" + } + ] + } + ], + "job_template_id": "example-job-template", + "labels": { + "label": "key" + }, + "location": "us-central1" + } + references: + config.pubsub_destination.topic: google_pubsub_topic.transcoder_notifications.id + dependencies: + google_pubsub_topic.transcoder_notifications: |- + { + "name": "transcoder-notifications" + } + argumentDocs: + FADE_IN: ': Fade the overlay object into view.' + FADE_OUT: |- + : Fade the overlay object out of view. + Possible values are: FADE_TYPE_UNSPECIFIED, FADE_IN, FADE_OUT. + FADE_TYPE_UNSPECIFIED: ': The fade type is not specified.' + ad_breaks.start_time_offset: |- + - + (Optional) + Start time in seconds for the ad break, relative to the output file timeline + animations.animation_fade: |- + - + (Optional) + Display overlay object with fade animation. + Structure is documented below. 
+ animations.animation_fade.end_time_offset: |- + - + (Optional) + The time to end the fade animation, in seconds. + animations.animation_fade.fade_type: |- + - + (Required) + Required. Type of fade animation: FADE_IN or FADE_OUT. + The possible values are: + animations.animation_fade.start_time_offset: |- + - + (Optional) + The time to start the fade animation, in seconds. + animations.animation_fade.xy: |- + - + (Optional) + Normalized coordinates based on output video resolution. + Structure is documented below. + audio_stream.bitrate_bps: |- + - + (Required) + Audio bitrate in bits per second. + audio_stream.channel_count: |- + - + (Optional) + Number of audio channels. The default is 2. + audio_stream.channel_layout: |- + - + (Optional) + A list of channel names specifying layout of the audio channels. The default is ["fl", "fr"]. + audio_stream.codec: |- + - + (Optional) + The codec for this audio stream. The default is aac. + audio_stream.sample_rate_hertz: |- + - + (Optional) + The audio sample rate in Hertz. The default is 48000. + config: |- + - + (Optional) + The configuration for this template. + Structure is documented below. + config.ad_breaks: |- + - + (Optional) + Ad break. + Structure is documented below. + config.edit_list: |- + - + (Optional) + List of input assets stored in Cloud Storage. + Structure is documented below. + config.elementary_streams: |- + - + (Optional) + List of input assets stored in Cloud Storage. + Structure is documented below. + config.encryptions: |- + - + (Optional) + List of encryption configurations for the content. + Structure is documented below. + config.inputs: |- + - + (Optional) + List of input assets stored in Cloud Storage. + Structure is documented below. + config.manifests: |- + - + (Optional) + Manifest configuration. + Structure is documented below. + config.mux_streams: |- + - + (Optional) + Multiplexing settings for output stream. + Structure is documented below. 
+ config.output: |- + - + (Optional) + Location of output file(s) in a Cloud Storage bucket. + Structure is documented below. + config.overlays: |- + - + (Optional) + List of overlays on the output video, in descending Z-order. + Structure is documented below. + config.pubsub_destination: |- + - + (Optional) + Pub/Sub destination. + Structure is documented below. + create: '- Default is 20 minutes.' + delete: '- Default is 20 minutes.' + drm_systems.clearkey: |- + - + (Optional) + Clearkey configuration. + drm_systems.fairplay: |- + - + (Optional) + Fairplay configuration. + drm_systems.playready: |- + - + (Optional) + Playready configuration. + drm_systems.widevine: |- + - + (Optional) + Widevine configuration. + edit_list.inputs: |- + - + (Optional) + List of values identifying files that should be used in this atom. + edit_list.key: |- + - + (Optional) + A unique key for this atom. + edit_list.start_time_offset: |- + - + (Optional) + Start time in seconds for the atom, relative to the input file timeline. The default is 0s. + effective_labels: for all of the labels present on the resource. + elementary_streams.audio_stream: |- + - + (Optional) + Encoding of an audio stream. + Structure is documented below. + elementary_streams.key: |- + - + (Optional) + A unique key for this atom. + elementary_streams.video_stream: |- + - + (Optional) + Encoding of a video stream. + Structure is documented below. + encryptions.aes128: |- + - + (Optional) + Configuration for AES-128 encryption. + encryptions.drm_systems: |- + - + (Optional) + DRM system(s) to use; at least one must be specified. If a DRM system is omitted, it is considered disabled. + Structure is documented below. + encryptions.id: |- + - + (Required) + Identifier for this set of encryption options. + encryptions.mpeg_cenc: |- + - + (Optional) + Configuration for MPEG Common Encryption (MPEG-CENC). + Structure is documented below. 
+ encryptions.sample_aes: |- + - + (Optional) + Configuration for SAMPLE-AES encryption. + encryptions.secret_manager_key_source: |- + - + (Optional) + Configuration for secrets stored in Google Secret Manager. + Structure is documented below. + id: '- an identifier for the resource with format projects/{{project}}/locations/{{location}}/jobTemplates/{{job_template_id}}' + image.uri: |- + - + (Required) + URI of the image in Cloud Storage. For example, gs://bucket/inputs/image.png. + inputs.key: |- + - + (Optional) + A unique key for this input. Must be specified when using advanced mapping and edit lists. + inputs.uri: |- + - + (Optional) + URI of the media. Input files must be at least 5 seconds in duration and stored in Cloud Storage (for example, gs://bucket/inputs/file.mp4). + If empty, the value is populated from Job.input_uri. + job_template_id: |- + - + (Required) + ID to use for the Transcoding job template. + labels: |- + - + (Optional) + The labels associated with this job template. You can use these to organize and group your job templates. + location: |- + - + (Required) + The location of the transcoding job template resource. + manifests.file_name: |- + - + (Optional) + The name of the generated file. The default is manifest. + manifests.mux_streams: |- + - + (Optional) + List of user supplied MuxStream.key values that should appear in this manifest. + manifests.type: |- + - + (Required) + Type of the manifest. + Possible values are: MANIFEST_TYPE_UNSPECIFIED, HLS, DASH. + mpeg_cenc.scheme: |- + - + (Required) + Specify the encryption scheme. + mux_streams.container: |- + - + (Optional) + The container format. The default is mp4. + mux_streams.elementary_streams: |- + - + (Optional) + List of ElementaryStream.key values multiplexed in this stream. + mux_streams.encryption_id: |- + - + (Optional) + Identifier of the encryption configuration to use. + mux_streams.file_name: |- + - + (Optional) + The name of the generated file. 
+ mux_streams.key: |- + - + (Optional) + A unique key for this multiplexed stream. + mux_streams.segment_settings: |- + - + (Optional) + Segment settings for ts, fmp4 and vtt. + Structure is documented below. + name: |- + - + The resource name of the job template. + output.uri: |- + - + (Optional) + URI for the output file(s). For example, gs://my-bucket/outputs/. + overlays.animations: |- + - + (Optional) + List of animations. The list should be chronological, without any time overlap. + Structure is documented below. + overlays.image: |- + - + (Optional) + Image overlay. + Structure is documented below. + project: |- + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + pubsub_destination.topic: |- + - + (Optional) + The name of the Pub/Sub topic to publish job completion notification to. For example: projects/{project}/topics/{topic}. + secret_manager_key_source.secret_version: |- + - + (Required) + The name of the Secret Version containing the encryption key in the following format: projects/{project}/secrets/{secret_id}/versions/{version_number}. + segment_settings.segment_duration: |- + - + (Optional) + Duration of the segments in seconds. The default is 6.0s. + terraform_labels: |- + - + The combination of labels configured directly on the resource + and default labels configured on the provider. + update: '- Default is 20 minutes.' + video_stream.h264: |- + - + (Optional) + H264 codec settings + Structure is documented below. + video_stream.h264.bitrate_bps: |- + - + (Required) + The video bitrate in bits per second. + video_stream.h264.crf_level: |- + - + (Optional) + Target CRF level. The default is 21. + video_stream.h264.entropy_coder: |- + - + (Optional) + The entropy coder to use. The default is cabac. + video_stream.h264.frame_rate: |- + - + (Required) + The target video frame rate in frames per second (FPS). 
+ video_stream.h264.gop_duration: |- + - + (Optional) + Select the GOP size based on the specified duration. The default is 3s. + video_stream.h264.height_pixels: |- + - + (Optional) + The height of the video in pixels. + video_stream.h264.hlg: |- + - + (Optional) + HLG color format setting for H264. + video_stream.h264.pixel_format: |- + - + (Optional) + Pixel format to use. The default is yuv420p. + video_stream.h264.preset: |- + - + (Optional) + Enforces the specified codec preset. The default is veryfast. + video_stream.h264.profile: |- + - + (Optional) + Enforces the specified codec profile. + video_stream.h264.rate_control_mode: |- + - + (Optional) + Specify the mode. The default is vbr. + video_stream.h264.sdr: |- + - + (Optional) + SDR color format setting for H264. + video_stream.h264.vbv_fullness_bits: |- + - + (Optional) + Initial fullness of the Video Buffering Verifier (VBV) buffer in bits. + video_stream.h264.vbv_size_bits: |- + - + (Optional) + Size of the Video Buffering Verifier (VBV) buffer in bits. + video_stream.h264.width_pixels: |- + - + (Optional) + The width of the video in pixels. + xy.x: |- + - + (Optional) + Normalized x coordinate. + xy.y: |- + - + (Optional) + Normalized y coordinate. + importStatements: [] google_vertex_ai_dataset: subCategory: Vertex AI description: A collection of DataItems and Annotations on them. 
@@ -136443,7 +157641,7 @@ resources: "machine_spec": [ { "accelerator_count": 1, - "accelerator_type": "NVIDIA_TESLA_K80", + "accelerator_type": "NVIDIA_TESLA_P4", "machine_type": "n1-standard-4" } ], @@ -136541,9 +157739,29 @@ resources: "location": "us-central1", "name": "endpoint-name", "network": "projects/${data.google_project.project.number}/global/networks/${google_compute_network.vertex_network.name}", - "region": "us-central1" + "predict_request_response_logging_config": [ + { + "bigquery_destination": [ + { + "output_uri": "bq://${data.google_project.project.project_id}.${google_bigquery_dataset.bq_dataset.dataset_id}.request_response_logging" + } + ], + "enabled": true, + "sampling_rate": 0.1 + } + ], + "region": "us-central1", + "traffic_split": "${jsonencode({\n \"12345\" = 100\n })}" } dependencies: + google_bigquery_dataset.bq_dataset: |- + { + "dataset_id": "some_dataset", + "delete_contents_on_destroy": true, + "description": "This is a dataset that requests are logged to", + "friendly_name": "logging dataset", + "location": "US" + } google_compute_global_address.vertex_range: |- { "address_type": "INTERNAL", @@ -136570,6 +157788,40 @@ resources: ], "service": "servicenetworking.googleapis.com" } + - name: endpoint + manifest: |- + { + "description": "A sample vertex endpoint", + "display_name": "sample-endpoint", + "labels": { + "label-one": "value-one" + }, + "location": "us-central1", + "name": "endpoint-name", + "private_service_connect_config": [ + { + "enable_private_service_connect": true, + "enable_secure_private_service_connect": false, + "project_allowlist": [ + "${data.google_project.project.project_id}" + ] + } + ], + "region": "us-central1" + } + - name: endpoint + manifest: |- + { + "dedicated_endpoint_enabled": true, + "description": "A sample vertex endpoint", + "display_name": "sample-endpoint", + "labels": { + "label-one": "value-one" + }, + "location": "us-central1", + "name": "endpoint-name", + "region": "us-central1" + } 
argumentDocs: automatic_resources.max_replica_count: |- - @@ -136587,10 +157839,21 @@ resources: - (Output) The target resource utilization in percentage (1% - 100%) for the given metric; once the real usage deviates from the target by a certain percentage, the machine replicas change. The default value is 60 (representing 60%) if not provided. + bigquery_destination.output_uri: |- + - + (Optional) + BigQuery URI to a project or table, up to 2000 characters long. When only the project is specified, the Dataset and Table is created. When the full table reference is specified, the Dataset must exist and table must not exist. Accepted forms: - BigQuery path. For example: bq://projectId or bq://projectId.bqDatasetId or bq://projectId.bqDatasetId.bqTableId. create: '- Default is 20 minutes.' create_time: |- - Output only. Timestamp when this Endpoint was created. + dedicated_endpoint_dns: |- + - + Output only. DNS of the dedicated endpoint. Will only be populated if dedicatedEndpointEnabled is true. Format: https://{endpointId}.{region}-{projectNumber}.prediction.vertexai.goog. + dedicated_endpoint_enabled: |- + - + (Optional) + If true, the endpoint will be exposed through a dedicated DNS [Endpoint.dedicated_endpoint_dns]. Your request to the dedicated DNS will be isolated from other users' traffic and will have better performance and reliability. Note: Once you enabled dedicated endpoint, you won't be able to send request to the shared DNS {region}-aiplatform.googleapis.com. The limitation will be removed soon. dedicated_resources.autoscaling_metric_specs: |- - (Output) @@ -136721,7 +157984,25 @@ resources: network: |- - (Optional) - The full name of the Google Compute Engine network to which the Endpoint should be peered. Private services access must already be configured for the network. If left unspecified, the Endpoint is not peered with any network. Only one of the fields, network or enable_private_service_connect, can be set. 
Format: projects/{project}/global/networks/{network}. Where {project} is a project number, as in 12345, and {network} is network name. + The full name of the Google Compute Engine network to which the Endpoint should be peered. Private services access must already be configured for the network. If left unspecified, the Endpoint is not peered with any network. Only one of the fields, network or enable_private_service_connect, can be set. Format: projects/{project}/global/networks/{network}. Where {project} is a project number, as in 12345, and {network} is network name. Only one of the fields, network or privateServiceConnectConfig, can be set. + predict_request_response_logging_config: |- + - + (Optional) + Configures the request-response logging for online prediction. + Structure is documented below. + predict_request_response_logging_config.bigquery_destination: |- + - + (Optional) + BigQuery table for logging. If only given a project, a new dataset will be created with name logging__ where will be made BigQuery-dataset-name compatible (e.g. most special characters will become underscores). If no table name is given, a new table will be created with name request_response_logging + Structure is documented below. + predict_request_response_logging_config.enabled: |- + - + (Optional) + If logging is enabled or not. + predict_request_response_logging_config.sampling_rate: |- + - + (Optional) + Percentage of requests to be logged, expressed as a fraction in range(0,1] private_endpoints.explain_http_uri: |- - (Output) @@ -136738,6 +158019,23 @@ resources: - (Output) Output only. The name of the service attachment resource. Populated if private service connect is enabled. + private_service_connect_config: |- + - + (Optional) + Configuration for private service connect. network and privateServiceConnectConfig are mutually exclusive. + Structure is documented below. + private_service_connect_config.enable_private_service_connect: |- + - + (Required) + Required. 
If true, expose the IndexEndpoint via private service connect. + private_service_connect_config.enable_secure_private_service_connect: |- + - + (Optional) + If set to true, enable secure private service connect with IAM authorization. Otherwise, private service connect will be done without authorization. Note latency will be slightly increased if authorization is enabled. + private_service_connect_config.project_allowlist: |- + - + (Optional) + A list of Projects from which the forwarding rule will target the service attachment. project: |- - (Optional) The ID of the project in which the resource belongs. If it is not provided, the provider project is used. @@ -136749,6 +158047,15 @@ resources: - The combination of labels configured directly on the resource and default labels configured on the provider. + traffic_split: |- + - + (Optional) + A map from a DeployedModel's id to the percentage of this Endpoint's traffic that should be forwarded to that DeployedModel. + If a DeployedModel's id is not listed in this map, then it receives no traffic. + The traffic percentage values must add up to 100, or map must be empty if the Endpoint is to not accept any traffic at a moment. See + the deployModel example and + documentation for more information. + ~> Note: To set the map to empty, set "{}", apply, and then remove the field from your config. update: '- Default is 20 minutes.' update_time: |- - @@ -137414,6 +158721,7 @@ resources: google_project.project: |- { "billing_account": "000000-0000000-0000000-000000", + "deletion_policy": "DELETE", "name": "tf-test", "org_id": "123456789", "project_id": "tf-test" @@ -138488,7 +159796,7 @@ resources: Structure is documented below. metadata.contents_delta_uri: |- - - (Required) + (Optional) Allows inserting, updating or deleting the contents of the Matching Engine Index. The string must be a valid Cloud Storage directory path. 
If this field is set when calling IndexService.UpdateIndex, then no other @@ -138584,14 +159892,14 @@ resources: { "enable_private_service_connect": true, "project_allowlist": [ - "${data.google_project.project.number}" + "${data.google_project.project.name}" ] } ], "region": "us-central1" } references: - private_service_connect_config.project_allowlist: data.google_project.project.number + private_service_connect_config.project_allowlist: data.google_project.project.name - name: index_endpoint manifest: |- { @@ -139223,6 +160531,38 @@ resources: - name: vmw-ext-cluster manifest: |- { + "autoscaling_settings": [ + { + "autoscaling_policies": [ + { + "autoscale_policy_id": "autoscaling-policy", + "consumed_memory_thresholds": [ + { + "scale_in": 20, + "scale_out": 75 + } + ], + "cpu_thresholds": [ + { + "scale_in": 15, + "scale_out": 80 + } + ], + "node_type_id": "standard-72", + "scale_out_size": 1, + "storage_thresholds": [ + { + "scale_in": 20, + "scale_out": 80 + } + ] + } + ], + "cool_down_period": "1800s", + "max_cluster_node_count": 8, + "min_cluster_node_count": 3 + } + ], "name": "ext-cluster", "node_type_configs": [ { @@ -139268,6 +160608,76 @@ resources: ] } argumentDocs: + autoscaling_policies.autoscale_policy_id: '- (Required) The identifier for this object. Format specified above.' + autoscaling_policies.consumed_memory_thresholds: |- + - + (Optional) + Utilization thresholds pertaining to amount of consumed memory. + Structure is documented below. + autoscaling_policies.cpu_thresholds: |- + - + (Optional) + Utilization thresholds pertaining to CPU utilization. + Structure is documented below. + autoscaling_policies.node_type_id: |- + - + (Required) + The canonical identifier of the node type to add or remove. + autoscaling_policies.scale_out_size: |- + - + (Required) + Number of nodes to add to a cluster during a scale-out operation. + Must be divisible by 2 for stretched clusters. 
+ autoscaling_policies.storage_thresholds: |- + - + (Optional) + Utilization thresholds pertaining to amount of consumed storage. + Structure is documented below. + autoscaling_settings: |- + - + (Optional) + Configuration of the autoscaling applied to this cluster + Structure is documented below. + autoscaling_settings.autoscaling_policies: |- + - + (Required) + The map with autoscaling policies applied to the cluster. + The key is the identifier of the policy. + It must meet the following requirements: + autoscaling_settings.cool_down_period: |- + - + (Optional) + The minimum duration between consecutive autoscale operations. + It starts once addition or removal of nodes is fully completed. + Minimum cool down period is 30m. + Cool down period must be in whole minutes (for example, 30m, 31m, 50m). + Mandatory for successful addition of autoscaling settings in cluster. + autoscaling_settings.max_cluster_node_count: |- + - + (Optional) + Maximum number of nodes of any type in a cluster. + Mandatory for successful addition of autoscaling settings in cluster. + autoscaling_settings.min_cluster_node_count: |- + - + (Optional) + Minimum number of nodes of any type in a cluster. + Mandatory for successful addition of autoscaling settings in cluster. + consumed_memory_thresholds.scale_in: |- + - + (Required) + The utilization triggering the scale-in operation in percent. + consumed_memory_thresholds.scale_out: |- + - + (Required) + The utilization triggering the scale-out operation in percent. + cpu_thresholds.scale_in: |- + - + (Required) + The utilization triggering the scale-in operation in percent. + cpu_thresholds.scale_out: |- + - + (Required) + The utilization triggering the scale-out operation in percent. create: '- Default is 210 minutes.' delete: '- Default is 150 minutes.' id: '- an identifier for the resource with format {{parent}}/clusters/{{name}}' @@ -139306,6 +160716,14 @@ resources: state: |- - State of the Cluster. 
+ storage_thresholds.scale_in: |- + - + (Required) + The utilization triggering the scale-in operation in percent. + storage_thresholds.scale_out: |- + - + (Required) + The utilization triggering the scale-out operation in percent. uid: |- - System-generated unique identifier for the resource. @@ -139646,6 +161064,7 @@ resources: google_project.acceptance: |- { "billing_account": "000000-0000000-0000000-000000", + "deletion_policy": "DELETE", "name": "vmw-proj", "org_id": "123456789", "project_id": "vmw-proj" @@ -140023,6 +161442,38 @@ resources: "location": "us-west1-a", "management_cluster": [ { + "autoscaling_settings": [ + { + "autoscaling_policies": [ + { + "autoscale_policy_id": "autoscaling-policy", + "consumed_memory_thresholds": [ + { + "scale_in": 20, + "scale_out": 75 + } + ], + "cpu_thresholds": [ + { + "scale_in": 15, + "scale_out": 80 + } + ], + "node_type_id": "standard-72", + "scale_out_size": 1, + "storage_thresholds": [ + { + "scale_in": 20, + "scale_out": 80 + } + ] + } + ], + "cool_down_period": "1800s", + "max_cluster_node_count": 8, + "min_cluster_node_count": 3 + } + ], "cluster_id": "sample-mgmt-cluster", "node_type_configs": [ { @@ -140054,6 +161505,71 @@ resources: "type": "STANDARD" } argumentDocs: + autoscaling_policies.autoscale_policy_id: '- (Required) The identifier for this object. Format specified above.' + autoscaling_policies.consumed_memory_thresholds: |- + - + (Optional) + Utilization thresholds pertaining to amount of consumed memory. + Structure is documented below. + autoscaling_policies.cpu_thresholds: |- + - + (Optional) + Utilization thresholds pertaining to CPU utilization. + Structure is documented below. + autoscaling_policies.node_type_id: |- + - + (Required) + The canonical identifier of the node type to add or remove. + autoscaling_policies.scale_out_size: |- + - + (Required) + Number of nodes to add to a cluster during a scale-out operation. + Must be divisible by 2 for stretched clusters. 
+ autoscaling_policies.storage_thresholds: |- + - + (Optional) + Utilization thresholds pertaining to amount of consumed storage. + Structure is documented below. + autoscaling_settings.autoscaling_policies: |- + - + (Required) + The map with autoscaling policies applied to the cluster. + The key is the identifier of the policy. + It must meet the following requirements: + autoscaling_settings.cool_down_period: |- + - + (Optional) + The minimum duration between consecutive autoscale operations. + It starts once addition or removal of nodes is fully completed. + Minimum cool down period is 30m. + Cool down period must be in whole minutes (for example, 30m, 31m, 50m). + Mandatory for successful addition of autoscaling settings in cluster. + autoscaling_settings.max_cluster_node_count: |- + - + (Optional) + Maximum number of nodes of any type in a cluster. + Mandatory for successful addition of autoscaling settings in cluster. + autoscaling_settings.min_cluster_node_count: |- + - + (Optional) + Minimum number of nodes of any type in a cluster. + Mandatory for successful addition of autoscaling settings in cluster. + consumed_memory_thresholds.scale_in: |- + - + (Required) + The utilization triggering the scale-in operation in percent. + consumed_memory_thresholds.scale_out: |- + - + (Required) + The utilization triggering the scale-out operation in percent. + cpu_thresholds.scale_in: |- + - + (Required) + The utilization triggering the scale-in operation in percent. + cpu_thresholds.scale_out: |- + - + (Required) + The utilization triggering the scale-out operation in percent. create: '- Default is 240 minutes.' delete: '- Default is 150 minutes.' hcx: |- @@ -140087,6 +161603,12 @@ resources: (Required) The management cluster for this private cloud. This used for creating and managing the default cluster. Structure is documented below. 
+ management_cluster.autoscaling_settings: |- + - + (Optional) + Configuration of the autoscaling applied to this cluster + Private cloud must have a minimum of 3 nodes to add autoscale settings + Structure is documented below. management_cluster.cluster_id: |- - (Required) @@ -140171,28 +161693,36 @@ resources: state: |- - State of the resource. New values may be added to this enum when appropriate. - stretched_cluster_config.deletion_delay_hours: '- (Optional) The number of hours to delay this request. You can set this value to an hour between 0 to 8, where setting it to 0 starts the deletion request immediately. If no value is set, a default value is set at the API Level.' - stretched_cluster_config.description: |- + storage_thresholds.deletion_delay_hours: '- (Optional) The number of hours to delay this request. You can set this value to an hour between 0 to 8, where setting it to 0 starts the deletion request immediately. If no value is set, a default value is set at the API Level.' + storage_thresholds.description: |- - (Optional) User-provided description for this private cloud. + storage_thresholds.project: |- + - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + storage_thresholds.scale_in: |- + - + (Required) + The utilization triggering the scale-in operation in percent. + storage_thresholds.scale_out: |- + - + (Required) + The utilization triggering the scale-out operation in percent. + storage_thresholds.send_deletion_delay_hours_if_zero: '- (Optional) While set true, deletion_delay_hours value will be sent in the request even for zero value of the field. This field is only useful for setting 0 value to the deletion_delay_hours field. It can be used both alone and together with deletion_delay_hours.' + storage_thresholds.type: |- + - + (Optional) + Initial type of the private cloud. + Possible values are: STANDARD, TIME_LIMITED, STRETCHED. 
stretched_cluster_config.preferred_location: |- - (Optional) Zone that will remain operational when connection between the two zones is lost. - stretched_cluster_config.project: |- - - (Optional) The ID of the project in which the resource belongs. - If it is not provided, the provider project is used. stretched_cluster_config.secondary_location: |- - (Optional) Additional zone for a higher level of availability and load balancing. - stretched_cluster_config.send_deletion_delay_hours_if_zero: '- (Optional) While set true, deletion_delay_hours value will be sent in the request even for zero value of the field. This field is only useful for setting 0 value to the deletion_delay_hours field. It can be used both alone and together with deletion_delay_hours.' - stretched_cluster_config.type: |- - - - (Optional) - Initial type of the private cloud. - Possible values are: STANDARD, TIME_LIMITED, STRETCHED. uid: |- - System-generated unique identifier for the resource. @@ -140340,6 +161870,8 @@ resources: manifest: |- { "ip_cidr_range": "10.8.0.0/28", + "max_instances": 3, + "min_instances": 2, "name": "vpc-con", "network": "default" } @@ -140347,6 +161879,8 @@ resources: manifest: |- { "machine_type": "e2-standard-4", + "max_instances": 3, + "min_instances": 2, "name": "vpc-con", "subnet": [ { @@ -140389,8 +161923,7 @@ resources: (Optional) Maximum throughput of the connector in Mbps, must be greater than min_throughput. Default is 300. Refers to the expected throughput when using an e2-micro machine type. Value must be a multiple of 100 from 300 through 1000. Must be higher than the value specified by - min_throughput. If both max_throughput and max_instances are provided, max_instances takes precedence over max_throughput. The use of - max_throughput is discouraged in favor of max_instances. + min_throughput. Only one of max_throughput and max_instances can be specified. The use of max_throughput is discouraged in favor of max_instances. 
min_instances: |- - (Optional) @@ -140400,8 +161933,8 @@ resources: - (Optional) Minimum throughput of the connector in Mbps. Default and min is 200. Refers to the expected throughput when using an e2-micro machine type. - Value must be a multiple of 100 from 200 through 900. Must be lower than the value specified by max_throughput. If both min_throughput and - min_instances are provided, min_instances takes precedence over min_throughput. The use of min_throughput is discouraged in favor of min_instances. + Value must be a multiple of 100 from 200 through 900. Must be lower than the value specified by max_throughput. + Only one of min_throughput and min_instances can be specified. The use of min_throughput is discouraged in favor of min_instances. name: |- - (Required) @@ -140672,7 +162205,7 @@ resources: - (Optional) The tag of the container image. If not specified, this defaults to the latest tag. - create: '- Default is 10 minutes.' + create: '- Default is 20 minutes.' create_time: |- - An RFC3339 timestamp in UTC time. This in the format of yyyy-MM-ddTHH:mm:ss.SSSZ. @@ -140800,7 +162333,9 @@ resources: (Optional) 'Optional. Input only. The owner of this instance after creation. Format: alias@example.com Currently supports one owner only. If not specified, all of - the service account users of your VM instance''s service account can use the instance.' + the service account users of your VM instance''s service account can use the instance. + If specified, sets the access mode to Single user. For more details, see + https://cloud.google.com/vertex-ai/docs/workbench/instances/manage-access-jupyterlab' labels: |- - (Optional) @@ -140992,6 +162527,7 @@ resources: manifest: |- { "call_log_level": "LOG_ERRORS_ONLY", + "deletion_protection": false, "description": "Magic", "labels": { "env": "test" @@ -141030,6 +162566,13 @@ resources: The KMS key used to encrypt workflow and execution data. 
Format: projects/{project}/locations/{location}/keyRings/{keyRing}/cryptoKeys/{cryptoKey} delete: '- Default is 20 minutes.' + deletion_protection: |- + - (Optional) Whether Terraform will be prevented from destroying the workflow. Defaults to true. + When a terraform destroy or terraform apply would delete the workflow, + the command will fail if this field is not set to false in Terraform state. + When the field is set to true or unset in Terraform state, a terraform apply + or terraform destroy that would delete the workflow will fail. + When the field is set to false, deleting the workflow is allowed. description: |- - (Optional) @@ -141210,6 +162753,11 @@ resources: project: |- - (Optional) The ID of the project in which the resource belongs. If it is not provided, the provider project is used. + source_workstation: |- + - + (Optional) + Full resource name of the source workstation from which the workstation's persistent + directories will be cloned during creation. state: |- - Current state of the workstation.
@@ -141503,7 +163051,7 @@ resources: "disable_ssh": false, "machine_type": "e2-standard-4", "vm_tags": { - "tagKeys/${google_tags_tag_key.tag_key1.name}": "tagValues/${google_tags_tag_value.tag_value1.name}" + "${(google_tags_tag_key.tag_key1.id)}": "${google_tags_tag_value.tag_value1.id}" } } ] @@ -141514,6 +163062,7 @@ resources: "label": "key" }, "location": "us-central1", + "max_usable_workstations": 1, "provider": "${google-beta}", "replica_zones": [ "us-central1-a", @@ -141549,7 +163098,7 @@ resources: } google_tags_tag_value.tag_value1: |- { - "parent": "tagKeys/${google_tags_tag_key.tag_key1.name}", + "parent": "${google_tags_tag_key.tag_key1.id}", "provider": "${google-beta}", "short_name": "valuename" } @@ -142027,6 +163576,71 @@ resources: "subnetwork": "${google_compute_subnetwork.default.id}", "workstation_cluster_id": "workstation-cluster" } + - name: default + manifest: |- + { + "allowed_ports": [ + { + "first": 80, + "last": 80 + }, + { + "first": 22, + "last": 22 + }, + { + "first": 1024, + "last": 65535 + } + ], + "host": [ + { + "gce_instance": [ + { + "boot_disk_size_gb": 35, + "disable_public_ip_addresses": true, + "machine_type": "e2-standard-4" + } + ] + } + ], + "location": "us-central1", + "provider": "${google-beta}", + "workstation_cluster_id": "${google_workstations_workstation_cluster.default.workstation_cluster_id}", + "workstation_config_id": "workstation-config" + } + references: + provider: google-beta + workstation_cluster_id: google_workstations_workstation_cluster.default.workstation_cluster_id + dependencies: + google_compute_network.default: |- + { + "auto_create_subnetworks": false, + "name": "workstation-cluster", + "provider": "${google-beta}" + } + google_compute_subnetwork.default: |- + { + "ip_cidr_range": "10.0.0.0/24", + "name": "workstation-cluster", + "network": "${google_compute_network.default.name}", + "provider": "${google-beta}", + "region": "us-central1" + } + google_workstations_workstation_cluster.default: |- 
+ { + "annotations": { + "label-one": "value-one" + }, + "labels": { + "label": "key" + }, + "location": "us-central1", + "network": "${google_compute_network.default.id}", + "provider": "${google-beta}", + "subnetwork": "${google_compute_subnetwork.default.id}", + "workstation_cluster_id": "workstation-cluster" + } argumentDocs: accelerators.count: |- - @@ -142036,6 +163650,19 @@ resources: - (Required) Type of accelerator resource to attach to the instance, for example, "nvidia-tesla-p100". + allowed_ports: |- + - + (Optional) + A list of port ranges specifying single ports or ranges of ports that are externally accessible in the workstation. Allowed ports must be one of 22, 80, or within range 1024-65535. If not specified defaults to ports 22, 80, and ports 1024-65535. + Structure is documented below. + allowed_ports.first: |- + - + (Optional) + Starting port number for the current range of ports. Valid ports are 22, 80, and ports within the range 1024-65535. + allowed_ports.last: |- + - + (Optional) + Ending port number for the current range of ports. Valid ports are 22, 80, and ports within the range 1024-65535. annotations: |- - (Optional) @@ -142301,6 +163928,10 @@ resources: - (Required) The location where the workstation cluster config should reside. + max_usable_workstations: |- + - + (Optional) + Maximum number of workstations under this configuration a user can have workstations.workstation.use permission on. Only enforced on CreateWorkstation API calls on the user issuing the API request. name: |- - Full name of this resource. 
diff --git a/config/registry.go b/config/registry.go index f8627068f..27f06a87e 100644 --- a/config/registry.go +++ b/config/registry.go @@ -161,6 +161,7 @@ func GetProvider(_ context.Context, generationProvider bool) (*ujconfig.Provider defaultVersion(), resourceConfigurator(), descriptionOverrides(), + DeletionProtectionRemove(), ), ujconfig.WithRootGroup("gcp.upbound.io"), ujconfig.WithShortName("gcp"), diff --git a/config/schema.json b/config/schema.json index 1915ca477..9301005cb 100644 --- a/config/schema.json +++ b/config/schema.json @@ -66,6 +66,11 @@ "description_kind": "plain", "optional": true }, + "backup_dr_custom_endpoint": { + "type": "string", + "description_kind": "plain", + "optional": true + }, "beyondcorp_custom_endpoint": { "type": "string", "description_kind": "plain", @@ -306,12 +311,12 @@ "description_kind": "plain", "optional": true }, - "dataproc_metastore_custom_endpoint": { + "dataproc_gdc_custom_endpoint": { "type": "string", "description_kind": "plain", "optional": true }, - "datastore_custom_endpoint": { + "dataproc_metastore_custom_endpoint": { "type": "string", "description_kind": "plain", "optional": true @@ -334,6 +339,11 @@ "description_kind": "plain", "optional": true }, + "developer_connect_custom_endpoint": { + "type": "string", + "description_kind": "plain", + "optional": true + }, "dialogflow_custom_endpoint": { "type": "string", "description_kind": "plain", @@ -439,6 +449,11 @@ "description_kind": "plain", "optional": true }, + "iam3_custom_endpoint": { + "type": "string", + "description_kind": "plain", + "optional": true + }, "iam_beta_custom_endpoint": { "type": "string", "description_kind": "plain", @@ -507,11 +522,21 @@ "description_kind": "plain", "optional": true }, + "managed_kafka_custom_endpoint": { + "type": "string", + "description_kind": "plain", + "optional": true + }, "memcache_custom_endpoint": { "type": "string", "description_kind": "plain", "optional": true }, + "memorystore_custom_endpoint": { + "type": 
"string", + "description_kind": "plain", + "optional": true + }, "migration_center_custom_endpoint": { "type": "string", "description_kind": "plain", @@ -557,6 +582,11 @@ "description_kind": "plain", "optional": true }, + "oracle_database_custom_endpoint": { + "type": "string", + "description_kind": "plain", + "optional": true + }, "org_policy_custom_endpoint": { "type": "string", "description_kind": "plain", @@ -572,6 +602,11 @@ "description_kind": "plain", "optional": true }, + "parallelstore_custom_endpoint": { + "type": "string", + "description_kind": "plain", + "optional": true + }, "privateca_custom_endpoint": { "type": "string", "description_kind": "plain", @@ -650,6 +685,11 @@ "description_kind": "plain", "optional": true }, + "secret_manager_regional_custom_endpoint": { + "type": "string", + "description_kind": "plain", + "optional": true + }, "secure_source_manager_custom_endpoint": { "type": "string", "description_kind": "plain", @@ -745,6 +785,11 @@ "description_kind": "plain", "optional": true }, + "transcoder_custom_endpoint": { + "type": "string", + "description_kind": "plain", + "optional": true + }, "universe_domain": { "type": "string", "description_kind": "plain", @@ -803,8 +848,7 @@ } }, "description_kind": "plain" - }, - "max_items": 1 + } } }, "description_kind": "plain" @@ -1001,7 +1045,7 @@ "list", "string" ], - "description": "CIDR block IP subnetwork specification. Must be IPv4.", + "description": "A list of CIDR block IP subnetwork specification. Must be IPv4.", "description_kind": "plain", "optional": true } @@ -2468,7 +2512,7 @@ "set", "string" ], - "description": "A list of identities that are allowed access through this 'EgressPolicy'.\nShould be in the format of email address. The email address should\nrepresent individual user or service account only.", + "description": "Identities can be an individual user, service account, Google group,\nor third-party identity. 
For third-party identity, only single identities\nare supported and other identity types are not supported.The v1 identities\nthat have the prefix user, group and serviceAccount in\nhttps://cloud.google.com/iam/docs/principal-identifiers#v1 are supported.", "description_kind": "plain", "optional": true }, @@ -2593,7 +2637,7 @@ "set", "string" ], - "description": "A list of identities that are allowed access through this ingress policy.\nShould be in the format of email address. The email address should represent\nindividual user or service account only.", + "description": "Identities can be an individual user, service account, Google group,\nor third-party identity. For third-party identity, only single identities\nare supported and other identity types are not supported.The v1 identities\nthat have the prefix user, group and serviceAccount in\nhttps://cloud.google.com/iam/docs/principal-identifiers#v1 are supported.", "description_kind": "plain", "optional": true }, @@ -2781,7 +2825,7 @@ "list", "string" ], - "description": "A list of identities that are allowed access through this 'EgressPolicy'.\nShould be in the format of email address. The email address should\nrepresent individual user or service account only.", + "description": "Identities can be an individual user, service account, Google group,\nor third-party identity. For third-party identity, only single identities\nare supported and other identity types are not supported.The v1 identities\nthat have the prefix user, group and serviceAccount in\nhttps://cloud.google.com/iam/docs/principal-identifiers#v1 are supported.", "description_kind": "plain", "optional": true }, @@ -2937,7 +2981,7 @@ "list", "string" ], - "description": "A list of identities that are allowed access through this ingress policy.\nShould be in the format of email address. 
The email address should represent\nindividual user or service account only.", + "description": "Identities can be an individual user, service account, Google group,\nor third-party identity. For third-party identity, only single identities\nare supported and other identity types are not supported.The v1 identities\nthat have the prefix user, group and serviceAccount in\nhttps://cloud.google.com/iam/docs/principal-identifiers#v1 are supported.", "description_kind": "plain", "optional": true }, @@ -3130,7 +3174,7 @@ "list", "string" ], - "description": "A list of identities that are allowed access through this 'EgressPolicy'.\nShould be in the format of email address. The email address should\nrepresent individual user or service account only.", + "description": "Identities can be an individual user, service account, Google group,\nor third-party identity. For third-party identity, only single identities\nare supported and other identity types are not supported.The v1 identities\nthat have the prefix user, group and serviceAccount in\nhttps://cloud.google.com/iam/docs/principal-identifiers#v1 are supported.", "description_kind": "plain", "optional": true }, @@ -3286,7 +3330,7 @@ "list", "string" ], - "description": "A list of identities that are allowed access through this ingress policy.\nShould be in the format of email address. The email address should represent\nindividual user or service account only.", + "description": "Identities can be an individual user, service account, Google group,\nor third-party identity. 
For third-party identity, only single identities\nare supported and other identity types are not supported.The v1 identities\nthat have the prefix user, group and serviceAccount in\nhttps://cloud.google.com/iam/docs/principal-identifiers#v1 are supported.", "description_kind": "plain", "optional": true }, @@ -3564,7 +3608,7 @@ "set", "string" ], - "description": "A list of identities that are allowed access through this 'EgressPolicy'.\nShould be in the format of email address. The email address should\nrepresent individual user or service account only.", + "description": "Identities can be an individual user, service account, Google group,\nor third-party identity. For third-party identity, only single identities\nare supported and other identity types are not supported.The v1 identities\nthat have the prefix user, group and serviceAccount in\nhttps://cloud.google.com/iam/docs/principal-identifiers#v1 are supported.", "description_kind": "plain", "optional": true }, @@ -4179,6 +4223,12 @@ "description_kind": "plain", "optional": true }, + "deletion_protection": { + "type": "bool", + "description": "Whether Terraform will be prevented from destroying the domain. Defaults to true.\nWhen a'terraform destroy' or 'terraform apply' would delete the domain,\nthe command will fail if this field is not set to false in Terraform state.\nWhen the field is set to true or unset in Terraform state, a 'terraform apply'\nor 'terraform destroy' that would delete the domain will fail.\nWhen the field is set to false, deleting the domain is allowed.", + "description_kind": "plain", + "optional": true + }, "domain_name": { "type": "string", "description": "The fully qualified domain name. e.g. 
mydomain.myorganization.com, with the restrictions\nof https://cloud.google.com/managed-microsoft-ad/reference/rest/v1/projects.locations.global.domains.", @@ -4693,7 +4743,7 @@ }, "deletion_policy": { "type": "string", - "description": "Policy to determine if the cluster should be deleted forcefully.\nDeleting a cluster forcefully, deletes the cluster and all its associated instances within the cluster.\nDeleting a Secondary cluster with a secondary instance REQUIRES setting deletion_policy = \"FORCE\" otherwise an error is returned. This is needed as there is no support to delete just the secondary instance, and the only way to delete secondary instance is to delete the associated secondary cluster forcefully which also deletes the secondary instance.", + "description": "Policy to determine if the cluster should be deleted forcefully.\nDeleting a cluster forcefully, deletes the cluster and all its associated instances within the cluster.\nDeleting a Secondary cluster with a secondary instance REQUIRES setting deletion_policy = \"FORCE\" otherwise an error is returned. This is needed as there is no support to delete just the secondary instance, and the only way to delete secondary instance is to delete the associated secondary cluster forcefully which also deletes the secondary instance.\nPossible values: DEFAULT, FORCE", "description_kind": "plain", "optional": true }, @@ -4788,14 +4838,6 @@ "description_kind": "plain", "computed": true }, - "network": { - "type": "string", - "description": "The relative resource name of the VPC network on which the instance can be accessed. 
It is specified in the following form:\n\n\"projects/{projectNumber}/global/networks/{network_id}\".", - "description_kind": "plain", - "deprecated": true, - "optional": true, - "computed": true - }, "project": { "type": "string", "description_kind": "plain", @@ -4814,6 +4856,13 @@ "description_kind": "plain", "computed": true }, + "subscription_type": { + "type": "string", + "description": "The subscrition type of cluster. Possible values: [\"TRIAL\", \"STANDARD\"]", + "description_kind": "plain", + "optional": true, + "computed": true + }, "terraform_labels": { "type": [ "map", @@ -4823,6 +4872,23 @@ "description_kind": "plain", "computed": true }, + "trial_metadata": { + "type": [ + "list", + [ + "object", + { + "end_time": "string", + "grace_end_time": "string", + "start_time": "string", + "upgrade_time": "string" + } + ] + ], + "description": "Contains information and all metadata related to TRIAL clusters.", + "description_kind": "plain", + "computed": true + }, "uid": { "type": "string", "description": "The system-generated UID of the resource.", @@ -5348,6 +5414,15 @@ "description_kind": "plain", "computed": true }, + "outbound_public_ip_addresses": { + "type": [ + "list", + "string" + ], + "description": "The outbound public IP addresses for the instance. This is available ONLY when\nnetworkConfig.enableOutboundPublicIp is set to true. These IP addresses are used\nfor outbound connections.", + "description_kind": "plain", + "computed": true + }, "public_ip_address": { "type": "string", "description": "The public IP addresses for the Instance. This is available ONLY when\nnetworkConfig.enablePublicIp is set to true. 
This is the connection\nendpoint for an end-user application.", @@ -5445,6 +5520,12 @@ "nesting_mode": "list", "block": { "attributes": { + "enable_outbound_public_ip": { + "type": "bool", + "description": "Enabling outbound public ip for the instance.", + "description_kind": "plain", + "optional": true + }, "enable_public_ip": { "type": "bool", "description": "Enabling public ip for the instance. If a user wishes to disable this,\nplease also clear the list of the authorized external networks set on\nthe same instance.", @@ -5807,27 +5888,21 @@ "description_kind": "plain" } }, - "google_apigee_endpoint_attachment": { + "google_apigee_api": { "version": 0, "block": { "attributes": { - "connection_state": { - "type": "string", - "description": "State of the endpoint attachment connection to the service attachment.", - "description_kind": "plain", - "computed": true - }, - "endpoint_attachment_id": { + "config_bundle": { "type": "string", - "description": "ID of the endpoint attachment.", + "description": "Path to the config zip bundle", "description_kind": "plain", "required": true }, - "host": { + "detect_md5hash": { "type": "string", - "description": "Host that can be used in either HTTP Target Endpoint directly, or as the host in Target Server.", + "description": "A hash of local config bundle in string, user needs to use a Terraform Hash function of their choice. 
A change in hash will trigger an update.", "description_kind": "plain", - "computed": true + "optional": true }, "id": { "type": "string", @@ -5835,29 +5910,54 @@ "optional": true, "computed": true }, - "location": { + "latest_revision_id": { "type": "string", - "description": "Location of the endpoint attachment.", + "description": "The id of the most recently created revision for this API proxy.", "description_kind": "plain", - "required": true + "computed": true }, - "name": { + "md5hash": { "type": "string", - "description": "Name of the Endpoint Attachment in the following format:\norganizations/{organization}/endpointAttachments/{endpointAttachment}.", + "description": "Base 64 MD5 hash of the uploaded config bundle.", "description_kind": "plain", "computed": true }, - "org_id": { + "meta_data": { + "type": [ + "list", + [ + "object", + { + "created_at": "string", + "last_modified_at": "string", + "sub_type": "string" + } + ] + ], + "description": "Metadata describing the API proxy.", + "description_kind": "plain", + "computed": true + }, + "name": { "type": "string", - "description": "The Apigee Organization associated with the Apigee instance,\nin the format 'organizations/{{org_name}}'.", + "description": "Name of the API proxy. 
This field only accepts the following characters: A-Za-z0-9._-.", "description_kind": "plain", "required": true }, - "service_attachment": { + "org_id": { "type": "string", - "description": "Format: projects/*/regions/*/serviceAttachments/*", + "description": "The Apigee Organization name associated with the Apigee instance.", "description_kind": "plain", "required": true + }, + "revision": { + "type": [ + "list", + "string" + ], + "description": "A list of revisions of this API proxy.", + "description_kind": "plain", + "computed": true } }, "block_types": { @@ -5874,6 +5974,11 @@ "type": "string", "description_kind": "plain", "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true } }, "description_kind": "plain" @@ -5883,158 +5988,473 @@ "description_kind": "plain" } }, - "google_apigee_env_keystore": { + "google_apigee_app_group": { "version": 0, "block": { "attributes": { - "aliases": { - "type": [ - "list", - "string" - ], - "description": "Aliases in this keystore.", + "app_group_id": { + "type": "string", + "description": "Internal identifier that cannot be edited", "description_kind": "plain", "computed": true }, - "env_id": { + "channel_id": { "type": "string", - "description": "The Apigee environment group associated with the Apigee environment,\nin the format 'organizations/{{org_name}}/environments/{{env_name}}'.", + "description": "Channel identifier identifies the owner maintaining this grouping.", "description_kind": "plain", - "required": true + "optional": true }, - "id": { + "channel_uri": { "type": "string", + "description": "A reference to the associated storefront/marketplace.", "description_kind": "plain", - "optional": true, - "computed": true + "optional": true }, - "name": { + "created_at": { "type": "string", - "description": "The name of the newly created keystore.", + "description": "Created time as milliseconds since epoch.", "description_kind": "plain", - "optional": true - } - }, - 
"block_types": { - "timeouts": { - "nesting_mode": "single", - "block": { - "attributes": { - "create": { - "type": "string", - "description_kind": "plain", - "optional": true - }, - "delete": { - "type": "string", - "description_kind": "plain", - "optional": true - } - }, - "description_kind": "plain" - } - } - }, - "description_kind": "plain" - } - }, - "google_apigee_env_references": { - "version": 0, - "block": { - "attributes": { - "description": { + "computed": true + }, + "display_name": { "type": "string", - "description": "Optional. A human-readable description of this reference.", + "description": "App group name displayed in the UI", "description_kind": "plain", "optional": true }, - "env_id": { + "id": { "type": "string", - "description": "The Apigee environment group associated with the Apigee environment,\nin the format 'organizations/{{org_name}}/environments/{{env_name}}'.", "description_kind": "plain", - "required": true + "optional": true, + "computed": true }, - "id": { + "last_modified_at": { "type": "string", + "description": "Modified time as milliseconds since epoch.", "description_kind": "plain", - "optional": true, "computed": true }, "name": { "type": "string", - "description": "Required. The resource id of this reference. Values must match the regular expression [\\w\\s-.]+.", + "description": "Name of the AppGroup. Characters you can use in the name are restricted to: A-Z0-9._-$ %.", "description_kind": "plain", "required": true }, - "refers": { + "org_id": { "type": "string", - "description": "Required. The id of the resource to which this reference refers. 
Must be the id of a resource that exists in the parent environment and is of the given resourceType.", + "description": "The Apigee Organization associated with the Apigee app group,\nin the format 'organizations/{{org_name}}'.", "description_kind": "plain", "required": true }, - "resource_type": { + "organization": { "type": "string", - "description": "The type of resource referred to by this reference. Valid values are 'KeyStore' or 'TrustStore'.", + "description": "App group name displayed in the UI", "description_kind": "plain", - "required": true + "computed": true + }, + "status": { + "type": "string", + "description": "Valid values are active or inactive. Note that the status of the AppGroup should be updated via UpdateAppGroupRequest by setting the action as active or inactive. Possible values: [\"active\", \"inactive\"]", + "description_kind": "plain", + "optional": true } }, "block_types": { - "timeouts": { - "nesting_mode": "single", + "attributes": { + "nesting_mode": "list", "block": { "attributes": { - "create": { + "name": { "type": "string", + "description": "Key of the attribute", "description_kind": "plain", "optional": true }, - "delete": { + "value": { "type": "string", + "description": "Value of the attribute", "description_kind": "plain", "optional": true } }, + "description": "A list of attributes", "description_kind": "plain" } - } - }, - "description_kind": "plain" - } - }, - "google_apigee_envgroup": { - "version": 0, - "block": { - "attributes": { - "hostnames": { - "type": [ - "list", - "string" - ], - "description": "Hostnames of the environment group.", - "description_kind": "plain", - "optional": true }, - "id": { - "type": "string", - "description_kind": "plain", - "optional": true, - "computed": true - }, - "name": { - "type": "string", - "description": "The resource ID of the environment group.", - "description_kind": "plain", - "required": true - }, - "org_id": { - "type": "string", - "description": "The Apigee Organization 
associated with the Apigee environment group,\nin the format 'organizations/{{org_name}}'.", - "description_kind": "plain", - "required": true - } - }, - "block_types": { + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, + "google_apigee_developer": { + "version": 0, + "block": { + "attributes": { + "created_at": { + "type": "string", + "description": "Time at which the developer was created in milliseconds since epoch.", + "description_kind": "plain", + "computed": true + }, + "email": { + "type": "string", + "description": "Email address of the developer. This value is used to uniquely identify the developer in Apigee hybrid. 
Note that the email address has to be in lowercase only..", + "description_kind": "plain", + "required": true + }, + "first_name": { + "type": "string", + "description": "First name of the developer.", + "description_kind": "plain", + "required": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "last_modified_at": { + "type": "string", + "description": "Time at which the developer was last modified in milliseconds since epoch.", + "description_kind": "plain", + "computed": true + }, + "last_name": { + "type": "string", + "description": "Last name of the developer.", + "description_kind": "plain", + "required": true + }, + "org_id": { + "type": "string", + "description": "The Apigee Organization associated with the Apigee instance,\nin the format 'organizations/{{org_name}}'.", + "description_kind": "plain", + "required": true + }, + "organizatio_name": { + "type": "string", + "description": "Name of the Apigee organization in which the developer resides.", + "description_kind": "plain", + "computed": true + }, + "status": { + "type": "string", + "description": "Status of the developer. Valid values are active and inactive.", + "description_kind": "plain", + "computed": true + }, + "user_name": { + "type": "string", + "description": "User name of the developer. Not used by Apigee hybrid.", + "description_kind": "plain", + "required": true + } + }, + "block_types": { + "attributes": { + "nesting_mode": "list", + "block": { + "attributes": { + "name": { + "type": "string", + "description": "Key of the attribute", + "description_kind": "plain", + "optional": true + }, + "value": { + "type": "string", + "description": "Value of the attribute", + "description_kind": "plain", + "optional": true + } + }, + "description": "Developer attributes (name/value pairs). 
The custom attribute limit is 18.", + "description_kind": "plain" + } + }, + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, + "google_apigee_endpoint_attachment": { + "version": 0, + "block": { + "attributes": { + "connection_state": { + "type": "string", + "description": "State of the endpoint attachment connection to the service attachment.", + "description_kind": "plain", + "computed": true + }, + "endpoint_attachment_id": { + "type": "string", + "description": "ID of the endpoint attachment.", + "description_kind": "plain", + "required": true + }, + "host": { + "type": "string", + "description": "Host that can be used in either HTTP Target Endpoint directly, or as the host in Target Server.", + "description_kind": "plain", + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "location": { + "type": "string", + "description": "Location of the endpoint attachment.", + "description_kind": "plain", + "required": true + }, + "name": { + "type": "string", + "description": "Name of the Endpoint Attachment in the following format:\norganizations/{organization}/endpointAttachments/{endpointAttachment}.", + "description_kind": "plain", + "computed": true + }, + "org_id": { + "type": "string", + "description": "The Apigee Organization associated with the Apigee instance,\nin the format 'organizations/{{org_name}}'.", + "description_kind": "plain", + "required": true + }, + "service_attachment": { + "type": "string", + "description": "Format: projects/*/regions/*/serviceAttachments/*", + "description_kind": "plain", + 
"required": true + } + }, + "block_types": { + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, + "google_apigee_env_keystore": { + "version": 0, + "block": { + "attributes": { + "aliases": { + "type": [ + "list", + "string" + ], + "description": "Aliases in this keystore.", + "description_kind": "plain", + "computed": true + }, + "env_id": { + "type": "string", + "description": "The Apigee environment group associated with the Apigee environment,\nin the format 'organizations/{{org_name}}/environments/{{env_name}}'.", + "description_kind": "plain", + "required": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "name": { + "type": "string", + "description": "The name of the newly created keystore.", + "description_kind": "plain", + "optional": true + } + }, + "block_types": { + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, + "google_apigee_env_references": { + "version": 0, + "block": { + "attributes": { + "description": { + "type": "string", + "description": "Optional. 
A human-readable description of this reference.", + "description_kind": "plain", + "optional": true + }, + "env_id": { + "type": "string", + "description": "The Apigee environment group associated with the Apigee environment,\nin the format 'organizations/{{org_name}}/environments/{{env_name}}'.", + "description_kind": "plain", + "required": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "name": { + "type": "string", + "description": "Required. The resource id of this reference. Values must match the regular expression [\\w\\s-.]+.", + "description_kind": "plain", + "required": true + }, + "refers": { + "type": "string", + "description": "Required. The id of the resource to which this reference refers. Must be the id of a resource that exists in the parent environment and is of the given resourceType.", + "description_kind": "plain", + "required": true + }, + "resource_type": { + "type": "string", + "description": "The type of resource referred to by this reference. 
Valid values are 'KeyStore' or 'TrustStore'.", + "description_kind": "plain", + "required": true + } + }, + "block_types": { + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, + "google_apigee_envgroup": { + "version": 0, + "block": { + "attributes": { + "hostnames": { + "type": [ + "list", + "string" + ], + "description": "Hostnames of the environment group.", + "description_kind": "plain", + "optional": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "name": { + "type": "string", + "description": "The resource ID of the environment group.", + "description_kind": "plain", + "required": true + }, + "org_id": { + "type": "string", + "description": "The Apigee Organization associated with the Apigee environment group,\nin the format 'organizations/{{org_name}}'.", + "description_kind": "plain", + "required": true + } + }, + "block_types": { "timeouts": { "nesting_mode": "single", "block": { @@ -6669,6 +7089,11 @@ "type": "string", "description_kind": "plain", "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true } }, "description_kind": "plain" @@ -7231,6 +7656,12 @@ "version": 0, "block": { "attributes": { + "activate": { + "type": "bool", + "description": "Flag that specifies whether the reserved NAT address should be activate.", + "description_kind": "plain", + "optional": true + }, "id": { "type": "string", "description_kind": "plain", @@ -7276,6 +7707,11 @@ "type": "string", "description_kind": "plain", "optional": true + }, + "update": { + "type": "string", + 
"description_kind": "plain", + "optional": true } }, "description_kind": "plain" @@ -10171,7 +10607,7 @@ "attributes": { "type": { "type": "string", - "description": "Required. Scope Type. \n Possible values:\nREGIONAL Possible values: [\"REGIONAL\"]", + "description": "Required. Scope Type. \n Possible values:\nREGIONAL\nGLOBAL Possible values: [\"REGIONAL\", \"GLOBAL\"]", "description_kind": "plain", "required": true } @@ -10837,7 +11273,7 @@ }, "location": { "type": "string", - "description": "The name of the location this repository is located in.", + "description": "The name of the repository's location. In addition to specific regions,\nspecial values for multi-region locations are 'asia', 'europe', and 'us'.\nSee [here](https://cloud.google.com/artifact-registry/docs/repositories/repo-locations),\nor use the\n[google_artifact_registry_locations](https://registry.terraform.io/providers/hashicorp/google/latest/docs/data-sources/artifact_registry_locations)\ndata source for possible values.", "description_kind": "plain", "optional": true, "computed": true @@ -11073,6 +11509,22 @@ }, "max_items": 1 }, + "common_repository": { + "nesting_mode": "list", + "block": { + "attributes": { + "uri": { + "type": "string", + "description": "One of:\na. Artifact Registry Repository resource, e.g. 'projects/UPSTREAM_PROJECT_ID/locations/REGION/repositories/UPSTREAM_REPOSITORY'\nb. URI to the registry, e.g. '\"https://registry-1.docker.io\"'\nc. URI to Artifact Registry Repository, e.g. 
'\"https://REGION-docker.pkg.dev/UPSTREAM_PROJECT_ID/UPSTREAM_REPOSITORY\"'", + "description_kind": "plain", + "required": true + } + }, + "description": "Specific settings for an Artifact Registory remote repository.", + "description_kind": "plain" + }, + "max_items": 1 + }, "docker_repository": { "nesting_mode": "list", "block": { @@ -11096,7 +11548,7 @@ "optional": true } }, - "description": "Settings for a remote repository with a custom uri.", + "description": "[Deprecated, please use commonRepository instead] Settings for a remote repository with a custom uri.", "description_kind": "plain" }, "max_items": 1 @@ -11130,7 +11582,7 @@ "optional": true } }, - "description": "Settings for a remote repository with a custom uri.", + "description": "[Deprecated, please use commonRepository instead] Settings for a remote repository with a custom uri.", "description_kind": "plain" }, "max_items": 1 @@ -11164,7 +11616,7 @@ "optional": true } }, - "description": "Settings for a remote repository with a custom uri.", + "description": "[Deprecated, please use commonRepository instead] Settings for a remote repository with a custom uri.", "description_kind": "plain" }, "max_items": 1 @@ -11198,7 +11650,7 @@ "optional": true } }, - "description": "Settings for a remote repository with a custom uri.", + "description": "[Deprecated, please use commonRepository instead] Settings for a remote repository with a custom uri.", "description_kind": "plain" }, "max_items": 1 @@ -11338,6 +11790,34 @@ "description_kind": "plain" }, "max_items": 1 + }, + "vulnerability_scanning_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "enablement_config": { + "type": "string", + "description": "This configures whether vulnerability scanning is automatically performed for artifacts pushed to this repository. 
Possible values: [\"INHERITED\", \"DISABLED\"]", + "description_kind": "plain", + "optional": true + }, + "enablement_state": { + "type": "string", + "description": "This field returns whether scanning is active for this repository.", + "description_kind": "plain", + "computed": true + }, + "enablement_state_reason": { + "type": "string", + "description": "This provides an explanation for the state of scanning on this repository.", + "description_kind": "plain", + "computed": true + } + }, + "description": "Configuration for vulnerability scanning of artifacts stored in this repository.", + "description_kind": "plain" + }, + "max_items": 1 } }, "description_kind": "plain" @@ -11543,7 +12023,7 @@ }, "compliance_regime": { "type": "string", - "description": "Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT", + "description": "Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS, HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS, HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS_WITH_US_SUPPORT, IRS_1075", "description_kind": "plain", "required": true }, @@ -11661,7 +12141,13 @@ }, "partner": { "type": "string", - "description": "Optional. Partner regime associated with this workload. 
Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN", + "description": "Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN, SOVEREIGN_CONTROLS_BY_CNTXT, SOVEREIGN_CONTROLS_BY_CNTXT_NO_EKM", + "description_kind": "plain", + "optional": true + }, + "partner_services_billing_account": { + "type": "string", + "description": "Optional. Input only. Billing account necessary for purchasing services from Sovereign Partners. This field is required for creating SIA/PSN/CNTXT partner workloads. The caller should have 'billing.resourceAssociations.create' IAM permission on this billing-account. The format of this string is billingAccounts/AAAAAA-BBBBBB-CCCCCC.", "description_kind": "plain", "optional": true }, @@ -11821,191 +12307,130 @@ }, "description_kind": "plain" } + }, + "workload_options": { + "nesting_mode": "list", + "block": { + "attributes": { + "kaj_enrollment_type": { + "type": "string", + "description": "Indicates type of KAJ enrollment for the workload. Currently, only specifiying KEY_ACCESS_TRANSPARENCY_OFF is implemented to not enroll in KAT-level KAJ enrollment for Regional Controls workloads. Possible values: KAJ_ENROLLMENT_TYPE_UNSPECIFIED, FULL_KAJ, EKM_ONLY, KEY_ACCESS_TRANSPARENCY_OFF", + "description_kind": "plain", + "optional": true + } + }, + "description": "Optional. 
Used to specify certain options for a workload during workload creation - currently only supporting KAT Optionality for Regional Controls workloads.", + "description_kind": "plain" + }, + "max_items": 1 } }, "description_kind": "plain" } }, - "google_beyondcorp_app_connection": { + "google_backup_dr_backup_vault": { "version": 0, "block": { "attributes": { - "connectors": { - "type": [ - "list", - "string" - ], - "description": "List of AppConnectors that are authorised to be associated with this AppConnection", + "access_restriction": { + "type": "string", + "description": "Access restriction for the backup vault. Default value is 'WITHIN_ORGANIZATION' if not provided during creation. Default value: \"WITHIN_ORGANIZATION\" Possible values: [\"ACCESS_RESTRICTION_UNSPECIFIED\", \"WITHIN_PROJECT\", \"WITHIN_ORGANIZATION\", \"UNRESTRICTED\", \"WITHIN_ORG_BUT_UNRESTRICTED_FOR_BA\"]", "description_kind": "plain", "optional": true }, - "display_name": { - "type": "string", - "description": "An arbitrary user-provided name for the AppConnection.", + "allow_missing": { + "type": "bool", + "description": "Allow idempotent deletion of backup vault. The request will still succeed in case the backup vault does not exist.", "description_kind": "plain", "optional": true }, - "effective_labels": { + "annotations": { "type": [ "map", "string" ], - "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + "description": "Optional. User annotations. See https://google.aip.dev/128#annotations\nStores small amounts of arbitrary data. 
\n\n**Note**: This field is non-authoritative, and will only manage the annotations present in your configuration.\nPlease refer to the field 'effective_annotations' for all of the annotations present on the resource.", "description_kind": "plain", - "computed": true + "optional": true }, - "id": { + "backup_count": { "type": "string", + "description": "Output only. The number of backups in this backup vault.", "description_kind": "plain", - "optional": true, "computed": true }, - "labels": { - "type": [ - "map", - "string" - ], - "description": "Resource labels to represent user provided metadata.\n\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", + "backup_minimum_enforced_retention_duration": { + "type": "string", + "description": "Required. The default and minimum enforced retention for each backup within the backup vault. The enforced retention for each backup can be extended.", "description_kind": "plain", - "optional": true + "required": true }, - "name": { + "backup_vault_id": { "type": "string", - "description": "ID of the AppConnection.", + "description": "Required. ID of the requesting object.", "description_kind": "plain", "required": true }, - "project": { + "create_time": { "type": "string", + "description": "Output only. The time when the instance was created.", "description_kind": "plain", - "optional": true, "computed": true }, - "region": { + "deletable": { + "type": "bool", + "description": "Output only. Set to true when there are no backups nested under this resource.", + "description_kind": "plain", + "computed": true + }, + "description": { "type": "string", - "description": "The region of the AppConnection.", + "description": "Optional. 
The description of the BackupVault instance (2048 characters or less).", "description_kind": "plain", "optional": true }, - "terraform_labels": { + "effective_annotations": { "type": [ "map", "string" ], - "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", + "description": "All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services.", "description_kind": "plain", "computed": true }, - "type": { + "effective_labels": { + "type": [ + "map", + "string" + ], + "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, + "effective_time": { "type": "string", - "description": "The type of network connectivity used by the AppConnection. Refer\nto https://cloud.google.com/beyondcorp/docs/reference/rest/v1/projects.locations.appConnections#type\nfor a list of possible values.", + "description": "Optional. 
Time after which the BackupVault resource is locked.", "description_kind": "plain", "optional": true - } - }, - "block_types": { - "application_endpoint": { - "nesting_mode": "list", - "block": { - "attributes": { - "host": { - "type": "string", - "description": "Hostname or IP address of the remote application endpoint.", - "description_kind": "plain", - "required": true - }, - "port": { - "type": "number", - "description": "Port of the remote application endpoint.", - "description_kind": "plain", - "required": true - } - }, - "description": "Address of the remote application endpoint for the BeyondCorp AppConnection.", - "description_kind": "plain" - }, - "min_items": 1, - "max_items": 1 - }, - "gateway": { - "nesting_mode": "list", - "block": { - "attributes": { - "app_gateway": { - "type": "string", - "description": "AppGateway name in following format: projects/{project_id}/locations/{locationId}/appgateways/{gateway_id}.", - "description_kind": "plain", - "required": true - }, - "ingress_port": { - "type": "number", - "description": "Ingress port reserved on the gateways for this AppConnection, if not specified or zero, the default port is 19443.", - "description_kind": "plain", - "computed": true - }, - "type": { - "type": "string", - "description": "The type of hosting used by the gateway. 
Refer to\nhttps://cloud.google.com/beyondcorp/docs/reference/rest/v1/projects.locations.appConnections#Type_1\nfor a list of possible values.", - "description_kind": "plain", - "optional": true - }, - "uri": { - "type": "string", - "description": "Server-defined URI for this resource.", - "description_kind": "plain", - "computed": true - } - }, - "description": "Gateway used by the AppConnection.", - "description_kind": "plain" - }, - "max_items": 1 }, - "timeouts": { - "nesting_mode": "single", - "block": { - "attributes": { - "create": { - "type": "string", - "description_kind": "plain", - "optional": true - }, - "delete": { - "type": "string", - "description_kind": "plain", - "optional": true - }, - "update": { - "type": "string", - "description_kind": "plain", - "optional": true - } - }, - "description_kind": "plain" - } - } - }, - "description_kind": "plain" - } - }, - "google_beyondcorp_app_connector": { - "version": 0, - "block": { - "attributes": { - "display_name": { + "etag": { "type": "string", - "description": "An arbitrary user-provided name for the AppConnector.", + "description": "Optional. 
Server specified ETag for the backup vault resource to prevent simultaneous updates from overwiting each other.", "description_kind": "plain", + "computed": true + }, + "force_delete": { + "type": "bool", + "description": "If set, the following restrictions against deletion of the backup vault instance can be overridden:\n * deletion of a backup vault instance containing no backups, but still containing empty datasources.\n * deletion of a backup vault instance that is being referenced by an active backup plan.", + "description_kind": "plain", + "deprecated": true, "optional": true }, - "effective_labels": { - "type": [ - "map", - "string" - ], - "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + "force_update": { + "type": "bool", + "description": "If set, allow update to extend the minimum enforced retention for backup vault. This overrides\n the restriction against conflicting retention periods. 
This conflict may occur when the\n expiration schedule defined by the associated backup plan is shorter than the minimum\n retention set by the backup vault.", "description_kind": "plain", - "computed": true + "optional": true }, "id": { "type": "string", @@ -12013,36 +12438,54 @@ "optional": true, "computed": true }, + "ignore_backup_plan_references": { + "type": "bool", + "description": "If set, the following restrictions against deletion of the backup vault instance can be overridden:\n * deletion of a backup vault instance that is being referenced by an active backup plan.", + "description_kind": "plain", + "optional": true + }, + "ignore_inactive_datasources": { + "type": "bool", + "description": "If set, the following restrictions against deletion of the backup vault instance can be overridden:\n * deletion of a backup vault instance containing no backups, but still containing empty datasources.", + "description_kind": "plain", + "optional": true + }, "labels": { "type": [ "map", "string" ], - "description": "Resource labels to represent user provided metadata.\n\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", + "description": "Optional. Resource labels to represent user provided metadata. \n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", "description_kind": "plain", "optional": true }, - "name": { + "location": { "type": "string", - "description": "ID of the AppConnector.", + "description": "The GCP location for the backup vault.", "description_kind": "plain", "required": true }, + "name": { + "type": "string", + "description": "Output only. Identifier. 
The resource name.", + "description_kind": "plain", + "computed": true + }, "project": { "type": "string", "description_kind": "plain", "optional": true, "computed": true }, - "region": { + "service_account": { "type": "string", - "description": "The region of the AppConnector.", + "description": "Output only. Service account used by the BackupVault Service for this BackupVault. The user should grant this account permissions in their workload project to enable the service to run backups and restores there.", "description_kind": "plain", - "optional": true + "computed": true }, "state": { "type": "string", - "description": "Represents the different states of a AppConnector.", + "description": "Output only. The BackupVault resource instance state. \n Possible values:\n STATE_UNSPECIFIED\n CREATING\n ACTIVE\n DELETING\n ERROR", "description_kind": "plain", "computed": true }, @@ -12054,37 +12497,313 @@ "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", "description_kind": "plain", "computed": true + }, + "total_stored_bytes": { + "type": "string", + "description": "Output only. Total size of the storage used by all backup resources.", + "description_kind": "plain", + "computed": true + }, + "uid": { + "type": "string", + "description": "Output only. Output only Immutable after resource creation until resource deletion.", + "description_kind": "plain", + "computed": true + }, + "update_time": { + "type": "string", + "description": "Output only. 
The time when the instance was updated.", + "description_kind": "plain", + "computed": true } }, "block_types": { - "principal_info": { - "nesting_mode": "list", - "block": { - "block_types": { - "service_account": { - "nesting_mode": "list", - "block": { - "attributes": { - "email": { - "type": "string", - "description": "Email address of the service account.", - "description_kind": "plain", - "required": true - } - }, - "description": "ServiceAccount represents a GCP service account.", - "description_kind": "plain" - }, - "min_items": 1, - "max_items": 1 - } - }, - "description": "Principal information about the Identity of the AppConnector.", - "description_kind": "plain" - }, - "min_items": 1, - "max_items": 1 - }, + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, + "google_beyondcorp_app_connection": { + "version": 0, + "block": { + "attributes": { + "connectors": { + "type": [ + "list", + "string" + ], + "description": "List of AppConnectors that are authorised to be associated with this AppConnection", + "description_kind": "plain", + "optional": true + }, + "display_name": { + "type": "string", + "description": "An arbitrary user-provided name for the AppConnection.", + "description_kind": "plain", + "optional": true + }, + "effective_labels": { + "type": [ + "map", + "string" + ], + "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + 
"computed": true + }, + "labels": { + "type": [ + "map", + "string" + ], + "description": "Resource labels to represent user provided metadata.\n\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", + "description_kind": "plain", + "optional": true + }, + "name": { + "type": "string", + "description": "ID of the AppConnection.", + "description_kind": "plain", + "required": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "region": { + "type": "string", + "description": "The region of the AppConnection.", + "description_kind": "plain", + "optional": true + }, + "terraform_labels": { + "type": [ + "map", + "string" + ], + "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", + "description_kind": "plain", + "computed": true + }, + "type": { + "type": "string", + "description": "The type of network connectivity used by the AppConnection. 
Refer\nto https://cloud.google.com/beyondcorp/docs/reference/rest/v1/projects.locations.appConnections#type\nfor a list of possible values.", + "description_kind": "plain", + "optional": true + } + }, + "block_types": { + "application_endpoint": { + "nesting_mode": "list", + "block": { + "attributes": { + "host": { + "type": "string", + "description": "Hostname or IP address of the remote application endpoint.", + "description_kind": "plain", + "required": true + }, + "port": { + "type": "number", + "description": "Port of the remote application endpoint.", + "description_kind": "plain", + "required": true + } + }, + "description": "Address of the remote application endpoint for the BeyondCorp AppConnection.", + "description_kind": "plain" + }, + "min_items": 1, + "max_items": 1 + }, + "gateway": { + "nesting_mode": "list", + "block": { + "attributes": { + "app_gateway": { + "type": "string", + "description": "AppGateway name in following format: projects/{project_id}/locations/{locationId}/appgateways/{gateway_id}.", + "description_kind": "plain", + "required": true + }, + "ingress_port": { + "type": "number", + "description": "Ingress port reserved on the gateways for this AppConnection, if not specified or zero, the default port is 19443.", + "description_kind": "plain", + "computed": true + }, + "type": { + "type": "string", + "description": "The type of hosting used by the gateway. 
Refer to\nhttps://cloud.google.com/beyondcorp/docs/reference/rest/v1/projects.locations.appConnections#Type_1\nfor a list of possible values.", + "description_kind": "plain", + "optional": true + }, + "uri": { + "type": "string", + "description": "Server-defined URI for this resource.", + "description_kind": "plain", + "computed": true + } + }, + "description": "Gateway used by the AppConnection.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, + "google_beyondcorp_app_connector": { + "version": 0, + "block": { + "attributes": { + "display_name": { + "type": "string", + "description": "An arbitrary user-provided name for the AppConnector.", + "description_kind": "plain", + "optional": true + }, + "effective_labels": { + "type": [ + "map", + "string" + ], + "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "labels": { + "type": [ + "map", + "string" + ], + "description": "Resource labels to represent user provided metadata.\n\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", + "description_kind": "plain", + "optional": true + }, + "name": { + "type": "string", + "description": "ID of the AppConnector.", + 
"description_kind": "plain", + "required": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "region": { + "type": "string", + "description": "The region of the AppConnector.", + "description_kind": "plain", + "optional": true + }, + "state": { + "type": "string", + "description": "Represents the different states of a AppConnector.", + "description_kind": "plain", + "computed": true + }, + "terraform_labels": { + "type": [ + "map", + "string" + ], + "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", + "description_kind": "plain", + "computed": true + } + }, + "block_types": { + "principal_info": { + "nesting_mode": "list", + "block": { + "block_types": { + "service_account": { + "nesting_mode": "list", + "block": { + "attributes": { + "email": { + "type": "string", + "description": "Email address of the service account.", + "description_kind": "plain", + "required": true + } + }, + "description": "ServiceAccount represents a GCP service account.", + "description_kind": "plain" + }, + "min_items": 1, + "max_items": 1 + } + }, + "description": "Principal information about the Identity of the AppConnector.", + "description_kind": "plain" + }, + "min_items": 1, + "max_items": 1 + }, "timeouts": { "nesting_mode": "single", "block": { @@ -12241,6 +12960,137 @@ "description_kind": "plain" } }, + "google_beyondcorp_security_gateway": { + "version": 0, + "block": { + "attributes": { + "create_time": { + "type": "string", + "description": "Output only. Timestamp when the resource was created.", + "description_kind": "plain", + "computed": true + }, + "display_name": { + "type": "string", + "description": "Optional. 
An arbitrary user-provided name for the SecurityGateway.\nCannot exceed 64 characters.", + "description_kind": "plain", + "optional": true + }, + "external_ips": { + "type": [ + "list", + "string" + ], + "description": "Output only. IP addresses that will be used for establishing\nconnection to the endpoints.", + "description_kind": "plain", + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "location": { + "type": "string", + "description": "Resource ID segment making up resource 'name'. It identifies the resource within its parent collection as described in https://google.aip.dev/122.", + "description_kind": "plain", + "required": true + }, + "name": { + "type": "string", + "description": "Identifier. Name of the resource.", + "description_kind": "plain", + "computed": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "security_gateway_id": { + "type": "string", + "description": "Optional. User-settable SecurityGateway resource ID.\n* Must start with a letter.\n* Must contain between 4-63 characters from '/a-z-/'.\n* Must end with a number or letter.", + "description_kind": "plain", + "required": true + }, + "state": { + "type": "string", + "description": "Output only. The operational state of the SecurityGateway.\nPossible values:\nSTATE_UNSPECIFIED\nCREATING\nUPDATING\nDELETING\nRUNNING\nDOWN\nERROR", + "description_kind": "plain", + "computed": true + }, + "update_time": { + "type": "string", + "description": "Output only. 
Timestamp when the resource was last modified.", + "description_kind": "plain", + "computed": true + } + }, + "block_types": { + "hubs": { + "nesting_mode": "set", + "block": { + "attributes": { + "region": { + "type": "string", + "description_kind": "plain", + "required": true + } + }, + "block_types": { + "internet_gateway": { + "nesting_mode": "list", + "block": { + "attributes": { + "assigned_ips": { + "type": [ + "list", + "string" + ], + "description": "Output only. List of IP addresses assigned to the Cloud NAT.", + "description_kind": "plain", + "computed": true + } + }, + "description": "Internet Gateway configuration.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "Optional. Map of Hubs that represents regional data path deployment with GCP region\nas a key.", + "description_kind": "plain" + } + }, + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, "google_biglake_catalog": { "version": 0, "block": { @@ -12638,6 +13488,32 @@ } }, "block_types": { + "sharing_environment_config": { + "nesting_mode": "list", + "block": { + "block_types": { + "dcr_exchange_config": { + "nesting_mode": "list", + "block": { + "description": "Data Clean Room (DCR), used for privacy-safe and secured data sharing.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "default_exchange_config": { + "nesting_mode": "list", + "block": { + "description": "Default Analytics Hub data exchange, used for secured data sharing.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "Configurable data sharing environment option for a data exchange.\nThis 
field is required for data clean room exchanges.", + "description_kind": "plain" + }, + "max_items": 1 + }, "timeouts": { "nesting_mode": "single", "block": { @@ -12951,6 +13827,23 @@ "required": true } }, + "block_types": { + "selected_resources": { + "nesting_mode": "list", + "block": { + "attributes": { + "table": { + "type": "string", + "description": "Format: For table: projects/{projectId}/datasets/{datasetId}/tables/{tableId} Example:\"projects/test_project/datasets/test_dataset/tables/test_table\"", + "description_kind": "plain", + "optional": true + } + }, + "description": "Resource in this dataset that is selectively shared. This field is required for data clean room exchanges.", + "description_kind": "plain" + } + } + }, "description": "Shared dataset i.e. BigQuery dataset source.", "description_kind": "plain" }, @@ -13011,6 +13904,12 @@ "description_kind": "plain", "optional": true }, + "restrict_direct_table_access": { + "type": "bool", + "description": "If true, restrict direct table access(read api/tabledata.list) on linked table.", + "description_kind": "plain", + "computed": true + }, "restrict_query_result": { "type": "bool", "description": "If true, restrict export of query result derived from restricted linked dataset table.", @@ -14097,6 +14996,22 @@ }, "max_items": 1 }, + "encryption_configuration": { + "nesting_mode": "list", + "block": { + "attributes": { + "kms_key_name": { + "type": "string", + "description": "The name of the KMS key used for encrypting BigQuery data.", + "description_kind": "plain", + "required": true + } + }, + "description": "Represents the encryption configuration for a transfer.", + "description_kind": "plain" + }, + "max_items": 1 + }, "schedule_options": { "nesting_mode": "list", "block": { @@ -14643,6 +15558,40 @@ } }, "block_types": { + "condition": { + "nesting_mode": "list", + "block": { + "attributes": { + "description": { + "type": "string", + "description": "Description of the expression. 
This is a longer text which describes the expression,\ne.g. when hovered over it in a UI.", + "description_kind": "plain", + "optional": true + }, + "expression": { + "type": "string", + "description": "Textual representation of an expression in Common Expression Language syntax.", + "description_kind": "plain", + "required": true + }, + "location": { + "type": "string", + "description": "String indicating the location of the expression for error reporting, e.g. a file\nname and a position in the file.", + "description_kind": "plain", + "optional": true + }, + "title": { + "type": "string", + "description": "Title for the expression, i.e. a short string describing its purpose.\nThis can be used e.g. in UIs which allow to enter the expression.", + "description_kind": "plain", + "optional": true + } + }, + "description": "Condition for the binding. If CEL expression in this field is true, this\naccess binding will be considered.", + "description_kind": "plain" + }, + "max_items": 1 + }, "dataset": { "nesting_mode": "list", "block": { @@ -14879,6 +15828,40 @@ } }, "block_types": { + "condition": { + "nesting_mode": "list", + "block": { + "attributes": { + "description": { + "type": "string", + "description": "Description of the expression. This is a longer text which describes the expression,\ne.g. when hovered over it in a UI.", + "description_kind": "plain", + "optional": true + }, + "expression": { + "type": "string", + "description": "Textual representation of an expression in Common Expression Language syntax.", + "description_kind": "plain", + "required": true + }, + "location": { + "type": "string", + "description": "String indicating the location of the expression for error reporting, e.g. a file\nname and a position in the file.", + "description_kind": "plain", + "optional": true + }, + "title": { + "type": "string", + "description": "Title for the expression, i.e. a short string describing its purpose.\nThis can be used e.g. 
in UIs which allow to enter the expression.", + "description_kind": "plain", + "optional": true + } + }, + "description": "Condition for the binding. If CEL expression in this field is true, this\naccess binding will be considered.", + "description_kind": "plain" + }, + "max_items": 1 + }, "dataset": { "nesting_mode": "list", "block": { @@ -16000,13 +16983,6 @@ "description_kind": "plain", "optional": true }, - "multi_region_auxiliary": { - "type": "bool", - "description": "Applicable only for reservations located within one of the BigQuery multi-regions (US or EU).\nIf set to true, this reservation is placed in the organization's secondary region which is designated for disaster recovery purposes. If false, this reservation is placed in the organization's default region.", - "description_kind": "plain", - "deprecated": true, - "optional": true - }, "name": { "type": "string", "description": "The name of the reservation. This field must only contain alphanumeric characters or dash.", @@ -16444,13 +17420,6 @@ "version": 0, "block": { "attributes": { - "allow_resource_tags_on_deletion": { - "type": "bool", - "description": "**Deprecated** Whether or not to allow table deletion when there are still resource tags attached.", - "description_kind": "plain", - "deprecated": true, - "optional": true - }, "clustering": { "type": [ "list", @@ -16621,6 +17590,40 @@ } }, "block_types": { + "biglake_configuration": { + "nesting_mode": "list", + "block": { + "attributes": { + "connection_id": { + "type": "string", + "description": "The connection specifying the credentials to be used to read and write to external storage, such as Cloud Storage. 
The connection_id can have the form \"<project\\_id>.<location\\_id>.<connection\\_id>\" or \"projects/<project\\_id>/locations/<location\\_id>/connections/<connection\\_id>\".", + "description_kind": "plain", + "required": true + }, + "file_format": { + "type": "string", + "description": "The file format the data is stored in.", + "description_kind": "plain", + "required": true + }, + "storage_uri": { + "type": "string", + "description": "The fully qualified location prefix of the external folder where table data is stored. The '*' wildcard character is not allowed. The URI should be in the format \"gs://bucket/path_to_table/\"", + "description_kind": "plain", + "required": true + }, + "table_format": { + "type": "string", + "description": "The table format the metadata only snapshots are stored in.", + "description_kind": "plain", + "required": true + } + }, + "description": "Specifies the configuration of a BigLake managed table.", + "description_kind": "plain" + }, + "max_items": 1 + }, "encryption_configuration": { "nesting_mode": "list", "block": { @@ -16661,7 +17664,7 @@ }, "connection_id": { "type": "string", - "description": "The connection specifying the credentials to be used to read external storage, such as Azure Blob, Cloud Storage, or S3. The connectionId can have the form \"{{project}}.{{location}}.{{connection_id}}\" or \"projects/{{project}}/locations/{{location}}/connections/{{connection_id}}\".", + "description": "The connection specifying the credentials to be used to read external storage, such as Azure Blob, Cloud Storage, or S3. The connectionId can have the form \"..\" or \"projects//locations//connections/\".", "description_kind": "plain", "optional": true }, @@ -17517,6 +18520,12 @@ "description_kind": "plain", "optional": true, "computed": true + }, + "row_affinity": { + "type": "bool", + "description": "Must be used with multi-cluster routing. If true, then this app profile will use row affinity sticky routing. 
With row affinity, Bigtable will route single row key requests based on the row key, rather than randomly. Instead, each row key will be assigned to a cluster by Cloud Bigtable, and will stick to that cluster. Choosing this option improves read-your-writes consistency for most requests under most circumstances, without sacrificing availability. Consistency is not guaranteed, as requests may still fail over between clusters in the event of errors or latency.", + "description_kind": "plain", + "optional": true } }, "block_types": { @@ -17850,7 +18859,7 @@ "attributes": { "deletion_protection": { "type": "bool", - "description": " When the field is set to true or unset in Terraform state, a terraform apply or terraform destroy that would delete the instance will fail. When the field is set to false, deleting the instance is allowed.", + "description": "When the field is set to true or unset in Terraform state, a terraform apply or terraform destroy that would delete the instance will fail. When the field is set to false, deleting the instance is allowed.", "description_kind": "plain", "optional": true }, @@ -18291,6 +19300,12 @@ "description": "The name of the column family.", "description_kind": "plain", "required": true + }, + "type": { + "type": "string", + "description": "The type of the column family.", + "description_kind": "plain", + "optional": true } }, "description": "A group of columns within a table which share a common configuration. This can be specified multiple times.", @@ -18798,7 +19813,7 @@ "list", "string" ], - "description": "Optional. If creditTypesTreatment is INCLUDE_SPECIFIED_CREDITS,\nthis is a list of credit types to be subtracted from gross cost to determine the spend for threshold calculations. 
See a list of acceptable credit type values.\nIf creditTypesTreatment is not INCLUDE_SPECIFIED_CREDITS, this field must be empty.\n\n**Note:** If the field has a value in the config and needs to be removed, the field has to be an emtpy array in the config.", + "description": "Optional. If creditTypesTreatment is INCLUDE_SPECIFIED_CREDITS,\nthis is a list of credit types to be subtracted from gross cost to determine the spend for threshold calculations. See a list of acceptable credit type values.\nIf creditTypesTreatment is not INCLUDE_SPECIFIED_CREDITS, this field must be empty.\n\n**Note:** If the field has a value in the config and needs to be removed, the field has to be an empty array in the config.", "description_kind": "plain", "optional": true, "computed": true @@ -18852,7 +19867,7 @@ "list", "string" ], - "description": "A set of subaccounts of the form billingAccounts/{account_id},\nspecifying that usage from only this set of subaccounts should\nbe included in the budget. If a subaccount is set to the name of\nthe parent account, usage from the parent account will be included.\nIf the field is omitted, the report will include usage from the parent\naccount and all subaccounts, if they exist.\n\n**Note:** If the field has a value in the config and needs to be removed, the field has to be an emtpy array in the config.", + "description": "A set of subaccounts of the form billingAccounts/{account_id},\nspecifying that usage from only this set of subaccounts should\nbe included in the budget. 
If a subaccount is set to the name of\nthe parent account, usage from the parent account will be included.\nIf the field is omitted, the report will include usage from the parent\naccount and all subaccounts, if they exist.\n\n**Note:** If the field has a value in the config and needs to be removed, the field has to be an empty array in the config.", "description_kind": "plain", "optional": true, "computed": true @@ -19802,6 +20817,15 @@ "optional": true, "computed": true }, + "san_dnsnames": { + "type": [ + "list", + "string" + ], + "description": "The list of Subject Alternative Names of dnsName type defined in the certificate (see RFC 5280 4.2.1.6)", + "description_kind": "plain", + "computed": true + }, "scope": { "type": "string", "description": "The scope of the certificate.\n\nDEFAULT: Certificates with default scope are served from core Google data centers.\nIf unsure, choose this option.\n\nEDGE_CACHE: Certificates with scope EDGE_CACHE are special-purposed certificates, served from Edge Points of Presence.\nSee https://cloud.google.com/vpc/docs/edge-locations.\n\nALL_REGIONS: Certificates with ALL_REGIONS scope are served from all GCP regions (You can only use ALL_REGIONS with global certs).\nSee https://cloud.google.com/compute/docs/regions-zones", @@ -22599,6 +23623,88 @@ } }, "block_types": { + "csi": { + "nesting_mode": "list", + "block": { + "attributes": { + "driver": { + "type": "string", + "description": "Unique name representing the type of file system to be created. Cloud Run supports the following values:\n * gcsfuse.run.googleapis.com: Mount a Google Cloud Storage bucket using GCSFuse. 
This driver requires the\n run.googleapis.com/execution-environment annotation to be unset or set to \"gen2\"", + "description_kind": "plain", + "required": true + }, + "read_only": { + "type": "bool", + "description": "If true, all mounts created from this volume will be read-only.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "volume_attributes": { + "type": [ + "map", + "string" + ], + "description": "Driver-specific attributes. The following options are supported for available drivers:\n * gcsfuse.run.googleapis.com\n * bucketName: The name of the Cloud Storage Bucket that backs this volume. The Cloud Run Service identity must have access to this bucket.", + "description_kind": "plain", + "optional": true + } + }, + "description": "A filesystem specified by the Container Storage Interface (CSI).", + "description_kind": "plain" + }, + "max_items": 1 + }, + "empty_dir": { + "nesting_mode": "list", + "block": { + "attributes": { + "medium": { + "type": "string", + "description": "The medium on which the data is stored. The default is \"\" which means to use the node's default medium. Must be an empty string (default) or Memory.", + "description_kind": "plain", + "optional": true + }, + "size_limit": { + "type": "string", + "description": "Limit on the storage usable by this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. This field's values are of the 'Quantity' k8s type: https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/quantity/. The default is nil which means that the limit is undefined. 
More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir.", + "description_kind": "plain", + "optional": true + } + }, + "description": "Ephemeral storage which can be backed by real disks (HD, SSD), network storage or memory (i.e. tmpfs). For now only in memory (tmpfs) is supported. It is ephemeral in the sense that when the sandbox is taken down, the data is destroyed with it (it does not persist across sandbox runs).", + "description_kind": "plain" + }, + "max_items": 1 + }, + "nfs": { + "nesting_mode": "list", + "block": { + "attributes": { + "path": { + "type": "string", + "description": "Path exported by the NFS server", + "description_kind": "plain", + "required": true + }, + "read_only": { + "type": "bool", + "description": "If true, mount the NFS volume as read only in all mounts. Defaults to false.", + "description_kind": "plain", + "optional": true + }, + "server": { + "type": "string", + "description": "IP address or hostname of the NFS server", + "description_kind": "plain", + "required": true + } + }, + "description": "A filesystem backed by a Network File System share. This filesystem requires the\nrun.googleapis.com/execution-environment annotation to be unset or set to \"gen2\"", + "description_kind": "plain" + }, + "max_items": 1 + }, "secret": { "nesting_mode": "list", "block": { @@ -22985,6 +24091,12 @@ "description_kind": "plain", "computed": true }, + "deletion_protection": { + "type": "bool", + "description": "Whether Terraform will be prevented from destroying the job. 
Defaults to true.\nWhen a'terraform destroy' or 'terraform apply' would delete the job,\nthe command will fail if this field is not set to false in Terraform state.\nWhen the field is set to true or unset in Terraform state, a 'terraform apply'\nor 'terraform destroy' that would delete the job will fail.\nWhen the field is set to false, deleting the job is allowed.", + "description_kind": "plain", + "optional": true + }, "effective_annotations": { "type": [ "map", @@ -23017,7 +24129,7 @@ }, "expire_time": { "type": "string", - "description": "For a deleted resource, the time after which it will be permamently deleted.", + "description": "For a deleted resource, the time after which it will be permanently deleted.", "description_kind": "plain", "computed": true }, @@ -23293,7 +24405,7 @@ }, "block_types": { "env": { - "nesting_mode": "list", + "nesting_mode": "set", "block": { "attributes": { "name": { @@ -23445,6 +24557,78 @@ }, "max_items": 1 }, + "empty_dir": { + "nesting_mode": "list", + "block": { + "attributes": { + "medium": { + "type": "string", + "description": "The different types of medium supported for EmptyDir. Default value: \"MEMORY\" Possible values: [\"MEMORY\"]", + "description_kind": "plain", + "optional": true + }, + "size_limit": { + "type": "string", + "description": "Limit on the storage usable by this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. This field's values are of the 'Quantity' k8s type: https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/quantity/. The default is nil which means that the limit is undefined. 
More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir.", + "description_kind": "plain", + "optional": true + } + }, + "description": "Ephemeral storage used as a shared volume.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "gcs": { + "nesting_mode": "list", + "block": { + "attributes": { + "bucket": { + "type": "string", + "description": "Name of the cloud storage bucket to back the volume. The resource service account must have permission to access the bucket.", + "description_kind": "plain", + "required": true + }, + "read_only": { + "type": "bool", + "description": "If true, mount this volume as read-only in all mounts. If false, mount this volume as read-write.", + "description_kind": "plain", + "optional": true + } + }, + "description": "Cloud Storage bucket mounted as a volume using GCSFuse.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "nfs": { + "nesting_mode": "list", + "block": { + "attributes": { + "path": { + "type": "string", + "description": "Path that is exported by the NFS server.", + "description_kind": "plain", + "optional": true + }, + "read_only": { + "type": "bool", + "description": "If true, mount this volume as read-only in all mounts.", + "description_kind": "plain", + "optional": true + }, + "server": { + "type": "string", + "description": "Hostname or IP address of the NFS server.", + "description_kind": "plain", + "required": true + } + }, + "description": "NFS share mounted as a volume.", + "description_kind": "plain" + }, + "max_items": 1 + }, "secret": { "nesting_mode": "list", "block": { @@ -23860,6 +25044,12 @@ "description_kind": "plain", "computed": true }, + "deletion_protection": { + "type": "bool", + "description": "Whether Terraform will be prevented from destroying the service. 
Defaults to true.\nWhen a'terraform destroy' or 'terraform apply' would delete the service,\nthe command will fail if this field is not set to false in Terraform state.\nWhen the field is set to true or unset in Terraform state, a 'terraform apply'\nor 'terraform destroy' that would delete the service will fail.\nWhen the field is set to false, deleting the service is allowed.", + "description_kind": "plain", + "optional": true + }, "description": { "type": "string", "description": "User-provided description of the Service. This field currently has a 512-character limit.", @@ -23892,7 +25082,7 @@ }, "expire_time": { "type": "string", - "description": "For a deleted resource, the time after which it will be permamently deleted.", + "description": "For a deleted resource, the time after which it will be permanently deleted.", "description_kind": "plain", "computed": true }, @@ -23915,6 +25105,12 @@ "optional": true, "computed": true }, + "invoker_iam_disabled": { + "type": "bool", + "description": "Disables IAM permission check for run.routes.invoke for callers of this service. This feature is available by invitation only. 
For more information, visit https://cloud.google.com/run/docs/securing/managing-access#invoker_check.", + "description_kind": "plain", + "optional": true + }, "labels": { "type": [ "map", @@ -24044,6 +25240,15 @@ "description": "The main URI in which this Service is serving traffic.", "description_kind": "plain", "computed": true + }, + "urls": { + "type": [ + "list", + "string" + ], + "description": "All URLs serving traffic for this Service.", + "description_kind": "plain", + "computed": true } }, "block_types": { @@ -24075,6 +25280,22 @@ }, "max_items": 1 }, + "scaling": { + "nesting_mode": "list", + "block": { + "attributes": { + "min_instance_count": { + "type": "number", + "description": "Minimum number of instances for the service, to be divided among all revisions receiving traffic.", + "description_kind": "plain", + "optional": true + } + }, + "description": "Scaling settings that apply to the whole service", + "description_kind": "plain" + }, + "max_items": 1 + }, "template": { "nesting_mode": "list", "block": { @@ -24196,7 +25417,7 @@ }, "block_types": { "env": { - "nesting_mode": "list", + "nesting_mode": "set", "block": { "attributes": { "name": { @@ -24409,7 +25630,7 @@ "map", "string" ], - "description": "Only memory and CPU are supported. Use key 'cpu' for CPU limit and 'memory' for memory limit. Note: The only supported values for CPU are '1', '2', '4', and '8'. Setting 4 CPU requires at least 2Gi of memory. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go", + "description": "Only memory, CPU, and nvidia.com/gpu are supported. Use key 'cpu' for CPU limit, 'memory' for memory limit, 'nvidia.com/gpu' for gpu limit. Note: The only supported values for CPU are '1', '2', '4', and '8'. Setting 4 CPU requires at least 2Gi of memory. 
The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go", "description_kind": "plain", "optional": true, "computed": true @@ -24580,13 +25801,13 @@ "attributes": { "max_instance_count": { "type": "number", - "description": "Maximum number of serving instances that this resource should have.", + "description": "Maximum number of serving instances that this resource should have. Must not be less than minimum instance count. If absent, Cloud Run will calculate\na default value based on the project's available container instances quota in the region and specified instance size.", "description_kind": "plain", "optional": true }, "min_instance_count": { "type": "number", - "description": "Minimum number of serving instances that this resource should have.", + "description": "Minimum number of serving instances that this resource should have. Defaults to 0. Must not be greater than maximum instance count.", "description_kind": "plain", "optional": true } @@ -24627,6 +25848,28 @@ }, "max_items": 1 }, + "empty_dir": { + "nesting_mode": "list", + "block": { + "attributes": { + "medium": { + "type": "string", + "description": "The different types of medium supported for EmptyDir. Default value: \"MEMORY\" Possible values: [\"MEMORY\"]", + "description_kind": "plain", + "optional": true + }, + "size_limit": { + "type": "string", + "description": "Limit on the storage usable by this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. This field's values are of the 'Quantity' k8s type: https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/quantity/. The default is nil which means that the limit is undefined. 
More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir.", + "description_kind": "plain", + "optional": true + } + }, + "description": "Ephemeral storage used as a shared volume.", + "description_kind": "plain" + }, + "max_items": 1 + }, "gcs": { "nesting_mode": "list", "block": { @@ -24644,7 +25887,7 @@ "optional": true } }, - "description": "Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment and requires launch-stage to be set to ALPHA or BETA.", + "description": "Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment.", "description_kind": "plain" }, "max_items": 1 @@ -25431,6 +26674,175 @@ }, "max_items": 1 }, + "http_target": { + "nesting_mode": "list", + "block": { + "attributes": { + "http_method": { + "type": "string", + "description": "The HTTP method to use for the request.\n\nWhen specified, it overrides HttpRequest for the task.\nNote that if the value is set to GET the body of the task will be ignored at execution time. 
Possible values: [\"HTTP_METHOD_UNSPECIFIED\", \"POST\", \"GET\", \"HEAD\", \"PUT\", \"DELETE\", \"PATCH\", \"OPTIONS\"]", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "block_types": { + "header_overrides": { + "nesting_mode": "list", + "block": { + "block_types": { + "header": { + "nesting_mode": "list", + "block": { + "attributes": { + "key": { + "type": "string", + "description": "The Key of the header.", + "description_kind": "plain", + "required": true + }, + "value": { + "type": "string", + "description": "The Value of the header.", + "description_kind": "plain", + "required": true + } + }, + "description": "Header embodying a key and a value.", + "description_kind": "plain" + }, + "min_items": 1, + "max_items": 1 + } + }, + "description": "HTTP target headers.\n\nThis map contains the header field names and values.\nHeaders will be set when running the CreateTask and/or BufferTask.\n\nThese headers represent a subset of the headers that will be configured for the task's HTTP request.\nSome HTTP request headers will be ignored or replaced.\n\nHeaders which can have multiple values (according to RFC2616) can be specified using comma-separated values.\n\nThe size of the headers must be less than 80KB. 
Queue-level headers to override headers of all the tasks in the queue.", + "description_kind": "plain" + } + }, + "oauth_token": { + "nesting_mode": "list", + "block": { + "attributes": { + "scope": { + "type": "string", + "description": "OAuth scope to be used for generating OAuth access token.\nIf not specified, \"https://www.googleapis.com/auth/cloud-platform\" will be used.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "service_account_email": { + "type": "string", + "description": "Service account email to be used for generating OAuth token.\nThe service account must be within the same project as the queue.\nThe caller must have iam.serviceAccounts.actAs permission for the service account.", + "description_kind": "plain", + "required": true + } + }, + "description": "If specified, an OAuth token is generated and attached as the Authorization header in the HTTP request.\n\nThis type of authorization should generally be used only when calling Google APIs hosted on *.googleapis.com.\nNote that both the service account email and the scope MUST be specified when using the queue-level authorization override.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "oidc_token": { + "nesting_mode": "list", + "block": { + "attributes": { + "audience": { + "type": "string", + "description": "Audience to be used when generating OIDC token. 
If not specified, the URI specified in target will be used.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "service_account_email": { + "type": "string", + "description": "Service account email to be used for generating OIDC token.\nThe service account must be within the same project as the queue.\nThe caller must have iam.serviceAccounts.actAs permission for the service account.", + "description_kind": "plain", + "required": true + } + }, + "description": "If specified, an OIDC token is generated and attached as an Authorization header in the HTTP request.\n\nThis type of authorization can be used for many scenarios, including calling Cloud Run, or endpoints where you intend to validate the token yourself.\nNote that both the service account email and the audience MUST be specified when using the queue-level authorization override.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "uri_override": { + "nesting_mode": "list", + "block": { + "attributes": { + "host": { + "type": "string", + "description": "Host override.\n\nWhen specified, replaces the host part of the task URL.\nFor example, if the task URL is \"https://www.google.com\", and host value\nis set to \"example.net\", the overridden URI will be changed to \"https://example.net\".\nHost value cannot be an empty string (INVALID_ARGUMENT).", + "description_kind": "plain", + "optional": true + }, + "port": { + "type": "string", + "description": "Port override.\n\nWhen specified, replaces the port part of the task URI.\nFor instance, for a URI http://www.google.com/foo and port=123, the overridden URI becomes http://www.google.com:123/foo.\nNote that the port value must be a positive integer.\nSetting the port to 0 (Zero) clears the URI port.", + "description_kind": "plain", + "optional": true + }, + "scheme": { + "type": "string", + "description": "Scheme override.\n\nWhen specified, the task URI scheme is replaced by the provided value (HTTP or HTTPS). 
Possible values: [\"HTTP\", \"HTTPS\"]", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "uri_override_enforce_mode": { + "type": "string", + "description": "URI Override Enforce Mode\n\nWhen specified, determines the Target UriOverride mode. If not specified, it defaults to ALWAYS. Possible values: [\"ALWAYS\", \"IF_NOT_EXISTS\"]", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "block_types": { + "path_override": { + "nesting_mode": "list", + "block": { + "attributes": { + "path": { + "type": "string", + "description": "The URI path (e.g., /users/1234). Default is an empty string.", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "description": "URI path.\n\nWhen specified, replaces the existing path of the task URL.\nSetting the path value to an empty string clears the URI path segment.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "query_override": { + "nesting_mode": "list", + "block": { + "attributes": { + "query_params": { + "type": "string", + "description": "The query parameters (e.g., qparam1=123&qparam2=456). Default is an empty string.", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "description": "URI query.\n\nWhen specified, replaces the query part of the task URI. 
Setting the query value to an empty string clears the URI query segment.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "URI override.\n\nWhen specified, overrides the execution URI for all the tasks in the queue.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "Modifies HTTP target for HTTP tasks.", + "description_kind": "plain" + }, + "max_items": 1 + }, "rate_limits": { "nesting_mode": "list", "block": { @@ -27195,6 +28607,28 @@ }, "max_items": 1 }, + "private_service_connect": { + "nesting_mode": "list", + "block": { + "attributes": { + "network_attachment": { + "type": "string", + "description": "Required. Immutable. The network attachment that the worker network interface is connected to. Must be in the format `projects/{project}/regions/{region}/networkAttachments/{networkAttachment}`. The region of network attachment must be the same as the worker pool. See [Network Attachments](https://cloud.google.com/vpc/docs/about-network-attachments)", + "description_kind": "plain", + "required": true + }, + "route_all_traffic": { + "type": "bool", + "description": "Immutable. Route all traffic through PSC interface. Enable this if you want full control of traffic in the private pool. Configure Cloud NAT for the subnet of network attachment if you need to access public Internet. If false, Only route private IPs, e.g. 10.0.0.0/8, 172.16.0.0/12, and 192.168.0.0/16 through PSC interface.", + "description_kind": "plain", + "optional": true + } + }, + "description": "Private Service Connect configuration for the pool.", + "description_kind": "plain" + }, + "max_items": 1 + }, "timeouts": { "nesting_mode": "single", "block": { @@ -29211,6 +30645,33 @@ "optional": true } }, + "block_types": { + "route_destinations": { + "nesting_mode": "list", + "block": { + "attributes": { + "destination_ids": { + "type": [ + "list", + "string" + ], + "description": "Required. 
The clusters where the Gateway API HTTPRoute resource will be deployed to. Valid entries include the associated entities IDs configured in the Target resource and \"@self\" to include the Target cluster.", + "description_kind": "plain", + "required": true + }, + "propagate_service": { + "type": "bool", + "description": "Optional. Whether to propagate the Kubernetes Service to the route destination clusters. The Service will always be deployed to the Target cluster even if the HTTPRoute is not. This option may be used to facilitiate successful DNS lookup in the route destination clusters. Can only be set to true if destinations are specified.", + "description_kind": "plain", + "optional": true + } + }, + "description": "Optional. Route destinations allow configuring the Gateway API HTTPRoute to be deployed to additional clusters. This option is available for multi-cluster service mesh set ups that require the route to exist in the clusters that call the service. If unspecified, the HTTPRoute will only be deployed to the Target cluster.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, "description": "Kubernetes Gateway API service mesh configuration.", "description_kind": "plain" }, @@ -29699,6 +31160,65 @@ }, "max_items": 1 }, + "associated_entities": { + "nesting_mode": "set", + "block": { + "attributes": { + "entity_id": { + "type": "string", + "description": "The name for the key in the map for which this object is mapped to in the API", + "description_kind": "plain", + "required": true + } + }, + "block_types": { + "anthos_clusters": { + "nesting_mode": "list", + "block": { + "attributes": { + "membership": { + "type": "string", + "description": "Optional. Membership of the GKE Hub-registered cluster to which to apply the Skaffold configuration. Format is `projects/{project}/locations/{location}/memberships/{membership_name}`.", + "description_kind": "plain", + "optional": true + } + }, + "description": "Optional. 
Information specifying Anthos clusters as associated entities.", + "description_kind": "plain" + } + }, + "gke_clusters": { + "nesting_mode": "list", + "block": { + "attributes": { + "cluster": { + "type": "string", + "description": "Optional. Information specifying a GKE Cluster. Format is `projects/{project_id}/locations/{location_id}/clusters/{cluster_id}`.", + "description_kind": "plain", + "optional": true + }, + "internal_ip": { + "type": "bool", + "description": "Optional. If true, `cluster` is accessed using the private IP address of the control plane endpoint. Otherwise, the default IP address of the control plane endpoint is used. The default IP address is the private IP address for clusters with private control-plane endpoints and the public IP address otherwise. Only specify this option when `cluster` is a [private GKE cluster](https://cloud.google.com/kubernetes-engine/docs/concepts/private-cluster-concept).", + "description_kind": "plain", + "optional": true + }, + "proxy_url": { + "type": "string", + "description": "Optional. If set, used to configure a [proxy](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/#proxy) to the Kubernetes server.", + "description_kind": "plain", + "optional": true + } + }, + "description": "Optional. Information specifying GKE clusters as associated entities.", + "description_kind": "plain" + } + } + }, + "description": "Optional. Map of entity IDs to their associated entities. Associated entities allows specifying places other than the deployment target for specific features. For example, the Gateway API canary can be configured to deploy the HTTPRoute to a different cluster(s) than the deployment cluster using associated entities. An entity ID must consist of lower-case letters, numbers, and hyphens, start with a letter and end with a letter or a number, and have a max length of 63 characters. 
In other words, it must match the following regex: `^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$`.", + "description_kind": "plain" + } + }, "custom_target": { "nesting_mode": "list", "block": { @@ -31098,7 +32618,7 @@ }, "project_id": { "type": "string", - "description": "Project identifier (preferrably project number but can also be the project ID) of the project that contains the secret. If not set, it will be populated with the function's project assuming that the secret exists in the same project as of the function.", + "description": "Project identifier (preferably project number but can also be the project ID) of the project that contains the secret. If not set, it will be populated with the function's project assuming that the secret exists in the same project as of the function.", "description_kind": "plain", "required": true }, @@ -31131,7 +32651,7 @@ }, "project_id": { "type": "string", - "description": "Project identifier (preferrably project number but can also be the project ID) of the project that contains the secret. If not set, it will be populated with the function's project assuming that the secret exists in the same project as of the function.", + "description": "Project identifier (preferably project number but can also be the project ID) of the project that contains the secret. If not set, it will be populated with the function's project assuming that the secret exists in the same project as of the function.", "description_kind": "plain", "required": true }, @@ -32054,6 +33574,20 @@ "description_kind": "plain", "computed": true }, + "enable_private_builds_only": { + "type": "bool", + "description": "Optional. If true, builds performed during operations that install Python packages have only private connectivity to Google services. If false, the builds also have access to the internet.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "enable_private_environment": { + "type": "bool", + "description": "Optional. 
If true, a private Composer environment will be created.", + "description_kind": "plain", + "optional": true, + "computed": true + }, "environment_size": { "type": "string", "description": "The size of the Cloud Composer environment. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer.", @@ -32087,6 +33621,29 @@ "nesting_mode": "list", "block": { "block_types": { + "airflow_metadata_retention_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "retention_days": { + "type": "number", + "description": "How many days data should be retained for. This field is supported for Cloud Composer environments in composer 3 and newer.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "retention_mode": { + "type": "string", + "description": "Whether database retention is enabled or not. This field is supported for Cloud Composer environments in composer 3 and newer.", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "description": "Optional. The configuration setting for database retention.", + "description_kind": "plain" + } + }, "task_logs_retention_config": { "nesting_mode": "list", "block": { @@ -32100,8 +33657,7 @@ }, "description": "Optional. The configuration setting for Task Logs.", "description_kind": "plain" - }, - "min_items": 1 + } } }, "description": "The configuration setting for Airflow data retention mechanism. This field is supported for Cloud Composer environments in versions composer-2.0.32-airflow-2.1.4. 
or newer", @@ -32218,6 +33774,20 @@ "nesting_mode": "list", "block": { "attributes": { + "composer_internal_ipv4_cidr_block": { + "type": "string", + "description": "IPv4 cidr range that will be used by Composer internal components.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "composer_network_attachment": { + "type": "string", + "description": "PSC (Private Service Connect) Network entry point. Customers can pre-create the Network Attachment and point Cloud Composer environment to use. It is possible to share network attachment among many environments, provided enough IP addresses are available.", + "description_kind": "plain", + "optional": true, + "computed": true + }, "disk_size_gb": { "type": "number", "description": "The disk size in GB used for node VMs. Minimum size is 20GB. If unspecified, defaults to 100GB. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.", @@ -32232,25 +33802,6 @@ "optional": true, "computed": true }, - "ip_allocation_policy": { - "type": [ - "list", - [ - "object", - { - "cluster_ipv4_cidr_block": "string", - "cluster_secondary_range_name": "string", - "services_ipv4_cidr_block": "string", - "services_secondary_range_name": "string", - "use_ip_aliases": "bool" - } - ] - ], - "description": "Configuration for controlling how IPs are allocated in the GKE cluster. Cannot be updated.", - "description_kind": "plain", - "optional": true, - "computed": true - }, "machine_type": { "type": "string", "description": "The Compute Engine machine type used for cluster instances, specified as a name or relative resource name. For example: \"projects/{project}/zones/{zone}/machineTypes/{machineType}\". Must belong to the enclosing environment's project and region/zone. 
This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.", @@ -32286,7 +33837,8 @@ "type": "string", "description": "The Compute Engine subnetwork to be used for machine communications, specified as a self-link, relative resource name (e.g. \"projects/{project}/regions/{region}/subnetworks/{subnetwork}\"), or by name. If subnetwork is provided, network must also be provided and the subnetwork must belong to the enclosing environment's project and region.", "description_kind": "plain", - "optional": true + "optional": true, + "computed": true }, "tags": { "type": [ @@ -32305,6 +33857,48 @@ "computed": true } }, + "block_types": { + "ip_allocation_policy": { + "nesting_mode": "list", + "block": { + "attributes": { + "cluster_ipv4_cidr_block": { + "type": "string", + "description": "The IP address range used to allocate IP addresses to pods in the cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when use_ip_aliases is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. /14) to have GKE choose a range with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) from the RFC-1918 private networks (e.g. 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16) to pick a specific range to use. Specify either cluster_secondary_range_name or cluster_ipv4_cidr_block but not both.", + "description_kind": "plain", + "optional": true + }, + "cluster_secondary_range_name": { + "type": "string", + "description": "The name of the cluster's secondary range used to allocate IP addresses to pods. Specify either cluster_secondary_range_name or cluster_ipv4_cidr_block but not both. 
For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when use_ip_aliases is true.", + "description_kind": "plain", + "optional": true + }, + "services_ipv4_cidr_block": { + "type": "string", + "description": "The IP address range used to allocate IP addresses in this cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when use_ip_aliases is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. /14) to have GKE choose a range with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) from the RFC-1918 private networks (e.g. 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16) to pick a specific range to use. Specify either services_secondary_range_name or services_ipv4_cidr_block but not both.", + "description_kind": "plain", + "optional": true + }, + "services_secondary_range_name": { + "type": "string", + "description": "The name of the services' secondary range used to allocate IP addresses to the cluster. Specify either services_secondary_range_name or services_ipv4_cidr_block but not both. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when use_ip_aliases is true.", + "description_kind": "plain", + "optional": true + }, + "use_ip_aliases": { + "type": "bool", + "description": "Whether or not to enable Alias IPs in the GKE cluster. If true, a VPC-native cluster is created. Defaults to true if the ip_allocation_policy block is present in config. This field is only supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. Environments in newer versions always use VPC-native GKE clusters.", + "description_kind": "plain", + "optional": true + } + }, + "description": "Configuration for controlling how IPs are allocated in the GKE cluster. 
Cannot be updated.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, "description": "The configuration used for the Kubernetes Engine cluster.", "description_kind": "plain" }, @@ -32470,6 +34064,31 @@ "description_kind": "plain", "optional": true, "computed": true + }, + "web_server_plugins_mode": { + "type": "string", + "description": "Should be either 'ENABLED' or 'DISABLED'. Defaults to 'ENABLED'. Used in Composer 3.", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "block_types": { + "cloud_data_lineage_integration": { + "nesting_mode": "list", + "block": { + "attributes": { + "enabled": { + "type": "bool", + "description": "Whether or not Cloud Data Lineage integration is enabled.", + "description_kind": "plain", + "required": true + } + }, + "description": "The configuration for Cloud Data Lineage integration. Supported for Cloud Composer environments in versions composer-2.1.2-airflow-*.*.* and newer", + "description_kind": "plain" + }, + "max_items": 1 } }, "description": "The configuration settings for software inside the environment.", @@ -32528,6 +34147,44 @@ "nesting_mode": "list", "block": { "block_types": { + "dag_processor": { + "nesting_mode": "list", + "block": { + "attributes": { + "count": { + "type": "number", + "description": "Number of DAG processors.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "cpu": { + "type": "number", + "description": "CPU request and limit for DAG processor.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "memory_gb": { + "type": "number", + "description": "Memory (GB) request and limit for DAG processor.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "storage_gb": { + "type": "number", + "description": "Storage (GB) request and limit for DAG processor.", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "description": "Configuration for resources 
used by DAG processor.", + "description_kind": "plain" + }, + "max_items": 1 + }, "scheduler": { "nesting_mode": "list", "block": { @@ -32725,152 +34382,300 @@ "description_kind": "plain" } }, - "google_compute_address": { + "google_composer_user_workloads_config_map": { "version": 0, "block": { "attributes": { - "address": { - "type": "string", - "description": "The static external IP address represented by this resource.\nThe IP address must be inside the specified subnetwork,\nif any. Set by the API if undefined.", - "description_kind": "plain", - "optional": true, - "computed": true - }, - "address_type": { - "type": "string", - "description": "The type of address to reserve.\nNote: if you set this argument's value as 'INTERNAL' you need to leave the 'network_tier' argument unset in that resource block. Default value: \"EXTERNAL\" Possible values: [\"INTERNAL\", \"EXTERNAL\"]", - "description_kind": "plain", - "optional": true - }, - "creation_timestamp": { - "type": "string", - "description": "Creation timestamp in RFC3339 text format.", - "description_kind": "plain", - "computed": true - }, - "description": { - "type": "string", - "description": "An optional description of this resource.", - "description_kind": "plain", - "optional": true - }, - "effective_labels": { + "data": { "type": [ "map", "string" ], - "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", - "description_kind": "plain", - "computed": true - }, - "id": { - "type": "string", - "description_kind": "plain", - "optional": true, - "computed": true - }, - "ip_version": { - "type": "string", - "description": "The IP Version that will be used by this address. The default value is 'IPV4'. 
Possible values: [\"IPV4\", \"IPV6\"]", + "description": "The \"data\" field of Kubernetes ConfigMap, organized in key-value pairs.\nFor details see: https://kubernetes.io/docs/concepts/configuration/configmap/", "description_kind": "plain", "optional": true }, - "ipv6_endpoint_type": { + "environment": { "type": "string", - "description": "The endpoint type of this address, which should be VM or NETLB. This is\nused for deciding which type of endpoint this address can be used after\nthe external IPv6 address reservation. Possible values: [\"VM\", \"NETLB\"]", + "description": "Environment where the Kubernetes ConfigMap will be stored and used.", "description_kind": "plain", - "optional": true + "required": true }, - "label_fingerprint": { + "id": { "type": "string", - "description": "The fingerprint used for optimistic locking of this resource. Used\ninternally during updates.", "description_kind": "plain", + "optional": true, "computed": true }, - "labels": { - "type": [ - "map", - "string" - ], - "description": "Labels to apply to this address. A list of key->value pairs.\n\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", - "description_kind": "plain", - "optional": true - }, "name": { "type": "string", - "description": "Name of the resource. The name must be 1-63 characters long, and\ncomply with RFC1035. 
Specifically, the name must be 1-63 characters\nlong and match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?'\nwhich means the first character must be a lowercase letter, and all\nfollowing characters must be a dash, lowercase letter, or digit,\nexcept the last character, which cannot be a dash.", + "description": "Name of the Kubernetes ConfigMap.", "description_kind": "plain", "required": true }, - "network": { - "type": "string", - "description": "The URL of the network in which to reserve the address. This field\ncan only be used with INTERNAL type with the VPC_PEERING and\nIPSEC_INTERCONNECT purposes.", - "description_kind": "plain", - "optional": true - }, - "network_tier": { - "type": "string", - "description": "The networking tier used for configuring this address. If this field is not\nspecified, it is assumed to be PREMIUM.\nThis argument should not be used when configuring Internal addresses, because [network tier cannot be set for internal traffic; it's always Premium](https://cloud.google.com/network-tiers/docs/overview). 
Possible values: [\"PREMIUM\", \"STANDARD\"]", - "description_kind": "plain", - "optional": true, - "computed": true - }, - "prefix_length": { - "type": "number", - "description": "The prefix length if the resource represents an IP range.", - "description_kind": "plain", - "optional": true, - "computed": true - }, "project": { "type": "string", "description_kind": "plain", "optional": true, "computed": true }, - "purpose": { - "type": "string", - "description": "The purpose of this resource, which can be one of the following values.\n\n* GCE_ENDPOINT for addresses that are used by VM instances, alias IP\nranges, load balancers, and similar resources.\n\n* SHARED_LOADBALANCER_VIP for an address that can be used by multiple\ninternal load balancers.\n\n* VPC_PEERING for addresses that are reserved for VPC peer networks.\n\n* IPSEC_INTERCONNECT for addresses created from a private IP range that\nare reserved for a VLAN attachment in an HA VPN over Cloud Interconnect\nconfiguration. These addresses are regional resources.\n\n* PRIVATE_SERVICE_CONNECT for a private network address that is used to\nconfigure Private Service Connect. Only global internal addresses can use\nthis purpose.\n\nThis should only be set when using an Internal address.", - "description_kind": "plain", - "optional": true, - "computed": true - }, "region": { "type": "string", - "description": "The Region in which the created address should reside.\nIf it is not provided, the provider region is used.", - "description_kind": "plain", - "optional": true, - "computed": true - }, - "self_link": { - "type": "string", - "description_kind": "plain", - "computed": true - }, - "subnetwork": { - "type": "string", - "description": "The URL of the subnetwork in which to reserve the address. 
If an IP\naddress is specified, it must be within the subnetwork's IP range.\nThis field can only be used with INTERNAL type with\nGCE_ENDPOINT/DNS_RESOLVER purposes.", + "description": "The location or Compute Engine region for the environment.", "description_kind": "plain", "optional": true, "computed": true - }, - "terraform_labels": { - "type": [ - "map", - "string" - ], - "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", - "description_kind": "plain", - "computed": true - }, - "users": { - "type": [ - "list", - "string" - ], - "description": "The URLs of the resources that are using this address.", - "description_kind": "plain", - "computed": true + } + }, + "block_types": { + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, + "google_composer_user_workloads_secret": { + "version": 0, + "block": { + "attributes": { + "data": { + "type": [ + "map", + "string" + ], + "description": "A map of the secret data.", + "description_kind": "plain", + "optional": true, + "sensitive": true + }, + "environment": { + "type": "string", + "description": "Name of the environment.", + "description_kind": "plain", + "required": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "name": { + "type": "string", + "description": "Name of the secret.", + "description_kind": "plain", + "required": true + }, + "project": { + "type": "string", + "description": "The ID of the project in which the resource belongs. 
If it is not provided, the provider project is used.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "region": { + "type": "string", + "description": "The location or Compute Engine region for the environment.", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "block_types": { + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, + "google_compute_address": { + "version": 0, + "block": { + "attributes": { + "address": { + "type": "string", + "description": "The static external IP address represented by this resource.\nThe IP address must be inside the specified subnetwork,\nif any. Set by the API if undefined.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "address_type": { + "type": "string", + "description": "The type of address to reserve.\nNote: if you set this argument's value as 'INTERNAL' you need to leave the 'network_tier' argument unset in that resource block. 
Default value: \"EXTERNAL\" Possible values: [\"INTERNAL\", \"EXTERNAL\"]", + "description_kind": "plain", + "optional": true + }, + "creation_timestamp": { + "type": "string", + "description": "Creation timestamp in RFC3339 text format.", + "description_kind": "plain", + "computed": true + }, + "description": { + "type": "string", + "description": "An optional description of this resource.", + "description_kind": "plain", + "optional": true + }, + "effective_labels": { + "type": [ + "map", + "string" + ], + "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "ip_version": { + "type": "string", + "description": "The IP Version that will be used by this address. The default value is 'IPV4'. Possible values: [\"IPV4\", \"IPV6\"]", + "description_kind": "plain", + "optional": true + }, + "ipv6_endpoint_type": { + "type": "string", + "description": "The endpoint type of this address, which should be VM or NETLB. This is\nused for deciding which type of endpoint this address can be used after\nthe external IPv6 address reservation. Possible values: [\"VM\", \"NETLB\"]", + "description_kind": "plain", + "optional": true + }, + "label_fingerprint": { + "type": "string", + "description": "The fingerprint used for optimistic locking of this resource. Used\ninternally during updates.", + "description_kind": "plain", + "computed": true + }, + "labels": { + "type": [ + "map", + "string" + ], + "description": "Labels to apply to this address. 
A list of key->value pairs.\n\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", + "description_kind": "plain", + "optional": true + }, + "name": { + "type": "string", + "description": "Name of the resource. The name must be 1-63 characters long, and\ncomply with RFC1035. Specifically, the name must be 1-63 characters\nlong and match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?'\nwhich means the first character must be a lowercase letter, and all\nfollowing characters must be a dash, lowercase letter, or digit,\nexcept the last character, which cannot be a dash.", + "description_kind": "plain", + "required": true + }, + "network": { + "type": "string", + "description": "The URL of the network in which to reserve the address. This field\ncan only be used with INTERNAL type with the VPC_PEERING and\nIPSEC_INTERCONNECT purposes.", + "description_kind": "plain", + "optional": true + }, + "network_tier": { + "type": "string", + "description": "The networking tier used for configuring this address. If this field is not\nspecified, it is assumed to be PREMIUM.\nThis argument should not be used when configuring Internal addresses, because [network tier cannot be set for internal traffic; it's always Premium](https://cloud.google.com/network-tiers/docs/overview). 
Possible values: [\"PREMIUM\", \"STANDARD\"]", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "prefix_length": { + "type": "number", + "description": "The prefix length if the resource represents an IP range.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "purpose": { + "type": "string", + "description": "The purpose of this resource, which can be one of the following values.\n\n* GCE_ENDPOINT for addresses that are used by VM instances, alias IP\nranges, load balancers, and similar resources.\n\n* SHARED_LOADBALANCER_VIP for an address that can be used by multiple\ninternal load balancers.\n\n* VPC_PEERING for addresses that are reserved for VPC peer networks.\n\n* IPSEC_INTERCONNECT for addresses created from a private IP range that\nare reserved for a VLAN attachment in an HA VPN over Cloud Interconnect\nconfiguration. These addresses are regional resources.\n\n* PRIVATE_SERVICE_CONNECT for a private network address that is used to\nconfigure Private Service Connect. Only global internal addresses can use\nthis purpose.\n\nThis should only be set when using an Internal address.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "region": { + "type": "string", + "description": "The Region in which the created address should reside.\nIf it is not provided, the provider region is used.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "self_link": { + "type": "string", + "description_kind": "plain", + "computed": true + }, + "subnetwork": { + "type": "string", + "description": "The URL of the subnetwork in which to reserve the address. 
If an IP\naddress is specified, it must be within the subnetwork's IP range.\nThis field can only be used with INTERNAL type with\nGCE_ENDPOINT/DNS_RESOLVER purposes.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "terraform_labels": { + "type": [ + "map", + "string" + ], + "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", + "description_kind": "plain", + "computed": true + }, + "users": { + "type": [ + "list", + "string" + ], + "description": "The URLs of the resources that are using this address.", + "description_kind": "plain", + "computed": true } }, "block_types": { @@ -32930,6 +34735,12 @@ "description_kind": "plain", "required": true }, + "interface": { + "type": "string", + "description": "The disk interface used for attaching this disk. One of SCSI or NVME. (This field is only used for specific cases, please don't specify this field without advice from Google.)", + "description_kind": "plain", + "optional": true + }, "mode": { "type": "string", "description": "The mode in which to attach this disk, either READ_WRITE or READ_ONLY. If not specified, the default is to attach the disk in READ_WRITE mode.", @@ -33152,7 +34963,7 @@ "optional": true } }, - "description": "A nested object resource", + "description": "A nested object resource.", "description_kind": "plain" }, "max_items": 1 @@ -33629,6 +35440,12 @@ "optional": true, "computed": true }, + "ip_address_selection_policy": { + "type": "string", + "description": "Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). Possible values: [\"IPV4_ONLY\", \"PREFER_IPV6\", \"IPV6_ONLY\"]", + "description_kind": "plain", + "optional": true + }, "load_balancing_scheme": { "type": "string", "description": "Indicates whether the backend service will be used with internal or\nexternal load balancing. 
A backend service created for one type of\nload balancing cannot be used with the other. For more information, refer to\n[Choosing a load balancer](https://cloud.google.com/load-balancing/docs/backend-service). Default value: \"EXTERNAL\" Possible values: [\"EXTERNAL\", \"INTERNAL_SELF_MANAGED\", \"INTERNAL_MANAGED\", \"EXTERNAL_MANAGED\"]", @@ -33637,7 +35454,7 @@ }, "locality_lb_policy": { "type": "string", - "description": "The load balancing algorithm used within the scope of the locality.\nThe possible values are:\n\n* 'ROUND_ROBIN': This is a simple policy in which each healthy backend\n is selected in round robin order.\n\n* 'LEAST_REQUEST': An O(1) algorithm which selects two random healthy\n hosts and picks the host which has fewer active requests.\n\n* 'RING_HASH': The ring/modulo hash load balancer implements consistent\n hashing to backends. The algorithm has the property that the\n addition/removal of a host from a set of N hosts only affects\n 1/N of the requests.\n\n* 'RANDOM': The load balancer selects a random healthy host.\n\n* 'ORIGINAL_DESTINATION': Backend host is selected based on the client\n connection metadata, i.e., connections are opened\n to the same address as the destination address of\n the incoming connection before the connection\n was redirected to the load balancer.\n\n* 'MAGLEV': used as a drop in replacement for the ring hash load balancer.\n Maglev is not as stable as ring hash but has faster table lookup\n build times and host selection times. For more information about\n Maglev, refer to https://ai.google/research/pubs/pub44824\n\n* 'WEIGHTED_MAGLEV': Per-instance weighted Load Balancing via health check\n reported weights. If set, the Backend Service must\n configure a non legacy HTTP-based Health Check, and\n health check replies are expected to contain\n non-standard HTTP response header field\n X-Load-Balancing-Endpoint-Weight to specify the\n per-instance weights. 
If set, Load Balancing is weight\n based on the per-instance weights reported in the last\n processed health check replies, as long as every\n instance either reported a valid weight or had\n UNAVAILABLE_WEIGHT. Otherwise, Load Balancing remains\n equal-weight.\n\nThis field is applicable to either:\n\n* A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2,\n and loadBalancingScheme set to INTERNAL_MANAGED.\n* A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED.\n* A regional backend service with loadBalancingScheme set to EXTERNAL (External Network\n Load Balancing). Only MAGLEV and WEIGHTED_MAGLEV values are possible for External\n Network Load Balancing. The default is MAGLEV.\n\nIf session_affinity is not NONE, and this field is not set to MAGLEV, WEIGHTED_MAGLEV,\nor RING_HASH, session affinity settings will not take effect.\n\nOnly ROUND_ROBIN and RING_HASH are supported when the backend service is referenced\nby a URL map that is bound to target gRPC proxy that has validate_for_proxyless\nfield set to true. Possible values: [\"ROUND_ROBIN\", \"LEAST_REQUEST\", \"RING_HASH\", \"RANDOM\", \"ORIGINAL_DESTINATION\", \"MAGLEV\", \"WEIGHTED_MAGLEV\"]", + "description": "The load balancing algorithm used within the scope of the locality.\nThe possible values are:\n\n* 'ROUND_ROBIN': This is a simple policy in which each healthy backend\n is selected in round robin order.\n\n* 'LEAST_REQUEST': An O(1) algorithm which selects two random healthy\n hosts and picks the host which has fewer active requests.\n\n* 'RING_HASH': The ring/modulo hash load balancer implements consistent\n hashing to backends. 
The algorithm has the property that the\n addition/removal of a host from a set of N hosts only affects\n 1/N of the requests.\n\n* 'RANDOM': The load balancer selects a random healthy host.\n\n* 'ORIGINAL_DESTINATION': Backend host is selected based on the client\n connection metadata, i.e., connections are opened\n to the same address as the destination address of\n the incoming connection before the connection\n was redirected to the load balancer.\n\n* 'MAGLEV': used as a drop in replacement for the ring hash load balancer.\n Maglev is not as stable as ring hash but has faster table lookup\n build times and host selection times. For more information about\n Maglev, refer to https://ai.google/research/pubs/pub44824\n\n* 'WEIGHTED_MAGLEV': Per-instance weighted Load Balancing via health check\n reported weights. Only applicable to loadBalancingScheme\n EXTERNAL. If set, the Backend Service must\n configure a non legacy HTTP-based Health Check, and\n health check replies are expected to contain\n non-standard HTTP response header field\n X-Load-Balancing-Endpoint-Weight to specify the\n per-instance weights. If set, Load Balancing is weight\n based on the per-instance weights reported in the last\n processed health check replies, as long as every\n instance either reported a valid weight or had\n UNAVAILABLE_WEIGHT. Otherwise, Load Balancing remains\n equal-weight.\n\nlocality_lb_policy is applicable to either:\n\n* A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2,\n and loadBalancingScheme set to INTERNAL_MANAGED.\n* A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED.\n* A regional backend service with loadBalancingScheme set to EXTERNAL (External Network\n Load Balancing). Only MAGLEV and WEIGHTED_MAGLEV values are possible for External\n Network Load Balancing. 
The default is MAGLEV.\n\nIf session_affinity is not NONE, and locality_lb_policy is not set to MAGLEV, WEIGHTED_MAGLEV,\nor RING_HASH, session affinity settings will not take effect.\n\nOnly ROUND_ROBIN and RING_HASH are supported when the backend service is referenced\nby a URL map that is bound to target gRPC proxy that has validate_for_proxyless\nfield set to true. Possible values: [\"ROUND_ROBIN\", \"LEAST_REQUEST\", \"RING_HASH\", \"RANDOM\", \"ORIGINAL_DESTINATION\", \"MAGLEV\", \"WEIGHTED_MAGLEV\"]", "description_kind": "plain", "optional": true }, @@ -33686,7 +35503,7 @@ }, "session_affinity": { "type": "string", - "description": "Type of session affinity to use. The default is NONE. Session affinity is\nnot applicable if the protocol is UDP. Possible values: [\"NONE\", \"CLIENT_IP\", \"CLIENT_IP_PORT_PROTO\", \"CLIENT_IP_PROTO\", \"GENERATED_COOKIE\", \"HEADER_FIELD\", \"HTTP_COOKIE\"]", + "description": "Type of session affinity to use. The default is NONE. Session affinity is\nnot applicable if the protocol is UDP. Possible values: [\"NONE\", \"CLIENT_IP\", \"CLIENT_IP_PORT_PROTO\", \"CLIENT_IP_PROTO\", \"GENERATED_COOKIE\", \"HEADER_FIELD\", \"HTTP_COOKIE\", \"STRONG_COOKIE_AFFINITY\"]", "description_kind": "plain", "optional": true, "computed": true @@ -33706,7 +35523,7 @@ "attributes": { "balancing_mode": { "type": "string", - "description": "Specifies the balancing mode for this backend.\n\nFor global HTTP(S) or TCP/SSL load balancing, the default is\nUTILIZATION. Valid values are UTILIZATION, RATE (for HTTP(S))\nand CONNECTION (for TCP/SSL).\n\nSee the [Backend Services Overview](https://cloud.google.com/load-balancing/docs/backend-service#balancing-mode)\nfor an explanation of load balancing modes.\n\nFrom version 6.0.0 default value will be UTILIZATION to match default GCP value. 
Default value: \"UTILIZATION\" Possible values: [\"UTILIZATION\", \"RATE\", \"CONNECTION\"]", + "description": "Specifies the balancing mode for this backend.\n\nFor global HTTP(S) or TCP/SSL load balancing, the default is\nUTILIZATION. Valid values are UTILIZATION, RATE (for HTTP(S))\nand CONNECTION (for TCP/SSL).\n\nSee the [Backend Services Overview](https://cloud.google.com/load-balancing/docs/backend-service#balancing-mode)\nfor an explanation of load balancing modes. Default value: \"UTILIZATION\" Possible values: [\"UTILIZATION\", \"RATE\", \"CONNECTION\"]", "description_kind": "plain", "optional": true }, @@ -34056,17 +35873,23 @@ "nesting_mode": "list", "block": { "attributes": { + "enabled": { + "type": "bool", + "description": "Whether the serving infrastructure will authenticate and authorize all incoming requests.", + "description_kind": "plain", + "required": true + }, "oauth2_client_id": { "type": "string", "description": "OAuth2 Client ID for IAP", "description_kind": "plain", - "required": true + "optional": true }, "oauth2_client_secret": { "type": "string", "description": "OAuth2 Client Secret for IAP", "description_kind": "plain", - "required": true, + "optional": true, "sensitive": true }, "oauth2_client_secret_sha256": { @@ -34256,7 +36079,7 @@ "max_items": 1 } }, - "description": "Settings controlling eviction of unhealthy hosts from the load balancing pool.\nApplicable backend service types can be a global backend service with the\nloadBalancingScheme set to INTERNAL_SELF_MANAGED or EXTERNAL_MANAGED.\n\nFrom version 6.0.0 outlierDetection default terraform values will be removed to match default GCP value.\nDefault values are enforce by GCP without providing them.", + "description": "Settings controlling eviction of unhealthy hosts from the load balancing pool.\nApplicable backend service types can be a global backend service with the\nloadBalancingScheme set to INTERNAL_SELF_MANAGED or EXTERNAL_MANAGED.", "description_kind": "plain" }, 
"max_items": 1 @@ -34323,6 +36146,52 @@ }, "max_items": 1 }, + "strong_session_affinity_cookie": { + "nesting_mode": "list", + "block": { + "attributes": { + "name": { + "type": "string", + "description": "Name of the cookie.", + "description_kind": "plain", + "optional": true + }, + "path": { + "type": "string", + "description": "Path to set for the cookie.", + "description_kind": "plain", + "optional": true + } + }, + "block_types": { + "ttl": { + "nesting_mode": "list", + "block": { + "attributes": { + "nanos": { + "type": "number", + "description": "Span of time that's a fraction of a second at nanosecond\nresolution. Durations less than one second are represented\nwith a 0 seconds field and a positive nanos field. Must\nbe from 0 to 999,999,999 inclusive.", + "description_kind": "plain", + "optional": true + }, + "seconds": { + "type": "number", + "description": "Span of time at a resolution of a second.\nMust be from 0 to 315,576,000,000 inclusive.", + "description_kind": "plain", + "required": true + } + }, + "description": "Lifetime of the cookie.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "Describes the HTTP cookie used for stateful session affinity. 
This field is applicable and required if the sessionAffinity is set to STRONG_COOKIE_AFFINITY.", + "description_kind": "plain" + }, + "max_items": 1 + }, "timeouts": { "nesting_mode": "single", "block": { @@ -34580,7 +36449,7 @@ }, "storage_pool": { "type": "string", - "description": "The URL of the storage pool in which the new disk is created.\nFor example:\n* https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/storagePools/{storagePool}\n* /projects/{project}/zones/{zone}/storagePools/{storagePool}", + "description": "The URL or the name of the storage pool in which the new disk is created.\nFor example:\n* https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/storagePools/{storagePool}\n* /projects/{project}/zones/{zone}/storagePools/{storagePool}\n* /zones/{zone}/storagePools/{storagePool}\n* /{storagePool}", "description_kind": "plain", "optional": true }, @@ -34628,7 +36497,7 @@ "required": true } }, - "description": "A nested object resource", + "description": "A nested object resource.", "description_kind": "plain" }, "max_items": 1 @@ -35184,6 +37053,12 @@ "description": "IP address of the interface in the external VPN gateway.\nOnly IPv4 is supported. This IP address can be either from\nyour on-premise gateway or another Cloud provider's VPN gateway,\nit cannot be an IP address from Google Compute Engine.", "description_kind": "plain", "optional": true + }, + "ipv6_address": { + "type": "string", + "description": "IPv6 address of the interface in the external VPN gateway. This IPv6\naddress can be either from your on-premise gateway or another Cloud\nprovider's VPN gateway, it cannot be an IP address from Google Compute\nEngine. Must specify an IPv6 address (not IPV4-mapped) using any format\ndescribed in RFC 4291 (e.g. 2001:db8:0:0:2d9:51:0:0). The output format\nis RFC 5952 format (e.g. 
2001:db8::2d9:51:0:0).", + "description_kind": "plain", + "optional": true } }, "description": "A list of interfaces on this external VPN gateway.", @@ -35355,7 +37230,7 @@ "list", "string" ], - "description": "An optional list of ports to which this rule applies. This field\nis only applicable for UDP or TCP protocol. Each entry must be\neither an integer or a range. If not specified, this rule\napplies to connections through any port.\n\nExample inputs include: [\"22\"], [\"80\",\"443\"], and\n[\"12345-12349\"].", + "description": "An optional list of ports to which this rule applies. This field\nis only applicable for UDP or TCP protocol. Each entry must be\neither an integer or a range. If not specified, this rule\napplies to connections through any port.\n\nExample inputs include: [22], [80, 443], and\n[\"12345-12349\"].", "description_kind": "plain", "optional": true }, @@ -35379,7 +37254,7 @@ "list", "string" ], - "description": "An optional list of ports to which this rule applies. This field\nis only applicable for UDP or TCP protocol. Each entry must be\neither an integer or a range. If not specified, this rule\napplies to connections through any port.\n\nExample inputs include: [\"22\"], [\"80\",\"443\"], and\n[\"12345-12349\"].", + "description": "An optional list of ports to which this rule applies. This field\nis only applicable for UDP or TCP protocol. Each entry must be\neither an integer or a range. 
If not specified, this rule\napplies to connections through any port.\n\nExample inputs include: [22], [80, 443], and\n[\"12345-12349\"].", "description_kind": "plain", "optional": true }, @@ -35548,7 +37423,7 @@ }, "firewall_policy": { "type": "string", - "description": "The firewall policy ID of the association.", + "description": "The firewall policy of the resource.", "description_kind": "plain", "required": true }, @@ -35604,6 +37479,12 @@ "description_kind": "plain", "required": true }, + "creation_timestamp": { + "type": "string", + "description": "Creation timestamp in RFC3339 text format.", + "description_kind": "plain", + "computed": true + }, "description": { "type": "string", "description": "An optional description for this resource.", @@ -35612,19 +37493,19 @@ }, "direction": { "type": "string", - "description": "The direction in which this rule applies. Possible values: INGRESS, EGRESS", + "description": "The direction in which this rule applies. Possible values: [\"INGRESS\", \"EGRESS\"]", "description_kind": "plain", "required": true }, "disabled": { "type": "bool", - "description": "Denotes whether the firewall policy rule is disabled. When set to true, the firewall policy rule is not enforced and traffic behaves as if it did not exist. If this is unspecified, the firewall policy rule will be enabled.", + "description": "Denotes whether the firewall policy rule is disabled.\nWhen set to true, the firewall policy rule is not enforced and traffic behaves as if it did not exist.\nIf this is unspecified, the firewall policy rule will be enabled.", "description_kind": "plain", "optional": true }, "enable_logging": { "type": "bool", - "description": "Denotes whether to enable logging for a particular rule. If logging is enabled, logs will be exported to the configured export destination in Stackdriver. Logs may be exported to BigQuery or Pub/Sub. 
Note: you cannot enable logging on \"goto_next\" rules.", + "description": "Denotes whether to enable logging for a particular rule.\nIf logging is enabled, logs will be exported to the configured export destination in Stackdriver.\nLogs may be exported to BigQuery or Pub/Sub.\nNote: you cannot enable logging on \"goto_next\" rules.", "description_kind": "plain", "optional": true }, @@ -35642,13 +37523,13 @@ }, "kind": { "type": "string", - "description": "Type of the resource. Always `compute#firewallPolicyRule` for firewall policy rules", + "description": "Type of the resource. Always 'compute#firewallPolicyRule' for firewall policy rules", "description_kind": "plain", "computed": true }, "priority": { "type": "number", - "description": "An integer indicating the priority of a rule in the list. The priority must be a positive value between 0 and 2147483647. Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest prority.", + "description": "An integer indicating the priority of a rule in the list.\nThe priority must be a positive value between 0 and 2147483647.\nRules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest prority.", "description_kind": "plain", "required": true }, @@ -35660,7 +37541,7 @@ }, "security_profile_group": { "type": "string", - "description": "A fully-qualified URL of a SecurityProfileGroup resource. Example: https://networksecurity.googleapis.com/v1/organizations/{organizationId}/locations/global/securityProfileGroups/my-security-profile-group. 
It must be specified if action = 'apply_security_profile_group' and cannot be specified for other actions.", + "description": "A fully-qualified URL of a SecurityProfile resource instance.\nExample: https://networksecurity.googleapis.com/v1/projects/{project}/locations/{location}/securityProfileGroups/my-security-profile-group\nMust be specified if action = 'apply_security_profile_group' and cannot be specified for other actions.", "description_kind": "plain", "optional": true }, @@ -35669,7 +37550,7 @@ "list", "string" ], - "description": "A list of network resource URLs to which this rule applies. This field allows you to control which network's VMs get this rule. If this field is left blank, all VMs within the organization will receive the rule.", + "description": "A list of network resource URLs to which this rule applies.\nThis field allows you to control which network's VMs get this rule.\nIf this field is left blank, all VMs within the organization will receive the rule.", "description_kind": "plain", "optional": true }, @@ -35684,7 +37565,7 @@ }, "tls_inspect": { "type": "bool", - "description": "Boolean flag indicating if the traffic should be TLS decrypted. It can be set only if action = 'apply_security_profile_group' and cannot be set for other actions.", + "description": "Boolean flag indicating if the traffic should be TLS decrypted.\nCan be set only if action = 'apply_security_profile_group' and cannot be set for other actions.", "description_kind": "plain", "optional": true } @@ -35699,7 +37580,7 @@ "list", "string" ], - "description": "Address groups which should be matched against the traffic destination. Maximum number of destination address groups is 10. Destination address groups is only supported in Egress rules.", + "description": "Address groups which should be matched against the traffic destination. 
Maximum number of destination address groups is 10.", "description_kind": "plain", "optional": true }, @@ -35708,7 +37589,7 @@ "list", "string" ], - "description": "Domain names that will be used to match against the resolved domain name of destination of traffic. Can only be specified if DIRECTION is egress.", + "description": "Fully Qualified Domain Name (FQDN) which should be matched against traffic destination. Maximum number of destination fqdn allowed is 100.", "description_kind": "plain", "optional": true }, @@ -35717,7 +37598,7 @@ "list", "string" ], - "description": "CIDR IP address range. Maximum number of destination CIDR IP ranges allowed is 256.", + "description": "CIDR IP address range. Maximum number of destination CIDR IP ranges allowed is 5000.", "description_kind": "plain", "optional": true }, @@ -35726,7 +37607,7 @@ "list", "string" ], - "description": "The Unicode country codes whose IP addresses will be used to match against the source of traffic. Can only be specified if DIRECTION is egress.", + "description": "Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex.\"US\" Maximum number of dest region codes allowed is 5000.", "description_kind": "plain", "optional": true }, @@ -35735,7 +37616,7 @@ "list", "string" ], - "description": "Name of the Google Cloud Threat Intelligence list.", + "description": "Names of Network Threat Intelligence lists. The IPs in these lists will be matched against traffic destination.", "description_kind": "plain", "optional": true }, @@ -35744,7 +37625,7 @@ "list", "string" ], - "description": "Address groups which should be matched against the traffic source. Maximum number of source address groups is 10. Source address groups is only supported in Ingress rules.", + "description": "Address groups which should be matched against the traffic source. 
Maximum number of source address groups is 10.", "description_kind": "plain", "optional": true }, @@ -35753,7 +37634,7 @@ "list", "string" ], - "description": "Domain names that will be used to match against the resolved domain name of source of traffic. Can only be specified if DIRECTION is ingress.", + "description": "Fully Qualified Domain Name (FQDN) which should be matched against traffic source. Maximum number of source fqdn allowed is 100.", "description_kind": "plain", "optional": true }, @@ -35762,7 +37643,7 @@ "list", "string" ], - "description": "CIDR IP address range. Maximum number of source CIDR IP ranges allowed is 256.", + "description": "CIDR IP address range. Maximum number of source CIDR IP ranges allowed is 5000.", "description_kind": "plain", "optional": true }, @@ -35771,7 +37652,7 @@ "list", "string" ], - "description": "The Unicode country codes whose IP addresses will be used to match against the source of traffic. Can only be specified if DIRECTION is ingress.", + "description": "Region codes whose IP addresses will be used to match for source of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex.\"US\" Maximum number of source region codes allowed is 5000.", "description_kind": "plain", "optional": true }, @@ -35780,7 +37661,7 @@ "list", "string" ], - "description": "Name of the Google Cloud Threat Intelligence list.", + "description": "Names of Network Threat Intelligence lists. The IPs in these lists will be matched against traffic source.", "description_kind": "plain", "optional": true } @@ -35792,7 +37673,7 @@ "attributes": { "ip_protocol": { "type": "string", - "description": "The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. 
This value can either be one of the following well known protocol strings (`tcp`, `udp`, `icmp`, `esp`, `ah`, `ipip`, `sctp`), or the IP protocol number.", + "description": "The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule.\nThis value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number.", "description_kind": "plain", "required": true }, @@ -35801,7 +37682,7 @@ "list", "string" ], - "description": "An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. Example inputs include: ``.", + "description": "An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port.\nExample inputs include: [\"22\"], [\"80\",\"443\"], and [\"12345-12349\"].", "description_kind": "plain", "optional": true } @@ -36297,6 +38178,12 @@ "description_kind": "plain", "computed": true }, + "forwarding_rule_id": { + "type": "number", + "description": "The unique identifier number for the resource. This identifier is defined by the server.", + "description_kind": "plain", + "computed": true + }, "id": { "type": "string", "description_kind": "plain", @@ -36357,6 +38244,13 @@ "optional": true, "computed": true }, + "network_tier": { + "type": "string", + "description": "This signifies the networking tier used for configuring\nthis load balancer and can only take the following values:\n'PREMIUM', 'STANDARD'.\n\nFor regional ForwardingRule, the valid values are 'PREMIUM' and\n'STANDARD'. 
For GlobalForwardingRule, the valid value is\n'PREMIUM'.\n\nIf this field is not specified, it is assumed to be 'PREMIUM'.\nIf 'IPAddress' is specified, this value must be equal to the\nnetworkTier of the Address. Possible values: [\"PREMIUM\", \"STANDARD\"]", + "description_kind": "plain", + "optional": true, + "computed": true + }, "no_automate_dns_zone": { "type": "bool", "description": "This is used in PSC consumer ForwardingRule to control whether it should try to auto-generate a DNS zone or not. Non-PSC forwarding rules do not use this field.", @@ -36649,7 +38543,7 @@ } }, "google_compute_ha_vpn_gateway": { - "version": 0, + "version": 1, "block": { "attributes": { "description": { @@ -36866,7 +38760,7 @@ "optional": true } }, - "description": "A nested object resource", + "description": "A nested object resource.", "description_kind": "plain" }, "max_items": 1 @@ -36918,7 +38812,7 @@ "optional": true } }, - "description": "A nested object resource", + "description": "A nested object resource.", "description_kind": "plain" }, "max_items": 1 @@ -36970,7 +38864,7 @@ "optional": true } }, - "description": "A nested object resource", + "description": "A nested object resource.", "description_kind": "plain" }, "max_items": 1 @@ -37022,7 +38916,7 @@ "optional": true } }, - "description": "A nested object resource", + "description": "A nested object resource.", "description_kind": "plain" }, "max_items": 1 @@ -37084,7 +38978,7 @@ "optional": true } }, - "description": "A nested object resource", + "description": "A nested object resource.", "description_kind": "plain" }, "max_items": 1 @@ -37130,7 +39024,7 @@ "optional": true } }, - "description": "A nested object resource", + "description": "A nested object resource.", "description_kind": "plain" }, "max_items": 1 @@ -37519,7 +39413,7 @@ "attributes": { "type": { "type": "string", - "description": "The type of supported feature. 
Read [Enabling guest operating system features](https://cloud.google.com/compute/docs/images/create-delete-deprecate-private-images#guest-os-features) to see a list of available options. Possible values: [\"MULTI_IP_SUBNET\", \"SECURE_BOOT\", \"SEV_CAPABLE\", \"UEFI_COMPATIBLE\", \"VIRTIO_SCSI_MULTIQUEUE\", \"WINDOWS\", \"GVNIC\", \"SEV_LIVE_MIGRATABLE\", \"SEV_SNP_CAPABLE\", \"SUSPEND_RESUME_COMPATIBLE\", \"TDX_CAPABLE\", \"SEV_LIVE_MIGRATABLE_V2\"]", + "description": "The type of supported feature. Read [Enabling guest operating system features](https://cloud.google.com/compute/docs/images/create-delete-deprecate-private-images#guest-os-features) to see a list of available options. Possible values: [\"MULTI_IP_SUBNET\", \"SECURE_BOOT\", \"SEV_CAPABLE\", \"UEFI_COMPATIBLE\", \"VIRTIO_SCSI_MULTIQUEUE\", \"WINDOWS\", \"GVNIC\", \"IDPF\", \"SEV_LIVE_MIGRATABLE\", \"SEV_SNP_CAPABLE\", \"SUSPEND_RESUME_COMPATIBLE\", \"TDX_CAPABLE\", \"SEV_LIVE_MIGRATABLE_V2\"]", "description_kind": "plain", "required": true } @@ -37797,6 +39691,12 @@ "description_kind": "plain", "computed": true }, + "creation_timestamp": { + "type": "string", + "description": "Creation timestamp in RFC3339 text format.", + "description_kind": "plain", + "computed": true + }, "current_status": { "type": "string", "description": "\n\t\t\t\t\tCurrent status of the instance.\n\t\t\t\t\tThis could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED.\n\t\t\t\t\tFor more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle).", @@ -37817,7 +39717,7 @@ }, "desired_status": { "type": "string", - "description": "Desired status of the instance. Either \"RUNNING\" or \"TERMINATED\".", + "description": "Desired status of the instance. 
Either \"RUNNING\", \"SUSPENDED\" or \"TERMINATED\".", "description_kind": "plain", "optional": true }, @@ -37836,22 +39736,6 @@ "description_kind": "plain", "optional": true }, - "guest_accelerator": { - "type": [ - "list", - [ - "object", - { - "count": "number", - "type": "string" - } - ] - ], - "description": "List of the type and count of accelerator cards attached to the instance.", - "description_kind": "plain", - "optional": true, - "computed": true - }, "hostname": { "type": "string", "description": "A custom hostname for the instance. Must be a fully qualified DNS name and RFC-1035-valid. Valid format is a series of labels 1-63 characters long matching the regular expression [a-z]([-a-z0-9]*[a-z0-9]), concatenated with periods. The entire hostname must not exceed 253 characters. Changing this forces a new resource to be created.", @@ -37870,6 +39754,12 @@ "description_kind": "plain", "computed": true }, + "key_revocation_action_type": { + "type": "string", + "description": "Action to be taken when a customer's encryption key is revoked. Supports \"STOP\" and \"NONE\", with \"NONE\" being the default.", + "description_kind": "plain", + "optional": true + }, "label_fingerprint": { "type": "string", "description": "The unique fingerprint of the labels.", @@ -37990,12 +39880,30 @@ "description_kind": "plain", "optional": true }, + "enable_uefi_networking": { + "type": "bool", + "description": "Whether to enable UEFI networking for the instance.", + "description_kind": "plain", + "optional": true + }, + "performance_monitoring_unit": { + "type": "string", + "description": "The PMU is a hardware component within the CPU core that monitors how the processor runs code. Valid values for the level of PMU are \"STANDARD\", \"ENHANCED\", and \"ARCHITECTURAL\".", + "description_kind": "plain", + "optional": true + }, "threads_per_core": { "type": "number", "description": "The number of threads per physical core. 
To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.", "description_kind": "plain", "optional": true }, + "turbo_mode": { + "type": "string", + "description": "Turbo frequency mode to use for the instance. Currently supported modes is \"ALL_CORE_MAX\".", + "description_kind": "plain", + "optional": true + }, "visible_core_count": { "type": "number", "description": "The number of physical cores to expose to an instance. Multiply by the number of threads per core to compute the total number of virtual CPUs to expose to the instance. If unset, the number of cores is inferred from the instance\\'s nominal CPU count and the underlying platform\\'s SMT width.", @@ -38086,6 +39994,12 @@ "description_kind": "plain", "computed": true }, + "interface": { + "type": "string", + "description": "The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attached_disk and only used for specific cases, please don't specify this field without advice from Google.)", + "description_kind": "plain", + "optional": true + }, "kms_key_self_link": { "type": "string", "description": "The self_link of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of kms_key_self_link and disk_encryption_key_raw may be set.", @@ -38158,6 +40072,16 @@ "description_kind": "plain", "optional": true }, + "resource_policies": { + "type": [ + "list", + "string" + ], + "description": "A list of self_links of resource policies to attach to the instance's boot disk. Modifying this list will cause the instance to recreate. 
Currently a max of 1 resource policy is supported.", + "description_kind": "plain", + "optional": true, + "computed": true + }, "size": { "type": "number", "description": "The size of the image in gigabytes.", @@ -38197,7 +40121,7 @@ "attributes": { "confidential_instance_type": { "type": "string", - "description": "\n\t\t\t\t\t\t\t\tThe confidential computing technology the instance uses.\n\t\t\t\t\t\t\t\tSEV is an AMD feature. TDX is an Intel feature. One of the following\n\t\t\t\t\t\t\t\tvalues is required: SEV, SEV_SNP, TDX. If SEV_SNP, min_cpu_platform =\n\t\t\t\t\t\t\t\t\"AMD Milan\" is currently required. TDX is only available in beta.", + "description": "\n\t\t\t\t\t\t\t\tThe confidential computing technology the instance uses.\n\t\t\t\t\t\t\t\tSEV is an AMD feature. TDX is an Intel feature. One of the following\n\t\t\t\t\t\t\t\tvalues is required: SEV, SEV_SNP, TDX. If SEV_SNP, min_cpu_platform =\n\t\t\t\t\t\t\t\t\"AMD Milan\" is currently required.", "description_kind": "plain", "optional": true }, @@ -38213,6 +40137,27 @@ }, "max_items": 1 }, + "guest_accelerator": { + "nesting_mode": "list", + "block": { + "attributes": { + "count": { + "type": "number", + "description": "The number of the guest accelerator cards exposed to this instance.", + "description_kind": "plain", + "required": true + }, + "type": { + "type": "string", + "description": "The accelerator type resource exposed to this instance. E.g. nvidia-tesla-k80.", + "description_kind": "plain", + "required": true + } + }, + "description": "List of the type and count of accelerator cards attached to the instance.", + "description_kind": "plain" + } + }, "network_interface": { "nesting_mode": "list", "block": { @@ -38259,7 +40204,7 @@ }, "nic_type": { "type": "string", - "description": "The type of vNIC to be used on this interface. Possible values:GVNIC, VIRTIO_NET", + "description": "The type of vNIC to be used on this interface. 
Possible values:GVNIC, VIRTIO_NET, IDPF", "description_kind": "plain", "optional": true }, @@ -38478,6 +40423,12 @@ "description_kind": "plain", "optional": true }, + "availability_domain": { + "type": "number", + "description": "Specifies the availability domain, which this instance should be scheduled on.", + "description_kind": "plain", + "optional": true + }, "instance_termination_action": { "type": "string", "description": "Specifies the action GCE should take when SPOT VM is preempted.", @@ -38724,26 +40675,6 @@ "optional": true, "computed": true }, - "attached_disk": { - "type": [ - "list", - [ - "object", - { - "device_name": "string", - "disk_encryption_key_raw": "string", - "disk_encryption_key_sha256": "string", - "kms_key_self_link": "string", - "mode": "string", - "source": "string" - } - ] - ], - "description": "List of disks attached to the instance", - "description_kind": "plain", - "optional": true, - "computed": true - }, "can_ip_forward": { "type": "bool", "description": "Whether sending and receiving of packets with non-matching source or destination IPs is allowed.", @@ -38757,6 +40688,12 @@ "description_kind": "plain", "computed": true }, + "creation_timestamp": { + "type": "string", + "description": "Creation timestamp in RFC3339 text format.", + "description_kind": "plain", + "computed": true + }, "current_status": { "type": "string", "description": "\n\t\t\t\t\tCurrent status of the instance.\n\t\t\t\t\tThis could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED.\n\t\t\t\t\tFor more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle).", @@ -38779,7 +40716,7 @@ }, "desired_status": { "type": "string", - "description": "Desired status of the instance. Either \"RUNNING\" or \"TERMINATED\".", + "description": "Desired status of the instance. 
Either \"RUNNING\", \"SUSPENDED\" or \"TERMINATED\".", "description_kind": "plain", "optional": true, "computed": true @@ -38800,22 +40737,6 @@ "optional": true, "computed": true }, - "guest_accelerator": { - "type": [ - "list", - [ - "object", - { - "count": "number", - "type": "string" - } - ] - ], - "description": "List of the type and count of accelerator cards attached to the instance.", - "description_kind": "plain", - "optional": true, - "computed": true - }, "hostname": { "type": "string", "description": "A custom hostname for the instance. Must be a fully qualified DNS name and RFC-1035-valid. Valid format is a series of labels 1-63 characters long matching the regular expression [a-z]([-a-z0-9]*[a-z0-9]), concatenated with periods. The entire hostname must not exceed 253 characters. Changing this forces a new resource to be created.", @@ -38835,6 +40756,13 @@ "description_kind": "plain", "computed": true }, + "key_revocation_action_type": { + "type": "string", + "description": "Action to be taken when a customer's encryption key is revoked. 
Supports \"STOP\" and \"NONE\", with \"NONE\" being the default.", + "description_kind": "plain", + "optional": true, + "computed": true + }, "label_fingerprint": { "type": "string", "description": "The unique fingerprint of the labels.", @@ -38911,48 +40839,12 @@ "optional": true, "computed": true }, - "scratch_disk": { - "type": [ - "list", - [ - "object", - { - "device_name": "string", - "interface": "string", - "size": "number" - } - ] - ], - "description": "The scratch disks attached to the instance.", - "description_kind": "plain", - "optional": true, - "computed": true - }, "self_link": { "type": "string", "description": "The URI of the created resource.", "description_kind": "plain", "computed": true }, - "service_account": { - "type": [ - "list", - [ - "object", - { - "email": "string", - "scopes": [ - "set", - "string" - ] - } - ] - ], - "description": "The service account to attach to the instance.", - "description_kind": "plain", - "optional": true, - "computed": true - }, "source_instance_template": { "type": "string", "description": "Name or self link of an instance template to create the instance based on.", @@ -39004,6 +40896,20 @@ "optional": true, "computed": true }, + "enable_uefi_networking": { + "type": "bool", + "description": "Whether to enable UEFI networking for the instance.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "performance_monitoring_unit": { + "type": "string", + "description": "The PMU is a hardware component within the CPU core that monitors how the processor runs code. Valid values for the level of PMU are \"STANDARD\", \"ENHANCED\", and \"ARCHITECTURAL\".", + "description_kind": "plain", + "optional": true, + "computed": true + }, "threads_per_core": { "type": "number", "description": "The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. 
If unset, the maximum number of threads supported per core by the underlying processor is assumed.", @@ -39011,6 +40917,13 @@ "optional": true, "computed": true }, + "turbo_mode": { + "type": "string", + "description": "Turbo frequency mode to use for the instance. Currently supported modes is \"ALL_CORE_MAX\".", + "description_kind": "plain", + "optional": true, + "computed": true + }, "visible_core_count": { "type": "number", "description": "The number of physical cores to expose to an instance. Multiply by the number of threads per core to compute the total number of virtual CPUs to expose to the instance. If unset, the number of cores is inferred from the instance\\'s nominal CPU count and the underlying platform\\'s SMT width.", @@ -39024,20 +40937,13 @@ }, "max_items": 1 }, - "boot_disk": { + "attached_disk": { "nesting_mode": "list", "block": { "attributes": { - "auto_delete": { - "type": "bool", - "description": "Whether the disk will be auto-deleted when the instance is deleted.", - "description_kind": "plain", - "optional": true, - "computed": true - }, "device_name": { "type": "string", - "description": "Name with which attached disk will be accessible under /dev/disk/by-id/", + "description": "Name with which the attached disk is accessible under /dev/disk/by-id/", "description_kind": "plain", "optional": true, "computed": true @@ -39070,6 +40976,70 @@ "optional": true, "computed": true }, + "source": { + "type": "string", + "description": "The name or self_link of the disk attached to this instance.", + "description_kind": "plain", + "required": true + } + }, + "description": "List of disks attached to the instance", + "description_kind": "plain" + } + }, + "boot_disk": { + "nesting_mode": "list", + "block": { + "attributes": { + "auto_delete": { + "type": "bool", + "description": "Whether the disk will be auto-deleted when the instance is deleted.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "device_name": { + "type": 
"string", + "description": "Name with which attached disk will be accessible under /dev/disk/by-id/", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "disk_encryption_key_raw": { + "type": "string", + "description": "A 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to encrypt this disk. Only one of kms_key_self_link and disk_encryption_key_raw may be set.", + "description_kind": "plain", + "optional": true, + "computed": true, + "sensitive": true + }, + "disk_encryption_key_sha256": { + "type": "string", + "description": "The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption key that protects this resource.", + "description_kind": "plain", + "computed": true + }, + "interface": { + "type": "string", + "description": "The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attached_disk and only used for specific cases, please don't specify this field without advice from Google.)", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "kms_key_self_link": { + "type": "string", + "description": "The self_link of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of kms_key_self_link and disk_encryption_key_raw may be set.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "mode": { + "type": "string", + "description": "Read/write mode for the disk. One of \"READ_ONLY\" or \"READ_WRITE\".", + "description_kind": "plain", + "optional": true, + "computed": true + }, "source": { "type": "string", "description": "The name or self_link of the disk attached to this instance.", @@ -39131,6 +41101,16 @@ "optional": true, "computed": true }, + "resource_policies": { + "type": [ + "list", + "string" + ], + "description": "A list of self_links of resource policies to attach to the instance's boot disk. Modifying this list will cause the instance to recreate. 
Currently a max of 1 resource policy is supported.", + "description_kind": "plain", + "optional": true, + "computed": true + }, "size": { "type": "number", "description": "The size of the image in gigabytes.", @@ -39170,7 +41150,7 @@ "attributes": { "confidential_instance_type": { "type": "string", - "description": "\n\t\t\t\t\t\t\t\tThe confidential computing technology the instance uses.\n\t\t\t\t\t\t\t\tSEV is an AMD feature. TDX is an Intel feature. One of the following\n\t\t\t\t\t\t\t\tvalues is required: SEV, SEV_SNP, TDX. If SEV_SNP, min_cpu_platform =\n\t\t\t\t\t\t\t\t\"AMD Milan\" is currently required. TDX is only available in beta.", + "description": "\n\t\t\t\t\t\t\t\tThe confidential computing technology the instance uses.\n\t\t\t\t\t\t\t\tSEV is an AMD feature. TDX is an Intel feature. One of the following\n\t\t\t\t\t\t\t\tvalues is required: SEV, SEV_SNP, TDX. If SEV_SNP, min_cpu_platform =\n\t\t\t\t\t\t\t\t\"AMD Milan\" is currently required.", "description_kind": "plain", "optional": true, "computed": true @@ -39188,43 +41168,31 @@ }, "max_items": 1 }, - "network_interface": { + "guest_accelerator": { "nesting_mode": "list", "block": { "attributes": { - "access_config": { - "type": [ - "list", - [ - "object", - { - "nat_ip": "string", - "network_tier": "string", - "public_ptr_domain_name": "string" - } - ] - ], - "description": "Access configurations, i.e. IPs via which this instance can be accessed via the Internet.", + "count": { + "type": "number", + "description": "The number of the guest accelerator cards exposed to this instance.", "description_kind": "plain", - "optional": true, - "computed": true + "required": true }, - "alias_ip_range": { - "type": [ - "list", - [ - "object", - { - "ip_cidr_range": "string", - "subnetwork_range_name": "string" - } - ] - ], - "description": "An array of alias IP ranges for this network interface.", + "type": { + "type": "string", + "description": "The accelerator type resource exposed to this instance. E.g. 
nvidia-tesla-k80.", "description_kind": "plain", - "optional": true, - "computed": true - }, + "required": true + } + }, + "description": "List of the type and count of accelerator cards attached to the instance.", + "description_kind": "plain" + } + }, + "network_interface": { + "nesting_mode": "list", + "block": { + "attributes": { "internal_ipv6_prefix_length": { "type": "number", "description": "The prefix length of the primary internal IPv6 range.", @@ -39267,7 +41235,7 @@ }, "nic_type": { "type": "string", - "description": "The type of vNIC to be used on this interface. Possible values:GVNIC, VIRTIO_NET", + "description": "The type of vNIC to be used on this interface. Possible values:GVNIC, VIRTIO_NET, IDPF", "description_kind": "plain", "optional": true, "computed": true @@ -39302,6 +41270,58 @@ } }, "block_types": { + "access_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "nat_ip": { + "type": "string", + "description": "The IP address that is be 1:1 mapped to the instance's network ip.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "network_tier": { + "type": "string", + "description": "The networking tier used for configuring this instance. One of PREMIUM or STANDARD.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "public_ptr_domain_name": { + "type": "string", + "description": "The DNS domain name for the public PTR record.", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "description": "Access configurations, i.e. 
IPs via which this instance can be accessed via the Internet.", + "description_kind": "plain" + } + }, + "alias_ip_range": { + "nesting_mode": "list", + "block": { + "attributes": { + "ip_cidr_range": { + "type": "string", + "description": "The IP CIDR range represented by this alias IP range.", + "description_kind": "plain", + "required": true + }, + "subnetwork_range_name": { + "type": "string", + "description": "The subnetwork secondary range name specifying the secondary range from which to allocate the IP CIDR range for this alias IP range.", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "description": "An array of alias IP ranges for this network interface.", + "description_kind": "plain" + } + }, "ipv6_access_config": { "nesting_mode": "list", "block": { @@ -39440,6 +41460,13 @@ "optional": true, "computed": true }, + "availability_domain": { + "type": "number", + "description": "Specifies the availability domain, which this instance should be scheduled on.", + "description_kind": "plain", + "optional": true, + "computed": true + }, "instance_termination_action": { "type": "string", "description": "Specifies the action GCE should take when SPOT VM is preempted.", @@ -39572,6 +41599,61 @@ }, "max_items": 1 }, + "scratch_disk": { + "nesting_mode": "list", + "block": { + "attributes": { + "device_name": { + "type": "string", + "description": "Name with which the attached disk is accessible under /dev/disk/by-id/", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "interface": { + "type": "string", + "description": "The disk interface used for attaching this disk. One of SCSI or NVME.", + "description_kind": "plain", + "required": true + }, + "size": { + "type": "number", + "description": "The size of the disk in gigabytes. 
One of 375 or 3000.", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "description": "The scratch disks attached to the instance.", + "description_kind": "plain" + } + }, + "service_account": { + "nesting_mode": "list", + "block": { + "attributes": { + "email": { + "type": "string", + "description": "The service account e-mail address.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "scopes": { + "type": [ + "set", + "string" + ], + "description": "A list of service scopes.", + "description_kind": "plain", + "required": true + } + }, + "description": "The service account to attach to the instance.", + "description_kind": "plain" + }, + "max_items": 1 + }, "shielded_instance_config": { "nesting_mode": "list", "block": { @@ -39785,6 +41867,12 @@ "description_kind": "plain", "computed": true }, + "instance_group_manager_id": { + "type": "number", + "description": "The unique identifier number for the resource. This identifier is defined by the server.", + "description_kind": "plain", + "computed": true + }, "list_managed_instances_results": { "type": "string", "description": "Pagination behavior of the listManagedInstances API method for this managed instance group. Valid values are: \"PAGELESS\", \"PAGINATED\". If PAGELESS (default), Pagination is disabled for the group's listManagedInstances API method. maxResults and pageToken query parameters are ignored and all instances are returned in a single response. 
If PAGINATED, pagination is enabled, maxResults and pageToken query parameters are respected.", @@ -39882,6 +41970,20 @@ "optional": true, "computed": true }, + "target_stopped_size": { + "type": "number", + "description": "The target number of stopped instances for this managed instance group.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "target_suspended_size": { + "type": "number", + "description": "The target number of suspended instances for this managed instance group.", + "description_kind": "plain", + "optional": true, + "computed": true + }, "wait_for_instances": { "type": "bool", "description": "Whether to wait for all instances to be created/updated before returning. Note that if this is set to true and the operation does not succeed, Terraform will continue trying until it times out.", @@ -39996,6 +42098,30 @@ "description_kind": "plain" } }, + "standby_policy": { + "nesting_mode": "list", + "block": { + "attributes": { + "initial_delay_sec": { + "type": "number", + "description": "Specifies the number of seconds that the MIG should wait to suspend or stop a VM after that VM was created. The initial delay gives the initialization script the time to prepare your VM for a quick scale out. The value of initial delay must be between 0 and 3600 seconds. The default value is 0.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "mode": { + "type": "string", + "description": "Defines how a MIG resumes or starts VMs from a standby pool when the group scales out. 
The default mode is \"MANUAL\".", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "description": "Standby policy for stopped and suspended instances.", + "description_kind": "plain" + }, + "max_items": 1 + }, "stateful_disk": { "nesting_mode": "set", "block": { @@ -40589,6 +42715,12 @@ "description_kind": "plain", "optional": true }, + "creation_timestamp": { + "type": "string", + "description": "Creation timestamp in RFC3339 text format.", + "description_kind": "plain", + "computed": true + }, "description": { "type": "string", "description": "A brief description of this resource.", @@ -40616,6 +42748,12 @@ "description_kind": "plain", "optional": true }, + "key_revocation_action_type": { + "type": "string", + "description": "Action to be taken when a customer's encryption key is revoked. Supports \"STOP\" and \"NONE\", with \"NONE\" being the default.", + "description_kind": "plain", + "optional": true + }, "labels": { "type": [ "map", @@ -40667,7 +42805,7 @@ }, "name_prefix": { "type": "string", - "description": "Creates a unique name beginning with the specified prefix. Conflicts with name.", + "description": "Creates a unique name beginning with the specified prefix. Conflicts with name. Max length is 54 characters. Prefixes with lengths longer than 37 characters will use a shortened UUID that will be more prone to collisions.", "description_kind": "plain", "optional": true, "computed": true @@ -40752,12 +42890,30 @@ "description_kind": "plain", "optional": true }, + "enable_uefi_networking": { + "type": "bool", + "description": "Whether to enable UEFI networking or not.", + "description_kind": "plain", + "optional": true + }, + "performance_monitoring_unit": { + "type": "string", + "description": "The PMU is a hardware component within the CPU core that monitors how the processor runs code. 
Valid values for the level of PMU are \"STANDARD\", \"ENHANCED\", and \"ARCHITECTURAL\".", + "description_kind": "plain", + "optional": true + }, "threads_per_core": { "type": "number", "description": "The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.", "description_kind": "plain", "optional": true }, + "turbo_mode": { + "type": "string", + "description": "Turbo frequency mode to use for the instance. Currently supported modes is \"ALL_CORE_MAX\".", + "description_kind": "plain", + "optional": true + }, "visible_core_count": { "type": "number", "description": "The number of physical cores to expose to an instance. Multiply by the number of threads per core to compute the total number of virtual CPUs to expose to the instance. If unset, the number of cores is inferred from the instance\\'s nominal CPU count and the underlying platform\\'s SMT width.", @@ -40776,7 +42932,7 @@ "attributes": { "confidential_instance_type": { "type": "string", - "description": "\n\t\t\t\t\t\t\t\tThe confidential computing technology the instance uses.\n\t\t\t\t\t\t\t\tSEV is an AMD feature. TDX is an Intel feature. One of the following\n\t\t\t\t\t\t\t\tvalues is required: SEV, SEV_SNP, TDX. If SEV_SNP, min_cpu_platform =\n\t\t\t\t\t\t\t\t\"AMD Milan\" is currently required. TDX is only available in beta.", + "description": "\n\t\t\t\t\t\t\t\tThe confidential computing technology the instance uses.\n\t\t\t\t\t\t\t\tSEV is an AMD feature. TDX is an Intel feature. One of the following\n\t\t\t\t\t\t\t\tvalues is required: SEV, SEV_SNP, TDX. If SEV_SNP, min_cpu_platform =\n\t\t\t\t\t\t\t\t\"AMD Milan\" is currently required.", "description_kind": "plain", "optional": true }, @@ -40861,7 +43017,14 @@ }, "provisioned_iops": { "type": "number", - "description": "Indicates how many IOPS to provision for the disk. 
This sets the number of I/O operations per second that the disk can handle. Values must be between 10,000 and 120,000. For more details, see the [Extreme persistent disk documentation](https://cloud.google.com/compute/docs/disks/extreme-persistent-disk).", + "description": "Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. For more details, see the [Extreme persistent disk documentation](https://cloud.google.com/compute/docs/disks/extreme-persistent-disk) or the [Hyperdisk documentation](https://cloud.google.com/compute/docs/disks/hyperdisks) depending on the selected disk_type.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "provisioned_throughput": { + "type": "number", + "description": "Indicates how much throughput to provision for the disk, in MB/s. This sets the amount of data that can be read or written from the disk per second. Values must greater than or equal to 1. For more details, see the [Hyperdisk documentation](https://cloud.google.com/compute/docs/disks/hyperdisks).", "description_kind": "plain", "optional": true, "computed": true @@ -41240,6 +43403,12 @@ "description_kind": "plain", "optional": true }, + "availability_domain": { + "type": "number", + "description": "Specifies the availability domain, which this instance should be scheduled on.", + "description_kind": "plain", + "optional": true + }, "instance_termination_action": { "type": "string", "description": "Specifies the action GCE should take when SPOT VM is preempted.", @@ -41485,9 +43654,9 @@ }, "customer_name": { "type": "string", - "description": "Customer name, to put in the Letter of Authorization as the party authorized to request a\ncrossconnect.", + "description": "Customer name, to put in the Letter of Authorization as the party authorized to request a\ncrossconnect. 
This field is required for Dedicated and Partner Interconnect, should not be specified\nfor cross-cloud interconnect.", "description_kind": "plain", - "required": true + "optional": true }, "description": { "type": "string", @@ -41584,9 +43753,9 @@ }, "location": { "type": "string", - "description": "URL of the InterconnectLocation object that represents where this connection is to be provisioned.", + "description": "URL of the InterconnectLocation object that represents where this connection is to be provisioned.\nSpecifies the location inside Google's Networks, should not be passed in case of cross-cloud interconnect.", "description_kind": "plain", - "required": true + "optional": true }, "macsec_enabled": { "type": "bool", @@ -41641,7 +43810,7 @@ "list", "string" ], - "description": "interconnects.list of features requested for this Interconnect connection. Options: MACSEC (\nIf specified then the connection is created on MACsec capable hardware ports. If not\nspecified, the default value is false, which allocates non-MACsec capable ports first if\navailable). Possible values: [\"MACSEC\"]", + "description": "interconnects.list of features requested for this Interconnect connection. Options: IF_MACSEC (\nIf specified then the connection is created on MACsec capable hardware ports. If not\nspecified, the default value is false, which allocates non-MACsec capable ports first if\navailable). Note that MACSEC is still technically allowed for compatibility reasons, but it\ndoes not work with the API, and will be removed in an upcoming major version. 
Possible values: [\"MACSEC\", \"IF_MACSEC\"]", "description_kind": "plain", "optional": true }, @@ -41677,6 +43846,14 @@ "macsec": { "nesting_mode": "list", "block": { + "attributes": { + "fail_open": { + "type": "bool", + "description": "If set to true, the Interconnect connection is configured with a should-secure\nMACsec security policy, that allows the Google router to fallback to cleartext\ntraffic if the MKA session cannot be established. By default, the Interconnect\nconnection is configured with a must-secure security policy that drops all traffic\nif the MKA session cannot be established with your router.", + "description_kind": "plain", + "optional": true + } + }, "block_types": { "pre_shared_keys": { "nesting_mode": "list", @@ -41686,6 +43863,7 @@ "type": "bool", "description": "If set to true, the Interconnect connection is configured with a should-secure\nMACsec security policy, that allows the Google router to fallback to cleartext\ntraffic if the MKA session cannot be established. By default, the Interconnect\nconnection is configured with a must-secure security policy that drops all traffic\nif the MKA session cannot be established with your router.", "description_kind": "plain", + "deprecated": true, "optional": true }, "name": { @@ -41974,7 +44152,6 @@ "type": "number", "description": "The unique identifier for the resource.", "description_kind": "plain", - "optional": true, "computed": true }, "creation_timestamp": { @@ -42142,10 +44319,17 @@ "description_kind": "plain", "optional": true }, + "network_id": { + "type": "string", + "description": "The unique identifier for the resource. This identifier is defined by the server.", + "description_kind": "plain", + "computed": true + }, "numeric_id": { "type": "string", "description": "The unique identifier for the resource. 
This identifier is defined by the server.", "description_kind": "plain", + "deprecated": true, "computed": true }, "project": { @@ -42696,7 +44880,7 @@ }, "firewall_policy": { "type": "string", - "description": "The firewall policy ID of the association.", + "description": "The firewall policy of the resource.", "description_kind": "plain", "required": true }, @@ -42714,7 +44898,6 @@ }, "project": { "type": "string", - "description": "The project for the resource", "description_kind": "plain", "optional": true, "computed": true @@ -42759,6 +44942,12 @@ "description_kind": "plain", "required": true }, + "creation_timestamp": { + "type": "string", + "description": "Creation timestamp in RFC3339 text format.", + "description_kind": "plain", + "computed": true + }, "description": { "type": "string", "description": "An optional description for this resource.", @@ -42767,19 +44956,19 @@ }, "direction": { "type": "string", - "description": "The direction in which this rule applies. Possible values: INGRESS, EGRESS", + "description": "The direction in which this rule applies. Possible values: [\"INGRESS\", \"EGRESS\"]", "description_kind": "plain", "required": true }, "disabled": { "type": "bool", - "description": "Denotes whether the firewall policy rule is disabled. When set to true, the firewall policy rule is not enforced and traffic behaves as if it did not exist. If this is unspecified, the firewall policy rule will be enabled.", + "description": "Denotes whether the firewall policy rule is disabled.\nWhen set to true, the firewall policy rule is not enforced and traffic behaves as if it did not exist.\nIf this is unspecified, the firewall policy rule will be enabled.", "description_kind": "plain", "optional": true }, "enable_logging": { "type": "bool", - "description": "Denotes whether to enable logging for a particular rule. If logging is enabled, logs will be exported to the configured export destination in Stackdriver. Logs may be exported to BigQuery or Pub/Sub. 
Note: you cannot enable logging on \"goto_next\" rules.", + "description": "Denotes whether to enable logging for a particular rule.\nIf logging is enabled, logs will be exported to the configured export destination in Stackdriver.\nLogs may be exported to BigQuery or Pub/Sub.\nNote: you cannot enable logging on \"goto_next\" rules.", "description_kind": "plain", "optional": true }, @@ -42797,19 +44986,18 @@ }, "kind": { "type": "string", - "description": "Type of the resource. Always `compute#firewallPolicyRule` for firewall policy rules", + "description": "Type of the resource. Always 'compute#firewallPolicyRule' for firewall policy rules", "description_kind": "plain", "computed": true }, "priority": { "type": "number", - "description": "An integer indicating the priority of a rule in the list. The priority must be a positive value between 0 and 2147483647. Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest prority.", + "description": "An integer indicating the priority of a rule in the list.\nThe priority must be a positive value between 0 and 2147483647.\nRules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest prority.", "description_kind": "plain", "required": true }, "project": { "type": "string", - "description": "The project for the resource", "description_kind": "plain", "optional": true, "computed": true @@ -42828,7 +45016,7 @@ }, "security_profile_group": { "type": "string", - "description": "A fully-qualified URL of a SecurityProfileGroup resource. Example: https://networksecurity.googleapis.com/v1/organizations/{organizationId}/locations/global/securityProfileGroups/my-security-profile-group. 
It must be specified if action = 'apply_security_profile_group' and cannot be specified for other actions.", + "description": "A fully-qualified URL of a SecurityProfile resource instance.\nExample: https://networksecurity.googleapis.com/v1/projects/{project}/locations/{location}/securityProfileGroups/my-security-profile-group\nMust be specified if action = 'apply_security_profile_group' and cannot be specified for other actions.", "description_kind": "plain", "optional": true }, @@ -42843,7 +45031,7 @@ }, "tls_inspect": { "type": "bool", - "description": "Boolean flag indicating if the traffic should be TLS decrypted. It can be set only if action = 'apply_security_profile_group' and cannot be set for other actions.", + "description": "Boolean flag indicating if the traffic should be TLS decrypted.\nCan be set only if action = 'apply_security_profile_group' and cannot be set for other actions.", "description_kind": "plain", "optional": true } @@ -42858,7 +45046,7 @@ "list", "string" ], - "description": "Address groups which should be matched against the traffic destination. Maximum number of destination address groups is 10. Destination address groups is only supported in Egress rules.", + "description": "Address groups which should be matched against the traffic destination. Maximum number of destination address groups is 10.", "description_kind": "plain", "optional": true }, @@ -42867,7 +45055,7 @@ "list", "string" ], - "description": "Domain names that will be used to match against the resolved domain name of destination of traffic. Can only be specified if DIRECTION is egress.", + "description": "Fully Qualified Domain Name (FQDN) which should be matched against traffic destination. Maximum number of destination fqdn allowed is 100.", "description_kind": "plain", "optional": true }, @@ -42885,7 +45073,7 @@ "list", "string" ], - "description": "The Unicode country codes whose IP addresses will be used to match against the source of traffic. 
Can only be specified if DIRECTION is egress.", + "description": "Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex.\"US\" Maximum number of dest region codes allowed is 5000.", "description_kind": "plain", "optional": true }, @@ -42894,7 +45082,7 @@ "list", "string" ], - "description": "Name of the Google Cloud Threat Intelligence list.", + "description": "Names of Network Threat Intelligence lists. The IPs in these lists will be matched against traffic destination.", "description_kind": "plain", "optional": true }, @@ -42903,7 +45091,7 @@ "list", "string" ], - "description": "Address groups which should be matched against the traffic source. Maximum number of source address groups is 10. Source address groups is only supported in Ingress rules.", + "description": "Address groups which should be matched against the traffic source. Maximum number of source address groups is 10.", "description_kind": "plain", "optional": true }, @@ -42912,7 +45100,7 @@ "list", "string" ], - "description": "Domain names that will be used to match against the resolved domain name of source of traffic. Can only be specified if DIRECTION is ingress.", + "description": "Fully Qualified Domain Name (FQDN) which should be matched against traffic source. Maximum number of source fqdn allowed is 100.", "description_kind": "plain", "optional": true }, @@ -42930,7 +45118,7 @@ "list", "string" ], - "description": "The Unicode country codes whose IP addresses will be used to match against the source of traffic. Can only be specified if DIRECTION is ingress.", + "description": "Region codes whose IP addresses will be used to match for source of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. 
ex.\"US\" Maximum number of source region codes allowed is 5000.", "description_kind": "plain", "optional": true }, @@ -42939,7 +45127,7 @@ "list", "string" ], - "description": "Name of the Google Cloud Threat Intelligence list.", + "description": "Names of Network Threat Intelligence lists. The IPs in these lists will be matched against traffic source.", "description_kind": "plain", "optional": true } @@ -42951,7 +45139,7 @@ "attributes": { "ip_protocol": { "type": "string", - "description": "The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (`tcp`, `udp`, `icmp`, `esp`, `ah`, `ipip`, `sctp`), or the IP protocol number.", + "description": "The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule.\nThis value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number.", "description_kind": "plain", "required": true }, @@ -42960,7 +45148,7 @@ "list", "string" ], - "description": "An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. Example inputs include: ``.", + "description": "An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port.\nExample inputs include: [\"22\"], [\"80\",\"443\"], and [\"12345-12349\"].", "description_kind": "plain", "optional": true } @@ -42976,18 +45164,18 @@ "attributes": { "name": { "type": "string", - "description": "Name of the secure tag, created with TagManager's TagValue API. 
@pattern tagValues/[0-9]+", + "description": "Name of the secure tag, created with TagManager's TagValue API.", "description_kind": "plain", - "required": true + "optional": true }, "state": { "type": "string", - "description": "[Output Only] State of the secure tag, either `EFFECTIVE` or `INEFFECTIVE`. A secure tag is `INEFFECTIVE` when it is deleted or its network is deleted.", + "description": "State of the secure tag, either EFFECTIVE or INEFFECTIVE. A secure tag is INEFFECTIVE when it is deleted or its network is deleted.", "description_kind": "plain", "computed": true } }, - "description": "List of secure tag values, which should be matched at the source of the traffic. For INGRESS rule, if all the srcSecureTag are INEFFECTIVE, and there is no srcIpRange, this rule will be ignored. Maximum number of source tag values allowed is 256.", + "description": "List of secure tag values, which should be matched at the source of the traffic. For INGRESS rule, if all the srcSecureTag are INEFFECTIVE, and there is no srcIpRange, this rule will be ignored. Maximum number of source tag values allowed is 256.", "description_kind": "plain" } } @@ -43004,18 +45192,18 @@ "attributes": { "name": { "type": "string", - "description": "Name of the secure tag, created with TagManager's TagValue API. @pattern tagValues/[0-9]+", + "description": "Name of the secure tag, created with TagManager's TagValue API.", "description_kind": "plain", - "required": true + "optional": true }, "state": { "type": "string", - "description": "[Output Only] State of the secure tag, either `EFFECTIVE` or `INEFFECTIVE`. A secure tag is `INEFFECTIVE` when it is deleted or its network is deleted.", + "description": "State of the secure tag, either EFFECTIVE or INEFFECTIVE. A secure tag is INEFFECTIVE when it is deleted or its network is deleted.", "description_kind": "plain", "computed": true } }, - "description": "A list of secure tags that controls which instances the firewall rule applies to. 
If targetSecureTag are specified, then the firewall rule applies only to instances in the VPC network that have one of those EFFECTIVE secure tags, if all the target_secure_tag are in INEFFECTIVE state, then this rule will be ignored. targetSecureTag may not be set at the same time as targetServiceAccounts. If neither targetServiceAccounts nor targetSecureTag are specified, the firewall rule applies to all instances on the specified network. Maximum number of target label tags allowed is 256.", + "description": "A list of secure tags that controls which instances the firewall rule applies to.\nIf targetSecureTag are specified, then the firewall rule applies only to instances in the VPC network that have one of those EFFECTIVE secure tags, if all the targetSecureTag are in INEFFECTIVE state, then this rule will be ignored.\ntargetSecureTag may not be set at the same time as targetServiceAccounts. If neither targetServiceAccounts nor targetSecureTag are specified, the firewall rule applies to all instances on the specified network. 
Maximum number of target label tags allowed is 256.", "description_kind": "plain" } }, @@ -43465,6 +45653,54 @@ } }, "block_types": { + "accelerators": { + "nesting_mode": "list", + "block": { + "attributes": { + "accelerator_count": { + "type": "number", + "description": "The number of the guest accelerator cards exposed to this\nnode template.", + "description_kind": "plain", + "optional": true + }, + "accelerator_type": { + "type": "string", + "description": "Full or partial URL of the accelerator type resource to expose\nto this node template.", + "description_kind": "plain", + "optional": true + } + }, + "description": "List of the type and count of accelerator cards attached to the\nnode template", + "description_kind": "plain" + } + }, + "disks": { + "nesting_mode": "list", + "block": { + "attributes": { + "disk_count": { + "type": "number", + "description": "Specifies the number of such disks.", + "description_kind": "plain", + "optional": true + }, + "disk_size_gb": { + "type": "number", + "description": "Specifies the size of the disk in base-2 GB.", + "description_kind": "plain", + "optional": true + }, + "disk_type": { + "type": "string", + "description": "Specifies the desired disk type on the node. This disk type must be a local storage type (e.g.: local-ssd). Note that for nodeTemplates, this should be the name of the disk type and not its URL.", + "description_kind": "plain", + "optional": true + } + }, + "description": "List of the type, size and count of disks attached to the\nnode template", + "description_kind": "plain" + } + }, "node_type_flexibility": { "nesting_mode": "list", "block": { @@ -43946,7 +46182,7 @@ "attributes": { "cloud_armor_tier": { "type": "string", - "description": "Managed protection tier to be set. Possible values: [\"CA_STANDARD\", \"CA_ENTERPRISE_PAYGO\"]", + "description": "Managed protection tier to be set. 
Possible values: [\"CA_STANDARD\", \"CA_ENTERPRISE_PAYGO\", \"CA_ENTERPRISE_ANNUAL\"]", "description_kind": "plain", "required": true }, @@ -44487,7 +46723,7 @@ "optional": true } }, - "description": "A nested object resource", + "description": "A nested object resource.", "description_kind": "plain" }, "max_items": 1 @@ -44594,7 +46830,7 @@ }, "connection_draining_timeout_sec": { "type": "number", - "description": "Time for which instance will be drained (not accept new\nconnections, but still work to finish started).\n\nFrom version 6.0.0 ConnectionDrainingTimeoutSec default value will be 300 to match default GCP value.", + "description": "Time for which instance will be drained (not accept new\nconnections, but still work to finish started).", "description_kind": "plain", "optional": true }, @@ -44643,6 +46879,12 @@ "optional": true, "computed": true }, + "ip_address_selection_policy": { + "type": "string", + "description": "Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). Possible values: [\"IPV4_ONLY\", \"PREFER_IPV6\", \"IPV6_ONLY\"]", + "description_kind": "plain", + "optional": true + }, "load_balancing_scheme": { "type": "string", "description": "Indicates what kind of load balancing this regional backend service\nwill be used for. A backend service created for one type of load\nbalancing cannot be used with the other(s). For more information, refer to\n[Choosing a load balancer](https://cloud.google.com/load-balancing/docs/backend-service). 
Default value: \"INTERNAL\" Possible values: [\"EXTERNAL\", \"EXTERNAL_MANAGED\", \"INTERNAL\", \"INTERNAL_MANAGED\"]", @@ -44651,7 +46893,7 @@ }, "locality_lb_policy": { "type": "string", - "description": "The load balancing algorithm used within the scope of the locality.\nThe possible values are:\n\n* 'ROUND_ROBIN': This is a simple policy in which each healthy backend\n is selected in round robin order.\n\n* 'LEAST_REQUEST': An O(1) algorithm which selects two random healthy\n hosts and picks the host which has fewer active requests.\n\n* 'RING_HASH': The ring/modulo hash load balancer implements consistent\n hashing to backends. The algorithm has the property that the\n addition/removal of a host from a set of N hosts only affects\n 1/N of the requests.\n\n* 'RANDOM': The load balancer selects a random healthy host.\n\n* 'ORIGINAL_DESTINATION': Backend host is selected based on the client\n connection metadata, i.e., connections are opened\n to the same address as the destination address of\n the incoming connection before the connection\n was redirected to the load balancer.\n\n* 'MAGLEV': used as a drop in replacement for the ring hash load balancer.\n Maglev is not as stable as ring hash but has faster table lookup\n build times and host selection times. For more information about\n Maglev, refer to https://ai.google/research/pubs/pub44824\n\n* 'WEIGHTED_MAGLEV': Per-instance weighted Load Balancing via health check\n reported weights. If set, the Backend Service must\n configure a non legacy HTTP-based Health Check, and\n health check replies are expected to contain\n non-standard HTTP response header field\n X-Load-Balancing-Endpoint-Weight to specify the\n per-instance weights. If set, Load Balancing is weight\n based on the per-instance weights reported in the last\n processed health check replies, as long as every\n instance either reported a valid weight or had\n UNAVAILABLE_WEIGHT. 
Otherwise, Load Balancing remains\n equal-weight.\n\nThis field is applicable to either:\n\n* A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2,\n and loadBalancingScheme set to INTERNAL_MANAGED.\n* A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED.\n* A regional backend service with loadBalancingScheme set to EXTERNAL (External Network\n Load Balancing). Only MAGLEV and WEIGHTED_MAGLEV values are possible for External\n Network Load Balancing. The default is MAGLEV.\n\nIf session_affinity is not NONE, and this field is not set to MAGLEV, WEIGHTED_MAGLEV,\nor RING_HASH, session affinity settings will not take effect.\n\nOnly ROUND_ROBIN and RING_HASH are supported when the backend service is referenced\nby a URL map that is bound to target gRPC proxy that has validate_for_proxyless\nfield set to true. Possible values: [\"ROUND_ROBIN\", \"LEAST_REQUEST\", \"RING_HASH\", \"RANDOM\", \"ORIGINAL_DESTINATION\", \"MAGLEV\", \"WEIGHTED_MAGLEV\"]", + "description": "The load balancing algorithm used within the scope of the locality.\nThe possible values are:\n\n* 'ROUND_ROBIN': This is a simple policy in which each healthy backend\n is selected in round robin order.\n\n* 'LEAST_REQUEST': An O(1) algorithm which selects two random healthy\n hosts and picks the host which has fewer active requests.\n\n* 'RING_HASH': The ring/modulo hash load balancer implements consistent\n hashing to backends. 
The algorithm has the property that the\n addition/removal of a host from a set of N hosts only affects\n 1/N of the requests.\n\n* 'RANDOM': The load balancer selects a random healthy host.\n\n* 'ORIGINAL_DESTINATION': Backend host is selected based on the client\n connection metadata, i.e., connections are opened\n to the same address as the destination address of\n the incoming connection before the connection\n was redirected to the load balancer.\n\n* 'MAGLEV': used as a drop in replacement for the ring hash load balancer.\n Maglev is not as stable as ring hash but has faster table lookup\n build times and host selection times. For more information about\n Maglev, refer to https://ai.google/research/pubs/pub44824\n\n* 'WEIGHTED_MAGLEV': Per-instance weighted Load Balancing via health check\n reported weights. Only applicable to loadBalancingScheme\n EXTERNAL. If set, the Backend Service must\n configure a non legacy HTTP-based Health Check, and\n health check replies are expected to contain\n non-standard HTTP response header field\n X-Load-Balancing-Endpoint-Weight to specify the\n per-instance weights. If set, Load Balancing is weight\n based on the per-instance weights reported in the last\n processed health check replies, as long as every\n instance either reported a valid weight or had\n UNAVAILABLE_WEIGHT. Otherwise, Load Balancing remains\n equal-weight.\n\nlocality_lb_policy is applicable to either:\n\n* A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2,\n and loadBalancingScheme set to INTERNAL_MANAGED.\n* A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED.\n* A regional backend service with loadBalancingScheme set to EXTERNAL (External Network\n Load Balancing). Only MAGLEV and WEIGHTED_MAGLEV values are possible for External\n Network Load Balancing. 
The default is MAGLEV.\n\nIf session_affinity is not NONE, and locality_lb_policy is not set to MAGLEV, WEIGHTED_MAGLEV,\nor RING_HASH, session affinity settings will not take effect.\n\nOnly ROUND_ROBIN and RING_HASH are supported when the backend service is referenced\nby a URL map that is bound to target gRPC proxy that has validate_for_proxyless\nfield set to true. Possible values: [\"ROUND_ROBIN\", \"LEAST_REQUEST\", \"RING_HASH\", \"RANDOM\", \"ORIGINAL_DESTINATION\", \"MAGLEV\", \"WEIGHTED_MAGLEV\"]", "description_kind": "plain", "optional": true }, @@ -44701,7 +46943,7 @@ }, "session_affinity": { "type": "string", - "description": "Type of session affinity to use. The default is NONE. Session affinity is\nnot applicable if the protocol is UDP. Possible values: [\"NONE\", \"CLIENT_IP\", \"CLIENT_IP_PORT_PROTO\", \"CLIENT_IP_PROTO\", \"GENERATED_COOKIE\", \"HEADER_FIELD\", \"HTTP_COOKIE\", \"CLIENT_IP_NO_DESTINATION\"]", + "description": "Type of session affinity to use. The default is NONE. Session affinity is\nnot applicable if the protocol is UDP. Possible values: [\"NONE\", \"CLIENT_IP\", \"CLIENT_IP_PORT_PROTO\", \"CLIENT_IP_PROTO\", \"GENERATED_COOKIE\", \"HEADER_FIELD\", \"HTTP_COOKIE\", \"CLIENT_IP_NO_DESTINATION\", \"STRONG_COOKIE_AFFINITY\"]", "description_kind": "plain", "optional": true, "computed": true @@ -44721,7 +46963,7 @@ "attributes": { "balancing_mode": { "type": "string", - "description": "Specifies the balancing mode for this backend.\n\nSee the [Backend Services Overview](https://cloud.google.com/load-balancing/docs/backend-service#balancing-mode)\nfor an explanation of load balancing modes.\n\nFrom version 6.0.0 default value will be UTILIZATION to match default GCP value. 
Default value: \"CONNECTION\" Possible values: [\"UTILIZATION\", \"RATE\", \"CONNECTION\"]", + "description": "Specifies the balancing mode for this backend.\n\nSee the [Backend Services Overview](https://cloud.google.com/load-balancing/docs/backend-service#balancing-mode)\nfor an explanation of load balancing modes. Default value: \"UTILIZATION\" Possible values: [\"UTILIZATION\", \"RATE\", \"CONNECTION\"]", "description_kind": "plain", "optional": true }, @@ -45071,17 +47313,23 @@ "nesting_mode": "list", "block": { "attributes": { + "enabled": { + "type": "bool", + "description": "Whether the serving infrastructure will authenticate and authorize all incoming requests.", + "description_kind": "plain", + "required": true + }, "oauth2_client_id": { "type": "string", "description": "OAuth2 Client ID for IAP", "description_kind": "plain", - "required": true + "optional": true }, "oauth2_client_secret": { "type": "string", "description": "OAuth2 Client Secret for IAP", "description_kind": "plain", - "required": true, + "optional": true, "sensitive": true }, "oauth2_client_secret_sha256": { @@ -45224,7 +47472,53 @@ "max_items": 1 } }, - "description": "Settings controlling eviction of unhealthy hosts from the load balancing pool.\nThis field is applicable only when the 'load_balancing_scheme' is set\nto INTERNAL_MANAGED and the 'protocol' is set to HTTP, HTTPS, or HTTP2.\n\nFrom version 6.0.0 outlierDetection default terraform values will be removed to match default GCP value.\nDefault values are enforce by GCP without providing them.", + "description": "Settings controlling eviction of unhealthy hosts from the load balancing pool.\nThis field is applicable only when the 'load_balancing_scheme' is set\nto INTERNAL_MANAGED and the 'protocol' is set to HTTP, HTTPS, or HTTP2.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "strong_session_affinity_cookie": { + "nesting_mode": "list", + "block": { + "attributes": { + "name": { + "type": "string", + 
"description": "Name of the cookie.", + "description_kind": "plain", + "optional": true + }, + "path": { + "type": "string", + "description": "Path to set for the cookie.", + "description_kind": "plain", + "optional": true + } + }, + "block_types": { + "ttl": { + "nesting_mode": "list", + "block": { + "attributes": { + "nanos": { + "type": "number", + "description": "Span of time that's a fraction of a second at nanosecond\nresolution. Durations less than one second are represented\nwith a 0 seconds field and a positive nanos field. Must\nbe from 0 to 999,999,999 inclusive.", + "description_kind": "plain", + "optional": true + }, + "seconds": { + "type": "number", + "description": "Span of time at a resolution of a second.\nMust be from 0 to 315,576,000,000 inclusive.", + "description_kind": "plain", + "required": true + } + }, + "description": "Lifetime of the cookie.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "Describes the HTTP cookie used for stateful session affinity. This field is applicable and required if the sessionAffinity is set to STRONG_COOKIE_AFFINITY.", "description_kind": "plain" }, "max_items": 1 @@ -45298,6 +47592,13 @@ "description_kind": "plain", "computed": true }, + "existing_reservations": { + "type": "string", + "description": "Specifies the already existing reservations to attach to the Commitment.", + "description_kind": "plain", + "optional": true, + "computed": true + }, "id": { "type": "string", "description_kind": "plain", @@ -45614,7 +47915,7 @@ "required": true } }, - "description": "A nested object resource", + "description": "A nested object resource.", "description_kind": "plain" }, "max_items": 1 @@ -45981,6 +48282,12 @@ "description_kind": "plain", "optional": true }, + "health_check_id": { + "type": "number", + "description": "The unique identifier number for the resource. 
This identifier is defined by the server.", + "description_kind": "plain", + "computed": true + }, "healthy_threshold": { "type": "number", "description": "A so-far unhealthy instance will be marked healthy after this many\nconsecutive successes. The default value is 2.", @@ -46066,7 +48373,7 @@ "optional": true } }, - "description": "A nested object resource", + "description": "A nested object resource.", "description_kind": "plain" }, "max_items": 1 @@ -46118,7 +48425,7 @@ "optional": true } }, - "description": "A nested object resource", + "description": "A nested object resource.", "description_kind": "plain" }, "max_items": 1 @@ -46170,7 +48477,7 @@ "optional": true } }, - "description": "A nested object resource", + "description": "A nested object resource.", "description_kind": "plain" }, "max_items": 1 @@ -46222,7 +48529,7 @@ "optional": true } }, - "description": "A nested object resource", + "description": "A nested object resource.", "description_kind": "plain" }, "max_items": 1 @@ -46284,7 +48591,7 @@ "optional": true } }, - "description": "A nested object resource", + "description": "A nested object resource.", "description_kind": "plain" }, "max_items": 1 @@ -46330,7 +48637,7 @@ "optional": true } }, - "description": "A nested object resource", + "description": "A nested object resource.", "description_kind": "plain" }, "max_items": 1 @@ -46419,6 +48726,12 @@ "description_kind": "plain", "computed": true }, + "instance_group_manager_id": { + "type": "number", + "description": "The unique identifier number for the resource. This identifier is defined by the server.", + "description_kind": "plain", + "computed": true + }, "list_managed_instances_results": { "type": "string", "description": "Pagination behavior of the listManagedInstances API method for this managed instance group. Valid values are: \"PAGELESS\", \"PAGINATED\". If PAGELESS (default), Pagination is disabled for the group's listManagedInstances API method. 
maxResults and pageToken query parameters are ignored and all instances are returned in a single response. If PAGINATED, pagination is enabled, maxResults and pageToken query parameters are respected.", @@ -46518,6 +48831,20 @@ "optional": true, "computed": true }, + "target_stopped_size": { + "type": "number", + "description": "The target number of stopped instances for this managed instance group.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "target_suspended_size": { + "type": "number", + "description": "The target number of suspended instances for this managed instance group.", + "description_kind": "plain", + "optional": true, + "computed": true + }, "wait_for_instances": { "type": "bool", "description": "Whether to wait for all instances to be created/updated before returning. Note that if this is set to true and the operation does not succeed, Terraform will continue trying until it times out.", @@ -46582,6 +48909,46 @@ }, "max_items": 1 }, + "instance_flexibility_policy": { + "nesting_mode": "list", + "block": { + "block_types": { + "instance_selections": { + "nesting_mode": "set", + "block": { + "attributes": { + "machine_types": { + "type": [ + "set", + "string" + ], + "description": "Full machine-type names, e.g. \"n1-standard-16\"", + "description_kind": "plain", + "required": true + }, + "name": { + "type": "string", + "description": "Instance selection name.", + "description_kind": "plain", + "required": true + }, + "rank": { + "type": "number", + "description": "Preference of this instance selection. Lower number means higher preference. MIG will first try to create a VM based on the machine-type with lowest rank and fallback to next rank based on availability. 
Machine types and instance selections with the same rank have the same preference.", + "description_kind": "plain", + "optional": true + } + }, + "description": "Named instance selections configuring properties that the group will use when creating new VMs.", + "description_kind": "plain" + } + } + }, + "description": "The flexibility policy for this managed instance group. Instance flexibility allowing MIG to create VMs from multiple types of machines. Instance flexibility configuration on MIG overrides instance template configuration.", + "description_kind": "plain" + }, + "max_items": 1 + }, "instance_lifecycle_policy": { "nesting_mode": "list", "block": { @@ -46625,6 +48992,30 @@ "description_kind": "plain" } }, + "standby_policy": { + "nesting_mode": "list", + "block": { + "attributes": { + "initial_delay_sec": { + "type": "number", + "description": "Specifies the number of seconds that the MIG should wait to suspend or stop a VM after that VM was created. The initial delay gives the initialization script the time to prepare your VM for a quick scale out. The value of initial delay must be between 0 and 3600 seconds. The default value is 0.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "mode": { + "type": "string", + "description": "Defines how a MIG resumes or starts VMs from a standby pool when the group scales out. 
The default mode is \"MANUAL\".", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "description": "Standby policy for stopped and suspended instances.", + "description_kind": "plain" + }, + "max_items": 1 + }, "stateful_disk": { "nesting_mode": "set", "block": { @@ -46837,6 +49228,12 @@ "description_kind": "plain", "optional": true }, + "creation_timestamp": { + "type": "string", + "description": "The time at which the instance was created in RFC 3339 format.", + "description_kind": "plain", + "computed": true + }, "description": { "type": "string", "description": "A brief description of this resource.", @@ -46864,6 +49261,12 @@ "description_kind": "plain", "optional": true }, + "key_revocation_action_type": { + "type": "string", + "description": "Action to be taken when a customer's encryption key is revoked. Supports \"STOP\" and \"NONE\", with \"NONE\" being the default.", + "description_kind": "plain", + "optional": true + }, "labels": { "type": [ "map", @@ -46994,12 +49397,30 @@ "description_kind": "plain", "optional": true }, + "enable_uefi_networking": { + "type": "bool", + "description": "Whether to enable UEFI networking or not.", + "description_kind": "plain", + "optional": true + }, + "performance_monitoring_unit": { + "type": "string", + "description": "The PMU is a hardware component within the CPU core that monitors how the processor runs code. Valid values for the level of PMU are \"STANDARD\", \"ENHANCED\", and \"ARCHITECTURAL\".", + "description_kind": "plain", + "optional": true + }, "threads_per_core": { "type": "number", "description": "The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.", "description_kind": "plain", "optional": true }, + "turbo_mode": { + "type": "string", + "description": "Turbo frequency mode to use for the instance. 
Currently supported modes is \"ALL_CORE_MAX\".", + "description_kind": "plain", + "optional": true + }, "visible_core_count": { "type": "number", "description": "The number of physical cores to expose to an instance. Multiply by the number of threads per core to compute the total number of virtual CPUs to expose to the instance. If unset, the number of cores is inferred from the instance\\'s nominal CPU count and the underlying platform\\'s SMT width.", @@ -47018,7 +49439,7 @@ "attributes": { "confidential_instance_type": { "type": "string", - "description": "\n\t\t\t\t\t\t\t\tSpecifies which confidential computing technology to use.\n\t\t\t\t\t\t\t\tThis could be one of the following values: SEV, SEV_SNP.\n\t\t\t\t\t\t\t\tIf SEV_SNP, min_cpu_platform = \"AMD Milan\" is currently required.", + "description": "\n\t\t\t\t\t\t\t\tThe confidential computing technology the instance uses.\n\t\t\t\t\t\t\t\tSEV is an AMD feature. TDX is an Intel feature. One of the following\n\t\t\t\t\t\t\t\tvalues is required: SEV, SEV_SNP, TDX. If SEV_SNP, min_cpu_platform =\n\t\t\t\t\t\t\t\t\"AMD Milan\" is currently required.", "description_kind": "plain", "optional": true }, @@ -47103,7 +49524,14 @@ }, "provisioned_iops": { "type": "number", - "description": "Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. Values must be between 10,000 and 120,000. For more details, see the [Extreme persistent disk documentation](https://cloud.google.com/compute/docs/disks/extreme-persistent-disk).", + "description": "Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. 
For more details, see the [Extreme persistent disk documentation](https://cloud.google.com/compute/docs/disks/extreme-persistent-disk) or the [Hyperdisk documentation](https://cloud.google.com/compute/docs/disks/hyperdisks) depending on the selected disk_type.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "provisioned_throughput": { + "type": "number", + "description": "Indicates how much throughput to provision for the disk, in MB/s. This sets the amount of data that can be read or written from the disk per second. Values must greater than or equal to 1. For more details, see the [Hyperdisk documentation](https://cloud.google.com/compute/docs/disks/hyperdisks).", "description_kind": "plain", "optional": true, "computed": true @@ -47482,6 +49910,12 @@ "description_kind": "plain", "optional": true }, + "availability_domain": { + "type": "number", + "description": "Specifies the availability domain, which this instance should be scheduled on.", + "description_kind": "plain", + "optional": true + }, "instance_termination_action": { "type": "string", "description": "Specifies the action GCE should take when SPOT VM is preempted.", @@ -47706,6 +50140,12 @@ "description_kind": "plain", "optional": true }, + "network_endpoint_id": { + "type": "number", + "description": "The unique identifier number for the resource. This identifier is defined by the server.", + "description_kind": "plain", + "computed": true + }, "port": { "type": "number", "description": "Port number of network endpoint.", @@ -47898,6 +50338,22 @@ }, "max_items": 1 }, + "psc_data": { + "nesting_mode": "list", + "block": { + "attributes": { + "producer_port": { + "type": "string", + "description": "The PSC producer port to use when consumer PSC NEG connects to a producer. 
If\nthis flag isn't specified for a PSC NEG with endpoint type\nprivate-service-connect, then PSC NEG will be connected to a first port in the\navailable PSC producer port range.", + "description_kind": "plain", + "optional": true + } + }, + "description": "This field is only used for PSC NEGs.", + "description_kind": "plain" + }, + "max_items": 1 + }, "timeouts": { "nesting_mode": "single", "block": { @@ -48032,7 +50488,7 @@ }, "firewall_policy": { "type": "string", - "description": "The firewall policy ID of the association.", + "description": "The firewall policy of the resource.", "description_kind": "plain", "required": true }, @@ -48050,7 +50506,6 @@ }, "project": { "type": "string", - "description": "The project for the resource", "description_kind": "plain", "optional": true, "computed": true @@ -48102,6 +50557,12 @@ "description_kind": "plain", "required": true }, + "creation_timestamp": { + "type": "string", + "description": "Creation timestamp in RFC3339 text format.", + "description_kind": "plain", + "computed": true + }, "description": { "type": "string", "description": "An optional description for this resource.", @@ -48110,19 +50571,19 @@ }, "direction": { "type": "string", - "description": "The direction in which this rule applies. Possible values: INGRESS, EGRESS", + "description": "The direction in which this rule applies. Possible values: [\"INGRESS\", \"EGRESS\"]", "description_kind": "plain", "required": true }, "disabled": { "type": "bool", - "description": "Denotes whether the firewall policy rule is disabled. When set to true, the firewall policy rule is not enforced and traffic behaves as if it did not exist. 
If this is unspecified, the firewall policy rule will be enabled.", + "description": "Denotes whether the firewall policy rule is disabled.\nWhen set to true, the firewall policy rule is not enforced and traffic behaves as if it did not exist.\nIf this is unspecified, the firewall policy rule will be enabled.", "description_kind": "plain", "optional": true }, "enable_logging": { "type": "bool", - "description": "Denotes whether to enable logging for a particular rule. If logging is enabled, logs will be exported to the configured export destination in Stackdriver. Logs may be exported to BigQuery or Pub/Sub. Note: you cannot enable logging on \"goto_next\" rules.", + "description": "Denotes whether to enable logging for a particular rule.\nIf logging is enabled, logs will be exported to the configured export destination in Stackdriver.\nLogs may be exported to BigQuery or Pub/Sub.\nNote: you cannot enable logging on \"goto_next\" rules.", "description_kind": "plain", "optional": true }, @@ -48140,19 +50601,18 @@ }, "kind": { "type": "string", - "description": "Type of the resource. Always `compute#firewallPolicyRule` for firewall policy rules", + "description": "Type of the resource. Always 'compute#firewallPolicyRule' for firewall policy rules", "description_kind": "plain", "computed": true }, "priority": { "type": "number", - "description": "An integer indicating the priority of a rule in the list. The priority must be a positive value between 0 and 2147483647. 
Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest prority.", + "description": "An integer indicating the priority of a rule in the list.\nThe priority must be a positive value between 0 and 2147483647.\nRules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest prority.", "description_kind": "plain", "required": true }, "project": { "type": "string", - "description": "The project for the resource", "description_kind": "plain", "optional": true, "computed": true @@ -48178,7 +50638,7 @@ }, "security_profile_group": { "type": "string", - "description": "A fully-qualified URL of a SecurityProfileGroup resource. Example: https://networksecurity.googleapis.com/v1/organizations/{organizationId}/locations/global/securityProfileGroups/my-security-profile-group. It must be specified if action = 'apply_security_profile_group' and cannot be specified for other actions.", + "description": "A fully-qualified URL of a SecurityProfile resource instance.\nExample: https://networksecurity.googleapis.com/v1/projects/{project}/locations/{location}/securityProfileGroups/my-security-profile-group\nMust be specified if action = 'apply_security_profile_group' and cannot be specified for other actions.\n\nSecurity Profile Group and Firewall Policy Rule must be in the same scope.", "description_kind": "plain", "optional": true }, @@ -48193,7 +50653,7 @@ }, "tls_inspect": { "type": "bool", - "description": "Boolean flag indicating if the traffic should be TLS decrypted. 
It can be set only if action = 'apply_security_profile_group' and cannot be set for other actions.", + "description": "Boolean flag indicating if the traffic should be TLS decrypted.\nCan be set only if action = 'apply_security_profile_group' and cannot be set for other actions.", "description_kind": "plain", "optional": true } @@ -48208,7 +50668,7 @@ "list", "string" ], - "description": "Address groups which should be matched against the traffic destination. Maximum number of destination address groups is 10. Destination address groups is only supported in Egress rules.", + "description": "Address groups which should be matched against the traffic destination. Maximum number of destination address groups is 10.", "description_kind": "plain", "optional": true }, @@ -48217,7 +50677,7 @@ "list", "string" ], - "description": "Domain names that will be used to match against the resolved domain name of destination of traffic. Can only be specified if DIRECTION is egress.", + "description": "Fully Qualified Domain Name (FQDN) which should be matched against traffic destination. Maximum number of destination fqdn allowed is 100.", "description_kind": "plain", "optional": true }, @@ -48235,7 +50695,7 @@ "list", "string" ], - "description": "The Unicode country codes whose IP addresses will be used to match against the source of traffic. Can only be specified if DIRECTION is egress.", + "description": "Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex.\"US\" Maximum number of dest region codes allowed is 5000.", "description_kind": "plain", "optional": true }, @@ -48244,7 +50704,7 @@ "list", "string" ], - "description": "Name of the Google Cloud Threat Intelligence list.", + "description": "Names of Network Threat Intelligence lists. 
The IPs in these lists will be matched against traffic destination.", "description_kind": "plain", "optional": true }, @@ -48253,7 +50713,7 @@ "list", "string" ], - "description": "Address groups which should be matched against the traffic source. Maximum number of source address groups is 10. Source address groups is only supported in Ingress rules.", + "description": "Address groups which should be matched against the traffic source. Maximum number of source address groups is 10.", "description_kind": "plain", "optional": true }, @@ -48262,7 +50722,7 @@ "list", "string" ], - "description": "Domain names that will be used to match against the resolved domain name of source of traffic. Can only be specified if DIRECTION is ingress.", + "description": "Fully Qualified Domain Name (FQDN) which should be matched against traffic source. Maximum number of source fqdn allowed is 100.", "description_kind": "plain", "optional": true }, @@ -48280,7 +50740,7 @@ "list", "string" ], - "description": "The Unicode country codes whose IP addresses will be used to match against the source of traffic. Can only be specified if DIRECTION is ingress.", + "description": "Region codes whose IP addresses will be used to match for source of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex.\"US\" Maximum number of source region codes allowed is 5000.", "description_kind": "plain", "optional": true }, @@ -48289,7 +50749,7 @@ "list", "string" ], - "description": "Name of the Google Cloud Threat Intelligence list.", + "description": "Names of Network Threat Intelligence lists. The IPs in these lists will be matched against traffic source.", "description_kind": "plain", "optional": true } @@ -48301,7 +50761,7 @@ "attributes": { "ip_protocol": { "type": "string", - "description": "The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. 
This value can either be one of the following well known protocol strings (`tcp`, `udp`, `icmp`, `esp`, `ah`, `ipip`, `sctp`), or the IP protocol number.", + "description": "The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule.\nThis value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number.", "description_kind": "plain", "required": true }, @@ -48310,7 +50770,7 @@ "list", "string" ], - "description": "An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. Example inputs include: ``.", + "description": "An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port.\nExample inputs include: [\"22\"], [\"80\",\"443\"], and [\"12345-12349\"].", "description_kind": "plain", "optional": true } @@ -48326,18 +50786,18 @@ "attributes": { "name": { "type": "string", - "description": "Name of the secure tag, created with TagManager's TagValue API. @pattern tagValues/[0-9]+", + "description": "Name of the secure tag, created with TagManager's TagValue API.", "description_kind": "plain", - "required": true + "optional": true }, "state": { "type": "string", - "description": "[Output Only] State of the secure tag, either `EFFECTIVE` or `INEFFECTIVE`. A secure tag is `INEFFECTIVE` when it is deleted or its network is deleted.", + "description": "State of the secure tag, either EFFECTIVE or INEFFECTIVE. A secure tag is INEFFECTIVE when it is deleted or its network is deleted.", "description_kind": "plain", "computed": true } }, - "description": "List of secure tag values, which should be matched at the source of the traffic. 
For INGRESS rule, if all the srcSecureTag are INEFFECTIVE, and there is no srcIpRange, this rule will be ignored. Maximum number of source tag values allowed is 256.", + "description": "List of secure tag values, which should be matched at the source of the traffic. For INGRESS rule, if all the srcSecureTag are INEFFECTIVE, and there is no srcIpRange, this rule will be ignored. Maximum number of source tag values allowed is 256.", "description_kind": "plain" } } @@ -48354,18 +50814,18 @@ "attributes": { "name": { "type": "string", - "description": "Name of the secure tag, created with TagManager's TagValue API. @pattern tagValues/[0-9]+", + "description": "Name of the secure tag, created with TagManager's TagValue API.", "description_kind": "plain", - "required": true + "optional": true }, "state": { "type": "string", - "description": "[Output Only] State of the secure tag, either `EFFECTIVE` or `INEFFECTIVE`. A secure tag is `INEFFECTIVE` when it is deleted or its network is deleted.", + "description": "State of the secure tag, either EFFECTIVE or INEFFECTIVE. A secure tag is INEFFECTIVE when it is deleted or its network is deleted.", "description_kind": "plain", "computed": true } }, - "description": "A list of secure tags that controls which instances the firewall rule applies to. If targetSecureTag are specified, then the firewall rule applies only to instances in the VPC network that have one of those EFFECTIVE secure tags, if all the target_secure_tag are in INEFFECTIVE state, then this rule will be ignored. targetSecureTag may not be set at the same time as targetServiceAccounts. If neither targetServiceAccounts nor targetSecureTag are specified, the firewall rule applies to all instances on the specified network. 
Maximum number of target label tags allowed is 256.", + "description": "A list of secure tags that controls which instances the firewall rule applies to.\nIf targetSecureTag are specified, then the firewall rule applies only to instances in the VPC network that have one of those EFFECTIVE secure tags, if all the targetSecureTag are in INEFFECTIVE state, then this rule will be ignored.\ntargetSecureTag may not be set at the same time as targetServiceAccounts. If neither targetServiceAccounts nor targetSecureTag are specified, the firewall rule applies to all instances on the specified network. Maximum number of target label tags allowed is 256.", "description_kind": "plain" } }, @@ -48845,6 +51305,12 @@ "description_kind": "plain", "optional": true }, + "http_keep_alive_timeout_sec": { + "type": "number", + "description": "Specifies how long to keep a connection open, after completing a response,\nwhile there is no matching traffic (in seconds). If an HTTP keepalive is\nnot specified, a default value (600 seconds) will be used. For Regional\nHTTP(S) load balancer, the minimum allowed value is 5 seconds and the\nmaximum allowed value is 600 seconds.", + "description_kind": "plain", + "optional": true + }, "id": { "type": "string", "description_kind": "plain", @@ -48941,6 +51407,12 @@ "description_kind": "plain", "optional": true }, + "http_keep_alive_timeout_sec": { + "type": "number", + "description": "Specifies how long to keep a connection open, after completing a response,\nwhile there is no matching traffic (in seconds). If an HTTP keepalive is\nnot specified, a default value (600 seconds) will be used. For Regioanl\nHTTP(S) load balancer, the minimum allowed value is 5 seconds and the\nmaximum allowed value is 600 seconds.", + "description_kind": "plain", + "optional": true + }, "id": { "type": "string", "description_kind": "plain", @@ -51187,7 +53659,7 @@ }, "instance_group_manager": { "type": "string", - "description": "The name of the managed instance group. 
The name should conform to RFC1035 or be a resource ID.\nAuthorization requires the following IAM permission on the specified resource instanceGroupManager:\n*compute.instanceGroupManagers.update", + "description": "The reference of the instance group manager this ResizeRequest is a part of.", "description_kind": "plain", "required": true }, @@ -51211,7 +53683,7 @@ }, "state": { "type": "string", - "description": "[Output only] Current state of the request.", + "description": "Current state of the request.", "description_kind": "plain", "computed": true }, @@ -51404,13 +53876,13 @@ } ] ], - "description": "[Output only] Status of the request.", + "description": "Status of the request.", "description_kind": "plain", "computed": true }, "zone": { "type": "string", - "description": "Name of the compute zone scoping this request. Name should conform to RFC1035.", + "description": "The reference of the compute zone scoping this request.", "description_kind": "plain", "required": true } @@ -51428,7 +53900,7 @@ }, "seconds": { "type": "string", - "description": "Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years", + "description": "Span of time at a resolution of a second. Must be from 600 to 604800 inclusive. Note: minimum and maximum allowed range for requestedRunDuration is 10 minutes (600 seconds) and 7 days(604800 seconds) correspondingly.", "description_kind": "plain", "required": true } @@ -52200,7 +54672,8 @@ ], "description": "A list of URLs of the IP resources to be drained. 
These IPs must be\nvalid static external IPs that have been assigned to the NAT.", "description_kind": "plain", - "optional": true + "optional": true, + "computed": true }, "enable_dynamic_port_allocation": { "type": "bool", @@ -52238,6 +54711,15 @@ "optional": true, "computed": true }, + "initial_nat_ips": { + "type": [ + "set", + "string" + ], + "description": "Self-links of NAT IPs to be used as initial value for creation alongside a RouterNatAddress resource.\nConflicts with natIps and drainNatIps. Only valid if natIpAllocateOption is set to MANUAL_ONLY.", + "description_kind": "plain", + "optional": true + }, "max_ports_per_vm": { "type": "number", "description": "Maximum number of ports allocated to a VM from this NAT.\nThis field can only be set when enableDynamicPortAllocation is enabled.", @@ -52268,9 +54750,10 @@ "set", "string" ], - "description": "Self-links of NAT IPs. Only valid if natIpAllocateOption\nis set to MANUAL_ONLY.", + "description": "Self-links of NAT IPs. Only valid if natIpAllocateOption\nis set to MANUAL_ONLY.\nIf this field is used alongside with a count created list of address resources 'google_compute_address.foobar.*.self_link',\nthe access level resource for the address resource must have a 'lifecycle' block with 'create_before_destroy = true' so\nthe number of resources can be increased/decreased without triggering the 'resourceInUseByAnotherResource' error.", "description_kind": "plain", - "optional": true + "optional": true, + "computed": true }, "project": { "type": "string", @@ -52462,53 +54945,16 @@ "description_kind": "plain" } }, - "google_compute_router_peer": { + "google_compute_router_nat_address": { "version": 0, "block": { "attributes": { - "advertise_mode": { - "type": "string", - "description": "User-specified flag to indicate which mode to use for advertisement.\nValid values of this enum field are: 'DEFAULT', 'CUSTOM' Default value: \"DEFAULT\" Possible values: [\"DEFAULT\", \"CUSTOM\"]", - "description_kind": 
"plain", - "optional": true - }, - "advertised_groups": { + "drain_nat_ips": { "type": [ - "list", + "set", "string" ], - "description": "User-specified list of prefix groups to advertise in custom\nmode, which currently supports the following option:\n\n* 'ALL_SUBNETS': Advertises all of the router's own VPC subnets.\nThis excludes any routes learned for subnets that use VPC Network\nPeering.\n\n\nNote that this field can only be populated if advertiseMode is 'CUSTOM'\nand overrides the list defined for the router (in the \"bgp\" message).\nThese groups are advertised in addition to any specified prefixes.\nLeave this field blank to advertise no custom groups.", - "description_kind": "plain", - "optional": true - }, - "advertised_route_priority": { - "type": "number", - "description": "The priority of routes advertised to this BGP peer.\nWhere there is more than one matching route of maximum\nlength, the routes with the lowest priority value win.", - "description_kind": "plain", - "optional": true - }, - "custom_learned_route_priority": { - "type": "number", - "description": "The user-defined custom learned route priority for a BGP session.\nThis value is applied to all custom learned route ranges for the session. You can choose a value\nfrom 0 to 65335. If you don't provide a value, Google Cloud assigns a priority of 100 to the ranges.", - "description_kind": "plain", - "optional": true - }, - "enable": { - "type": "bool", - "description": "The status of the BGP peer connection. If set to false, any active session\nwith the peer is terminated and all associated routing information is removed.\nIf set to true, the peer connection can be established with routing information.\nThe default is true.", - "description_kind": "plain", - "optional": true - }, - "enable_ipv4": { - "type": "bool", - "description": "Enable IPv4 traffic over BGP Peer. 
It is enabled by default if the peerIpAddress is version 4.", - "description_kind": "plain", - "optional": true, - "computed": true - }, - "enable_ipv6": { - "type": "bool", - "description": "Enable IPv6 traffic over BGP Peer. If not specified, it is disabled by default.", + "description": "A list of URLs of the IP resources to be drained. These IPs must be\nvalid static external IPs that have been assigned to the NAT.", "description_kind": "plain", "optional": true }, @@ -52518,72 +54964,15 @@ "optional": true, "computed": true }, - "interface": { - "type": "string", - "description": "Name of the interface the BGP peer is associated with.", - "description_kind": "plain", - "required": true - }, - "ip_address": { - "type": "string", - "description": "IP address of the interface inside Google Cloud Platform.\nOnly IPv4 is supported.", - "description_kind": "plain", - "optional": true, - "computed": true - }, - "ipv4_nexthop_address": { - "type": "string", - "description": "IPv4 address of the interface inside Google Cloud Platform.", - "description_kind": "plain", - "optional": true, - "computed": true - }, - "ipv6_nexthop_address": { - "type": "string", - "description": "IPv6 address of the interface inside Google Cloud Platform.\nThe address must be in the range 2600:2d00:0:2::/64 or 2600:2d00:0:3::/64.\nIf you do not specify the next hop addresses, Google Cloud automatically\nassigns unused addresses from the 2600:2d00:0:2::/64 or 2600:2d00:0:3::/64 range for you.", - "description_kind": "plain", - "optional": true, - "computed": true - }, - "management_type": { - "type": "string", - "description": "The resource that configures and manages this BGP peer.\n\n* 'MANAGED_BY_USER' is the default value and can be managed by\nyou or other users\n* 'MANAGED_BY_ATTACHMENT' is a BGP peer that is configured and\nmanaged by Cloud Interconnect, specifically by an\nInterconnectAttachment of type PARTNER. 
Google automatically\ncreates, updates, and deletes this type of BGP peer when the\nPARTNER InterconnectAttachment is created, updated,\nor deleted.", - "description_kind": "plain", - "computed": true - }, - "name": { - "type": "string", - "description": "Name of this BGP peer. The name must be 1-63 characters long,\nand comply with RFC1035. Specifically, the name must be 1-63 characters\nlong and match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which\nmeans the first character must be a lowercase letter, and all\nfollowing characters must be a dash, lowercase letter, or digit,\nexcept the last character, which cannot be a dash.", - "description_kind": "plain", - "required": true - }, - "peer_asn": { - "type": "number", - "description": "Peer BGP Autonomous System Number (ASN).\nEach BGP interface may use a different value.", + "nat_ips": { + "type": [ + "set", + "string" + ], + "description": "Self-links of NAT IPs to be used in a Nat service. Only valid if the referenced RouterNat\nnatIpAllocateOption is set to MANUAL_ONLY.", "description_kind": "plain", "required": true }, - "peer_ip_address": { - "type": "string", - "description": "IP address of the BGP interface outside Google Cloud Platform.\nOnly IPv4 is supported. 
Required if 'ip_address' is set.", - "description_kind": "plain", - "optional": true, - "computed": true - }, - "peer_ipv4_nexthop_address": { - "type": "string", - "description": "IPv4 address of the BGP interface outside Google Cloud Platform.", - "description_kind": "plain", - "optional": true, - "computed": true - }, - "peer_ipv6_nexthop_address": { - "type": "string", - "description": "IPv6 address of the BGP interface outside Google Cloud Platform.\nThe address must be in the range 2600:2d00:0:2::/64 or 2600:2d00:0:3::/64.\nIf you do not specify the next hop addresses, Google Cloud automatically\nassigns unused addresses from the 2600:2d00:0:2::/64 or 2600:2d00:0:3::/64 range for you.", - "description_kind": "plain", - "optional": true, - "computed": true - }, "project": { "type": "string", "description_kind": "plain", @@ -52592,118 +54981,294 @@ }, "region": { "type": "string", - "description": "Region where the router and BgpPeer reside.\nIf it is not provided, the provider region is used.", + "description": "Region where the NAT service reside.", "description_kind": "plain", "optional": true, "computed": true }, "router": { "type": "string", - "description": "The name of the Cloud Router in which this BgpPeer will be configured.", + "description": "The name of the Cloud Router in which the referenced NAT service is configured.", "description_kind": "plain", "required": true }, - "router_appliance_instance": { + "router_nat": { "type": "string", - "description": "The URI of the VM instance that is used as third-party router appliances\nsuch as Next Gen Firewalls, Virtual Routers, or Router Appliances.\nThe VM instance must be located in zones contained in the same region as\nthis Cloud Router. 
The VM instance is the peer side of the BGP session.", + "description": "The name of the Nat service in which this address will be configured.", "description_kind": "plain", - "optional": true + "required": true } }, "block_types": { - "advertised_ip_ranges": { - "nesting_mode": "list", - "block": { - "attributes": { - "description": { - "type": "string", - "description": "User-specified description for the IP range.", - "description_kind": "plain", - "optional": true - }, - "range": { - "type": "string", - "description": "The IP range to advertise. The value must be a\nCIDR-formatted string.", - "description_kind": "plain", - "required": true - } - }, - "description": "User-specified list of individual IP ranges to advertise in\ncustom mode. This field can only be populated if advertiseMode\nis 'CUSTOM' and is advertised to all peers of the router. These IP\nranges will be advertised in addition to any specified groups.\nLeave this field blank to advertise no custom IP ranges.", - "description_kind": "plain" - } - }, - "bfd": { - "nesting_mode": "list", - "block": { - "attributes": { - "min_receive_interval": { - "type": "number", - "description": "The minimum interval, in milliseconds, between BFD control packets\nreceived from the peer router. The actual value is negotiated\nbetween the two routers and is equal to the greater of this value\nand the transmit interval of the other router. If set, this value\nmust be between 1000 and 30000.", - "description_kind": "plain", - "optional": true - }, - "min_transmit_interval": { - "type": "number", - "description": "The minimum interval, in milliseconds, between BFD control packets\ntransmitted to the peer router. The actual value is negotiated\nbetween the two routers and is equal to the greater of this value\nand the corresponding receive interval of the other router. 
If set,\nthis value must be between 1000 and 30000.", - "description_kind": "plain", - "optional": true - }, - "multiplier": { - "type": "number", - "description": "The number of consecutive BFD packets that must be missed before\nBFD declares that a peer is unavailable. If set, the value must\nbe a value between 5 and 16.", - "description_kind": "plain", - "optional": true - }, - "session_initialization_mode": { - "type": "string", - "description": "The BFD session initialization mode for this BGP peer.\nIf set to 'ACTIVE', the Cloud Router will initiate the BFD session\nfor this BGP peer. If set to 'PASSIVE', the Cloud Router will wait\nfor the peer router to initiate the BFD session for this BGP peer.\nIf set to 'DISABLED', BFD is disabled for this BGP peer. Possible values: [\"ACTIVE\", \"DISABLED\", \"PASSIVE\"]", - "description_kind": "plain", - "required": true - } - }, - "description": "BFD configuration for the BGP peering.", - "description_kind": "plain" - }, - "max_items": 1 - }, - "custom_learned_ip_ranges": { - "nesting_mode": "list", - "block": { - "attributes": { - "range": { - "type": "string", - "description": "The IP range to advertise. The value must be a\nCIDR-formatted string.", - "description_kind": "plain", - "required": true - } - }, - "description": "The custom learned route IP address range. Must be a valid CIDR-formatted prefix. If an \nIP address is provided without a subnet mask, it is interpreted as, for IPv4, a /32 singular IP address range, and, for IPv6, /128.", - "description_kind": "plain" - } - }, - "md5_authentication_key": { - "nesting_mode": "list", - "block": { - "attributes": { - "key": { - "type": "string", - "description": "Value of the key.", - "description_kind": "plain", - "required": true, - "sensitive": true - }, - "name": { - "type": "string", - "description": "[REQUIRED] Name used to identify the key.\nMust be unique within a router. Must be referenced by exactly one bgpPeer. 
Must comply with RFC1035.", - "description_kind": "plain", - "required": true - } - }, - "description": "Present if MD5 authentication is enabled for the peering. Must be the name\nof one of the entries in the Router.md5_authentication_keys. The field must comply with RFC1035.", - "description_kind": "plain" - }, - "max_items": 1 - }, + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, + "google_compute_router_peer": { + "version": 0, + "block": { + "attributes": { + "advertise_mode": { + "type": "string", + "description": "User-specified flag to indicate which mode to use for advertisement.\nValid values of this enum field are: 'DEFAULT', 'CUSTOM' Default value: \"DEFAULT\" Possible values: [\"DEFAULT\", \"CUSTOM\"]", + "description_kind": "plain", + "optional": true + }, + "advertised_groups": { + "type": [ + "list", + "string" + ], + "description": "User-specified list of prefix groups to advertise in custom\nmode, which currently supports the following option:\n\n* 'ALL_SUBNETS': Advertises all of the router's own VPC subnets.\nThis excludes any routes learned for subnets that use VPC Network\nPeering.\n\n\nNote that this field can only be populated if advertiseMode is 'CUSTOM'\nand overrides the list defined for the router (in the \"bgp\" message).\nThese groups are advertised in addition to any specified prefixes.\nLeave this field blank to advertise no custom groups.", + "description_kind": "plain", + "optional": true + }, + "advertised_route_priority": { + "type": "number", + "description": "The priority of routes advertised to this BGP peer.\nWhere there is more than one matching route 
of maximum\nlength, the routes with the lowest priority value win.", + "description_kind": "plain", + "optional": true + }, + "custom_learned_route_priority": { + "type": "number", + "description": "The user-defined custom learned route priority for a BGP session.\nThis value is applied to all custom learned route ranges for the session. You can choose a value\nfrom 0 to 65335. If you don't provide a value, Google Cloud assigns a priority of 100 to the ranges.", + "description_kind": "plain", + "optional": true + }, + "enable": { + "type": "bool", + "description": "The status of the BGP peer connection. If set to false, any active session\nwith the peer is terminated and all associated routing information is removed.\nIf set to true, the peer connection can be established with routing information.\nThe default is true.", + "description_kind": "plain", + "optional": true + }, + "enable_ipv4": { + "type": "bool", + "description": "Enable IPv4 traffic over BGP Peer. It is enabled by default if the peerIpAddress is version 4.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "enable_ipv6": { + "type": "bool", + "description": "Enable IPv6 traffic over BGP Peer. 
If not specified, it is disabled by default.", + "description_kind": "plain", + "optional": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "interface": { + "type": "string", + "description": "Name of the interface the BGP peer is associated with.", + "description_kind": "plain", + "required": true + }, + "ip_address": { + "type": "string", + "description": "IP address of the interface inside Google Cloud Platform.\nOnly IPv4 is supported.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "ipv4_nexthop_address": { + "type": "string", + "description": "IPv4 address of the interface inside Google Cloud Platform.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "ipv6_nexthop_address": { + "type": "string", + "description": "IPv6 address of the interface inside Google Cloud Platform.\nThe address must be in the range 2600:2d00:0:2::/64 or 2600:2d00:0:3::/64.\nIf you do not specify the next hop addresses, Google Cloud automatically\nassigns unused addresses from the 2600:2d00:0:2::/64 or 2600:2d00:0:3::/64 range for you.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "management_type": { + "type": "string", + "description": "The resource that configures and manages this BGP peer.\n\n* 'MANAGED_BY_USER' is the default value and can be managed by\nyou or other users\n* 'MANAGED_BY_ATTACHMENT' is a BGP peer that is configured and\nmanaged by Cloud Interconnect, specifically by an\nInterconnectAttachment of type PARTNER. Google automatically\ncreates, updates, and deletes this type of BGP peer when the\nPARTNER InterconnectAttachment is created, updated,\nor deleted.", + "description_kind": "plain", + "computed": true + }, + "name": { + "type": "string", + "description": "Name of this BGP peer. The name must be 1-63 characters long,\nand comply with RFC1035. 
Specifically, the name must be 1-63 characters\nlong and match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which\nmeans the first character must be a lowercase letter, and all\nfollowing characters must be a dash, lowercase letter, or digit,\nexcept the last character, which cannot be a dash.", + "description_kind": "plain", + "required": true + }, + "peer_asn": { + "type": "number", + "description": "Peer BGP Autonomous System Number (ASN).\nEach BGP interface may use a different value.", + "description_kind": "plain", + "required": true + }, + "peer_ip_address": { + "type": "string", + "description": "IP address of the BGP interface outside Google Cloud Platform.\nOnly IPv4 is supported. Required if 'ip_address' is set.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "peer_ipv4_nexthop_address": { + "type": "string", + "description": "IPv4 address of the BGP interface outside Google Cloud Platform.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "peer_ipv6_nexthop_address": { + "type": "string", + "description": "IPv6 address of the BGP interface outside Google Cloud Platform.\nThe address must be in the range 2600:2d00:0:2::/64 or 2600:2d00:0:3::/64.\nIf you do not specify the next hop addresses, Google Cloud automatically\nassigns unused addresses from the 2600:2d00:0:2::/64 or 2600:2d00:0:3::/64 range for you.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "region": { + "type": "string", + "description": "Region where the router and BgpPeer reside.\nIf it is not provided, the provider region is used.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "router": { + "type": "string", + "description": "The name of the Cloud Router in which this BgpPeer will be configured.", + "description_kind": "plain", + "required": true + }, 
+ "router_appliance_instance": { + "type": "string", + "description": "The URI of the VM instance that is used as third-party router appliances\nsuch as Next Gen Firewalls, Virtual Routers, or Router Appliances.\nThe VM instance must be located in zones contained in the same region as\nthis Cloud Router. The VM instance is the peer side of the BGP session.", + "description_kind": "plain", + "optional": true + } + }, + "block_types": { + "advertised_ip_ranges": { + "nesting_mode": "list", + "block": { + "attributes": { + "description": { + "type": "string", + "description": "User-specified description for the IP range.", + "description_kind": "plain", + "optional": true + }, + "range": { + "type": "string", + "description": "The IP range to advertise. The value must be a\nCIDR-formatted string.", + "description_kind": "plain", + "required": true + } + }, + "description": "User-specified list of individual IP ranges to advertise in\ncustom mode. This field can only be populated if advertiseMode\nis 'CUSTOM' and is advertised to all peers of the router. These IP\nranges will be advertised in addition to any specified groups.\nLeave this field blank to advertise no custom IP ranges.", + "description_kind": "plain" + } + }, + "bfd": { + "nesting_mode": "list", + "block": { + "attributes": { + "min_receive_interval": { + "type": "number", + "description": "The minimum interval, in milliseconds, between BFD control packets\nreceived from the peer router. The actual value is negotiated\nbetween the two routers and is equal to the greater of this value\nand the transmit interval of the other router. If set, this value\nmust be between 1000 and 30000.", + "description_kind": "plain", + "optional": true + }, + "min_transmit_interval": { + "type": "number", + "description": "The minimum interval, in milliseconds, between BFD control packets\ntransmitted to the peer router. 
The actual value is negotiated\nbetween the two routers and is equal to the greater of this value\nand the corresponding receive interval of the other router. If set,\nthis value must be between 1000 and 30000.", + "description_kind": "plain", + "optional": true + }, + "multiplier": { + "type": "number", + "description": "The number of consecutive BFD packets that must be missed before\nBFD declares that a peer is unavailable. If set, the value must\nbe a value between 5 and 16.", + "description_kind": "plain", + "optional": true + }, + "session_initialization_mode": { + "type": "string", + "description": "The BFD session initialization mode for this BGP peer.\nIf set to 'ACTIVE', the Cloud Router will initiate the BFD session\nfor this BGP peer. If set to 'PASSIVE', the Cloud Router will wait\nfor the peer router to initiate the BFD session for this BGP peer.\nIf set to 'DISABLED', BFD is disabled for this BGP peer. Possible values: [\"ACTIVE\", \"DISABLED\", \"PASSIVE\"]", + "description_kind": "plain", + "required": true + } + }, + "description": "BFD configuration for the BGP peering.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "custom_learned_ip_ranges": { + "nesting_mode": "list", + "block": { + "attributes": { + "range": { + "type": "string", + "description": "The IP range to learn. The value must be a\nCIDR-formatted string.", + "description_kind": "plain", + "required": true + } + }, + "description": "The custom learned route IP address range. Must be a valid CIDR-formatted prefix. 
If an \nIP address is provided without a subnet mask, it is interpreted as, for IPv4, a /32 singular IP address range, and, for IPv6, /128.", + "description_kind": "plain" + } + }, + "md5_authentication_key": { + "nesting_mode": "list", + "block": { + "attributes": { + "key": { + "type": "string", + "description": "Value of the key.", + "description_kind": "plain", + "required": true, + "sensitive": true + }, + "name": { + "type": "string", + "description": "[REQUIRED] Name used to identify the key.\nMust be unique within a router. Must be referenced by exactly one bgpPeer. Must comply with RFC1035.", + "description_kind": "plain", + "required": true + } + }, + "description": "Present if MD5 authentication is enabled for the peering. Must be the name\nof one of the entries in the Router.md5_authentication_keys. The field must comply with RFC1035.", + "description_kind": "plain" + }, + "max_items": 1 + }, "timeouts": { "nesting_mode": "single", "block": { @@ -52803,6 +55368,86 @@ "computed": true } }, + "block_types": { + "threshold_configs": { + "nesting_mode": "list", + "block": { + "attributes": { + "auto_deploy_confidence_threshold": { + "type": "number", + "description_kind": "plain", + "optional": true + }, + "auto_deploy_expiration_sec": { + "type": "number", + "description_kind": "plain", + "optional": true + }, + "auto_deploy_impacted_baseline_threshold": { + "type": "number", + "description_kind": "plain", + "optional": true + }, + "auto_deploy_load_threshold": { + "type": "number", + "description_kind": "plain", + "optional": true + }, + "detection_absolute_qps": { + "type": "number", + "description_kind": "plain", + "optional": true + }, + "detection_load_threshold": { + "type": "number", + "description_kind": "plain", + "optional": true + }, + "detection_relative_to_baseline_qps": { + "type": "number", + "description_kind": "plain", + "optional": true + }, + "name": { + "type": "string", + "description": "The name must be 1-63 characters long, and 
comply with RFC1035. The name must be unique within the security policy.", + "description_kind": "plain", + "required": true + } + }, + "block_types": { + "traffic_granularity_configs": { + "nesting_mode": "list", + "block": { + "attributes": { + "enable_each_unique_value": { + "type": "bool", + "description": "If enabled, traffic matching each unique value for the specified type constitutes a separate traffic unit. It can only be set to true if value is empty.", + "description_kind": "plain", + "optional": true + }, + "type": { + "type": "string", + "description": "Type of this configuration.", + "description_kind": "plain", + "required": true + }, + "value": { + "type": "string", + "description": "Requests that match this value constitute a granular traffic unit.", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description": "Configuration options for layer7 adaptive protection for various customizable thresholds.", + "description_kind": "plain" + } + } + }, "description": "Layer 7 DDoS Defense Config of this security policy", "description_kind": "plain" }, @@ -53040,6 +55685,126 @@ "min_items": 1, "max_items": 1 }, + "preconfigured_waf_config": { + "nesting_mode": "list", + "block": { + "block_types": { + "exclusion": { + "nesting_mode": "list", + "block": { + "attributes": { + "target_rule_ids": { + "type": [ + "set", + "string" + ], + "description": "A list of target rule IDs under the WAF rule set to apply the preconfigured WAF exclusion. 
If omitted, it refers to all the rule IDs under the WAF rule set.", + "description_kind": "plain", + "optional": true + }, + "target_rule_set": { + "type": "string", + "description": "Target WAF rule set to apply the preconfigured WAF exclusion.", + "description_kind": "plain", + "required": true + } + }, + "block_types": { + "request_cookie": { + "nesting_mode": "list", + "block": { + "attributes": { + "operator": { + "type": "string", + "description": "You can specify an exact match or a partial match by using a field operator and a field value. Available options: EQUALS: The operator matches if the field value equals the specified value. STARTS_WITH: The operator matches if the field value starts with the specified value. ENDS_WITH: The operator matches if the field value ends with the specified value. CONTAINS: The operator matches if the field value contains the specified value. EQUALS_ANY: The operator matches if the field value is any value.", + "description_kind": "plain", + "required": true + }, + "value": { + "type": "string", + "description": "A request field matching the specified value will be excluded from inspection during preconfigured WAF evaluation. The field value must be given if the field operator is not EQUALS_ANY, and cannot be given if the field operator is EQUALS_ANY.", + "description_kind": "plain", + "optional": true + } + }, + "description": "Request cookie whose value will be excluded from inspection during preconfigured WAF evaluation.", + "description_kind": "plain" + } + }, + "request_header": { + "nesting_mode": "list", + "block": { + "attributes": { + "operator": { + "type": "string", + "description": "You can specify an exact match or a partial match by using a field operator and a field value. Available options: EQUALS: The operator matches if the field value equals the specified value. STARTS_WITH: The operator matches if the field value starts with the specified value. 
ENDS_WITH: The operator matches if the field value ends with the specified value. CONTAINS: The operator matches if the field value contains the specified value. EQUALS_ANY: The operator matches if the field value is any value.", + "description_kind": "plain", + "required": true + }, + "value": { + "type": "string", + "description": "A request field matching the specified value will be excluded from inspection during preconfigured WAF evaluation. The field value must be given if the field operator is not EQUALS_ANY, and cannot be given if the field operator is EQUALS_ANY.", + "description_kind": "plain", + "optional": true + } + }, + "description": "Request header whose value will be excluded from inspection during preconfigured WAF evaluation.", + "description_kind": "plain" + } + }, + "request_query_param": { + "nesting_mode": "list", + "block": { + "attributes": { + "operator": { + "type": "string", + "description": "You can specify an exact match or a partial match by using a field operator and a field value. Available options: EQUALS: The operator matches if the field value equals the specified value. STARTS_WITH: The operator matches if the field value starts with the specified value. ENDS_WITH: The operator matches if the field value ends with the specified value. CONTAINS: The operator matches if the field value contains the specified value. EQUALS_ANY: The operator matches if the field value is any value.", + "description_kind": "plain", + "required": true + }, + "value": { + "type": "string", + "description": "A request field matching the specified value will be excluded from inspection during preconfigured WAF evaluation. The field value must be given if the field operator is not EQUALS_ANY, and cannot be given if the field operator is EQUALS_ANY.", + "description_kind": "plain", + "optional": true + } + }, + "description": "Request query parameter whose value will be excluded from inspection during preconfigured WAF evaluation. 
Note that the parameter can be in the query string or in the POST body.", + "description_kind": "plain" + } + }, + "request_uri": { + "nesting_mode": "list", + "block": { + "attributes": { + "operator": { + "type": "string", + "description": "You can specify an exact match or a partial match by using a field operator and a field value. Available options: EQUALS: The operator matches if the field value equals the specified value. STARTS_WITH: The operator matches if the field value starts with the specified value. ENDS_WITH: The operator matches if the field value ends with the specified value. CONTAINS: The operator matches if the field value contains the specified value. EQUALS_ANY: The operator matches if the field value is any value.", + "description_kind": "plain", + "required": true + }, + "value": { + "type": "string", + "description": "A request field matching the specified value will be excluded from inspection during preconfigured WAF evaluation. The field value must be given if the field operator is not EQUALS_ANY, and cannot be given if the field operator is EQUALS_ANY.", + "description_kind": "plain", + "optional": true + } + }, + "description": "Request URI from the request line to be excluded from inspection during preconfigured WAF evaluation. When specifying this field, the query or fragment part should be excluded.", + "description_kind": "plain" + } + } + }, + "description": "An exclusion to apply during preconfigured WAF evaluation.", + "description_kind": "plain" + } + } + }, + "description": "Preconfigured WAF configuration to be applied for the rule. 
If the rule does not evaluate preconfigured WAF rules, i.e., if evaluatePreconfiguredWaf() is not used, this field will have no effect.", + "description_kind": "plain" + }, + "max_items": 1 + }, "rate_limit_options": { "nesting_mode": "list", "block": { @@ -53098,6 +55863,27 @@ }, "max_items": 1 }, + "enforce_on_key_configs": { + "nesting_mode": "list", + "block": { + "attributes": { + "enforce_on_key_name": { + "type": "string", + "description": "Rate limit key name applicable only for the following key types: HTTP_HEADER -- Name of the HTTP header whose value is taken as the key value. HTTP_COOKIE -- Name of the HTTP cookie whose value is taken as the key value.", + "description_kind": "plain", + "optional": true + }, + "enforce_on_key_type": { + "type": "string", + "description": "Determines the key to enforce the rate_limit_threshold on", + "description_kind": "plain", + "optional": true + } + }, + "description": "Enforce On Key Config of this security policy", + "description_kind": "plain" + } + }, "exceed_redirect_options": { "nesting_mode": "list", "block": { @@ -53251,6 +56037,37 @@ } }, "block_types": { + "header_action": { + "nesting_mode": "list", + "block": { + "block_types": { + "request_headers_to_adds": { + "nesting_mode": "list", + "block": { + "attributes": { + "header_name": { + "type": "string", + "description": "The name of the header to set.", + "description_kind": "plain", + "optional": true + }, + "header_value": { + "type": "string", + "description": "The value to set the named header to.", + "description_kind": "plain", + "optional": true + } + }, + "description": "The list of request headers to add or overwrite if they're already present.", + "description_kind": "plain" + } + } + }, + "description": "Optional, additional actions that are performed on headers. 
This field is only supported in Global Security Policies of type CLOUD_ARMOR.", + "description_kind": "plain" + }, + "max_items": 1 + }, "match": { "nesting_mode": "list", "block": { @@ -53592,6 +56409,28 @@ }, "max_items": 1 }, + "redirect_options": { + "nesting_mode": "list", + "block": { + "attributes": { + "target": { + "type": "string", + "description": "Target for the redirect action. This is required if the type is EXTERNAL_302 and cannot be specified for GOOGLE_RECAPTCHA.", + "description_kind": "plain", + "optional": true + }, + "type": { + "type": "string", + "description": "Type of the redirect action.", + "description_kind": "plain", + "optional": true + } + }, + "description": "Parameters defining the redirect action. Cannot be specified for any other actions. This field is only supported in Global Security Policies of type CLOUD_ARMOR.", + "description_kind": "plain" + }, + "max_items": 1 + }, "timeouts": { "nesting_mode": "single", "block": { @@ -53629,7 +56468,10 @@ [ "object", { + "consumer_network": "string", "endpoint": "string", + "propagated_connection_count": "number", + "psc_connection_id": "string", "status": "string" } ] @@ -53707,6 +56549,13 @@ "optional": true, "computed": true }, + "propagated_connection_limit": { + "type": "number", + "description": "The number of consumer spokes that connected Private Service Connect endpoints can be propagated to through Network Connectivity Center.\nThis limit lets the service producer limit how many propagated Private Service Connect connections can be established to this service attachment from a single consumer.\n\nIf the connection preference of the service attachment is ACCEPT_MANUAL, the limit applies to each project or network that is listed in the consumer accept list.\nIf the connection preference of the service attachment is ACCEPT_AUTOMATIC, the limit applies to each project that contains a connected endpoint.\n\nIf unspecified, the default propagated connection limit is 250.", + 
"description_kind": "plain", + "optional": true, + "computed": true + }, "reconcile_connections": { "type": "bool", "description": "This flag determines whether a consumer accept/reject list change can reconcile the statuses of existing ACCEPTED or REJECTED PSC endpoints.\n\nIf false, connection policy update will only affect existing PENDING PSC endpoints. Existing ACCEPTED/REJECTED endpoints will remain untouched regardless how the connection policy is modified .\nIf true, update will affect both PENDING and ACCEPTED/REJECTED PSC endpoints. For example, an ACCEPTED PSC endpoint will be moved to REJECTED if its project is added to the reject list.", @@ -54516,9 +57365,10 @@ }, "ip_cidr_range": { "type": "string", - "description": "The range of internal addresses that are owned by this subnetwork.\nProvide this property when you create the subnetwork. For example,\n10.0.0.0/8 or 192.168.0.0/16. Ranges must be unique and\nnon-overlapping within a network. Only IPv4 is supported.", + "description": "The range of internal addresses that are owned by this subnetwork.\nProvide this property when you create the subnetwork. For example,\n10.0.0.0/8 or 192.168.0.0/16. Ranges must be unique and\nnon-overlapping within a network. Only IPv4 is supported.\nField is optional when 'reserved_internal_range' is defined, otherwise required.", "description_kind": "plain", - "required": true + "optional": true, + "computed": true }, "ipv6_access_type": { "type": "string", @@ -54566,7 +57416,7 @@ }, "purpose": { "type": "string", - "description": "The purpose of the resource. 
This field can be either 'PRIVATE_RFC_1918', 'REGIONAL_MANAGED_PROXY', 'GLOBAL_MANAGED_PROXY', 'PRIVATE_SERVICE_CONNECT' or 'PRIVATE_NAT'([Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)).\nA subnet with purpose set to 'REGIONAL_MANAGED_PROXY' is a user-created subnetwork that is reserved for regional Envoy-based load balancers.\nA subnetwork in a given region with purpose set to 'GLOBAL_MANAGED_PROXY' is a proxy-only subnet and is shared between all the cross-regional Envoy-based load balancers.\nA subnetwork with purpose set to 'PRIVATE_SERVICE_CONNECT' reserves the subnet for hosting a Private Service Connect published service.\nA subnetwork with purpose set to 'PRIVATE_NAT' is used as source range for Private NAT gateways.\nNote that 'REGIONAL_MANAGED_PROXY' is the preferred setting for all regional Envoy load balancers.\nIf unspecified, the purpose defaults to 'PRIVATE_RFC_1918'.", + "description": "The purpose of the resource. This field can be either 'PRIVATE', 'REGIONAL_MANAGED_PROXY', 'GLOBAL_MANAGED_PROXY', 'PRIVATE_SERVICE_CONNECT' or 'PRIVATE_NAT'([Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)).\nA subnet with purpose set to 'REGIONAL_MANAGED_PROXY' is a user-created subnetwork that is reserved for regional Envoy-based load balancers.\nA subnetwork in a given region with purpose set to 'GLOBAL_MANAGED_PROXY' is a proxy-only subnet and is shared between all the cross-regional Envoy-based load balancers.\nA subnetwork with purpose set to 'PRIVATE_SERVICE_CONNECT' reserves the subnet for hosting a Private Service Connect published service.\nA subnetwork with purpose set to 'PRIVATE_NAT' is used as source range for Private NAT gateways.\nNote that 'REGIONAL_MANAGED_PROXY' is the preferred setting for all regional Envoy load balancers.\nIf unspecified, the purpose defaults to 'PRIVATE'.", "description_kind": "plain", "optional": true, "computed": true @@ -54578,27 +57428,17 @@ "optional": 
true, "computed": true }, - "role": { + "reserved_internal_range": { "type": "string", - "description": "The role of subnetwork.\nCurrently, this field is only used when 'purpose' is 'REGIONAL_MANAGED_PROXY'.\nThe value can be set to 'ACTIVE' or 'BACKUP'.\nAn 'ACTIVE' subnetwork is one that is currently being used for Envoy-based load balancers in a region.\nA 'BACKUP' subnetwork is one that is ready to be promoted to 'ACTIVE' or is currently draining. Possible values: [\"ACTIVE\", \"BACKUP\"]", + "description": "The ID of the reserved internal range. Must be prefixed with 'networkconnectivity.googleapis.com'\nE.g. 'networkconnectivity.googleapis.com/projects/{project}/locations/global/internalRanges/{rangeId}'", "description_kind": "plain", "optional": true }, - "secondary_ip_range": { - "type": [ - "list", - [ - "object", - { - "ip_cidr_range": "string", - "range_name": "string" - } - ] - ], - "description": "An array of configurations for secondary IP ranges for VM instances\ncontained in this subnetwork. The primary IP of such VM must belong\nto the primary ipCidrRange of the subnetwork. The alias IPs may belong\nto either primary or secondary ranges.\n\n**Note**: This field uses [attr-as-block mode](https://www.terraform.io/docs/configuration/attr-as-blocks.html) to avoid\nbreaking users during the 0.12 upgrade. To explicitly send a list of zero objects,\nset 'send_secondary_ip_range_if_empty = true'", + "role": { + "type": "string", + "description": "The role of subnetwork.\nCurrently, this field is only used when 'purpose' is 'REGIONAL_MANAGED_PROXY'.\nThe value can be set to 'ACTIVE' or 'BACKUP'.\nAn 'ACTIVE' subnetwork is one that is currently being used for Envoy-based load balancers in a region.\nA 'BACKUP' subnetwork is one that is ready to be promoted to 'ACTIVE' or is currently draining. 
Possible values: [\"ACTIVE\", \"BACKUP\"]", "description_kind": "plain", - "optional": true, - "computed": true + "optional": true }, "self_link": { "type": "string", @@ -54617,6 +57457,12 @@ "description_kind": "plain", "optional": true, "computed": true + }, + "subnetwork_id": { + "type": "number", + "description": "The unique identifier number for the resource. This identifier is defined by the server.", + "description_kind": "plain", + "computed": true } }, "block_types": { @@ -54663,6 +57509,34 @@ }, "max_items": 1 }, + "secondary_ip_range": { + "nesting_mode": "list", + "block": { + "attributes": { + "ip_cidr_range": { + "type": "string", + "description": "The range of IP addresses belonging to this subnetwork secondary\nrange. Provide this property when you create the subnetwork.\nRanges must be unique and non-overlapping with all primary and\nsecondary IP ranges within a network. Only IPv4 is supported.\nField is optional when 'reserved_internal_range' is defined, otherwise required.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "range_name": { + "type": "string", + "description": "The name associated with this subnetwork secondary range, used\nwhen adding an alias IP range to a VM instance. The name must\nbe 1-63 characters long, and comply with RFC1035. The name\nmust be unique within the subnetwork.", + "description_kind": "plain", + "required": true + }, + "reserved_internal_range": { + "type": "string", + "description": "The ID of the reserved internal range. Must be prefixed with 'networkconnectivity.googleapis.com'\nE.g. 'networkconnectivity.googleapis.com/projects/{project}/locations/global/internalRanges/{rangeId}'", + "description_kind": "plain", + "optional": true + } + }, + "description": "An array of configurations for secondary IP ranges for VM instances\ncontained in this subnetwork. The primary IP of such VM must belong\nto the primary ipCidrRange of the subnetwork. 
The alias IPs may belong\nto either primary or secondary ranges.\n\n**Note**: This field uses [attr-as-block mode](https://www.terraform.io/docs/configuration/attr-as-blocks.html) to avoid\nbreaking users during the 0.12 upgrade. To explicitly send a list of zero objects,\nset 'send_secondary_ip_range_if_empty = true'", + "description_kind": "plain" + } + }, "timeouts": { "nesting_mode": "single", "block": { @@ -54988,7 +57862,7 @@ }, "http_keep_alive_timeout_sec": { "type": "number", - "description": "Specifies how long to keep a connection open, after completing a response,\nwhile there is no matching traffic (in seconds). If an HTTP keepalive is\nnot specified, a default value (610 seconds) will be used. For Global\nexternal HTTP(S) load balancer, the minimum allowed value is 5 seconds and\nthe maximum allowed value is 1200 seconds. For Global external HTTP(S)\nload balancer (classic), this option is not available publicly.", + "description": "Specifies how long to keep a connection open, after completing a response,\nwhile there is no matching traffic (in seconds). If an HTTP keepalive is\nnot specified, a default value will be used. For Global\nexternal HTTP(S) load balancer, the default value is 610 seconds, the\nminimum allowed value is 5 seconds and the maximum allowed value is 1200\nseconds. For cross-region internal HTTP(S) load balancer, the default\nvalue is 600 seconds, the minimum allowed value is 5 seconds, and the\nmaximum allowed value is 600 seconds. For Global external HTTP(S) load\nbalancer (classic), this option is not available publicly.", "description_kind": "plain", "optional": true }, @@ -55096,7 +57970,7 @@ }, "http_keep_alive_timeout_sec": { "type": "number", - "description": "Specifies how long to keep a connection open, after completing a response,\nwhile there is no matching traffic (in seconds). If an HTTP keepalive is\nnot specified, a default value (610 seconds) will be used. 
For Global\nexternal HTTP(S) load balancer, the minimum allowed value is 5 seconds and\nthe maximum allowed value is 1200 seconds. For Global external HTTP(S)\nload balancer (classic), this option is not available publicly.", + "description": "Specifies how long to keep a connection open, after completing a response,\nwhile there is no matching traffic (in seconds). If an HTTP keepalive is\nnot specified, a default value will be used. For Global\nexternal HTTP(S) load balancer, the default value is 610 seconds, the\nminimum allowed value is 5 seconds and the maximum allowed value is 1200\nseconds. For cross-region internal HTTP(S) load balancer, the default\nvalue is 600 seconds, the minimum allowed value is 5 seconds, and the\nmaximum allowed value is 600 seconds. For Global external HTTP(S) load\nbalancer (classic), this option is not available publicly.", "description_kind": "plain", "optional": true }, @@ -55144,7 +58018,7 @@ }, "server_tls_policy": { "type": "string", - "description": "A URL referring to a networksecurity.ServerTlsPolicy\nresource that describes how the proxy should authenticate inbound\ntraffic. serverTlsPolicy only applies to a global TargetHttpsProxy\nattached to globalForwardingRules with the loadBalancingScheme\nset to INTERNAL_SELF_MANAGED or EXTERNAL or EXTERNAL_MANAGED.\nFor details which ServerTlsPolicy resources are accepted with\nINTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED\nloadBalancingScheme consult ServerTlsPolicy documentation.\nIf left blank, communications are not encrypted.", + "description": "A URL referring to a networksecurity.ServerTlsPolicy\nresource that describes how the proxy should authenticate inbound\ntraffic. 
serverTlsPolicy only applies to a global TargetHttpsProxy\nattached to globalForwardingRules with the loadBalancingScheme\nset to INTERNAL_SELF_MANAGED or EXTERNAL or EXTERNAL_MANAGED.\nFor details which ServerTlsPolicy resources are accepted with\nINTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED\nloadBalancingScheme consult ServerTlsPolicy documentation.\nIf left blank, communications are not encrypted.\n\nIf you remove this field from your configuration at the same time as\ndeleting or recreating a referenced ServerTlsPolicy resource, you will\nreceive a resourceInUseByAnotherResource error. Use lifecycle.create_before_destroy\nwithin the ServerTlsPolicy resource to avoid this.", "description_kind": "plain", "optional": true }, @@ -58698,7 +61572,7 @@ }, "deletion_policy": { "type": "string", - "description": "Policy to determine what flags to send on delete.", + "description": "Policy to determine what flags to send on delete. Possible values: DELETE, DELETE_IGNORE_ERRORS", "description_kind": "plain", "optional": true }, @@ -58992,6 +61866,23 @@ }, "max_items": 1 }, + "security_posture_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "vulnerability_mode": { + "type": "string", + "description": "Sets the mode of the Kubernetes security posture API's workload vulnerability scanning. Possible values: [\"VULNERABILITY_DISABLED\", \"VULNERABILITY_ENTERPRISE\"]", + "description_kind": "plain", + "required": true + } + }, + "description": "Enable/Disable Security Posture API features for the cluster.", + "description_kind": "plain", + "deprecated": true + }, + "max_items": 1 + }, "timeouts": { "nesting_mode": "single", "block": { @@ -59862,6 +62753,42 @@ "min_items": 1, "max_items": 1 }, + "kubelet_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "cpu_cfs_quota": { + "type": "bool", + "description": "Whether or not to enable CPU CFS quota. 
Defaults to true.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "cpu_cfs_quota_period": { + "type": "string", + "description": "Optional. The CPU CFS quota period to use for the node. Defaults to \"100ms\".", + "description_kind": "plain", + "optional": true + }, + "cpu_manager_policy": { + "type": "string", + "description": "The CpuManagerPolicy to use for the node. Defaults to \"none\".", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "pod_pids_limit": { + "type": "number", + "description": "Optional. The maximum number of PIDs in each pod running on the node. The limit scales automatically based on underlying machine size if left unset.", + "description_kind": "plain", + "optional": true + } + }, + "description": "The kubelet configuration for the node pool.", + "description_kind": "plain" + }, + "max_items": 1 + }, "management": { "nesting_mode": "list", "block": { @@ -60803,6 +63730,15 @@ "description_kind": "plain", "optional": true }, + "effective_labels": { + "type": [ + "map", + "string" + ], + "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, "enable_autopilot": { "type": "bool", "description": "Enable Autopilot for this cluster.", @@ -60815,6 +63751,12 @@ "description_kind": "plain", "optional": true }, + "enable_fqdn_network_policy": { + "type": "bool", + "description": "Whether FQDN Network Policy is enabled on this cluster.", + "description_kind": "plain", + "optional": true + }, "enable_intranode_visibility": { "type": "bool", "description": "Whether Intra-node visibility is enabled for this cluster. 
This makes same node pod to pod traffic visible for VPC network.", @@ -60981,7 +63923,7 @@ "map", "string" ], - "description": "The GCE resource labels (a map of key/value pairs) to be applied to the cluster.", + "description": "The GCE resource labels (a map of key/value pairs) to be applied to the cluster.\n\n\t\t\t\t**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\n\t\t\t\tPlease refer to the field 'effective_labels' for all of the labels present on the resource.", "description_kind": "plain", "optional": true }, @@ -61004,6 +63946,15 @@ "optional": true, "computed": true }, + "terraform_labels": { + "type": [ + "map", + "string" + ], + "description": "The combination of labels configured directly on the resource and default labels configured on the provider.", + "description_kind": "plain", + "computed": true + }, "tpu_ipv4_cidr_block": { "type": "string", "description": "The IP address range of the Cloud TPUs in this cluster, in CIDR notation (e.g. 1.2.3.4/29).", @@ -61091,7 +64042,7 @@ "required": true } }, - "description": "The status of the Filestore CSI driver addon, which allows the usage of filestore instance as volumes. Defaults to disabled; set enabled = true to enable.", + "description": "The status of the Filestore CSI driver addon, which allows the usage of filestore instance as volumes. Defaults to disabled for Standard clusters; set enabled = true to enable. It is enabled by default for Autopilot clusters; set enabled = true to enable it explicitly.", "description_kind": "plain" }, "max_items": 1 @@ -61171,6 +64122,21 @@ }, "max_items": 1 }, + "parallelstore_csi_driver_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "enabled": { + "type": "bool", + "description_kind": "plain", + "required": true + } + }, + "description": "The status of the Parallelstore CSI driver addon, which allows the usage of Parallelstore instances as volumes. 
Defaults to disabled; set enabled = true to enable.", + "description_kind": "plain" + }, + "max_items": 1 + }, "ray_operator_config": { "nesting_mode": "list", "block": { @@ -61556,6 +64522,39 @@ }, "max_items": 1 }, + "control_plane_endpoints_config": { + "nesting_mode": "list", + "block": { + "block_types": { + "dns_endpoint_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "allow_external_traffic": { + "type": "bool", + "description": "Controls whether user traffic is allowed over this endpoint. Note that GCP-managed services may still use the endpoint even if this is false.", + "description_kind": "plain", + "optional": true + }, + "endpoint": { + "type": "string", + "description": "The cluster's DNS endpoint.", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "description": "DNS endpoint configuration.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "Configuration for all of the cluster's control plane endpoints. Currently supports only DNS endpoint configuration, IP endpoint configuration is available in private_cluster_config.", + "description_kind": "plain" + }, + "max_items": 1 + }, "cost_management_config": { "nesting_mode": "list", "block": { @@ -61614,6 +64613,12 @@ "nesting_mode": "list", "block": { "attributes": { + "additive_vpc_scope_dns_domain": { + "type": "string", + "description": "Enable additive VPC scope DNS in a GKE cluster.", + "description_kind": "plain", + "optional": true + }, "cluster_dns": { "type": "string", "description": "Which in-cluster DNS provider should be used.", @@ -61657,6 +64662,29 @@ }, "max_items": 1 }, + "enterprise_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "cluster_tier": { + "type": "string", + "description": "Indicates the effective cluster tier. 
Available options include STANDARD and ENTERPRISE.", + "description_kind": "plain", + "computed": true + }, + "desired_tier": { + "type": "string", + "description": "Indicates the desired cluster tier. Available options include STANDARD and ENTERPRISE.", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "description": "Defines the config needed to enable/disable GKE Enterprise", + "description_kind": "plain" + }, + "max_items": 1 + }, "fleet": { "nesting_mode": "list", "block": { @@ -61818,7 +64846,7 @@ "list", "string" ], - "description": "GKE components exposing logs. Valid values include SYSTEM_COMPONENTS, APISERVER, CONTROLLER_MANAGER, SCHEDULER, and WORKLOADS.", + "description": "GKE components exposing logs. Valid values include SYSTEM_COMPONENTS, APISERVER, CONTROLLER_MANAGER, KCP_CONNECTION, KCP_SSHD, SCHEDULER, and WORKLOADS.", "description_kind": "plain", "required": true } @@ -61984,6 +65012,13 @@ "description_kind": "plain", "optional": true, "computed": true + }, + "private_endpoint_enforcement_enabled": { + "type": "bool", + "description": "Whether authorized networks is enforced on the private endpoint or not. 
Defaults to false.", + "description_kind": "plain", + "optional": true, + "computed": true } }, "block_types": { @@ -62060,15 +65095,7 @@ "type": "bool", "description": "Whether or not Relay is enabled.", "description_kind": "plain", - "optional": true - }, - "relay_mode": { - "type": "string", - "description": "Mode used to make Relay available.", - "description_kind": "plain", - "deprecated": true, - "optional": true, - "computed": true + "required": true } }, "description": "Configuration of Advanced Datapath Observability features.", @@ -62166,42 +65193,6 @@ "description_kind": "plain", "optional": true }, - "guest_accelerator": { - "type": [ - "list", - [ - "object", - { - "count": "number", - "gpu_driver_installation_config": [ - "list", - [ - "object", - { - "gpu_driver_version": "string" - } - ] - ], - "gpu_partition_size": "string", - "gpu_sharing_config": [ - "list", - [ - "object", - { - "gpu_sharing_strategy": "string", - "max_shared_clients_per_gpu": "number" - } - ] - ], - "type": "string" - } - ] - ], - "description": "List of the type and count of accelerator cards attached to the instance.", - "description_kind": "plain", - "optional": true, - "computed": true - }, "image_type": { "type": "string", "description": "The image type to use for this node. Note that for a given image type, the latest version of it will be used.", @@ -62226,6 +65217,12 @@ "optional": true, "computed": true }, + "local_ssd_encryption_mode": { + "type": "string", + "description": "LocalSsdEncryptionMode specified the method used for encrypting the local SSDs attached to the node.", + "description_kind": "plain", + "optional": true + }, "logging_variant": { "type": "string", "description": "Type of logging agent that is used as the default value for node pools in the cluster. 
Valid values include DEFAULT and MAX_THROUGHPUT.", @@ -62310,6 +65307,15 @@ "description_kind": "plain", "optional": true }, + "storage_pools": { + "type": [ + "list", + "string" + ], + "description": "The list of Storage Pools where boot disks are provisioned.", + "description_kind": "plain", + "optional": true + }, "tags": { "type": [ "list", @@ -62472,6 +65478,73 @@ }, "max_items": 1 }, + "guest_accelerator": { + "nesting_mode": "list", + "block": { + "attributes": { + "count": { + "type": "number", + "description": "The number of the accelerator cards exposed to an instance.", + "description_kind": "plain", + "required": true + }, + "gpu_partition_size": { + "type": "string", + "description": "Size of partitions to create on the GPU. Valid values are described in the NVIDIA mig user guide (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning)", + "description_kind": "plain", + "optional": true + }, + "type": { + "type": "string", + "description": "The accelerator type resource name.", + "description_kind": "plain", + "required": true + } + }, + "block_types": { + "gpu_driver_installation_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "gpu_driver_version": { + "type": "string", + "description": "Mode for how the GPU driver is installed.", + "description_kind": "plain", + "required": true + } + }, + "description": "Configuration for auto installation of GPU driver.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "gpu_sharing_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "gpu_sharing_strategy": { + "type": "string", + "description": "The type of GPU sharing strategy to enable on the GPU node. 
Possible values are described in the API package (https://pkg.go.dev/google.golang.org/api/container/v1#GPUSharingConfig)", + "description_kind": "plain", + "required": true + }, + "max_shared_clients_per_gpu": { + "type": "number", + "description": "The maximum number of containers that can share a GPU.", + "description_kind": "plain", + "required": true + } + }, + "description": "Configuration for GPU sharing.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "List of the type and count of accelerator cards attached to the instance.", + "description_kind": "plain" + } + }, "gvnic": { "nesting_mode": "list", "block": { @@ -62524,7 +65597,7 @@ "type": "string", "description": "Control the CPU management policy on the node.", "description_kind": "plain", - "required": true + "optional": true }, "insecure_kubelet_readonly_port_enabled": { "type": "string", @@ -62566,6 +65639,30 @@ "optional": true } }, + "block_types": { + "hugepages_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "hugepage_size_1g": { + "type": "number", + "description": "Amount of 1G hugepages.", + "description_kind": "plain", + "optional": true + }, + "hugepage_size_2m": { + "type": "number", + "description": "Amount of 2M hugepages.", + "description_kind": "plain", + "optional": true + } + }, + "description": "Amounts for 2M and 1G hugepages.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, "description": "Parameters that can be configured on Linux nodes.", "description_kind": "plain" }, @@ -63056,42 +66153,6 @@ "description_kind": "plain", "optional": true }, - "guest_accelerator": { - "type": [ - "list", - [ - "object", - { - "count": "number", - "gpu_driver_installation_config": [ - "list", - [ - "object", - { - "gpu_driver_version": "string" - } - ] - ], - "gpu_partition_size": "string", - "gpu_sharing_config": [ - "list", - [ - "object", - { - "gpu_sharing_strategy": "string", - "max_shared_clients_per_gpu": "number" - } - ] - 
], - "type": "string" - } - ] - ], - "description": "List of the type and count of accelerator cards attached to the instance.", - "description_kind": "plain", - "optional": true, - "computed": true - }, "image_type": { "type": "string", "description": "The image type to use for this node. Note that for a given image type, the latest version of it will be used.", @@ -63116,6 +66177,12 @@ "optional": true, "computed": true }, + "local_ssd_encryption_mode": { + "type": "string", + "description": "LocalSsdEncryptionMode specified the method used for encrypting the local SSDs attached to the node.", + "description_kind": "plain", + "optional": true + }, "logging_variant": { "type": "string", "description": "Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT.", @@ -63200,6 +66267,15 @@ "description_kind": "plain", "optional": true }, + "storage_pools": { + "type": [ + "list", + "string" + ], + "description": "The list of Storage Pools where boot disks are provisioned.", + "description_kind": "plain", + "optional": true + }, "tags": { "type": [ "list", @@ -63362,6 +66438,73 @@ }, "max_items": 1 }, + "guest_accelerator": { + "nesting_mode": "list", + "block": { + "attributes": { + "count": { + "type": "number", + "description": "The number of the accelerator cards exposed to an instance.", + "description_kind": "plain", + "required": true + }, + "gpu_partition_size": { + "type": "string", + "description": "Size of partitions to create on the GPU. 
Valid values are described in the NVIDIA mig user guide (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning)", + "description_kind": "plain", + "optional": true + }, + "type": { + "type": "string", + "description": "The accelerator type resource name.", + "description_kind": "plain", + "required": true + } + }, + "block_types": { + "gpu_driver_installation_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "gpu_driver_version": { + "type": "string", + "description": "Mode for how the GPU driver is installed.", + "description_kind": "plain", + "required": true + } + }, + "description": "Configuration for auto installation of GPU driver.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "gpu_sharing_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "gpu_sharing_strategy": { + "type": "string", + "description": "The type of GPU sharing strategy to enable on the GPU node. Possible values are described in the API package (https://pkg.go.dev/google.golang.org/api/container/v1#GPUSharingConfig)", + "description_kind": "plain", + "required": true + }, + "max_shared_clients_per_gpu": { + "type": "number", + "description": "The maximum number of containers that can share a GPU.", + "description_kind": "plain", + "required": true + } + }, + "description": "Configuration for GPU sharing.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "List of the type and count of accelerator cards attached to the instance.", + "description_kind": "plain" + } + }, "gvnic": { "nesting_mode": "list", "block": { @@ -63414,7 +66557,7 @@ "type": "string", "description": "Control the CPU management policy on the node.", "description_kind": "plain", - "required": true + "optional": true }, "insecure_kubelet_readonly_port_enabled": { "type": "string", @@ -63456,6 +66599,30 @@ "optional": true } }, + "block_types": { + "hugepages_config": { + "nesting_mode": "list", + "block": { + "attributes": { + 
"hugepage_size_1g": { + "type": "number", + "description": "Amount of 1G hugepages.", + "description_kind": "plain", + "optional": true + }, + "hugepage_size_2m": { + "type": "number", + "description": "Amount of 2M hugepages.", + "description_kind": "plain", + "optional": true + } + }, + "description": "Amounts for 2M and 1G hugepages.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, "description": "Parameters that can be configured on Linux nodes.", "description_kind": "plain" }, @@ -63789,6 +66956,23 @@ } }, "block_types": { + "linux_node_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "cgroup_mode": { + "type": "string", + "description": "cgroupMode specifies the cgroup mode to be used on the node.", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "description": "Linux node configuration options.", + "description_kind": "plain" + }, + "max_items": 1 + }, "network_tags": { "nesting_mode": "list", "block": { @@ -63919,6 +67103,22 @@ "description_kind": "plain" }, "max_items": 1 + }, + "gcfs_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "enabled": { + "type": "bool", + "description": "Whether or not GCFS is enabled", + "description_kind": "plain", + "required": true + } + }, + "description": "GCFS configuration for this node.", + "description_kind": "plain" + }, + "max_items": 1 } }, "description": "Subset of NodeConfig message that has defaults.", @@ -64114,6 +67314,22 @@ }, "max_items": 1 }, + "secret_manager_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "enabled": { + "type": "bool", + "description": "Enable the Secret manager csi component.", + "description_kind": "plain", + "required": true + } + }, + "description": "Configuration for the Secret Manager feature.", + "description_kind": "plain" + }, + "max_items": 1 + }, "security_posture_config": { "nesting_mode": "list", "block": { @@ -64182,6 +67398,70 @@ "description_kind": "plain" } }, + 
"user_managed_keys_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "aggregation_ca": { + "type": "string", + "description": "The Certificate Authority Service caPool to use for the aggreation CA in this cluster.", + "description_kind": "plain", + "optional": true + }, + "cluster_ca": { + "type": "string", + "description": "The Certificate Authority Service caPool to use for the cluster CA in this cluster.", + "description_kind": "plain", + "optional": true + }, + "control_plane_disk_encryption_key": { + "type": "string", + "description": "The Cloud KMS cryptoKey to use for Confidential Hyperdisk on the control plane nodes.", + "description_kind": "plain", + "optional": true + }, + "etcd_api_ca": { + "type": "string", + "description": "The Certificate Authority Service caPool to use for the etcd API CA in this cluster.", + "description_kind": "plain", + "optional": true + }, + "etcd_peer_ca": { + "type": "string", + "description": "The Certificate Authority Service caPool to use for the etcd peer CA in this cluster.", + "description_kind": "plain", + "optional": true + }, + "gkeops_etcd_backup_encryption_key": { + "type": "string", + "description": "Resource path of the Cloud KMS cryptoKey to use for encryption of internal etcd backups.", + "description_kind": "plain", + "optional": true + }, + "service_account_signing_keys": { + "type": [ + "set", + "string" + ], + "description": "The Cloud KMS cryptoKeyVersions to use for signing service account JWTs issued by this cluster.", + "description_kind": "plain", + "optional": true + }, + "service_account_verification_keys": { + "type": [ + "set", + "string" + ], + "description": "The Cloud KMS cryptoKeyVersions to use for verifying service account JWTs issued by this cluster.", + "description_kind": "plain", + "optional": true + } + }, + "description": "The custom keys configuration of the cluster.", + "description_kind": "plain" + }, + "max_items": 1 + }, "vertical_pod_autoscaling": { 
"nesting_mode": "list", "block": { @@ -64553,42 +67833,6 @@ "description_kind": "plain", "optional": true }, - "guest_accelerator": { - "type": [ - "list", - [ - "object", - { - "count": "number", - "gpu_driver_installation_config": [ - "list", - [ - "object", - { - "gpu_driver_version": "string" - } - ] - ], - "gpu_partition_size": "string", - "gpu_sharing_config": [ - "list", - [ - "object", - { - "gpu_sharing_strategy": "string", - "max_shared_clients_per_gpu": "number" - } - ] - ], - "type": "string" - } - ] - ], - "description": "List of the type and count of accelerator cards attached to the instance.", - "description_kind": "plain", - "optional": true, - "computed": true - }, "image_type": { "type": "string", "description": "The image type to use for this node. Note that for a given image type, the latest version of it will be used.", @@ -64613,6 +67857,12 @@ "optional": true, "computed": true }, + "local_ssd_encryption_mode": { + "type": "string", + "description": "LocalSsdEncryptionMode specified the method used for encrypting the local SSDs attached to the node.", + "description_kind": "plain", + "optional": true + }, "logging_variant": { "type": "string", "description": "Type of logging agent that is used as the default value for node pools in the cluster. 
Valid values include DEFAULT and MAX_THROUGHPUT.", @@ -64697,6 +67947,15 @@ "description_kind": "plain", "optional": true }, + "storage_pools": { + "type": [ + "list", + "string" + ], + "description": "The list of Storage Pools where boot disks are provisioned.", + "description_kind": "plain", + "optional": true + }, "tags": { "type": [ "list", @@ -64859,6 +68118,73 @@ }, "max_items": 1 }, + "guest_accelerator": { + "nesting_mode": "list", + "block": { + "attributes": { + "count": { + "type": "number", + "description": "The number of the accelerator cards exposed to an instance.", + "description_kind": "plain", + "required": true + }, + "gpu_partition_size": { + "type": "string", + "description": "Size of partitions to create on the GPU. Valid values are described in the NVIDIA mig user guide (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning)", + "description_kind": "plain", + "optional": true + }, + "type": { + "type": "string", + "description": "The accelerator type resource name.", + "description_kind": "plain", + "required": true + } + }, + "block_types": { + "gpu_driver_installation_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "gpu_driver_version": { + "type": "string", + "description": "Mode for how the GPU driver is installed.", + "description_kind": "plain", + "required": true + } + }, + "description": "Configuration for auto installation of GPU driver.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "gpu_sharing_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "gpu_sharing_strategy": { + "type": "string", + "description": "The type of GPU sharing strategy to enable on the GPU node. 
Possible values are described in the API package (https://pkg.go.dev/google.golang.org/api/container/v1#GPUSharingConfig)", + "description_kind": "plain", + "required": true + }, + "max_shared_clients_per_gpu": { + "type": "number", + "description": "The maximum number of containers that can share a GPU.", + "description_kind": "plain", + "required": true + } + }, + "description": "Configuration for GPU sharing.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "List of the type and count of accelerator cards attached to the instance.", + "description_kind": "plain" + } + }, "gvnic": { "nesting_mode": "list", "block": { @@ -64911,7 +68237,7 @@ "type": "string", "description": "Control the CPU management policy on the node.", "description_kind": "plain", - "required": true + "optional": true }, "insecure_kubelet_readonly_port_enabled": { "type": "string", @@ -64953,6 +68279,30 @@ "optional": true } }, + "block_types": { + "hugepages_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "hugepage_size_1g": { + "type": "number", + "description": "Amount of 1G hugepages.", + "description_kind": "plain", + "optional": true + }, + "hugepage_size_2m": { + "type": "number", + "description": "Amount of 2M hugepages.", + "description_kind": "plain", + "optional": true + } + }, + "description": "Amounts for 2M and 1G hugepages.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, "description": "Parameters that can be configured on Linux nodes.", "description_kind": "plain" }, @@ -71721,6 +75071,74 @@ "description_kind": "plain" }, "max_items": 1 + }, + "tag_resources": { + "nesting_mode": "list", + "block": { + "attributes": { + "lower_data_risk_to_low": { + "type": "bool", + "description": "Whether applying a tag to a resource should lower the risk of the profile for that resource. 
For example, in conjunction with an [IAM deny policy](https://cloud.google.com/iam/docs/deny-overview), you can deny all principals a permission if a tag value is present, mitigating the risk of the resource. This also lowers the data risk of resources at the lower levels of the resource hierarchy. For example, reducing the data risk of a table data profile also reduces the data risk of the constituent column data profiles.", + "description_kind": "plain", + "optional": true + }, + "profile_generations_to_tag": { + "type": [ + "list", + "string" + ], + "description": "The profile generations for which the tag should be attached to resources. If you attach a tag to only new profiles, then if the sensitivity score of a profile subsequently changes, its tag doesn't change. By default, this field includes only new profiles. To include both new and updated profiles for tagging, this field should explicitly include both 'PROFILE_GENERATION_NEW' and 'PROFILE_GENERATION_UPDATE'. Possible values: [\"PROFILE_GENERATION_NEW\", \"PROFILE_GENERATION_UPDATE\"]", + "description_kind": "plain", + "optional": true + } + }, + "block_types": { + "tag_conditions": { + "nesting_mode": "list", + "block": { + "block_types": { + "sensitivity_score": { + "nesting_mode": "list", + "block": { + "attributes": { + "score": { + "type": "string", + "description": "The sensitivity score applied to the resource. Possible values: [\"SENSITIVITY_LOW\", \"SENSITIVITY_MODERATE\", \"SENSITIVITY_HIGH\"]", + "description_kind": "plain", + "required": true + } + }, + "description": "Conditions attaching the tag to a resource on its profile having this sensitivity score.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "tag": { + "nesting_mode": "list", + "block": { + "attributes": { + "namespaced_value": { + "type": "string", + "description": "The namespaced name for the tag value to attach to resources. 
Must be in the format '{parent_id}/{tag_key_short_name}/{short_name}', for example, \"123456/environment/prod\".", + "description_kind": "plain", + "optional": true + } + }, + "description": "The tag value to attach to resources.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "The tags to associate with different conditions.", + "description_kind": "plain" + } + } + }, + "description": "Publish a message into the Pub/Sub topic.", + "description_kind": "plain" + }, + "max_items": 1 } }, "description": "Actions to execute at the completion of scanning", @@ -71762,7 +75180,7 @@ "max_items": 1 } }, - "description": "A nested object resource", + "description": "A nested object resource.", "description_kind": "plain" }, "max_items": 1 @@ -71779,6 +75197,22 @@ "nesting_mode": "list", "block": { "block_types": { + "inspect_template_modified_cadence": { + "nesting_mode": "list", + "block": { + "attributes": { + "frequency": { + "type": "string", + "description": "How frequently data profiles can be updated when the template is modified. Defaults to never. Possible values: [\"UPDATE_FREQUENCY_NEVER\", \"UPDATE_FREQUENCY_DAILY\", \"UPDATE_FREQUENCY_MONTHLY\"]", + "description_kind": "plain", + "optional": true + } + }, + "description": "Governs when to update data profiles when the inspection rules defined by the 'InspectTemplate' change. If not set, changing the template will not cause a data profile to update.", + "description_kind": "plain" + }, + "max_items": 1 + }, "schema_modified_cadence": { "nesting_mode": "list", "block": { @@ -72159,6 +75593,22 @@ } }, "block_types": { + "inspect_template_modified_cadence": { + "nesting_mode": "list", + "block": { + "attributes": { + "frequency": { + "type": "string", + "description": "How frequently data profiles can be updated when the template is modified. Defaults to never. 
Possible values: [\"UPDATE_FREQUENCY_NEVER\", \"UPDATE_FREQUENCY_DAILY\", \"UPDATE_FREQUENCY_MONTHLY\"]", + "description_kind": "plain", + "required": true + } + }, + "description": "Governs when to update data profiles when the inspection rules defined by the 'InspectTemplate' change. If not set, changing the template will not cause a data profile to update.", + "description_kind": "plain" + }, + "max_items": 1 + }, "schema_modified_cadence": { "nesting_mode": "list", "block": { @@ -75799,7 +79249,93 @@ }, "host": { "type": "string", - "description": "Required. The IP or hostname of the source MySQL database.", + "description": "The IP or hostname of the source MySQL database.", + "description_kind": "plain", + "optional": true + }, + "password": { + "type": "string", + "description": "Input only. The password for the user that Database Migration Service will be using to connect to the database.\nThis field is not returned on request, and the value is encrypted when stored in Database Migration Service.", + "description_kind": "plain", + "optional": true, + "sensitive": true + }, + "password_set": { + "type": "bool", + "description": "Output only. Indicates If this connection profile password is stored.", + "description_kind": "plain", + "computed": true + }, + "port": { + "type": "number", + "description": "The network port of the source MySQL database.", + "description_kind": "plain", + "optional": true + }, + "username": { + "type": "string", + "description": "The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service.", + "description_kind": "plain", + "optional": true + } + }, + "block_types": { + "ssl": { + "nesting_mode": "list", + "block": { + "attributes": { + "ca_certificate": { + "type": "string", + "description": "Required. Input only. 
The x509 PEM-encoded certificate of the CA that signed the source database server's certificate.\nThe replica will use this certificate to verify it's connecting to the right host.", + "description_kind": "plain", + "required": true, + "sensitive": true + }, + "client_certificate": { + "type": "string", + "description": "Input only. The x509 PEM-encoded certificate that will be used by the replica to authenticate against the source database server.\nIf this field is used then the 'clientKey' field is mandatory", + "description_kind": "plain", + "optional": true, + "sensitive": true + }, + "client_key": { + "type": "string", + "description": "Input only. The unencrypted PKCS#1 or PKCS#8 PEM-encoded private key associated with the Client Certificate.\nIf this field is used then the 'clientCertificate' field is mandatory.", + "description_kind": "plain", + "optional": true, + "sensitive": true + }, + "type": { + "type": "string", + "description": "The current connection profile state.", + "description_kind": "plain", + "computed": true + } + }, + "description": "SSL configuration for the destination to connect to the source database.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "Specifies connection parameters required specifically for MySQL databases.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "oracle": { + "nesting_mode": "list", + "block": { + "attributes": { + "database_service": { + "type": "string", + "description": "Required. Database service for the Oracle connection.", + "description_kind": "plain", + "required": true + }, + "host": { + "type": "string", + "description": "Required. The IP or hostname of the source Oracle database.", "description_kind": "plain", "required": true }, @@ -75818,7 +79354,7 @@ }, "port": { "type": "number", - "description": "Required. The network port of the source MySQL database.", + "description": "Required. 
The network port of the source Oracle database.", "description_kind": "plain", "required": true }, @@ -75830,6 +79366,64 @@ } }, "block_types": { + "forward_ssh_connectivity": { + "nesting_mode": "list", + "block": { + "attributes": { + "hostname": { + "type": "string", + "description": "Required. Hostname for the SSH tunnel.", + "description_kind": "plain", + "required": true + }, + "password": { + "type": "string", + "description": "Input only. SSH password. Only one of 'password' and 'private_key' can be configured.", + "description_kind": "plain", + "optional": true, + "sensitive": true + }, + "port": { + "type": "number", + "description": "Port for the SSH tunnel, default value is 22.", + "description_kind": "plain", + "required": true + }, + "private_key": { + "type": "string", + "description": "Input only. SSH private key. Only one of 'password' and 'private_key' can be configured.", + "description_kind": "plain", + "optional": true, + "sensitive": true + }, + "username": { + "type": "string", + "description": "Required. Username for the SSH tunnel.", + "description_kind": "plain", + "required": true + } + }, + "description": "SSL configuration for the destination to connect to the source database.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "private_connectivity": { + "nesting_mode": "list", + "block": { + "attributes": { + "private_connection": { + "type": "string", + "description": "Required. 
The resource name (URI) of the private connection.", + "description_kind": "plain", + "required": true + } + }, + "description": "Configuration for using a private network to communicate with the source database", + "description_kind": "plain" + }, + "max_items": 1 + }, "ssl": { "nesting_mode": "list", "block": { @@ -75866,34 +79460,54 @@ "description_kind": "plain" }, "max_items": 1 + }, + "static_service_ip_connectivity": { + "nesting_mode": "list", + "block": { + "description": "This object has no nested fields.\n\nStatic IP address connectivity configured on service project.", + "description_kind": "plain" + }, + "max_items": 1 } }, - "description": "Specifies connection parameters required specifically for MySQL databases.", + "description": "Specifies connection parameters required specifically for Oracle databases.", "description_kind": "plain" }, "max_items": 1 }, - "oracle": { + "postgresql": { "nesting_mode": "list", "block": { "attributes": { - "database_service": { + "alloydb_cluster_id": { "type": "string", - "description": "Required. Database service for the Oracle connection.", + "description": "If the connected database is an AlloyDB instance, use this field to provide the AlloyDB cluster ID.", "description_kind": "plain", - "required": true + "optional": true + }, + "cloud_sql_id": { + "type": "string", + "description": "If the source is a Cloud SQL database, use this field to provide the Cloud SQL instance ID of the source.", + "description_kind": "plain", + "optional": true }, "host": { "type": "string", - "description": "Required. The IP or hostname of the source Oracle database.", + "description": "The IP or hostname of the source MySQL database.", "description_kind": "plain", - "required": true + "optional": true + }, + "network_architecture": { + "type": "string", + "description": "Output only. 
If the source is a Cloud SQL database, this field indicates the network architecture it's associated with.", + "description_kind": "plain", + "computed": true }, "password": { "type": "string", - "description": "Required. Input only. The password for the user that Database Migration Service will be using to connect to the database.\nThis field is not returned on request, and the value is encrypted when stored in Database Migration Service.", + "description": "Input only. The password for the user that Database Migration Service will be using to connect to the database.\nThis field is not returned on request, and the value is encrypted when stored in Database Migration Service.", "description_kind": "plain", - "required": true, + "optional": true, "sensitive": true }, "password_set": { @@ -75904,76 +79518,18 @@ }, "port": { "type": "number", - "description": "Required. The network port of the source Oracle database.", + "description": "The network port of the source MySQL database.", "description_kind": "plain", - "required": true + "optional": true }, "username": { "type": "string", - "description": "Required. The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service.", + "description": "The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service.", "description_kind": "plain", - "required": true + "optional": true } }, "block_types": { - "forward_ssh_connectivity": { - "nesting_mode": "list", - "block": { - "attributes": { - "hostname": { - "type": "string", - "description": "Required. Hostname for the SSH tunnel.", - "description_kind": "plain", - "required": true - }, - "password": { - "type": "string", - "description": "Input only. SSH password. 
Only one of 'password' and 'private_key' can be configured.", - "description_kind": "plain", - "optional": true, - "sensitive": true - }, - "port": { - "type": "number", - "description": "Port for the SSH tunnel, default value is 22.", - "description_kind": "plain", - "required": true - }, - "private_key": { - "type": "string", - "description": "Input only. SSH private key. Only one of 'password' and 'private_key' can be configured.", - "description_kind": "plain", - "optional": true, - "sensitive": true - }, - "username": { - "type": "string", - "description": "Required. Username for the SSH tunnel.", - "description_kind": "plain", - "required": true - } - }, - "description": "SSL configuration for the destination to connect to the source database.", - "description_kind": "plain" - }, - "max_items": 1 - }, - "private_connectivity": { - "nesting_mode": "list", - "block": { - "attributes": { - "private_connection": { - "type": "string", - "description": "Required. The resource name (URI) of the private connection.", - "description_kind": "plain", - "required": true - } - }, - "description": "Configuration for using a private network to communicate with the source database", - "description_kind": "plain" - }, - "max_items": 1 - }, "ssl": { "nesting_mode": "list", "block": { @@ -76010,109 +79566,264 @@ "description_kind": "plain" }, "max_items": 1 - }, - "static_service_ip_connectivity": { - "nesting_mode": "list", - "block": { - "description": "This object has no nested fields.\n\nStatic IP address connectivity configured on service project.", - "description_kind": "plain" - }, - "max_items": 1 } }, - "description": "Specifies connection parameters required specifically for Oracle databases.", + "description": "Specifies connection parameters required specifically for PostgreSQL databases.", "description_kind": "plain" }, "max_items": 1 }, - "postgresql": { - "nesting_mode": "list", + "timeouts": { + "nesting_mode": "single", "block": { "attributes": { - 
"cloud_sql_id": { + "create": { "type": "string", - "description": "If the source is a Cloud SQL database, use this field to provide the Cloud SQL instance ID of the source.", "description_kind": "plain", "optional": true }, - "host": { - "type": "string", - "description": "Required. The IP or hostname of the source MySQL database.", - "description_kind": "plain", - "required": true - }, - "network_architecture": { - "type": "string", - "description": "Output only. If the source is a Cloud SQL database, this field indicates the network architecture it's associated with.", - "description_kind": "plain", - "computed": true - }, - "password": { + "delete": { "type": "string", - "description": "Required. Input only. The password for the user that Database Migration Service will be using to connect to the database.\nThis field is not returned on request, and the value is encrypted when stored in Database Migration Service.", "description_kind": "plain", - "required": true, - "sensitive": true - }, - "password_set": { - "type": "bool", - "description": "Output only. Indicates If this connection profile password is stored.", - "description_kind": "plain", - "computed": true - }, - "port": { - "type": "number", - "description": "Required. The network port of the source MySQL database.", - "description_kind": "plain", - "required": true + "optional": true }, - "username": { + "update": { "type": "string", - "description": "Required. The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service.", "description_kind": "plain", - "required": true + "optional": true } }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, + "google_database_migration_service_migration_job": { + "version": 0, + "block": { + "attributes": { + "create_time": { + "type": "string", + "description": "Output only. The timestamp when the resource was created. 
A timestamp in RFC3339 UTC 'Zulu' format, accurate to nanoseconds. Example: '2014-10-02T15:01:23.045123456Z'.", + "description_kind": "plain", + "computed": true + }, + "destination": { + "type": "string", + "description": "The name of the destination connection profile resource in the form of projects/{project}/locations/{location}/connectionProfiles/{destinationConnectionProfile}.", + "description_kind": "plain", + "required": true + }, + "display_name": { + "type": "string", + "description": "The migration job display name.", + "description_kind": "plain", + "optional": true + }, + "dump_path": { + "type": "string", + "description": "The path to the dump file in Google Cloud Storage,\nin the format: (gs://[BUCKET_NAME]/[OBJECT_NAME]).\nThis field and the \"dump_flags\" field are mutually exclusive.", + "description_kind": "plain", + "optional": true + }, + "dump_type": { + "type": "string", + "description": "The type of the data dump. Supported for MySQL to CloudSQL for MySQL\nmigrations only. Possible values: [\"LOGICAL\", \"PHYSICAL\"]", + "description_kind": "plain", + "optional": true + }, + "effective_labels": { + "type": [ + "map", + "string" + ], + "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, + "error": { + "type": [ + "list", + [ + "object", + { + "code": "number", + "details": [ + "list", + [ + "map", + "string" + ] + ], + "message": "string" + } + ] + ], + "description": "Output only. 
The error details in case of state FAILED.", + "description_kind": "plain", + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "labels": { + "type": [ + "map", + "string" + ], + "description": "The resource labels for migration job to use to annotate any related underlying resources such as Compute Engine VMs.\n\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", + "description_kind": "plain", + "optional": true + }, + "location": { + "type": "string", + "description": "The location where the migration job should reside.", + "description_kind": "plain", + "optional": true + }, + "migration_job_id": { + "type": "string", + "description": "The ID of the migration job.", + "description_kind": "plain", + "required": true + }, + "name": { + "type": "string", + "description": "The name of this migration job resource in the form of projects/{project}/locations/{location}/migrationJobs/{migrationJob}.", + "description_kind": "plain", + "computed": true + }, + "phase": { + "type": "string", + "description": "The current migration job phase.", + "description_kind": "plain", + "computed": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "source": { + "type": "string", + "description": "The name of the source connection profile resource in the form of projects/{project}/locations/{location}/connectionProfiles/{sourceConnectionProfile}.", + "description_kind": "plain", + "required": true + }, + "state": { + "type": "string", + "description": "The current migration job state.", + "description_kind": "plain", + "computed": true + }, + "terraform_labels": { + "type": [ + "map", + "string" + ], + "description": "The combination of labels configured directly on the resource\n and 
default labels configured on the provider.", + "description_kind": "plain", + "computed": true + }, + "type": { + "type": "string", + "description": "The type of the migration job. Possible values: [\"ONE_TIME\", \"CONTINUOUS\"]", + "description_kind": "plain", + "required": true + } + }, + "block_types": { + "dump_flags": { + "nesting_mode": "list", + "block": { "block_types": { - "ssl": { + "dump_flags": { "nesting_mode": "list", "block": { "attributes": { - "ca_certificate": { - "type": "string", - "description": "Required. Input only. The x509 PEM-encoded certificate of the CA that signed the source database server's certificate.\nThe replica will use this certificate to verify it's connecting to the right host.", - "description_kind": "plain", - "required": true, - "sensitive": true - }, - "client_certificate": { - "type": "string", - "description": "Input only. The x509 PEM-encoded certificate that will be used by the replica to authenticate against the source database server.\nIf this field is used then the 'clientKey' field is mandatory", - "description_kind": "plain", - "optional": true, - "sensitive": true - }, - "client_key": { + "name": { "type": "string", - "description": "Input only. 
The unencrypted PKCS#1 or PKCS#8 PEM-encoded private key associated with the Client Certificate.\nIf this field is used then the 'clientCertificate' field is mandatory.", + "description": "The name of the flag", "description_kind": "plain", - "optional": true, - "sensitive": true + "optional": true }, - "type": { + "value": { "type": "string", - "description": "The current connection profile state.", + "description": "The vale of the flag", "description_kind": "plain", - "computed": true + "optional": true } }, - "description": "SSL configuration for the destination to connect to the source database.", + "description": "A list of dump flags", "description_kind": "plain" - }, - "max_items": 1 + } } }, - "description": "Specifies connection parameters required specifically for PostgreSQL databases.", + "description": "The initial dump flags.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "performance_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "dump_parallel_level": { + "type": "string", + "description": "Initial dump parallelism level. 
Possible values: [\"MIN\", \"OPTIMAL\", \"MAX\"]", + "description_kind": "plain", + "optional": true + } + }, + "description": "Data dump parallelism settings used by the migration.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "reverse_ssh_connectivity": { + "nesting_mode": "list", + "block": { + "attributes": { + "vm": { + "type": "string", + "description": "The name of the virtual machine (Compute Engine) used as the bastion server\nfor the SSH tunnel.", + "description_kind": "plain", + "optional": true + }, + "vm_ip": { + "type": "string", + "description": "The IP of the virtual machine (Compute Engine) used as the bastion server\nfor the SSH tunnel.", + "description_kind": "plain", + "optional": true + }, + "vm_port": { + "type": "number", + "description": "The forwarding port of the virtual machine (Compute Engine) used as the\nbastion server for the SSH tunnel.", + "description_kind": "plain", + "optional": true + }, + "vpc": { + "type": "string", + "description": "The name of the VPC to peer with the Cloud SQL private network.", + "description_kind": "plain", + "optional": true + } + }, + "description": "The details of the VPC network that the source database is located in.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "static_ip_connectivity": { + "nesting_mode": "list", + "block": { + "description": "If set to an empty object ('{}'), the source database will allow incoming\nconnections from the public IP of the destination database.\nYou can retrieve the public IP of the Cloud SQL instance from the\nCloud SQL console or using Cloud SQL APIs.", "description_kind": "plain" }, "max_items": 1 @@ -76139,6 +79850,22 @@ }, "description_kind": "plain" } + }, + "vpc_peering_connectivity": { + "nesting_mode": "list", + "block": { + "attributes": { + "vpc": { + "type": "string", + "description": "The name of the VPC network to peer with the Cloud SQL private network.", + "description_kind": "plain", + "optional": true + } + }, + 
"description": "The details of the VPC network that the source database is located in.", + "description_kind": "plain" + }, + "max_items": 1 } }, "description_kind": "plain" @@ -80735,6 +84462,565 @@ "description_kind": "plain" } }, + "google_dataproc_batch": { + "version": 0, + "block": { + "attributes": { + "batch_id": { + "type": "string", + "description": "The ID to use for the batch, which will become the final component of the batch's resource name.\nThis value must be 4-63 characters. Valid characters are /[a-z][0-9]-/.", + "description_kind": "plain", + "optional": true + }, + "create_time": { + "type": "string", + "description": "The time when the batch was created.", + "description_kind": "plain", + "computed": true + }, + "creator": { + "type": "string", + "description": "The email address of the user who created the batch.", + "description_kind": "plain", + "computed": true + }, + "effective_labels": { + "type": [ + "map", + "string" + ], + "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "labels": { + "type": [ + "map", + "string" + ], + "description": "The labels to associate with this batch.\n\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", + "description_kind": "plain", + "optional": true + }, + "location": { + "type": "string", + "description": "The location in which the batch will be created in.", + "description_kind": "plain", + "optional": true + }, + "name": { + "type": "string", + "description": "The resource name of the batch.", + "description_kind": "plain", + "computed": true + }, + "operation": { + "type": "string", + 
"description": "The resource name of the operation associated with this batch.", + "description_kind": "plain", + "computed": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "runtime_info": { + "type": [ + "list", + [ + "object", + { + "approximate_usage": [ + "list", + [ + "object", + { + "accelerator_type": "string", + "milli_accelerator_seconds": "string", + "milli_dcu_seconds": "string", + "shuffle_storage_gb_seconds": "string" + } + ] + ], + "current_usage": [ + "list", + [ + "object", + { + "accelerator_type": "string", + "milli_accelerator": "string", + "milli_dcu": "string", + "milli_dcu_premium": "string", + "shuffle_storage_gb": "string", + "shuffle_storage_gb_premium": "string", + "snapshot_time": "string" + } + ] + ], + "diagnostic_output_uri": "string", + "endpoints": [ + "map", + "string" + ], + "output_uri": "string" + } + ] + ], + "description": "Runtime information about batch execution.", + "description_kind": "plain", + "computed": true + }, + "state": { + "type": "string", + "description": "The state of the batch. 
For possible values, see the [API documentation](https://cloud.google.com/dataproc-serverless/docs/reference/rest/v1/projects.locations.batches#State).", + "description_kind": "plain", + "computed": true + }, + "state_history": { + "type": [ + "list", + [ + "object", + { + "state": "string", + "state_message": "string", + "state_start_time": "string" + } + ] + ], + "description": "Historical state information for the batch.", + "description_kind": "plain", + "computed": true + }, + "state_message": { + "type": "string", + "description": "Batch state details, such as a failure description if the state is FAILED.", + "description_kind": "plain", + "computed": true + }, + "state_time": { + "type": "string", + "description": "Batch state details, such as a failure description if the state is FAILED.", + "description_kind": "plain", + "computed": true + }, + "terraform_labels": { + "type": [ + "map", + "string" + ], + "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", + "description_kind": "plain", + "computed": true + }, + "uuid": { + "type": "string", + "description": "A batch UUID (Unique Universal Identifier). 
The service generates this value when it creates the batch.", + "description_kind": "plain", + "computed": true + } + }, + "block_types": { + "environment_config": { + "nesting_mode": "list", + "block": { + "block_types": { + "execution_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "kms_key": { + "type": "string", + "description": "The Cloud KMS key to use for encryption.", + "description_kind": "plain", + "optional": true + }, + "network_tags": { + "type": [ + "list", + "string" + ], + "description": "Tags used for network traffic control.", + "description_kind": "plain", + "optional": true + }, + "network_uri": { + "type": "string", + "description": "Network configuration for workload execution.", + "description_kind": "plain", + "optional": true + }, + "service_account": { + "type": "string", + "description": "Service account that used to execute workload.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "staging_bucket": { + "type": "string", + "description": "A Cloud Storage bucket used to stage workload dependencies, config files, and store\nworkload output and other ephemeral data, such as Spark history files. If you do not specify a staging bucket,\nCloud Dataproc will determine a Cloud Storage location according to the region where your workload is running,\nand then create and manage project-level, per-location staging and temporary buckets.\nThis field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.", + "description_kind": "plain", + "optional": true + }, + "subnetwork_uri": { + "type": "string", + "description": "Subnetwork configuration for workload execution.", + "description_kind": "plain", + "optional": true + }, + "ttl": { + "type": "string", + "description": "The duration after which the workload will be terminated.\nWhen the workload exceeds this duration, it will be unconditionally terminated without waiting for ongoing\nwork to finish. 
If ttl is not specified for a batch workload, the workload will be allowed to run until it\nexits naturally (or run forever without exiting). If ttl is not specified for an interactive session,\nit defaults to 24 hours. If ttl is not specified for a batch that uses 2.1+ runtime version, it defaults to 4 hours.\nMinimum value is 10 minutes; maximum value is 14 days. If both ttl and idleTtl are specified (for an interactive session),\nthe conditions are treated as OR conditions: the workload will be terminated when it has been idle for idleTtl or\nwhen ttl has been exceeded, whichever occurs first.", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "description": "Execution configuration for a workload.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "peripherals_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "metastore_service": { + "type": "string", + "description": "Resource name of an existing Dataproc Metastore service.", + "description_kind": "plain", + "optional": true + } + }, + "block_types": { + "spark_history_server_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "dataproc_cluster": { + "type": "string", + "description": "Resource name of an existing Dataproc Cluster to act as a Spark History Server for the workload.", + "description_kind": "plain", + "optional": true + } + }, + "description": "The Spark History Server configuration for the workload.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "Peripherals configuration that workload has access to.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "Environment configuration for the batch execution.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "pyspark_batch": { + "nesting_mode": "list", + "block": { + "attributes": { + "archive_uris": { + "type": [ + "list", + "string" + ], + "description": "HCFS URIs of archives to be extracted into the 
working directory of each executor.\nSupported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", + "description_kind": "plain", + "optional": true + }, + "args": { + "type": [ + "list", + "string" + ], + "description": "The arguments to pass to the driver. Do not include arguments that can be set as batch\nproperties, such as --conf, since a collision can occur that causes an incorrect batch submission.", + "description_kind": "plain", + "optional": true + }, + "file_uris": { + "type": [ + "list", + "string" + ], + "description": "HCFS URIs of files to be placed in the working directory of each executor.", + "description_kind": "plain", + "optional": true + }, + "jar_file_uris": { + "type": [ + "list", + "string" + ], + "description": "HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.", + "description_kind": "plain", + "optional": true + }, + "main_python_file_uri": { + "type": "string", + "description": "The HCFS URI of the main Python file to use as the Spark driver. Must be a .py file.", + "description_kind": "plain", + "optional": true + }, + "python_file_uris": { + "type": [ + "list", + "string" + ], + "description": "HCFS file URIs of Python files to pass to the PySpark framework.\nSupported file types: .py, .egg, and .zip.", + "description_kind": "plain", + "optional": true + } + }, + "description": "PySpark batch config.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "runtime_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "cohort": { + "type": "string", + "description": "Optional. Cohort identifier. Identifies families of the workloads having the same shape, e.g. daily ETL jobs.", + "description_kind": "plain", + "optional": true + }, + "container_image": { + "type": "string", + "description": "Optional custom container image for the job runtime environment. 
If not specified, a default container image will be used.", + "description_kind": "plain", + "optional": true + }, + "effective_properties": { + "type": [ + "map", + "string" + ], + "description": "A mapping of property names to values, which are used to configure workload execution.", + "description_kind": "plain", + "computed": true + }, + "properties": { + "type": [ + "map", + "string" + ], + "description": "A mapping of property names to values, which are used to configure workload execution.", + "description_kind": "plain", + "optional": true + }, + "version": { + "type": "string", + "description": "Version of the batch runtime.", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "block_types": { + "autotuning_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "scenarios": { + "type": [ + "list", + "string" + ], + "description": "Optional. Scenarios for which tunings are applied. Possible values: [\"SCALING\", \"BROADCAST_HASH_JOIN\", \"MEMORY\"]", + "description_kind": "plain", + "optional": true + } + }, + "description": "Optional. Autotuning configuration of the workload.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "Runtime configuration for the batch execution.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "spark_batch": { + "nesting_mode": "list", + "block": { + "attributes": { + "archive_uris": { + "type": [ + "list", + "string" + ], + "description": "HCFS URIs of archives to be extracted into the working directory of each executor.\nSupported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", + "description_kind": "plain", + "optional": true + }, + "args": { + "type": [ + "list", + "string" + ], + "description": "The arguments to pass to the driver. 
Do not include arguments that can be set as batch\nproperties, such as --conf, since a collision can occur that causes an incorrect batch submission.", + "description_kind": "plain", + "optional": true + }, + "file_uris": { + "type": [ + "list", + "string" + ], + "description": "HCFS URIs of files to be placed in the working directory of each executor.", + "description_kind": "plain", + "optional": true + }, + "jar_file_uris": { + "type": [ + "list", + "string" + ], + "description": "HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.", + "description_kind": "plain", + "optional": true + }, + "main_class": { + "type": "string", + "description": "The name of the driver main class. The jar file that contains the class must be in the\nclasspath or specified in jarFileUris.", + "description_kind": "plain", + "optional": true + }, + "main_jar_file_uri": { + "type": "string", + "description": "The HCFS URI of the jar file that contains the main class.", + "description_kind": "plain", + "optional": true + } + }, + "description": "Spark batch config.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "spark_r_batch": { + "nesting_mode": "list", + "block": { + "attributes": { + "archive_uris": { + "type": [ + "list", + "string" + ], + "description": "HCFS URIs of archives to be extracted into the working directory of each executor.\nSupported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", + "description_kind": "plain", + "optional": true + }, + "args": { + "type": [ + "list", + "string" + ], + "description": "The arguments to pass to the driver. 
Do not include arguments that can be set as batch\nproperties, such as --conf, since a collision can occur that causes an incorrect batch submission.", + "description_kind": "plain", + "optional": true + }, + "file_uris": { + "type": [ + "list", + "string" + ], + "description": "HCFS URIs of files to be placed in the working directory of each executor.", + "description_kind": "plain", + "optional": true + }, + "main_r_file_uri": { + "type": "string", + "description": "The HCFS URI of the main R file to use as the driver. Must be a .R or .r file.", + "description_kind": "plain", + "optional": true + } + }, + "description": "SparkR batch config.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "spark_sql_batch": { + "nesting_mode": "list", + "block": { + "attributes": { + "jar_file_uris": { + "type": [ + "list", + "string" + ], + "description": "HCFS URIs of jar files to be added to the Spark CLASSPATH.", + "description_kind": "plain", + "optional": true + }, + "query_file_uri": { + "type": "string", + "description": "The HCFS URI of the script that contains Spark SQL queries to execute.", + "description_kind": "plain", + "optional": true + }, + "query_variables": { + "type": [ + "map", + "string" + ], + "description": "Mapping of query variable names to values (equivalent to the Spark SQL command: SET name=\"value\";).", + "description_kind": "plain", + "optional": true + } + }, + "description": "Spark SQL batch config.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, "google_dataproc_cluster": { "version": 1, "block": { @@ 
-81126,6 +85412,22 @@ } }, "block_types": { + "confidential_instance_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "enable_confidential_compute": { + "type": "bool", + "description": "Defines whether the instance should have confidential compute enabled.", + "description_kind": "plain", + "optional": true + } + }, + "description": "Confidential Instance Config for clusters using Compute Engine Confidential VMs.", + "description_kind": "plain" + }, + "max_items": 1 + }, "node_group_affinity": { "nesting_mode": "list", "block": { @@ -81488,6 +85790,28 @@ "description": "List of instance selection options that the group will use when creating new VMs.", "description_kind": "plain" } + }, + "provisioning_model_mix": { + "nesting_mode": "list", + "block": { + "attributes": { + "standard_capacity_base": { + "type": "number", + "description": "The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need.", + "description_kind": "plain", + "optional": true + }, + "standard_capacity_percent_above_base": { + "type": "number", + "description": "The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs.", + "description_kind": "plain", + "optional": true + } + }, + "description": "Defines how Dataproc should create VMs with a mixture of provisioning models.", + "description_kind": "plain" + }, + "max_items": 1 } }, "description": "Instance flexibility Policy allowing a mixture of VM shapes and provisioning models.", @@ -82226,6 +86550,770 @@ "description_kind": "plain" } }, + "google_dataproc_gdc_application_environment": { + "version": 0, + "block": { + "attributes": { + "annotations": { + "type": [ + "map", + "string" + ], + "description": "The annotations to associate with this application environment. 
Annotations may be used to store client information, but are not used by the server.\n\n**Note**: This field is non-authoritative, and will only manage the annotations present in your configuration.\nPlease refer to the field 'effective_annotations' for all of the annotations present on the resource.", + "description_kind": "plain", + "optional": true + }, + "application_environment_id": { + "type": "string", + "description": "The id of the application environment", + "description_kind": "plain", + "optional": true + }, + "create_time": { + "type": "string", + "description": "The timestamp when the resource was created.", + "description_kind": "plain", + "computed": true + }, + "display_name": { + "type": "string", + "description": "User-provided human-readable name to be used in user interfaces.", + "description_kind": "plain", + "optional": true + }, + "effective_annotations": { + "type": [ + "map", + "string" + ], + "description": "All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, + "effective_labels": { + "type": [ + "map", + "string" + ], + "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "labels": { + "type": [ + "map", + "string" + ], + "description": "The labels to associate with this application environment. Labels may be used for filtering and billing tracking. 
\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", + "description_kind": "plain", + "optional": true + }, + "location": { + "type": "string", + "description": "The location of the application environment", + "description_kind": "plain", + "required": true + }, + "name": { + "type": "string", + "description": "Identifier. The name of the application environment. Format: projects/{project}/locations/{location}/serviceInstances/{service_instance}/applicationEnvironments/{application_environment_id}", + "description_kind": "plain", + "computed": true + }, + "namespace": { + "type": "string", + "description": "The name of the namespace in which to create this ApplicationEnvironment. This namespace must already exist in the cluster", + "description_kind": "plain", + "optional": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "serviceinstance": { + "type": "string", + "description": "The id of the service instance to which this application environment belongs.", + "description_kind": "plain", + "required": true + }, + "terraform_labels": { + "type": [ + "map", + "string" + ], + "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", + "description_kind": "plain", + "computed": true + }, + "uid": { + "type": "string", + "description": "System generated unique identifier for this application environment, formatted as UUID4.", + "description_kind": "plain", + "computed": true + }, + "update_time": { + "type": "string", + "description": "The timestamp when the resource was most recently updated.", + "description_kind": "plain", + "computed": true + } + }, + "block_types": { + "spark_application_environment_config": { + "nesting_mode": "list", + "block": { + "attributes": { + 
"default_properties": { + "type": [ + "map", + "string" + ], + "description": "A map of default Spark properties to apply to workloads in this application environment. These defaults may be overridden by per-application properties.", + "description_kind": "plain", + "optional": true + }, + "default_version": { + "type": "string", + "description": "The default Dataproc version to use for applications submitted to this application environment", + "description_kind": "plain", + "optional": true + } + }, + "description": "Represents the SparkApplicationEnvironmentConfig.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, + "google_dataproc_gdc_service_instance": { + "version": 0, + "block": { + "attributes": { + "create_time": { + "type": "string", + "description": "The timestamp when the resource was created.", + "description_kind": "plain", + "computed": true + }, + "display_name": { + "type": "string", + "description": "User-provided human-readable name to be used in user interfaces.", + "description_kind": "plain", + "optional": true + }, + "effective_labels": { + "type": [ + "map", + "string" + ], + "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, + "effective_service_account": { + "type": "string", + "description": "Effective service account associated with ServiceInstance. This will be the service_account if specified. 
Otherwise, it will be an automatically created per-resource P4SA that also automatically has Fleet Workload. Identity bindings applied.", + "description_kind": "plain", + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "labels": { + "type": [ + "map", + "string" + ], + "description": "The labels to associate with this service instance. Labels may be used for filtering and billing tracking. \n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", + "description_kind": "plain", + "optional": true + }, + "location": { + "type": "string", + "description": "Location of the resource.", + "description_kind": "plain", + "required": true + }, + "name": { + "type": "string", + "description": "Identifier. The name of the service instance.", + "description_kind": "plain", + "computed": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "reconciling": { + "type": "bool", + "description": "Whether the service instance is currently reconciling. True if the current state of the resource does not match the intended state, and the system is working to reconcile them, whether or not the change was user initiated.", + "description_kind": "plain", + "computed": true + }, + "requested_state": { + "type": "string", + "description": "The intended state to which the service instance is reconciling. 
Possible values:\n* 'CREATING'\n* 'ACTIVE'\n* 'DISCONNECTED'\n* 'DELETING'\n* 'STOPPING'\n* 'STOPPED'\n* 'STARTING'\n* 'UPDATING'\n* 'FAILED'", + "description_kind": "plain", + "computed": true + }, + "service_account": { + "type": "string", + "description": "Requested service account to associate with ServiceInstance.", + "description_kind": "plain", + "optional": true + }, + "service_instance_id": { + "type": "string", + "description": "Id of the service instance.", + "description_kind": "plain", + "required": true + }, + "state": { + "type": "string", + "description": "The current state. Possible values:\n* 'CREATING'\n* 'ACTIVE'\n* 'DISCONNECTED'\n* 'DELETING'\n* 'STOPPING'\n* 'STOPPED'\n* 'STARTING'\n* 'UPDATING'\n* 'FAILED'", + "description_kind": "plain", + "computed": true + }, + "state_message": { + "type": "string", + "description": "A message explaining the current state.", + "description_kind": "plain", + "computed": true + }, + "terraform_labels": { + "type": [ + "map", + "string" + ], + "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", + "description_kind": "plain", + "computed": true + }, + "uid": { + "type": "string", + "description": "System generated unique identifier for this service instance, formatted as UUID4.", + "description_kind": "plain", + "computed": true + }, + "update_time": { + "type": "string", + "description": "The timestamp when the resource was most recently updated.", + "description_kind": "plain", + "computed": true + } + }, + "block_types": { + "gdce_cluster": { + "nesting_mode": "list", + "block": { + "attributes": { + "gdce_cluster": { + "type": "string", + "description": "Gdce cluster resource id.", + "description_kind": "plain", + "required": true + } + }, + "description": "Gdce cluster information.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "spark_service_instance_config": { + "nesting_mode": "list", + "block": { + 
"description": "Spark-specific service instance configuration.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, + "google_dataproc_gdc_spark_application": { + "version": 0, + "block": { + "attributes": { + "annotations": { + "type": [ + "map", + "string" + ], + "description": "The annotations to associate with this application. Annotations may be used to store client information, but are not used by the server. \n\n**Note**: This field is non-authoritative, and will only manage the annotations present in your configuration.\nPlease refer to the field 'effective_annotations' for all of the annotations present on the resource.", + "description_kind": "plain", + "optional": true + }, + "application_environment": { + "type": "string", + "description": "An ApplicationEnvironment from which to inherit configuration properties.", + "description_kind": "plain", + "optional": true + }, + "create_time": { + "type": "string", + "description": "The timestamp when the resource was created.", + "description_kind": "plain", + "computed": true + }, + "dependency_images": { + "type": [ + "list", + "string" + ], + "description": "List of container image uris for additional file dependencies. Dependent files are sequentially copied from each image. 
If a file with the same name exists in 2 images then the file from later image is used.", + "description_kind": "plain", + "optional": true + }, + "display_name": { + "type": "string", + "description": "User-provided human-readable name to be used in user interfaces.", + "description_kind": "plain", + "optional": true + }, + "effective_annotations": { + "type": [ + "map", + "string" + ], + "description": "All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, + "effective_labels": { + "type": [ + "map", + "string" + ], + "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "labels": { + "type": [ + "map", + "string" + ], + "description": "The labels to associate with this application. Labels may be used for filtering and billing tracking. \n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", + "description_kind": "plain", + "optional": true + }, + "location": { + "type": "string", + "description": "The location of the spark application.", + "description_kind": "plain", + "required": true + }, + "monitoring_endpoint": { + "type": "string", + "description": "URL for a monitoring UI for this application (for eventual Spark PHS/UI support) Out of scope for private GA", + "description_kind": "plain", + "computed": true + }, + "name": { + "type": "string", + "description": "Identifier. The name of the application. 
Format: projects/{project}/locations/{location}/serviceInstances/{service_instance}/sparkApplications/{application}", + "description_kind": "plain", + "computed": true + }, + "namespace": { + "type": "string", + "description": "The Kubernetes namespace in which to create the application. This namespace must already exist on the cluster.", + "description_kind": "plain", + "optional": true + }, + "output_uri": { + "type": "string", + "description": "An HCFS URI pointing to the location of stdout and stdout of the application Mainly useful for Pantheon and gcloud Not in scope for private GA", + "description_kind": "plain", + "computed": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "properties": { + "type": [ + "map", + "string" + ], + "description": "application-specific properties.", + "description_kind": "plain", + "optional": true + }, + "reconciling": { + "type": "bool", + "description": "Whether the application is currently reconciling. 
True if the current state of the resource does not match the intended state, and the system is working to reconcile them, whether or not the change was user initiated.", + "description_kind": "plain", + "computed": true + }, + "serviceinstance": { + "type": "string", + "description": "The id of the service instance to which this spark application belongs.", + "description_kind": "plain", + "required": true + }, + "spark_application_id": { + "type": "string", + "description": "The id of the application", + "description_kind": "plain", + "required": true + }, + "state": { + "type": "string", + "description": "The current state.\nPossible values:\n* 'STATE_UNSPECIFIED'\n* 'PENDING'\n* 'RUNNING'\n* 'CANCELLING'\n* 'CANCELLED'\n* 'SUCCEEDED'\n* 'FAILED'", + "description_kind": "plain", + "computed": true + }, + "state_message": { + "type": "string", + "description": "A message explaining the current state.", + "description_kind": "plain", + "computed": true + }, + "terraform_labels": { + "type": [ + "map", + "string" + ], + "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", + "description_kind": "plain", + "computed": true + }, + "uid": { + "type": "string", + "description": "System generated unique identifier for this application, formatted as UUID4.", + "description_kind": "plain", + "computed": true + }, + "update_time": { + "type": "string", + "description": "The timestamp when the resource was most recently updated.", + "description_kind": "plain", + "computed": true + }, + "version": { + "type": "string", + "description": "The Dataproc version of this application.", + "description_kind": "plain", + "optional": true + } + }, + "block_types": { + "pyspark_application_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "archive_uris": { + "type": [ + "list", + "string" + ], + "description": "HCFS URIs of archives to be extracted into the working directory of each executor. 
Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", + "description_kind": "plain", + "optional": true + }, + "args": { + "type": [ + "list", + "string" + ], + "description": "The arguments to pass to the driver. Do not include arguments, such as '--conf', that can be set as job properties, since a collision may occur that causes an incorrect job submission.", + "description_kind": "plain", + "optional": true + }, + "file_uris": { + "type": [ + "list", + "string" + ], + "description": "HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.", + "description_kind": "plain", + "optional": true + }, + "jar_file_uris": { + "type": [ + "list", + "string" + ], + "description": "HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.", + "description_kind": "plain", + "optional": true + }, + "main_python_file_uri": { + "type": "string", + "description": "The HCFS URI of the main Python file to use as the driver. Must be a .py file.", + "description_kind": "plain", + "required": true + }, + "python_file_uris": { + "type": [ + "list", + "string" + ], + "description": "HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.", + "description_kind": "plain", + "optional": true + } + }, + "description": "Represents the PySparkApplicationConfig.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "spark_application_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "archive_uris": { + "type": [ + "list", + "string" + ], + "description": "HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: '.jar', '.tar', '.tar.gz', '.tgz', and '.zip'.", + "description_kind": "plain", + "optional": true + }, + "args": { + "type": [ + "list", + "string" + ], + "description": "The arguments to pass to the driver. 
Do not include arguments that can be set as application properties, such as '--conf', since a collision can occur that causes an incorrect application submission.", + "description_kind": "plain", + "optional": true + }, + "file_uris": { + "type": [ + "list", + "string" + ], + "description": "HCFS URIs of files to be placed in the working directory of each executor.", + "description_kind": "plain", + "optional": true + }, + "jar_file_uris": { + "type": [ + "list", + "string" + ], + "description": "HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.", + "description_kind": "plain", + "optional": true + }, + "main_class": { + "type": "string", + "description": "The name of the driver main class. The jar file that contains the class must be in the classpath or specified in 'jar_file_uris'.", + "description_kind": "plain", + "optional": true + }, + "main_jar_file_uri": { + "type": "string", + "description": "The HCFS URI of the jar file that contains the main class.", + "description_kind": "plain", + "optional": true + } + }, + "description": "Represents the SparkApplicationConfig.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "spark_r_application_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "archive_uris": { + "type": [ + "list", + "string" + ], + "description": "HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", + "description_kind": "plain", + "optional": true + }, + "args": { + "type": [ + "list", + "string" + ], + "description": "The arguments to pass to the driver. 
Do not include arguments, such as '--conf', that can be set as job properties, since a collision may occur that causes an incorrect job submission.", + "description_kind": "plain", + "optional": true + }, + "file_uris": { + "type": [ + "list", + "string" + ], + "description": "HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.", + "description_kind": "plain", + "optional": true + }, + "main_r_file_uri": { + "type": "string", + "description": "The HCFS URI of the main R file to use as the driver. Must be a .R file.", + "description_kind": "plain", + "required": true + } + }, + "description": "Represents the SparkRApplicationConfig.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "spark_sql_application_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "jar_file_uris": { + "type": [ + "list", + "string" + ], + "description": "HCFS URIs of jar files to be added to the Spark CLASSPATH.", + "description_kind": "plain", + "optional": true + }, + "query_file_uri": { + "type": "string", + "description": "The HCFS URI of the script that contains SQL queries.", + "description_kind": "plain", + "optional": true + }, + "script_variables": { + "type": [ + "map", + "string" + ], + "description": "Mapping of query variable names to values (equivalent to the Spark SQL command: SET 'name=\"value\";').", + "description_kind": "plain", + "optional": true + } + }, + "block_types": { + "query_list": { + "nesting_mode": "list", + "block": { + "attributes": { + "queries": { + "type": [ + "list", + "string" + ], + "description": "The queries to run.", + "description_kind": "plain", + "required": true + } + }, + "description": "Represents a list of queries.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "Represents the SparkRApplicationConfig.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "timeouts": { + "nesting_mode": "single", + "block": { + 
"attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, "google_dataproc_job": { "version": 0, "block": { @@ -83489,6 +88577,12 @@ "description_kind": "plain", "optional": true }, + "deletion_protection": { + "type": "bool", + "description": "Indicates if the dataproc metastore should be protected against accidental deletions.", + "description_kind": "plain", + "optional": true + }, "effective_labels": { "type": [ "map", @@ -85872,86 +90966,6 @@ "description_kind": "plain" } }, - "google_datastore_index": { - "version": 0, - "block": { - "attributes": { - "ancestor": { - "type": "string", - "description": "Policy for including ancestors in the index. Default value: \"NONE\" Possible values: [\"NONE\", \"ALL_ANCESTORS\"]", - "description_kind": "plain", - "optional": true - }, - "id": { - "type": "string", - "description_kind": "plain", - "optional": true, - "computed": true - }, - "index_id": { - "type": "string", - "description": "The index id.", - "description_kind": "plain", - "computed": true - }, - "kind": { - "type": "string", - "description": "The entity kind which the index applies to.", - "description_kind": "plain", - "required": true - }, - "project": { - "type": "string", - "description_kind": "plain", - "optional": true, - "computed": true - } - }, - "block_types": { - "properties": { - "nesting_mode": "list", - "block": { - "attributes": { - "direction": { - "type": "string", - "description": "The direction the index should optimize for sorting. 
Possible values: [\"ASCENDING\", \"DESCENDING\"]", - "description_kind": "plain", - "required": true - }, - "name": { - "type": "string", - "description": "The property name to index.", - "description_kind": "plain", - "required": true - } - }, - "description": "An ordered list of properties to index on.", - "description_kind": "plain" - } - }, - "timeouts": { - "nesting_mode": "single", - "block": { - "attributes": { - "create": { - "type": "string", - "description_kind": "plain", - "optional": true - }, - "delete": { - "type": "string", - "description_kind": "plain", - "optional": true - } - }, - "description_kind": "plain" - } - } - }, - "description_kind": "plain", - "deprecated": true - } - }, "google_datastream_connection_profile": { "version": 0, "block": { @@ -86527,7 +91541,7 @@ }, "desired_state": { "type": "string", - "description": "Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream.", + "description": "Desired state of the Stream. Set this field to 'RUNNING' to start the stream,\n'NOT_STARTED' to create the stream without starting and 'PAUSED' to pause\nthe stream from a 'RUNNING' state.\nPossible values: NOT_STARTED, RUNNING, PAUSED. 
Default: NOT_STARTED", "description_kind": "plain", "optional": true }, @@ -87230,103 +92244,15 @@ } }, "block_types": { - "exclude_objects": { + "binary_log_position": { "nesting_mode": "list", "block": { - "block_types": { - "mysql_databases": { - "nesting_mode": "list", - "block": { - "attributes": { - "database": { - "type": "string", - "description": "Database name.", - "description_kind": "plain", - "required": true - } - }, - "block_types": { - "mysql_tables": { - "nesting_mode": "list", - "block": { - "attributes": { - "table": { - "type": "string", - "description": "Table name.", - "description_kind": "plain", - "required": true - } - }, - "block_types": { - "mysql_columns": { - "nesting_mode": "list", - "block": { - "attributes": { - "collation": { - "type": "string", - "description": "Column collation.", - "description_kind": "plain", - "optional": true - }, - "column": { - "type": "string", - "description": "Column name.", - "description_kind": "plain", - "optional": true - }, - "data_type": { - "type": "string", - "description": "The MySQL data type. Full data types list can be found here:\nhttps://dev.mysql.com/doc/refman/8.0/en/data-types.html", - "description_kind": "plain", - "optional": true - }, - "length": { - "type": "number", - "description": "Column length.", - "description_kind": "plain", - "computed": true - }, - "nullable": { - "type": "bool", - "description": "Whether or not the column can accept a null value.", - "description_kind": "plain", - "optional": true - }, - "ordinal_position": { - "type": "number", - "description": "The ordinal position of the column in the table.", - "description_kind": "plain", - "optional": true - }, - "primary_key": { - "type": "bool", - "description": "Whether or not the column represents a primary key.", - "description_kind": "plain", - "optional": true - } - }, - "description": "MySQL columns in the schema. 
When unspecified as part of include/exclude objects, includes/excludes everything.", - "description_kind": "plain" - } - } - }, - "description": "Tables in the database.", - "description_kind": "plain" - } - } - }, - "description": "MySQL databases on the server", - "description_kind": "plain" - }, - "min_items": 1 - } - }, - "description": "MySQL objects to exclude from the stream.", + "description": "CDC reader reads from binary logs replication cdc method.", "description_kind": "plain" }, "max_items": 1 }, - "include_objects": { + "exclude_objects": { "nesting_mode": "list", "block": { "block_types": { @@ -87417,149 +92343,15 @@ "min_items": 1 } }, - "description": "MySQL objects to retrieve from the source.", - "description_kind": "plain" - }, - "max_items": 1 - } - }, - "description": "MySQL data source configuration.", - "description_kind": "plain" - }, - "max_items": 1 - }, - "oracle_source_config": { - "nesting_mode": "list", - "block": { - "attributes": { - "max_concurrent_backfill_tasks": { - "type": "number", - "description": "Maximum number of concurrent backfill tasks. The number should be non negative.\nIf not set (or set to 0), the system's default value will be used.", - "description_kind": "plain", - "optional": true, - "computed": true - }, - "max_concurrent_cdc_tasks": { - "type": "number", - "description": "Maximum number of concurrent CDC tasks. 
The number should be non negative.\nIf not set (or set to 0), the system's default value will be used.", - "description_kind": "plain", - "optional": true, - "computed": true - } - }, - "block_types": { - "drop_large_objects": { - "nesting_mode": "list", - "block": { - "description": "Configuration to drop large object values.", + "description": "MySQL objects to exclude from the stream.", "description_kind": "plain" }, "max_items": 1 }, - "exclude_objects": { + "gtid": { "nesting_mode": "list", "block": { - "block_types": { - "oracle_schemas": { - "nesting_mode": "list", - "block": { - "attributes": { - "schema": { - "type": "string", - "description": "Schema name.", - "description_kind": "plain", - "required": true - } - }, - "block_types": { - "oracle_tables": { - "nesting_mode": "list", - "block": { - "attributes": { - "table": { - "type": "string", - "description": "Table name.", - "description_kind": "plain", - "required": true - } - }, - "block_types": { - "oracle_columns": { - "nesting_mode": "list", - "block": { - "attributes": { - "column": { - "type": "string", - "description": "Column name.", - "description_kind": "plain", - "optional": true - }, - "data_type": { - "type": "string", - "description": "The Oracle data type. 
Full data types list can be found here:\nhttps://docs.oracle.com/en/database/oracle/oracle-database/21/sqlrf/Data-Types.html", - "description_kind": "plain", - "optional": true - }, - "encoding": { - "type": "string", - "description": "Column encoding.", - "description_kind": "plain", - "computed": true - }, - "length": { - "type": "number", - "description": "Column length.", - "description_kind": "plain", - "computed": true - }, - "nullable": { - "type": "bool", - "description": "Whether or not the column can accept a null value.", - "description_kind": "plain", - "computed": true - }, - "ordinal_position": { - "type": "number", - "description": "The ordinal position of the column in the table.", - "description_kind": "plain", - "computed": true - }, - "precision": { - "type": "number", - "description": "Column precision.", - "description_kind": "plain", - "computed": true - }, - "primary_key": { - "type": "bool", - "description": "Whether or not the column represents a primary key.", - "description_kind": "plain", - "computed": true - }, - "scale": { - "type": "number", - "description": "Column scale.", - "description_kind": "plain", - "computed": true - } - }, - "description": "Oracle columns in the schema. 
When unspecified as part of include/exclude objects, includes/excludes everything.", - "description_kind": "plain" - } - } - }, - "description": "Tables in the database.", - "description_kind": "plain" - } - } - }, - "description": "Oracle schemas/databases in the database server", - "description_kind": "plain" - }, - "min_items": 1 - } - }, - "description": "Oracle objects to exclude from the stream.", + "description": "CDC reader reads from gtid based replication.", "description_kind": "plain" }, "max_items": 1 @@ -87568,19 +92360,19 @@ "nesting_mode": "list", "block": { "block_types": { - "oracle_schemas": { + "mysql_databases": { "nesting_mode": "list", "block": { "attributes": { - "schema": { + "database": { "type": "string", - "description": "Schema name.", + "description": "Database name.", "description_kind": "plain", "required": true } }, "block_types": { - "oracle_tables": { + "mysql_tables": { "nesting_mode": "list", "block": { "attributes": { @@ -87592,157 +92384,16 @@ } }, "block_types": { - "oracle_columns": { + "mysql_columns": { "nesting_mode": "list", "block": { "attributes": { - "column": { - "type": "string", - "description": "Column name.", - "description_kind": "plain", - "optional": true - }, - "data_type": { + "collation": { "type": "string", - "description": "The Oracle data type. 
Full data types list can be found here:\nhttps://docs.oracle.com/en/database/oracle/oracle-database/21/sqlrf/Data-Types.html", + "description": "Column collation.", "description_kind": "plain", "optional": true }, - "encoding": { - "type": "string", - "description": "Column encoding.", - "description_kind": "plain", - "computed": true - }, - "length": { - "type": "number", - "description": "Column length.", - "description_kind": "plain", - "computed": true - }, - "nullable": { - "type": "bool", - "description": "Whether or not the column can accept a null value.", - "description_kind": "plain", - "computed": true - }, - "ordinal_position": { - "type": "number", - "description": "The ordinal position of the column in the table.", - "description_kind": "plain", - "computed": true - }, - "precision": { - "type": "number", - "description": "Column precision.", - "description_kind": "plain", - "computed": true - }, - "primary_key": { - "type": "bool", - "description": "Whether or not the column represents a primary key.", - "description_kind": "plain", - "computed": true - }, - "scale": { - "type": "number", - "description": "Column scale.", - "description_kind": "plain", - "computed": true - } - }, - "description": "Oracle columns in the schema. 
When unspecified as part of include/exclude objects, includes/excludes everything.", - "description_kind": "plain" - } - } - }, - "description": "Tables in the database.", - "description_kind": "plain" - } - } - }, - "description": "Oracle schemas/databases in the database server", - "description_kind": "plain" - }, - "min_items": 1 - } - }, - "description": "Oracle objects to retrieve from the source.", - "description_kind": "plain" - }, - "max_items": 1 - }, - "stream_large_objects": { - "nesting_mode": "list", - "block": { - "description": "Configuration to drop large object values.", - "description_kind": "plain" - }, - "max_items": 1 - } - }, - "description": "MySQL data source configuration.", - "description_kind": "plain" - }, - "max_items": 1 - }, - "postgresql_source_config": { - "nesting_mode": "list", - "block": { - "attributes": { - "max_concurrent_backfill_tasks": { - "type": "number", - "description": "Maximum number of concurrent backfill tasks. The number should be non\nnegative. 
If not set (or set to 0), the system's default value will be used.", - "description_kind": "plain", - "optional": true, - "computed": true - }, - "publication": { - "type": "string", - "description": "The name of the publication that includes the set of all tables\nthat are defined in the stream's include_objects.", - "description_kind": "plain", - "required": true - }, - "replication_slot": { - "type": "string", - "description": "The name of the logical replication slot that's configured with\nthe pgoutput plugin.", - "description_kind": "plain", - "required": true - } - }, - "block_types": { - "exclude_objects": { - "nesting_mode": "list", - "block": { - "block_types": { - "postgresql_schemas": { - "nesting_mode": "list", - "block": { - "attributes": { - "schema": { - "type": "string", - "description": "Database name.", - "description_kind": "plain", - "required": true - } - }, - "block_types": { - "postgresql_tables": { - "nesting_mode": "list", - "block": { - "attributes": { - "table": { - "type": "string", - "description": "Table name.", - "description_kind": "plain", - "required": true - } - }, - "block_types": { - "postgresql_columns": { - "nesting_mode": "list", - "block": { - "attributes": { "column": { "type": "string", "description": "Column name.", @@ -87751,7 +92402,386 @@ }, "data_type": { "type": "string", - "description": "The PostgreSQL data type. Full data types list can be found here:\nhttps://www.postgresql.org/docs/current/datatype.html", + "description": "The MySQL data type. 
Full data types list can be found here:\nhttps://dev.mysql.com/doc/refman/8.0/en/data-types.html", + "description_kind": "plain", + "optional": true + }, + "length": { + "type": "number", + "description": "Column length.", + "description_kind": "plain", + "computed": true + }, + "nullable": { + "type": "bool", + "description": "Whether or not the column can accept a null value.", + "description_kind": "plain", + "optional": true + }, + "ordinal_position": { + "type": "number", + "description": "The ordinal position of the column in the table.", + "description_kind": "plain", + "optional": true + }, + "primary_key": { + "type": "bool", + "description": "Whether or not the column represents a primary key.", + "description_kind": "plain", + "optional": true + } + }, + "description": "MySQL columns in the schema. When unspecified as part of include/exclude objects, includes/excludes everything.", + "description_kind": "plain" + } + } + }, + "description": "Tables in the database.", + "description_kind": "plain" + } + } + }, + "description": "MySQL databases on the server", + "description_kind": "plain" + }, + "min_items": 1 + } + }, + "description": "MySQL objects to retrieve from the source.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "MySQL data source configuration.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "oracle_source_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "max_concurrent_backfill_tasks": { + "type": "number", + "description": "Maximum number of concurrent backfill tasks. The number should be non negative.\nIf not set (or set to 0), the system's default value will be used.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "max_concurrent_cdc_tasks": { + "type": "number", + "description": "Maximum number of concurrent CDC tasks. 
The number should be non negative.\nIf not set (or set to 0), the system's default value will be used.", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "block_types": { + "drop_large_objects": { + "nesting_mode": "list", + "block": { + "description": "Configuration to drop large object values.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "exclude_objects": { + "nesting_mode": "list", + "block": { + "block_types": { + "oracle_schemas": { + "nesting_mode": "list", + "block": { + "attributes": { + "schema": { + "type": "string", + "description": "Schema name.", + "description_kind": "plain", + "required": true + } + }, + "block_types": { + "oracle_tables": { + "nesting_mode": "list", + "block": { + "attributes": { + "table": { + "type": "string", + "description": "Table name.", + "description_kind": "plain", + "required": true + } + }, + "block_types": { + "oracle_columns": { + "nesting_mode": "list", + "block": { + "attributes": { + "column": { + "type": "string", + "description": "Column name.", + "description_kind": "plain", + "optional": true + }, + "data_type": { + "type": "string", + "description": "The Oracle data type. 
Full data types list can be found here:\nhttps://docs.oracle.com/en/database/oracle/oracle-database/21/sqlrf/Data-Types.html", + "description_kind": "plain", + "optional": true + }, + "encoding": { + "type": "string", + "description": "Column encoding.", + "description_kind": "plain", + "computed": true + }, + "length": { + "type": "number", + "description": "Column length.", + "description_kind": "plain", + "computed": true + }, + "nullable": { + "type": "bool", + "description": "Whether or not the column can accept a null value.", + "description_kind": "plain", + "computed": true + }, + "ordinal_position": { + "type": "number", + "description": "The ordinal position of the column in the table.", + "description_kind": "plain", + "computed": true + }, + "precision": { + "type": "number", + "description": "Column precision.", + "description_kind": "plain", + "computed": true + }, + "primary_key": { + "type": "bool", + "description": "Whether or not the column represents a primary key.", + "description_kind": "plain", + "computed": true + }, + "scale": { + "type": "number", + "description": "Column scale.", + "description_kind": "plain", + "computed": true + } + }, + "description": "Oracle columns in the schema. 
When unspecified as part of include/exclude objects, includes/excludes everything.", + "description_kind": "plain" + } + } + }, + "description": "Tables in the database.", + "description_kind": "plain" + } + } + }, + "description": "Oracle schemas/databases in the database server", + "description_kind": "plain" + }, + "min_items": 1 + } + }, + "description": "Oracle objects to exclude from the stream.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "include_objects": { + "nesting_mode": "list", + "block": { + "block_types": { + "oracle_schemas": { + "nesting_mode": "list", + "block": { + "attributes": { + "schema": { + "type": "string", + "description": "Schema name.", + "description_kind": "plain", + "required": true + } + }, + "block_types": { + "oracle_tables": { + "nesting_mode": "list", + "block": { + "attributes": { + "table": { + "type": "string", + "description": "Table name.", + "description_kind": "plain", + "required": true + } + }, + "block_types": { + "oracle_columns": { + "nesting_mode": "list", + "block": { + "attributes": { + "column": { + "type": "string", + "description": "Column name.", + "description_kind": "plain", + "optional": true + }, + "data_type": { + "type": "string", + "description": "The Oracle data type. 
Full data types list can be found here:\nhttps://docs.oracle.com/en/database/oracle/oracle-database/21/sqlrf/Data-Types.html", + "description_kind": "plain", + "optional": true + }, + "encoding": { + "type": "string", + "description": "Column encoding.", + "description_kind": "plain", + "computed": true + }, + "length": { + "type": "number", + "description": "Column length.", + "description_kind": "plain", + "computed": true + }, + "nullable": { + "type": "bool", + "description": "Whether or not the column can accept a null value.", + "description_kind": "plain", + "computed": true + }, + "ordinal_position": { + "type": "number", + "description": "The ordinal position of the column in the table.", + "description_kind": "plain", + "computed": true + }, + "precision": { + "type": "number", + "description": "Column precision.", + "description_kind": "plain", + "computed": true + }, + "primary_key": { + "type": "bool", + "description": "Whether or not the column represents a primary key.", + "description_kind": "plain", + "computed": true + }, + "scale": { + "type": "number", + "description": "Column scale.", + "description_kind": "plain", + "computed": true + } + }, + "description": "Oracle columns in the schema. 
When unspecified as part of include/exclude objects, includes/excludes everything.", + "description_kind": "plain" + } + } + }, + "description": "Tables in the database.", + "description_kind": "plain" + } + } + }, + "description": "Oracle schemas/databases in the database server", + "description_kind": "plain" + }, + "min_items": 1 + } + }, + "description": "Oracle objects to retrieve from the source.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "stream_large_objects": { + "nesting_mode": "list", + "block": { + "description": "Configuration to drop large object values.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "MySQL data source configuration.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "postgresql_source_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "max_concurrent_backfill_tasks": { + "type": "number", + "description": "Maximum number of concurrent backfill tasks. The number should be non\nnegative. 
If not set (or set to 0), the system's default value will be used.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "publication": { + "type": "string", + "description": "The name of the publication that includes the set of all tables\nthat are defined in the stream's include_objects.", + "description_kind": "plain", + "required": true + }, + "replication_slot": { + "type": "string", + "description": "The name of the logical replication slot that's configured with\nthe pgoutput plugin.", + "description_kind": "plain", + "required": true + } + }, + "block_types": { + "exclude_objects": { + "nesting_mode": "list", + "block": { + "block_types": { + "postgresql_schemas": { + "nesting_mode": "list", + "block": { + "attributes": { + "schema": { + "type": "string", + "description": "Database name.", + "description_kind": "plain", + "required": true + } + }, + "block_types": { + "postgresql_tables": { + "nesting_mode": "list", + "block": { + "attributes": { + "table": { + "type": "string", + "description": "Table name.", + "description_kind": "plain", + "required": true + } + }, + "block_types": { + "postgresql_columns": { + "nesting_mode": "list", + "block": { + "attributes": { + "column": { + "type": "string", + "description": "Column name.", + "description_kind": "plain", + "optional": true + }, + "data_type": { + "type": "string", + "description": "The PostgreSQL data type. 
Full data types list can be found here:\nhttps://www.postgresql.org/docs/current/datatype.html", "description_kind": "plain", "optional": true }, @@ -87941,6 +92971,14 @@ } }, "block_types": { + "change_tables": { + "nesting_mode": "list", + "block": { + "description": "CDC reader reads from change tables.", + "description_kind": "plain" + }, + "max_items": 1 + }, "exclude_objects": { "nesting_mode": "list", "block": { @@ -88144,6 +93182,14 @@ "description_kind": "plain" }, "max_items": 1 + }, + "transaction_logs": { + "nesting_mode": "list", + "block": { + "description": "CDC reader reads from transaction logs.", + "description_kind": "plain" + }, + "max_items": 1 } }, "description": "SQL Server data source configuration.", @@ -88348,6 +93394,641 @@ "description_kind": "plain" } }, + "google_developer_connect_connection": { + "version": 0, + "block": { + "attributes": { + "annotations": { + "type": [ + "map", + "string" + ], + "description": "Optional. Allows clients to store small amounts of arbitrary data.\n\n**Note**: This field is non-authoritative, and will only manage the annotations present in your configuration.\nPlease refer to the field 'effective_annotations' for all of the annotations present on the resource.", + "description_kind": "plain", + "optional": true + }, + "connection_id": { + "type": "string", + "description": "Required. Id of the requesting object\nIf auto-generating Id server-side, remove this field and\nconnection_id from the method_signature of Create RPC", + "description_kind": "plain", + "required": true + }, + "create_time": { + "type": "string", + "description": "Output only. [Output only] Create timestamp", + "description_kind": "plain", + "computed": true + }, + "delete_time": { + "type": "string", + "description": "Output only. [Output only] Delete timestamp", + "description_kind": "plain", + "computed": true + }, + "disabled": { + "type": "bool", + "description": "Optional. 
If disabled is set to true, functionality is disabled for this connection.\nRepository based API methods and webhooks processing for repositories in\nthis connection will be disabled.", + "description_kind": "plain", + "optional": true + }, + "effective_annotations": { + "type": [ + "map", + "string" + ], + "description": "All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, + "effective_labels": { + "type": [ + "map", + "string" + ], + "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, + "etag": { + "type": "string", + "description": "Optional. This checksum is computed by the server based on the value of other\nfields, and may be sent on update and delete requests to ensure the\nclient has an up-to-date value before proceeding.", + "description_kind": "plain", + "optional": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "installation_state": { + "type": [ + "list", + [ + "object", + { + "action_uri": "string", + "message": "string", + "stage": "string" + } + ] + ], + "description": "Describes stage and necessary actions to be taken by the\nuser to complete the installation. Used for GitHub and GitHub Enterprise\nbased connections.", + "description_kind": "plain", + "computed": true + }, + "labels": { + "type": [ + "map", + "string" + ], + "description": "Optional. 
Labels as key value pairs\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", + "description_kind": "plain", + "optional": true + }, + "location": { + "type": "string", + "description": "Resource ID segment making up resource 'name'. It identifies the resource within its parent collection as described in https://google.aip.dev/122.", + "description_kind": "plain", + "required": true + }, + "name": { + "type": "string", + "description": "Identifier. The resource name of the connection, in the format\n'projects/{project}/locations/{location}/connections/{connection_id}'.", + "description_kind": "plain", + "computed": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "reconciling": { + "type": "bool", + "description": "Output only. Set to true when the connection is being set up or updated in the\nbackground.", + "description_kind": "plain", + "computed": true + }, + "terraform_labels": { + "type": [ + "map", + "string" + ], + "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", + "description_kind": "plain", + "computed": true + }, + "uid": { + "type": "string", + "description": "Output only. A system-assigned unique identifier for a the GitRepositoryLink.", + "description_kind": "plain", + "computed": true + }, + "update_time": { + "type": "string", + "description": "Output only. [Output only] Update timestamp", + "description_kind": "plain", + "computed": true + } + }, + "block_types": { + "crypto_key_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "key_reference": { + "type": "string", + "description": "Required. The name of the key which is used to encrypt/decrypt customer data. 
For key\nin Cloud KMS, the key should be in the format of\n'projects/*/locations/*/keyRings/*/cryptoKeys/*'.", + "description_kind": "plain", + "required": true + } + }, + "description": "The crypto key configuration. This field is used by the Customer-managed\nencryption keys (CMEK) feature.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "github_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "app_installation_id": { + "type": "string", + "description": "Optional. GitHub App installation id.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "github_app": { + "type": "string", + "description": "Required. Immutable. The GitHub Application that was installed to the GitHub user or\norganization.\nPossible values:\nGIT_HUB_APP_UNSPECIFIED\nDEVELOPER_CONNECT\nFIREBASE", + "description_kind": "plain", + "required": true + }, + "installation_uri": { + "type": "string", + "description": "Output only. The URI to navigate to in order to manage the installation associated\nwith this GitHubConfig.", + "description_kind": "plain", + "computed": true + } + }, + "block_types": { + "authorizer_credential": { + "nesting_mode": "list", + "block": { + "attributes": { + "oauth_token_secret_version": { + "type": "string", + "description": "Required. A SecretManager resource containing the OAuth token that authorizes\nthe connection. Format: 'projects/*/secrets/*/versions/*'.", + "description_kind": "plain", + "required": true + }, + "username": { + "type": "string", + "description": "Output only. 
The username associated with this token.", + "description_kind": "plain", + "computed": true + } + }, + "description": "Represents an OAuth token of the account that authorized the Connection,\nand associated metadata.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "Configuration for connections to github.com.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "github_enterprise_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "app_id": { + "type": "string", + "description": "Optional. ID of the GitHub App created from the manifest.", + "description_kind": "plain", + "optional": true + }, + "app_installation_id": { + "type": "string", + "description": "Optional. ID of the installation of the GitHub App.", + "description_kind": "plain", + "optional": true + }, + "app_slug": { + "type": "string", + "description": "Output only. The URL-friendly name of the GitHub App.", + "description_kind": "plain", + "computed": true + }, + "host_uri": { + "type": "string", + "description": "Required. The URI of the GitHub Enterprise host this connection is for.", + "description_kind": "plain", + "required": true + }, + "installation_uri": { + "type": "string", + "description": "Output only. The URI to navigate to in order to manage the installation associated\nwith this GitHubEnterpriseConfig.", + "description_kind": "plain", + "computed": true + }, + "private_key_secret_version": { + "type": "string", + "description": "Optional. SecretManager resource containing the private key of the GitHub App,\nformatted as 'projects/*/secrets/*/versions/*'.", + "description_kind": "plain", + "optional": true + }, + "server_version": { + "type": "string", + "description": "Output only. GitHub Enterprise version installed at the host_uri.", + "description_kind": "plain", + "computed": true + }, + "ssl_ca_certificate": { + "type": "string", + "description": "Optional. 
SSL certificate to use for requests to GitHub Enterprise.", + "description_kind": "plain", + "optional": true + }, + "webhook_secret_secret_version": { + "type": "string", + "description": "Optional. SecretManager resource containing the webhook secret of the GitHub App,\nformatted as 'projects/*/secrets/*/versions/*'.", + "description_kind": "plain", + "optional": true + } + }, + "block_types": { + "service_directory_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "service": { + "type": "string", + "description": "Required. The Service Directory service name.\nFormat:\nprojects/{project}/locations/{location}/namespaces/{namespace}/services/{service}.", + "description_kind": "plain", + "required": true + } + }, + "description": "ServiceDirectoryConfig represents Service Directory configuration for a\nconnection.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "Configuration for connections to an instance of GitHub Enterprise.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "gitlab_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "webhook_secret_secret_version": { + "type": "string", + "description": "Required. Immutable. SecretManager resource containing the webhook secret of a GitLab project,\nformatted as 'projects/*/secrets/*/versions/*'. This is used to validate\nwebhooks.", + "description_kind": "plain", + "required": true + } + }, + "block_types": { + "authorizer_credential": { + "nesting_mode": "list", + "block": { + "attributes": { + "user_token_secret_version": { + "type": "string", + "description": "Required. A SecretManager resource containing the user token that authorizes\nthe Developer Connect connection. Format:\n'projects/*/secrets/*/versions/*'.", + "description_kind": "plain", + "required": true + }, + "username": { + "type": "string", + "description": "Output only. 
The username associated with this token.", + "description_kind": "plain", + "computed": true + } + }, + "description": "Represents a personal access token that authorized the Connection,\nand associated metadata.", + "description_kind": "plain" + }, + "min_items": 1, + "max_items": 1 + }, + "read_authorizer_credential": { + "nesting_mode": "list", + "block": { + "attributes": { + "user_token_secret_version": { + "type": "string", + "description": "Required. A SecretManager resource containing the user token that authorizes\nthe Developer Connect connection. Format:\n'projects/*/secrets/*/versions/*'.", + "description_kind": "plain", + "required": true + }, + "username": { + "type": "string", + "description": "Output only. The username associated with this token.", + "description_kind": "plain", + "computed": true + } + }, + "description": "Represents a personal access token that authorized the Connection,\nand associated metadata.", + "description_kind": "plain" + }, + "min_items": 1, + "max_items": 1 + } + }, + "description": "Configuration for connections to gitlab.com.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "gitlab_enterprise_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "host_uri": { + "type": "string", + "description": "Required. The URI of the GitLab Enterprise host this connection is for.", + "description_kind": "plain", + "required": true + }, + "server_version": { + "type": "string", + "description": "Output only. Version of the GitLab Enterprise server running on the 'host_uri'.", + "description_kind": "plain", + "computed": true + }, + "ssl_ca_certificate": { + "type": "string", + "description": "Optional. SSL Certificate Authority certificate to use for requests to GitLab\nEnterprise instance.", + "description_kind": "plain", + "optional": true + }, + "webhook_secret_secret_version": { + "type": "string", + "description": "Required. Immutable. 
SecretManager resource containing the webhook secret of a GitLab project,\nformatted as 'projects/*/secrets/*/versions/*'. This is used to validate\nwebhooks.", + "description_kind": "plain", + "required": true + } + }, + "block_types": { + "authorizer_credential": { + "nesting_mode": "list", + "block": { + "attributes": { + "user_token_secret_version": { + "type": "string", + "description": "Required. A SecretManager resource containing the user token that authorizes\nthe Developer Connect connection. Format:\n'projects/*/secrets/*/versions/*'.", + "description_kind": "plain", + "required": true + }, + "username": { + "type": "string", + "description": "Output only. The username associated with this token.", + "description_kind": "plain", + "computed": true + } + }, + "description": "Represents a personal access token that authorized the Connection,\nand associated metadata.", + "description_kind": "plain" + }, + "min_items": 1, + "max_items": 1 + }, + "read_authorizer_credential": { + "nesting_mode": "list", + "block": { + "attributes": { + "user_token_secret_version": { + "type": "string", + "description": "Required. A SecretManager resource containing the user token that authorizes\nthe Developer Connect connection. Format:\n'projects/*/secrets/*/versions/*'.", + "description_kind": "plain", + "required": true + }, + "username": { + "type": "string", + "description": "Output only. The username associated with this token.", + "description_kind": "plain", + "computed": true + } + }, + "description": "Represents a personal access token that authorized the Connection,\nand associated metadata.", + "description_kind": "plain" + }, + "min_items": 1, + "max_items": 1 + }, + "service_directory_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "service": { + "type": "string", + "description": "Required. 
The Service Directory service name.\nFormat:\nprojects/{project}/locations/{location}/namespaces/{namespace}/services/{service}.", + "description_kind": "plain", + "required": true + } + }, + "description": "ServiceDirectoryConfig represents Service Directory configuration for a\nconnection.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "Configuration for connections to an instance of GitLab Enterprise.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, + "google_developer_connect_git_repository_link": { + "version": 0, + "block": { + "attributes": { + "annotations": { + "type": [ + "map", + "string" + ], + "description": "Optional. Allows clients to store small amounts of arbitrary data. \n\n**Note**: This field is non-authoritative, and will only manage the annotations present in your configuration.\nPlease refer to the field 'effective_annotations' for all of the annotations present on the resource.", + "description_kind": "plain", + "optional": true + }, + "clone_uri": { + "type": "string", + "description": "Required. Git Clone URI.", + "description_kind": "plain", + "required": true + }, + "create_time": { + "type": "string", + "description": "Output only. [Output only] Create timestamp", + "description_kind": "plain", + "computed": true + }, + "delete_time": { + "type": "string", + "description": "Output only. 
[Output only] Delete timestamp", + "description_kind": "plain", + "computed": true + }, + "effective_annotations": { + "type": [ + "map", + "string" + ], + "description": "All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, + "effective_labels": { + "type": [ + "map", + "string" + ], + "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, + "etag": { + "type": "string", + "description": "Optional. This checksum is computed by the server based on the value of other\nfields, and may be sent on update and delete requests to ensure the\nclient has an up-to-date value before proceeding.", + "description_kind": "plain", + "optional": true + }, + "git_repository_link_id": { + "type": "string", + "description": "Required. The ID to use for the repository, which will become the final component of\nthe repository's resource name. This ID should be unique in the connection.\nAllows alphanumeric characters and any of -._~%!$&'()*+,;=@.", + "description_kind": "plain", + "required": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "labels": { + "type": [ + "map", + "string" + ], + "description": "Optional. Labels as key value pairs \n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", + "description_kind": "plain", + "optional": true + }, + "location": { + "type": "string", + "description": "Resource ID segment making up resource 'name'. It identifies the resource within its parent collection as described in https://google.aip.dev/122. 
See documentation for resource type 'developerconnect.googleapis.com/GitRepositoryLink'.", + "description_kind": "plain", + "required": true + }, + "name": { + "type": "string", + "description": "Identifier. Resource name of the repository, in the format\n'projects/*/locations/*/connections/*/gitRepositoryLinks/*'.", + "description_kind": "plain", + "computed": true + }, + "parent_connection": { + "type": "string", + "description": "Resource ID segment making up resource 'name'. It identifies the resource within its parent collection as described in https://google.aip.dev/122. See documentation for resource type 'developerconnect.googleapis.com/GitRepositoryLink'.", + "description_kind": "plain", + "required": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "reconciling": { + "type": "bool", + "description": "Output only. Set to true when the connection is being set up or updated in the\nbackground.", + "description_kind": "plain", + "computed": true + }, + "terraform_labels": { + "type": [ + "map", + "string" + ], + "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", + "description_kind": "plain", + "computed": true + }, + "uid": { + "type": "string", + "description": "Output only. A system-assigned unique identifier for a the GitRepositoryLink.", + "description_kind": "plain", + "computed": true + }, + "update_time": { + "type": "string", + "description": "Output only. 
[Output only] Update timestamp", + "description_kind": "plain", + "computed": true + } + }, + "block_types": { + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, "google_dialogflow_agent": { "version": 0, "block": { @@ -88508,6 +94189,7 @@ "type": "bool", "description": "Determines whether this agent should log conversation queries.", "description_kind": "plain", + "deprecated": true, "optional": true }, "id": { @@ -88610,6 +94292,71 @@ "description_kind": "plain" }, "max_items": 1 + }, + "logging_settings": { + "nesting_mode": "list", + "block": { + "attributes": { + "enable_consent_based_redaction": { + "type": "bool", + "description": "Enables consent-based end-user input redaction, if true, a pre-defined session parameter **$session.params.conversation-redaction** will be used to determine if the utterance should be redacted.", + "description_kind": "plain", + "optional": true + }, + "enable_interaction_logging": { + "type": "bool", + "description": "Enables DF Interaction logging.", + "description_kind": "plain", + "optional": true + }, + "enable_stackdriver_logging": { + "type": "bool", + "description": "Enables Google Cloud Logging.", + "description_kind": "plain", + "optional": true + } + }, + "description": "Settings for logging. Settings for Dialogflow History, Contact Center messages, StackDriver logs, and speech logging. 
Exposed at the following levels:\n* Agent level", + "description_kind": "plain" + }, + "max_items": 1 + }, + "speech_settings": { + "nesting_mode": "list", + "block": { + "attributes": { + "endpointer_sensitivity": { + "type": "number", + "description": "Sensitivity of the speech model that detects the end of speech. Scale from 0 to 100.", + "description_kind": "plain", + "optional": true + }, + "models": { + "type": [ + "map", + "string" + ], + "description": "Mapping from language to Speech-to-Text model. The mapped Speech-to-Text model will be selected for requests from its corresponding language. For more information, see [Speech models](https://cloud.google.com/dialogflow/cx/docs/concept/speech-models).\nAn object containing a list of **\"key\": value** pairs. Example: **{ \"name\": \"wrench\", \"mass\": \"1.3kg\", \"count\": \"3\" }**.", + "description_kind": "plain", + "optional": true + }, + "no_speech_timeout": { + "type": "string", + "description": "Timeout before detecting no speech.\nA duration in seconds with up to nine fractional digits, ending with 's'. Example: \"3.5s\".", + "description_kind": "plain", + "optional": true + }, + "use_timeout_based_endpointing": { + "type": "bool", + "description": "Use timeout based endpointing, interpreting endpointer sensitivity as seconds of timeout value.", + "description_kind": "plain", + "optional": true + } + }, + "description": "Settings for speech to text detection. Exposed at the following levels:\n* Agent level\n* Flow level\n* Page level\n* Parameter level", + "description_kind": "plain" + }, + "max_items": 1 } }, "description": "Hierarchical advanced settings for this agent. The settings exposed at the lower level overrides the settings exposed at the higher level.\nHierarchy: Agent->Flow->Page->Fulfillment/Parameter.", @@ -89046,31 +94793,96 @@ "description_kind": "plain" }, "max_items": 1 - } - }, - "description": "Hierarchical advanced settings for this flow. 
The settings exposed at the lower level overrides the settings exposed at the higher level.\nHierarchy: Agent->Flow->Page->Fulfillment/Parameter.", - "description_kind": "plain" - }, - "max_items": 1 - }, - "event_handlers": { - "nesting_mode": "list", - "block": { - "attributes": { - "event": { - "type": "string", - "description": "The name of the event to handle.", - "description_kind": "plain", - "optional": true }, - "name": { - "type": "string", - "description": "The unique identifier of this event handler.", - "description_kind": "plain", - "computed": true - }, - "target_flow": { - "type": "string", + "logging_settings": { + "nesting_mode": "list", + "block": { + "attributes": { + "enable_consent_based_redaction": { + "type": "bool", + "description": "Enables consent-based end-user input redaction, if true, a pre-defined session parameter **$session.params.conversation-redaction** will be used to determine if the utterance should be redacted.", + "description_kind": "plain", + "optional": true + }, + "enable_interaction_logging": { + "type": "bool", + "description": "Enables DF Interaction logging.", + "description_kind": "plain", + "optional": true + }, + "enable_stackdriver_logging": { + "type": "bool", + "description": "Enables Google Cloud Logging.", + "description_kind": "plain", + "optional": true + } + }, + "description": "Settings for logging. Settings for Dialogflow History, Contact Center messages, StackDriver logs, and speech logging. Exposed at the following levels:\n* Agent level", + "description_kind": "plain" + }, + "max_items": 1 + }, + "speech_settings": { + "nesting_mode": "list", + "block": { + "attributes": { + "endpointer_sensitivity": { + "type": "number", + "description": "Sensitivity of the speech model that detects the end of speech. Scale from 0 to 100.", + "description_kind": "plain", + "optional": true + }, + "models": { + "type": [ + "map", + "string" + ], + "description": "Mapping from language to Speech-to-Text model. 
The mapped Speech-to-Text model will be selected for requests from its corresponding language. For more information, see [Speech models](https://cloud.google.com/dialogflow/cx/docs/concept/speech-models).\nAn object containing a list of **\"key\": value** pairs. Example: **{ \"name\": \"wrench\", \"mass\": \"1.3kg\", \"count\": \"3\" }**.", + "description_kind": "plain", + "optional": true + }, + "no_speech_timeout": { + "type": "string", + "description": "Timeout before detecting no speech.\nA duration in seconds with up to nine fractional digits, ending with 's'. Example: \"3.5s\".", + "description_kind": "plain", + "optional": true + }, + "use_timeout_based_endpointing": { + "type": "bool", + "description": "Use timeout based endpointing, interpreting endpointer sensitivity as seconds of timeout value.", + "description_kind": "plain", + "optional": true + } + }, + "description": "Settings for speech to text detection. Exposed at the following levels:\n* Agent level\n* Flow level\n* Page level\n* Parameter level", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "Hierarchical advanced settings for this flow. 
The settings exposed at the lower level overrides the settings exposed at the higher level.\nHierarchy: Agent->Flow->Page->Fulfillment/Parameter.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "event_handlers": { + "nesting_mode": "list", + "block": { + "attributes": { + "event": { + "type": "string", + "description": "The name of the event to handle.", + "description_kind": "plain", + "optional": true + }, + "name": { + "type": "string", + "description": "The unique identifier of this event handler.", + "description_kind": "plain", + "computed": true + }, + "target_flow": { + "type": "string", "description": "The target flow to transition to.\nFormat: projects//locations//agents//flows/.", "description_kind": "plain", "optional": true @@ -92510,6 +98322,14 @@ "chat_engine_config": { "nesting_mode": "list", "block": { + "attributes": { + "dialogflow_agent_to_link": { + "type": "string", + "description": "The resource name of an existing Dialogflow agent to link to this Chat Engine. Format: 'projects//locations//agents/'.\nExactly one of 'agent_creation_config' or 'dialogflow_agent_to_link' must be set.", + "description_kind": "plain", + "optional": true + } + }, "block_types": { "agent_creation_config": { "nesting_mode": "list", @@ -92540,10 +98360,9 @@ "required": true } }, - "description": "The configuration to generate the Dialogflow agent that is associated to this Engine.", + "description": "The configuration to generate the Dialogflow agent that is associated to this Engine.\nExactly one of 'agent_creation_config' or 'dialogflow_agent_to_link' must be set.", "description_kind": "plain" }, - "min_items": 1, "max_items": 1 } }, @@ -92644,7 +98463,7 @@ }, "industry_vertical": { "type": "string", - "description": "The industry vertical that the data store registers. Possible values: [\"GENERIC\", \"MEDIA\"]", + "description": "The industry vertical that the data store registers. 
Possible values: [\"GENERIC\", \"MEDIA\", \"HEALTHCARE_FHIR\"]", "description_kind": "plain", "required": true }, @@ -92677,7 +98496,7 @@ "list", "string" ], - "description": "The solutions that the data store enrolls. Possible values: [\"SOLUTION_TYPE_RECOMMENDATION\", \"SOLUTION_TYPE_SEARCH\", \"SOLUTION_TYPE_CHAT\"]", + "description": "The solutions that the data store enrolls. Possible values: [\"SOLUTION_TYPE_RECOMMENDATION\", \"SOLUTION_TYPE_SEARCH\", \"SOLUTION_TYPE_CHAT\", \"SOLUTION_TYPE_GENERATIVE_CHAT\"]", "description_kind": "plain", "optional": true } @@ -92695,6 +98514,38 @@ } }, "block_types": { + "chunking_config": { + "nesting_mode": "list", + "block": { + "block_types": { + "layout_based_chunking_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "chunk_size": { + "type": "number", + "description": "The token size limit for each chunk.\nSupported values: 100-500 (inclusive). Default value: 500.", + "description_kind": "plain", + "optional": true + }, + "include_ancestor_headings": { + "type": "bool", + "description": "Whether to include appending different levels of headings to chunks from the middle of the document to prevent context loss.\nDefault value: False.", + "description_kind": "plain", + "optional": true + } + }, + "description": "Configuration for the layout based chunking.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "Whether chunking mode is enabled.", + "description_kind": "plain" + }, + "max_items": 1 + }, "default_parsing_config": { "nesting_mode": "list", "block": { @@ -92707,6 +98558,14 @@ }, "max_items": 1 }, + "layout_parsing_config": { + "nesting_mode": "list", + "block": { + "description": "Configurations applied to layout parser.", + "description_kind": "plain" + }, + "max_items": 1 + }, "ocr_parsing_config": { "nesting_mode": "list", "block": { @@ -92748,6 +98607,14 @@ }, "max_items": 1 }, + "layout_parsing_config": { + "nesting_mode": "list", + "block": { + 
"description": "Configurations applied to layout parser.", + "description_kind": "plain" + }, + "max_items": 1 + }, "ocr_parsing_config": { "nesting_mode": "list", "block": { @@ -92917,7 +98784,7 @@ }, "industry_vertical": { "type": "string", - "description": "The industry vertical that the engine registers. The restriction of the Engine industry vertical is based on DataStore: If unspecified, default to GENERIC. Vertical on Engine has to match vertical of the DataStore liniked to the engine. Default value: \"GENERIC\" Possible values: [\"GENERIC\", \"MEDIA\"]", + "description": "The industry vertical that the engine registers. The restriction of the Engine industry vertical is based on DataStore: If unspecified, default to GENERIC. Vertical on Engine has to match vertical of the DataStore liniked to the engine. Default value: \"GENERIC\" Possible values: [\"GENERIC\", \"MEDIA\", \"HEALTHCARE_FHIR\"]", "description_kind": "plain", "optional": true }, @@ -93016,6 +98883,149 @@ "description_kind": "plain" } }, + "google_discovery_engine_target_site": { + "version": 0, + "block": { + "attributes": { + "data_store_id": { + "type": "string", + "description": "The unique id of the data store.", + "description_kind": "plain", + "required": true + }, + "exact_match": { + "type": "bool", + "description": "If set to false, a uri_pattern is generated to include all pages whose\naddress contains the provided_uri_pattern. 
If set to true, an uri_pattern\nis generated to try to be an exact match of the provided_uri_pattern or\njust the specific page if the provided_uri_pattern is a specific one.\nprovided_uri_pattern is always normalized to generate the URI pattern to\nbe used by the search engine.", + "description_kind": "plain", + "optional": true + }, + "failure_reason": { + "type": [ + "list", + [ + "object", + { + "quota_failure": [ + "list", + [ + "object", + { + "total_required_quota": "number" + } + ] + ] + } + ] + ], + "description": "Site search indexing failure reasons.", + "description_kind": "plain", + "computed": true + }, + "generated_uri_pattern": { + "type": "string", + "description": "This is system-generated based on the 'provided_uri_pattern'.", + "description_kind": "plain", + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "indexing_status": { + "type": "string", + "description": "The indexing status.", + "description_kind": "plain", + "computed": true + }, + "location": { + "type": "string", + "description": "The geographic location where the data store should reside. The value can\nonly be one of \"global\", \"us\" and \"eu\".", + "description_kind": "plain", + "required": true + }, + "name": { + "type": "string", + "description": "The unique full resource name of the target site. 
Values are of the format\n'projects/{project}/locations/{location}/collections/{collection_id}/dataStores/{data_store_id}/siteSearchEngine/targetSites/{target_site_id}'.\nThis field must be a UTF-8 encoded string with a length limit of 1024\ncharacters.", + "description_kind": "plain", + "computed": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "provided_uri_pattern": { + "type": "string", + "description": "The user provided URI pattern from which the 'generated_uri_pattern' is\ngenerated.", + "description_kind": "plain", + "required": true + }, + "root_domain_uri": { + "type": "string", + "description": "Root domain of the 'provided_uri_pattern'.", + "description_kind": "plain", + "computed": true + }, + "site_verification_info": { + "type": [ + "list", + [ + "object", + { + "site_verification_state": "string", + "verify_time": "string" + } + ] + ], + "description": "Site ownership and validity verification status.", + "description_kind": "plain", + "computed": true + }, + "target_site_id": { + "type": "string", + "description": "The unique id of the target site.", + "description_kind": "plain", + "computed": true + }, + "type": { + "type": "string", + "description": "The possible target site types. 
Possible values: [\"INCLUDE\", \"EXCLUDE\"]", + "description_kind": "plain", + "optional": true + }, + "update_time": { + "type": "string", + "description": "The target site's last updated time.", + "description_kind": "plain", + "computed": true + } + }, + "block_types": { + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, "google_dns_managed_zone": { "version": 0, "block": { @@ -93665,6 +99675,12 @@ "description": "Specifies whether to enable fencing for geo queries.", "description_kind": "plain", "optional": true + }, + "health_check": { + "type": "string", + "description": "Specifies the health check.", + "description_kind": "plain", + "optional": true } }, "block_types": { @@ -93691,6 +99707,17 @@ "health_checked_targets": { "nesting_mode": "list", "block": { + "attributes": { + "external_endpoints": { + "type": [ + "list", + "string" + ], + "description": "The Internet IP addresses to be health checked.", + "description_kind": "plain", + "optional": true + } + }, "block_types": { "internal_load_balancers": { "nesting_mode": "list", @@ -93741,8 +99768,7 @@ }, "description": "The list of internal load balancers to health check.", "description_kind": "plain" - }, - "min_items": 1 + } } }, "description": "For A and AAAA types only. The list of targets to be health checked. 
These can be specified along with `rrdatas` within this item.", @@ -93796,6 +99822,17 @@ "health_checked_targets": { "nesting_mode": "list", "block": { + "attributes": { + "external_endpoints": { + "type": [ + "list", + "string" + ], + "description": "The Internet IP addresses to be health checked.", + "description_kind": "plain", + "optional": true + } + }, "block_types": { "internal_load_balancers": { "nesting_mode": "list", @@ -93846,8 +99883,7 @@ }, "description": "The list of internal load balancers to health check.", "description_kind": "plain" - }, - "min_items": 1 + } } }, "description": "For A and AAAA types only. The list of targets to be health checked. These can be specified along with `rrdatas` within this item.", @@ -93864,6 +99900,17 @@ "primary": { "nesting_mode": "list", "block": { + "attributes": { + "external_endpoints": { + "type": [ + "list", + "string" + ], + "description": "The Internet IP addresses to be health checked.", + "description_kind": "plain", + "optional": true + } + }, "block_types": { "internal_load_balancers": { "nesting_mode": "list", @@ -93914,8 +99961,7 @@ }, "description": "The list of internal load balancers to health check.", "description_kind": "plain" - }, - "min_items": 1 + } } }, "description": "The list of global primary targets to be health checked.", @@ -93953,6 +99999,17 @@ "health_checked_targets": { "nesting_mode": "list", "block": { + "attributes": { + "external_endpoints": { + "type": [ + "list", + "string" + ], + "description": "The Internet IP addresses to be health checked.", + "description_kind": "plain", + "optional": true + } + }, "block_types": { "internal_load_balancers": { "nesting_mode": "list", @@ -94003,8 +100060,7 @@ }, "description": "The list of internal load balancers to health check.", "description_kind": "plain" - }, - "min_items": 1 + } } }, "description": "The list of targets to be health checked. 
Note that if DNSSEC is enabled for this zone, only one of `rrdatas` or `health_checked_targets` can be set.", @@ -95552,7 +101608,7 @@ } ] ], - "description": "A nested object resource", + "description": "A nested object resource.", "description_kind": "plain", "computed": true }, @@ -95699,6 +101755,15 @@ "description_kind": "plain", "optional": true }, + "effective_labels": { + "type": [ + "map", + "string" + ], + "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, "id": { "type": "string", "description_kind": "plain", @@ -95710,7 +101775,7 @@ "map", "string" ], - "description": "Labels associated with this resource.", + "description": "Labels associated with this resource.\n\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", "description_kind": "plain", "optional": true }, @@ -95744,6 +101809,15 @@ "optional": true, "computed": true }, + "terraform_labels": { + "type": [ + "map", + "string" + ], + "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", + "description_kind": "plain", + "computed": true + }, "update_time": { "type": "string", "description": "The time when the subnet was last updated.\nA timestamp in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine\nfractional digits. 
Examples: '2014-10-02T15:01:23Z' and '2014-10-02T15:01:23.045123456Z'.", @@ -95771,6 +101845,11 @@ "type": "string", "description_kind": "plain", "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true } }, "description_kind": "plain" @@ -95796,6 +101875,15 @@ "description_kind": "plain", "optional": true }, + "effective_labels": { + "type": [ + "map", + "string" + ], + "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, "id": { "type": "string", "description_kind": "plain", @@ -95825,7 +101913,7 @@ "map", "string" ], - "description": "Labels associated with this resource.", + "description": "Labels associated with this resource.\n\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", "description_kind": "plain", "optional": true }, @@ -95865,6 +101953,15 @@ "description_kind": "plain", "required": true }, + "terraform_labels": { + "type": [ + "map", + "string" + ], + "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", + "description_kind": "plain", + "computed": true + }, "update_time": { "type": "string", "description": "The time when the subnet was last updated.\nA timestamp in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine\nfractional digits. 
Examples: '2014-10-02T15:01:23Z' and '2014-10-02T15:01:23.045123456Z'.", @@ -95899,6 +101996,11 @@ "type": "string", "description_kind": "plain", "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true } }, "description_kind": "plain" @@ -97029,115 +103131,15 @@ "description_kind": "plain", "computed": true }, - "terraform_labels": { - "type": [ - "map", - "string" - ], - "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", - "description_kind": "plain", - "computed": true - } - }, - "block_types": { - "timeouts": { - "nesting_mode": "single", - "block": { - "attributes": { - "create": { - "type": "string", - "description_kind": "plain", - "optional": true - }, - "delete": { - "type": "string", - "description_kind": "plain", - "optional": true - }, - "update": { - "type": "string", - "description_kind": "plain", - "optional": true - } - }, - "description_kind": "plain" - } - } - }, - "description_kind": "plain" - } - }, - "google_filestore_instance": { - "version": 1, - "block": { - "attributes": { - "create_time": { - "type": "string", - "description": "Creation timestamp in RFC3339 text format.", - "description_kind": "plain", - "computed": true - }, - "description": { - "type": "string", - "description": "A description of the instance.", - "description_kind": "plain", - "optional": true - }, - "effective_labels": { + "tags": { "type": [ "map", "string" ], - "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", - "description_kind": "plain", - "computed": true - }, - "etag": { - "type": "string", - "description": "Server-specified ETag for the instance resource to prevent\nsimultaneous updates from overwriting each other.", - "description_kind": "plain", - "computed": true - }, - "id": { - "type": "string", - "description_kind": 
"plain", - "optional": true, - "computed": true - }, - "kms_key_name": { - "type": "string", - "description": "KMS key name used for data encryption.", + "description": "A map of resource manager tags.\nResource manager tag keys and values have the same definition as resource manager tags.\nKeys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/{tag_value_id}.\nThe field is ignored (both PUT & PATCH) when empty.", "description_kind": "plain", "optional": true }, - "labels": { - "type": [ - "map", - "string" - ], - "description": "Resource labels to represent user-provided metadata.\n\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", - "description_kind": "plain", - "optional": true - }, - "location": { - "type": "string", - "description": "The name of the location of the instance. This can be a region for ENTERPRISE tier instances.", - "description_kind": "plain", - "optional": true, - "computed": true - }, - "name": { - "type": "string", - "description": "The resource name of the instance.", - "description_kind": "plain", - "required": true - }, - "project": { - "type": "string", - "description_kind": "plain", - "optional": true, - "computed": true - }, "terraform_labels": { "type": [ "map", @@ -97146,144 +103148,313 @@ "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", "description_kind": "plain", "computed": true - }, - "tier": { - "type": "string", - "description": "The service tier of the instance.\nPossible values include: STANDARD, PREMIUM, BASIC_HDD, BASIC_SSD, HIGH_SCALE_SSD, ZONAL, REGIONAL and ENTERPRISE", - "description_kind": "plain", - "required": true - }, - "zone": { - "type": "string", - "description": "The name of the Filestore zone of the instance.", - "description_kind": "plain", - 
"deprecated": true, - "optional": true, - "computed": true } }, "block_types": { - "file_shares": { - "nesting_mode": "list", - "block": { - "attributes": { - "capacity_gb": { - "type": "number", - "description": "File share capacity in GiB. This must be at least 1024 GiB\nfor the standard tier, or 2560 GiB for the premium tier.", - "description_kind": "plain", - "required": true - }, - "name": { - "type": "string", - "description": "The name of the fileshare (16 characters or less)", - "description_kind": "plain", - "required": true - }, - "source_backup": { - "type": "string", - "description": "The resource name of the backup, in the format\nprojects/{projectId}/locations/{locationId}/backups/{backupId},\nthat this file share has been restored from.", - "description_kind": "plain", - "optional": true - } - }, - "block_types": { - "nfs_export_options": { - "nesting_mode": "list", - "block": { - "attributes": { - "access_mode": { - "type": "string", - "description": "Either READ_ONLY, for allowing only read requests on the exported directory,\nor READ_WRITE, for allowing both read and write requests. The default is READ_WRITE. Default value: \"READ_WRITE\" Possible values: [\"READ_ONLY\", \"READ_WRITE\"]", - "description_kind": "plain", - "optional": true - }, - "anon_gid": { - "type": "number", - "description": "An integer representing the anonymous group id with a default value of 65534.\nAnon_gid may only be set with squashMode of ROOT_SQUASH. An error will be returned\nif this field is specified for other squashMode settings.", - "description_kind": "plain", - "optional": true - }, - "anon_uid": { - "type": "number", - "description": "An integer representing the anonymous user id with a default value of 65534.\nAnon_uid may only be set with squashMode of ROOT_SQUASH. 
An error will be returned\nif this field is specified for other squashMode settings.", - "description_kind": "plain", - "optional": true - }, - "ip_ranges": { - "type": [ - "list", - "string" - ], - "description": "List of either IPv4 addresses, or ranges in CIDR notation which may mount the file share.\nOverlapping IP ranges are not allowed, both within and across NfsExportOptions. An error will be returned.\nThe limit is 64 IP ranges/addresses for each FileShareConfig among all NfsExportOptions.", - "description_kind": "plain", - "optional": true - }, - "squash_mode": { - "type": "string", - "description": "Either NO_ROOT_SQUASH, for allowing root access on the exported directory, or ROOT_SQUASH,\nfor not allowing root access. The default is NO_ROOT_SQUASH. Default value: \"NO_ROOT_SQUASH\" Possible values: [\"NO_ROOT_SQUASH\", \"ROOT_SQUASH\"]", - "description_kind": "plain", - "optional": true - } - }, - "description": "Nfs Export Options. There is a limit of 10 export options per file share.", - "description_kind": "plain" - }, - "max_items": 10 - } - }, - "description": "File system shares on the instance. For this version, only a\nsingle file share is supported.", - "description_kind": "plain" - }, - "min_items": 1, - "max_items": 1 - }, - "networks": { - "nesting_mode": "list", - "block": { - "attributes": { - "connect_mode": { - "type": "string", - "description": "The network connect mode of the Filestore instance.\nIf not provided, the connect mode defaults to\nDIRECT_PEERING. Default value: \"DIRECT_PEERING\" Possible values: [\"DIRECT_PEERING\", \"PRIVATE_SERVICE_ACCESS\"]", - "description_kind": "plain", - "optional": true - }, - "ip_addresses": { - "type": [ - "list", - "string" - ], - "description": "A list of IPv4 or IPv6 addresses.", - "description_kind": "plain", - "computed": true - }, - "modes": { - "type": [ - "list", - "string" - ], - "description": "IP versions for which the instance has\nIP addresses assigned. 
Possible values: [\"ADDRESS_MODE_UNSPECIFIED\", \"MODE_IPV4\", \"MODE_IPV6\"]", - "description_kind": "plain", - "required": true - }, - "network": { - "type": "string", - "description": "The name of the GCE VPC network to which the\ninstance is connected.", - "description_kind": "plain", - "required": true - }, - "reserved_ip_range": { - "type": "string", - "description": "A /29 CIDR block that identifies the range of IP\naddresses reserved for this instance.", - "description_kind": "plain", - "optional": true, - "computed": true - } - }, - "description": "VPC networks to which the instance is connected. For this version,\nonly a single network is supported.", - "description_kind": "plain" - }, - "min_items": 1 - }, + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, + "google_filestore_instance": { + "version": 1, + "block": { + "attributes": { + "create_time": { + "type": "string", + "description": "Creation timestamp in RFC3339 text format.", + "description_kind": "plain", + "computed": true + }, + "deletion_protection_enabled": { + "type": "bool", + "description": "Indicates whether the instance is protected against deletion.", + "description_kind": "plain", + "optional": true + }, + "deletion_protection_reason": { + "type": "string", + "description": "The reason for enabling deletion protection.", + "description_kind": "plain", + "optional": true + }, + "description": { + "type": "string", + "description": "A description of the instance.", + "description_kind": "plain", + "optional": true + }, + "effective_labels": { + "type": [ + "map", + "string" + ], + "description": "All of labels 
(key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, + "etag": { + "type": "string", + "description": "Server-specified ETag for the instance resource to prevent\nsimultaneous updates from overwriting each other.", + "description_kind": "plain", + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "kms_key_name": { + "type": "string", + "description": "KMS key name used for data encryption.", + "description_kind": "plain", + "optional": true + }, + "labels": { + "type": [ + "map", + "string" + ], + "description": "Resource labels to represent user-provided metadata.\n\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", + "description_kind": "plain", + "optional": true + }, + "location": { + "type": "string", + "description": "The name of the location of the instance. This can be a region for ENTERPRISE tier instances.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "name": { + "type": "string", + "description": "The resource name of the instance.", + "description_kind": "plain", + "required": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "protocol": { + "type": "string", + "description": "Either NFSv3, for using NFS version 3 as file sharing protocol,\nor NFSv4.1, for using NFS version 4.1 as file sharing protocol.\nNFSv4.1 can be used with HIGH_SCALE_SSD, ZONAL, REGIONAL and ENTERPRISE.\nThe default is NFSv3. 
Default value: \"NFS_V3\" Possible values: [\"NFS_V3\", \"NFS_V4_1\"]", + "description_kind": "plain", + "optional": true + }, + "terraform_labels": { + "type": [ + "map", + "string" + ], + "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", + "description_kind": "plain", + "computed": true + }, + "tier": { + "type": "string", + "description": "The service tier of the instance.\nPossible values include: STANDARD, PREMIUM, BASIC_HDD, BASIC_SSD, HIGH_SCALE_SSD, ZONAL, REGIONAL and ENTERPRISE", + "description_kind": "plain", + "required": true + }, + "zone": { + "type": "string", + "description": "The name of the Filestore zone of the instance.", + "description_kind": "plain", + "deprecated": true, + "optional": true, + "computed": true + } + }, + "block_types": { + "file_shares": { + "nesting_mode": "list", + "block": { + "attributes": { + "capacity_gb": { + "type": "number", + "description": "File share capacity in GiB. This must be at least 1024 GiB\nfor the standard tier, or 2560 GiB for the premium tier.", + "description_kind": "plain", + "required": true + }, + "name": { + "type": "string", + "description": "The name of the fileshare (16 characters or less)", + "description_kind": "plain", + "required": true + }, + "source_backup": { + "type": "string", + "description": "The resource name of the backup, in the format\nprojects/{projectId}/locations/{locationId}/backups/{backupId},\nthat this file share has been restored from.", + "description_kind": "plain", + "optional": true + } + }, + "block_types": { + "nfs_export_options": { + "nesting_mode": "list", + "block": { + "attributes": { + "access_mode": { + "type": "string", + "description": "Either READ_ONLY, for allowing only read requests on the exported directory,\nor READ_WRITE, for allowing both read and write requests. The default is READ_WRITE. 
Default value: \"READ_WRITE\" Possible values: [\"READ_ONLY\", \"READ_WRITE\"]", + "description_kind": "plain", + "optional": true + }, + "anon_gid": { + "type": "number", + "description": "An integer representing the anonymous group id with a default value of 65534.\nAnon_gid may only be set with squashMode of ROOT_SQUASH. An error will be returned\nif this field is specified for other squashMode settings.", + "description_kind": "plain", + "optional": true + }, + "anon_uid": { + "type": "number", + "description": "An integer representing the anonymous user id with a default value of 65534.\nAnon_uid may only be set with squashMode of ROOT_SQUASH. An error will be returned\nif this field is specified for other squashMode settings.", + "description_kind": "plain", + "optional": true + }, + "ip_ranges": { + "type": [ + "list", + "string" + ], + "description": "List of either IPv4 addresses, or ranges in CIDR notation which may mount the file share.\nOverlapping IP ranges are not allowed, both within and across NfsExportOptions. An error will be returned.\nThe limit is 64 IP ranges/addresses for each FileShareConfig among all NfsExportOptions.", + "description_kind": "plain", + "optional": true + }, + "squash_mode": { + "type": "string", + "description": "Either NO_ROOT_SQUASH, for allowing root access on the exported directory, or ROOT_SQUASH,\nfor not allowing root access. The default is NO_ROOT_SQUASH. Default value: \"NO_ROOT_SQUASH\" Possible values: [\"NO_ROOT_SQUASH\", \"ROOT_SQUASH\"]", + "description_kind": "plain", + "optional": true + } + }, + "description": "Nfs Export Options. There is a limit of 10 export options per file share.", + "description_kind": "plain" + }, + "max_items": 10 + } + }, + "description": "File system shares on the instance. 
For this version, only a\nsingle file share is supported.", + "description_kind": "plain" + }, + "min_items": 1, + "max_items": 1 + }, + "networks": { + "nesting_mode": "list", + "block": { + "attributes": { + "connect_mode": { + "type": "string", + "description": "The network connect mode of the Filestore instance.\nIf not provided, the connect mode defaults to\nDIRECT_PEERING. Default value: \"DIRECT_PEERING\" Possible values: [\"DIRECT_PEERING\", \"PRIVATE_SERVICE_ACCESS\"]", + "description_kind": "plain", + "optional": true + }, + "ip_addresses": { + "type": [ + "list", + "string" + ], + "description": "A list of IPv4 or IPv6 addresses.", + "description_kind": "plain", + "computed": true + }, + "modes": { + "type": [ + "list", + "string" + ], + "description": "IP versions for which the instance has\nIP addresses assigned. Possible values: [\"ADDRESS_MODE_UNSPECIFIED\", \"MODE_IPV4\", \"MODE_IPV6\"]", + "description_kind": "plain", + "required": true + }, + "network": { + "type": "string", + "description": "The name of the GCE VPC network to which the\ninstance is connected.", + "description_kind": "plain", + "required": true + }, + "reserved_ip_range": { + "type": "string", + "description": "A /29 CIDR block that identifies the range of IP\naddresses reserved for this instance.", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "description": "VPC networks to which the instance is connected. 
For this version,\nonly a single network is supported.", + "description_kind": "plain" + }, + "min_items": 1 + }, + "performance_config": { + "nesting_mode": "list", + "block": { + "block_types": { + "fixed_iops": { + "nesting_mode": "list", + "block": { + "attributes": { + "max_iops": { + "type": "number", + "description": "The number of IOPS to provision for the instance.\nmax_iops must be in multiple of 1000.", + "description_kind": "plain", + "optional": true + } + }, + "description": "The instance will have a fixed provisioned IOPS value,\nwhich will remain constant regardless of instance\ncapacity.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "iops_per_tb": { + "nesting_mode": "list", + "block": { + "attributes": { + "max_iops_per_tb": { + "type": "number", + "description": "The instance max IOPS will be calculated by multiplying\nthe capacity of the instance (TB) by max_iops_per_tb,\nand rounding to the nearest 1000. The instance max IOPS\nwill be changed dynamically based on the instance\ncapacity.", + "description_kind": "plain", + "optional": true + } + }, + "description": "The instance provisioned IOPS will change dynamically\nbased on the capacity of the instance.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "Performance configuration for the instance. If not provided,\nthe default performance settings will be used.", + "description_kind": "plain" + }, + "max_items": 1 + }, "timeouts": { "nesting_mode": "single", "block": { @@ -98317,7 +104488,7 @@ }, "kms_key_name": { "type": "string", - "description": "The resource ID of a Cloud KMS key. If set, the database created will\nbe a Customer-managed Encryption Key (CMEK) database encrypted with\nthis key. This feature is allowlist only in initial launch.\n\nOnly keys in the same location as this database are allowed to be used\nfor encryption. For Firestore's nam5 multi-region, this corresponds to Cloud KMS\nmulti-region us. 
For Firestore's eur3 multi-region, this corresponds to\nCloud KMS multi-region europe. See https://cloud.google.com/kms/docs/locations.\n\nThis value should be the KMS key resource ID in the format of\n'projects/{project_id}/locations/{kms_location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}'.\nHow to retrive this resource ID is listed at\nhttps://cloud.google.com/kms/docs/getting-resource-ids#getting_the_id_for_a_key_and_version.", + "description": "The resource ID of a Cloud KMS key. If set, the database created will\nbe a Customer-managed Encryption Key (CMEK) database encrypted with\nthis key. This feature is allowlist only in initial launch.\n\nOnly keys in the same location as this database are allowed to be used\nfor encryption. For Firestore's nam5 multi-region, this corresponds to Cloud KMS\nmulti-region us. For Firestore's eur3 multi-region, this corresponds to\nCloud KMS multi-region europe. See https://cloud.google.com/kms/docs/locations.\n\nThis value should be the KMS key resource ID in the format of\n'projects/{project_id}/locations/{kms_location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}'.\nHow to retrieve this resource ID is listed at\nhttps://cloud.google.com/kms/docs/getting-resource-ids#getting_the_id_for_a_key_and_version.", "description_kind": "plain", "required": true } @@ -98671,7 +104842,7 @@ "description": "The fields supported by this index. The last non-stored field entry is\nalways for the field path '__name__'. If, on creation, '__name__' was not\nspecified as the last field, it will be added automatically with the same\ndirection as that of the last field defined. 
If the final field in a\ncomposite index is not directional, the '__name__' will be ordered\n'\"ASCENDING\"' (unless explicitly specified otherwise).", "description_kind": "plain" }, - "min_items": 2 + "min_items": 1 }, "timeouts": { "nesting_mode": "single", @@ -98705,6 +104876,12 @@ "description_kind": "plain", "computed": true }, + "deletion_protection": { + "type": "bool", + "description": "When the field is set to true or unset in Terraform state, a terraform apply or terraform destroy that would delete the instance will fail. When the field is set to false, deleting the instance is allowed.", + "description_kind": "plain", + "optional": true + }, "display_name": { "type": "string", "description": "The folder's display name. A folder's display name must be unique amongst its siblings, e.g. no two folders with the same parent can share the same display name. The display name must start and end with a letter or digit, may contain letters, digits, spaces, hyphens and underscores and can be no longer than 30 characters.", @@ -98740,6 +104917,15 @@ "description": "The resource name of the parent Folder or Organization. Must be of the form folders/{folder_id} or organizations/{org_id}.", "description_kind": "plain", "required": true + }, + "tags": { + "type": [ + "map", + "string" + ], + "description": "A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored when empty. This field is only set at create time and modifying this field after creation will trigger recreation. 
To apply tags to an existing resource, see the google_tags_tag_value resource.", + "description_kind": "plain", + "optional": true } }, "block_types": { @@ -100682,7 +106868,7 @@ }, "version": { "type": "string", - "description": "Version of ACM installed", + "description": "Version of Config Sync installed", "description_kind": "plain", "optional": true } @@ -100806,7 +106992,7 @@ }, "version": { "type": "string", - "description": "Version of ACM installed", + "description": "Version of Config Sync installed", "description_kind": "plain", "deprecated": true, "optional": true @@ -101079,7 +107265,7 @@ "attributes": { "installation": { "type": "string", - "description": "Configures the manner in which the template library is installed on the cluster. Possible values: [\"INSTALATION_UNSPECIFIED\", \"NOT_INSTALLED\", \"ALL\"]", + "description": "Configures the manner in which the template library is installed on the cluster. Possible values: [\"INSTALLATION_UNSPECIFIED\", \"NOT_INSTALLED\", \"ALL\"]", "description_kind": "plain", "optional": true } @@ -101584,7 +107770,7 @@ }, "metrics_gcp_service_account_email": { "type": "string", - "description": "The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring. The GSA should have the Monitoring Metric Writer(roles/monitoring.metricWriter) IAM role. 
The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA.", + "description": "Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring.", "description_kind": "plain", "optional": true }, @@ -101600,6 +107786,12 @@ "description": "Specifies whether the Config Sync Repo is in \"hierarchical\" or \"unstructured\" mode.", "description_kind": "plain", "optional": true + }, + "stop_syncing": { + "type": "bool", + "description": "Set to true to stop syncing configs for a single cluster. Default: false.", + "description_kind": "plain", + "optional": true } }, "block_types": { @@ -101723,7 +107915,7 @@ }, "enabled": { "type": "bool", - "description": "Whether Hierarchy Controller is enabled in this cluster.", + "description": "**DEPRECATED** Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead.", "description_kind": "plain", "optional": true } @@ -102352,7 +108544,7 @@ "attributes": { "issuer": { "type": "string", - "description": "A JSON Web Token (JWT) issuer URI. 'issuer' must start with 'https://' and // be a valid\nwith length <2000 characters. For example: 'https://container.googleapis.com/v1/projects/my-project/locations/us-west1/clusters/my-cluster' (must be 'locations' rather than 'zones'). If the cluster is provisioned with Terraform, this is '\"https://container.googleapis.com/v1/${google_container_cluster.my-cluster.id}\"'.", + "description": "A JSON Web Token (JWT) issuer URI. 'issuer' must start with 'https://' and // be a valid\nwith length <2000 characters. For example: 'https://container.googleapis.com/v1/projects/my-project/locations/us-west1/clusters/my-cluster'. 
If the cluster is provisioned with Terraform, this is '\"https://container.googleapis.com/v1/${google_container_cluster.my-cluster.id}\"'.", "description_kind": "plain", "required": true } @@ -102372,7 +108564,7 @@ "attributes": { "resource_link": { "type": "string", - "description": "Self-link of the GCP resource for the GKE cluster.\nFor example: '//container.googleapis.com/projects/my-project/zones/us-west1-a/clusters/my-cluster'.\nIt can be at the most 1000 characters in length. If the cluster is provisioned with Terraform,\nthis can be '\"//container.googleapis.com/${google_container_cluster.my-cluster.id}\"' or\n'google_container_cluster.my-cluster.id'.", + "description": "Self-link of the GCP resource for the GKE cluster.\nFor example: '//container.googleapis.com/projects/my-project/locations/us-west1-a/clusters/my-cluster'.\nIt can be at the most 1000 characters in length. If the cluster is provisioned with Terraform,\nthis can be '\"//container.googleapis.com/${google_container_cluster.my-cluster.id}\"' or\n'google_container_cluster.my-cluster.id'.", "description_kind": "plain", "required": true } @@ -103687,7 +109879,7 @@ "required": true } }, - "description": "A nested object resource", + "description": "A nested object resource.", "description_kind": "plain" }, "max_items": 1 @@ -103778,7 +109970,7 @@ "required": true } }, - "description": "A nested object resource", + "description": "A nested object resource.", "description_kind": "plain" }, "max_items": 1 @@ -104553,7 +110745,7 @@ "required": true } }, - "description": "A nested object resource", + "description": "A nested object resource.", "description_kind": "plain" }, "max_items": 1 @@ -104690,7 +110882,7 @@ "max_items": 1 } }, - "description": "A nested object resource", + "description": "A nested object resource.", "description_kind": "plain" }, "max_items": 1 @@ -104796,7 +110988,7 @@ "required": true } }, - "description": "A nested object resource", + "description": "A nested object 
resource.", "description_kind": "plain" }, "max_items": 1 @@ -106098,43 +112290,50 @@ "type": "string", "description": "Contains the vCenter CA certificate public key for SSL verification.", "description_kind": "plain", - "optional": true + "optional": true, + "computed": true }, "cluster": { "type": "string", "description": "The name of the vCenter cluster for the user cluster.", "description_kind": "plain", - "optional": true + "optional": true, + "computed": true }, "datacenter": { "type": "string", "description": "The name of the vCenter datacenter for the user cluster.", "description_kind": "plain", - "optional": true + "optional": true, + "computed": true }, "datastore": { "type": "string", "description": "The name of the vCenter datastore for the user cluster.", "description_kind": "plain", - "optional": true + "optional": true, + "computed": true }, "folder": { "type": "string", "description": "The name of the vCenter folder for the user cluster.", "description_kind": "plain", - "optional": true + "optional": true, + "computed": true }, "resource_pool": { "type": "string", "description": "The name of the vCenter resource pool for the user cluster.", "description_kind": "plain", - "optional": true + "optional": true, + "computed": true }, "storage_policy_name": { "type": "string", "description": "The name of the vCenter storage policy for the user cluster.", "description_kind": "plain", - "optional": true + "optional": true, + "computed": true } }, "description": "VmwareVCenterConfig specifies vCenter config for the user cluster.\nInherited from the admin cluster.", @@ -106311,7 +112510,7 @@ }, "image_type": { "type": "string", - "description": "The OS image to be used for each node in a node pool.\nCurrently 'cos', 'ubuntu', 'ubuntu_containerd' and 'windows' are supported.", + "description": "The OS image to be used for each node in a node pool.\nCurrently 'cos', 'cos_cgv2', 'ubuntu', 'ubuntu_cgv2', 'ubuntu_containerd' and 'windows' are supported.", 
"description_kind": "plain", "required": true }, @@ -106781,7 +112980,7 @@ "optional": true } }, - "description": "A nested object resource", + "description": "A nested object resource.", "description_kind": "plain" }, "max_items": 1 @@ -107039,7 +113238,7 @@ "optional": true } }, - "description": "A nested object resource", + "description": "A nested object resource.", "description_kind": "plain" }, "max_items": 1 @@ -107334,7 +113533,7 @@ "required": true } }, - "description": "A nested object resource", + "description": "A nested object resource.", "description_kind": "plain", "deprecated": true }, @@ -107706,7 +113905,7 @@ "required": true } }, - "description": "A nested object resource", + "description": "A nested object resource.", "description_kind": "plain" }, "max_items": 1 @@ -107761,7 +113960,7 @@ "optional": true } }, - "description": "A nested object resource", + "description": "A nested object resource.", "description_kind": "plain" }, "max_items": 1 @@ -107945,19 +114144,28 @@ "description_kind": "plain" } }, - "google_iam_access_boundary_policy": { + "google_healthcare_pipeline_job": { "version": 0, "block": { "attributes": { - "display_name": { + "dataset": { "type": "string", - "description": "The display name of the rule.", + "description": "Healthcare Dataset under which the Pipeline Job is to run", + "description_kind": "plain", + "required": true + }, + "disable_lineage": { + "type": "bool", + "description": "If true, disables writing lineage for the pipeline.", "description_kind": "plain", "optional": true }, - "etag": { - "type": "string", - "description": "The hash of the resource. 
Used internally during updates.", + "effective_labels": { + "type": [ + "map", + "string" + ], + "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", "description_kind": "plain", "computed": true }, @@ -107967,261 +114175,1316 @@ "optional": true, "computed": true }, - "name": { + "labels": { + "type": [ + "map", + "string" + ], + "description": "User-supplied key-value pairs used to organize Pipeline Jobs.\nLabel keys must be between 1 and 63 characters long, have a UTF-8 encoding of\nmaximum 128 bytes, and must conform to the following PCRE regular expression:\n[\\p{Ll}\\p{Lo}][\\p{Ll}\\p{Lo}\\p{N}_-]{0,62}\nLabel values are optional, must be between 1 and 63 characters long, have a\nUTF-8 encoding of maximum 128 bytes, and must conform to the following PCRE\nregular expression: [\\p{Ll}\\p{Lo}\\p{N}_-]{0,63}\nNo more than 64 labels can be associated with a given pipeline.\nAn object containing a list of \"key\": value pairs.\nExample: { \"name\": \"wrench\", \"mass\": \"1.3kg\", \"count\": \"3\" }.\n\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", + "description_kind": "plain", + "optional": true + }, + "location": { "type": "string", - "description": "The name of the policy.", + "description": "Location where the Pipeline Job is to run", "description_kind": "plain", "required": true }, - "parent": { + "name": { "type": "string", - "description": "The attachment point is identified by its URL-encoded full resource name.", + "description": "Specifies the name of the pipeline job. 
This field is user-assigned.", "description_kind": "plain", "required": true + }, + "self_link": { + "type": "string", + "description": "The fully qualified name of this dataset", + "description_kind": "plain", + "computed": true + }, + "terraform_labels": { + "type": [ + "map", + "string" + ], + "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", + "description_kind": "plain", + "computed": true } }, "block_types": { - "rules": { + "backfill_pipeline_job": { "nesting_mode": "list", "block": { "attributes": { - "description": { + "mapping_pipeline_job": { "type": "string", - "description": "The description of the rule.", + "description": "Specifies the mapping pipeline job to backfill, the name format\nshould follow: projects/{projectId}/locations/{locationId}/datasets/{datasetId}/pipelineJobs/{pipelineJobId}.", + "description_kind": "plain", + "optional": true + } + }, + "description": "Specifies the backfill configuration.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "mapping_pipeline_job": { + "nesting_mode": "list", + "block": { + "attributes": { + "fhir_store_destination": { + "type": "string", + "description": "If set, the mapping pipeline will write snapshots to this\nFHIR store without assigning stable IDs. You must\ngrant your pipeline project's Cloud Healthcare Service\nAgent serviceaccount healthcare.fhirResources.executeBundle\nand healthcare.fhirResources.create permissions on the\ndestination store. The destination store must set\n[disableReferentialIntegrity][FhirStore.disable_referential_integrity]\nto true. 
The destination store must use FHIR version R4.\nFormat: project/{projectID}/locations/{locationID}/datasets/{datasetName}/fhirStores/{fhirStoreID}.", + "description_kind": "plain", + "optional": true + }, + "reconciliation_destination": { + "type": "bool", + "description": "If set to true, a mapping pipeline will send output snapshots\nto the reconciliation pipeline in its dataset. A reconciliation\npipeline must exist in this dataset before a mapping pipeline\nwith a reconciliation destination can be created.", "description_kind": "plain", "optional": true } }, "block_types": { - "access_boundary_rule": { + "fhir_streaming_source": { "nesting_mode": "list", "block": { "attributes": { - "available_permissions": { - "type": [ - "list", - "string" - ], - "description": "A list of permissions that may be allowed for use on the specified resource.", + "description": { + "type": "string", + "description": "Describes the streaming FHIR data source.", "description_kind": "plain", "optional": true }, - "available_resource": { + "fhir_store": { "type": "string", - "description": "The full resource name of a Google Cloud resource entity.", + "description": "The path to the FHIR store in the format projects/{projectId}/locations/{locationId}/datasets/{datasetId}/fhirStores/{fhirStoreId}.", + "description_kind": "plain", + "required": true + } + }, + "description": "A streaming FHIR data source.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "mapping_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "description": { + "type": "string", + "description": "Describes the mapping configuration.", "description_kind": "plain", "optional": true } }, "block_types": { - "availability_condition": { + "whistle_config_source": { "nesting_mode": "list", "block": { "attributes": { - "description": { + "import_uri_prefix": { "type": "string", - "description": "Description of the expression. This is a longer text which describes the expression,\ne.g. 
when hovered over it in a UI.", - "description_kind": "plain", - "optional": true - }, - "expression": { - "type": "string", - "description": "Textual representation of an expression in Common Expression Language syntax.", + "description": "Directory path where all the Whistle files are located.\nExample: gs://{bucket-id}/{path/to/import-root/dir}", "description_kind": "plain", "required": true }, - "location": { - "type": "string", - "description": "String indicating the location of the expression for error reporting,\ne.g. a file name and a position in the file.", - "description_kind": "plain", - "optional": true - }, - "title": { + "uri": { "type": "string", - "description": "Title for the expression, i.e. a short string describing its purpose.\nThis can be used e.g. in UIs which allow to enter the expression.", + "description": "Main configuration file which has the entrypoint or the root function.\nExample: gs://{bucket-id}/{path/to/import-root/dir}/entrypoint-file-name.wstl.", "description_kind": "plain", - "optional": true + "required": true } }, - "description": "The availability condition further constrains the access allowed by the access boundary rule.", + "description": "Specifies the path to the mapping configuration for harmonization pipeline.", "description_kind": "plain" }, "max_items": 1 } }, - "description": "An access boundary rule in an IAM policy.", + "description": "The location of the mapping configuration.", "description_kind": "plain" }, + "min_items": 1, "max_items": 1 } }, - "description": "Rules to be applied.", + "description": "Specifies mapping configuration.", "description_kind": "plain" }, - "min_items": 1 + "max_items": 1 }, - "timeouts": { - "nesting_mode": "single", + "reconciliation_pipeline_job": { + "nesting_mode": "list", "block": { "attributes": { - "create": { - "type": "string", - "description_kind": "plain", - "optional": true - }, - "delete": { + "fhir_store_destination": { "type": "string", + "description": "The 
harmonized FHIR store to write harmonized FHIR resources to,\nin the format of: project/{projectID}/locations/{locationID}/datasets/{datasetName}/fhirStores/{id}", "description_kind": "plain", "optional": true }, - "update": { + "matching_uri_prefix": { "type": "string", + "description": "Specifies the top level directory of the matching configs used\nin all mapping pipelines, which extract properties for resources\nto be matched on.\nExample: gs://{bucket-id}/{path/to/matching/configs}", "description_kind": "plain", - "optional": true - } - }, - "description_kind": "plain" - } - } - }, - "description_kind": "plain" - } - }, - "google_iam_deny_policy": { - "version": 0, - "block": { - "attributes": { - "display_name": { - "type": "string", - "description": "The display name of the rule.", - "description_kind": "plain", - "optional": true - }, - "etag": { - "type": "string", - "description": "The hash of the resource. Used internally during updates.", - "description_kind": "plain", - "computed": true - }, - "id": { - "type": "string", - "description_kind": "plain", - "optional": true, - "computed": true - }, - "name": { - "type": "string", - "description": "The name of the policy.", - "description_kind": "plain", - "required": true - }, - "parent": { - "type": "string", - "description": "The attachment point is identified by its URL-encoded full resource name.", - "description_kind": "plain", - "required": true - } - }, - "block_types": { - "rules": { - "nesting_mode": "list", - "block": { - "attributes": { - "description": { - "type": "string", - "description": "The description of the rule.", - "description_kind": "plain", - "optional": true + "required": true } }, "block_types": { - "deny_rule": { + "merge_config": { "nesting_mode": "list", "block": { "attributes": { - "denied_permissions": { - "type": [ - "list", - "string" - ], - "description": "The permissions that are explicitly denied by this rule. 
Each permission uses the format '{service-fqdn}/{resource}.{verb}',\nwhere '{service-fqdn}' is the fully qualified domain name for the service. For example, 'iam.googleapis.com/roles.list'.", - "description_kind": "plain", - "optional": true - }, - "denied_principals": { - "type": [ - "list", - "string" - ], - "description": "The identities that are prevented from using one or more permissions on Google Cloud resources.", - "description_kind": "plain", - "optional": true - }, - "exception_permissions": { - "type": [ - "list", - "string" - ], - "description": "Specifies the permissions that this rule excludes from the set of denied permissions given by deniedPermissions.\nIf a permission appears in deniedPermissions and in exceptionPermissions then it will not be denied.\nThe excluded permissions can be specified using the same syntax as deniedPermissions.", - "description_kind": "plain", - "optional": true - }, - "exception_principals": { - "type": [ - "list", - "string" - ], - "description": "The identities that are excluded from the deny rule, even if they are listed in the deniedPrincipals.\nFor example, you could add a Google group to the deniedPrincipals, then exclude specific users who belong to that group.", + "description": { + "type": "string", + "description": "Describes the mapping configuration.", "description_kind": "plain", "optional": true } }, "block_types": { - "denial_condition": { + "whistle_config_source": { "nesting_mode": "list", "block": { "attributes": { - "description": { + "import_uri_prefix": { "type": "string", - "description": "Description of the expression. This is a longer text which describes the expression,\ne.g. 
when hovered over it in a UI.", - "description_kind": "plain", - "optional": true - }, - "expression": { - "type": "string", - "description": "Textual representation of an expression in Common Expression Language syntax.", + "description": "Directory path where all the Whistle files are located.\nExample: gs://{bucket-id}/{path/to/import-root/dir}", "description_kind": "plain", "required": true }, - "location": { - "type": "string", - "description": "String indicating the location of the expression for error reporting,\ne.g. a file name and a position in the file.", - "description_kind": "plain", - "optional": true - }, - "title": { + "uri": { "type": "string", - "description": "Title for the expression, i.e. a short string describing its purpose.\nThis can be used e.g. in UIs which allow to enter the expression.", + "description": "Main configuration file which has the entrypoint or the root function.\nExample: gs://{bucket-id}/{path/to/import-root/dir}/entrypoint-file-name.wstl.", "description_kind": "plain", - "optional": true + "required": true } }, - "description": "User defined CEVAL expression. 
A CEVAL expression is used to specify match criteria such as origin.ip, source.region_code and contents in the request header.", + "description": "Specifies the path to the mapping configuration for harmonization pipeline.", "description_kind": "plain" }, + "min_items": 1, "max_items": 1 } }, - "description": "A deny rule in an IAM deny policy.", + "description": "Specifies the location of the reconciliation configuration.", "description_kind": "plain" }, + "min_items": 1, "max_items": 1 } }, - "description": "Rules to be applied.", + "description": "Specifies reconciliation configuration.", "description_kind": "plain" }, - "min_items": 1 + "max_items": 1 + }, + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, + "google_healthcare_workspace": { + "version": 0, + "block": { + "attributes": { + "dataset": { + "type": "string", + "description": "Identifies the dataset addressed by this request. Must be in the format\n'projects/{project}/locations/{location}/datasets/{dataset}'", + "description_kind": "plain", + "required": true + }, + "effective_labels": { + "type": [ + "map", + "string" + ], + "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "labels": { + "type": [ + "map", + "string" + ], + "description": "The user labels. An object containing a list of \"key\": value pairs. 
Example: { \"name\": \"wrench\", \"mass\": \"1.3kg\", \"count\": \"3\" }\n\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", + "description_kind": "plain", + "optional": true + }, + "name": { + "type": "string", + "description": "The name of the workspace, in the format 'projects/{projectId}/locations/{location}/datasets/{datasetId}/dataMapperWorkspaces/{workspaceId}'", + "description_kind": "plain", + "required": true + }, + "terraform_labels": { + "type": [ + "map", + "string" + ], + "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", + "description_kind": "plain", + "computed": true + } + }, + "block_types": { + "settings": { + "nesting_mode": "list", + "block": { + "attributes": { + "data_project_ids": { + "type": [ + "list", + "string" + ], + "description": "Project IDs for data projects hosted in a workspace.", + "description_kind": "plain", + "required": true + } + }, + "description": "Settings associated with this workspace.", + "description_kind": "plain" + }, + "min_items": 1, + "max_items": 1 + }, + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, + "google_iam_access_boundary_policy": { + "version": 0, + "block": { + "attributes": { + "display_name": { + "type": "string", + "description": "The display name of the rule.", + "description_kind": "plain", + "optional": true + }, + "etag": { + "type": "string", + "description": "The hash of the resource. 
Used internally during updates.", + "description_kind": "plain", + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "name": { + "type": "string", + "description": "The name of the policy.", + "description_kind": "plain", + "required": true + }, + "parent": { + "type": "string", + "description": "The attachment point is identified by its URL-encoded full resource name.", + "description_kind": "plain", + "required": true + } + }, + "block_types": { + "rules": { + "nesting_mode": "list", + "block": { + "attributes": { + "description": { + "type": "string", + "description": "The description of the rule.", + "description_kind": "plain", + "optional": true + } + }, + "block_types": { + "access_boundary_rule": { + "nesting_mode": "list", + "block": { + "attributes": { + "available_permissions": { + "type": [ + "list", + "string" + ], + "description": "A list of permissions that may be allowed for use on the specified resource.", + "description_kind": "plain", + "optional": true + }, + "available_resource": { + "type": "string", + "description": "The full resource name of a Google Cloud resource entity.", + "description_kind": "plain", + "optional": true + } + }, + "block_types": { + "availability_condition": { + "nesting_mode": "list", + "block": { + "attributes": { + "description": { + "type": "string", + "description": "Description of the expression. This is a longer text which describes the expression,\ne.g. when hovered over it in a UI.", + "description_kind": "plain", + "optional": true + }, + "expression": { + "type": "string", + "description": "Textual representation of an expression in Common Expression Language syntax.", + "description_kind": "plain", + "required": true + }, + "location": { + "type": "string", + "description": "String indicating the location of the expression for error reporting,\ne.g. 
a file name and a position in the file.", + "description_kind": "plain", + "optional": true + }, + "title": { + "type": "string", + "description": "Title for the expression, i.e. a short string describing its purpose.\nThis can be used e.g. in UIs which allow to enter the expression.", + "description_kind": "plain", + "optional": true + } + }, + "description": "The availability condition further constrains the access allowed by the access boundary rule.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "An access boundary rule in an IAM policy.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "Rules to be applied.", + "description_kind": "plain" + }, + "min_items": 1 + }, + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, + "google_iam_deny_policy": { + "version": 0, + "block": { + "attributes": { + "display_name": { + "type": "string", + "description": "The display name of the rule.", + "description_kind": "plain", + "optional": true + }, + "etag": { + "type": "string", + "description": "The hash of the resource. 
Used internally during updates.", + "description_kind": "plain", + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "name": { + "type": "string", + "description": "The name of the policy.", + "description_kind": "plain", + "required": true + }, + "parent": { + "type": "string", + "description": "The attachment point is identified by its URL-encoded full resource name.", + "description_kind": "plain", + "required": true + } + }, + "block_types": { + "rules": { + "nesting_mode": "list", + "block": { + "attributes": { + "description": { + "type": "string", + "description": "The description of the rule.", + "description_kind": "plain", + "optional": true + } + }, + "block_types": { + "deny_rule": { + "nesting_mode": "list", + "block": { + "attributes": { + "denied_permissions": { + "type": [ + "list", + "string" + ], + "description": "The permissions that are explicitly denied by this rule. Each permission uses the format '{service-fqdn}/{resource}.{verb}',\nwhere '{service-fqdn}' is the fully qualified domain name for the service. 
For example, 'iam.googleapis.com/roles.list'.", + "description_kind": "plain", + "optional": true + }, + "denied_principals": { + "type": [ + "list", + "string" + ], + "description": "The identities that are prevented from using one or more permissions on Google Cloud resources.", + "description_kind": "plain", + "optional": true + }, + "exception_permissions": { + "type": [ + "list", + "string" + ], + "description": "Specifies the permissions that this rule excludes from the set of denied permissions given by deniedPermissions.\nIf a permission appears in deniedPermissions and in exceptionPermissions then it will not be denied.\nThe excluded permissions can be specified using the same syntax as deniedPermissions.", + "description_kind": "plain", + "optional": true + }, + "exception_principals": { + "type": [ + "list", + "string" + ], + "description": "The identities that are excluded from the deny rule, even if they are listed in the deniedPrincipals.\nFor example, you could add a Google group to the deniedPrincipals, then exclude specific users who belong to that group.", + "description_kind": "plain", + "optional": true + } + }, + "block_types": { + "denial_condition": { + "nesting_mode": "list", + "block": { + "attributes": { + "description": { + "type": "string", + "description": "Description of the expression. This is a longer text which describes the expression,\ne.g. when hovered over it in a UI.", + "description_kind": "plain", + "optional": true + }, + "expression": { + "type": "string", + "description": "Textual representation of an expression in Common Expression Language syntax.", + "description_kind": "plain", + "required": true + }, + "location": { + "type": "string", + "description": "String indicating the location of the expression for error reporting,\ne.g. a file name and a position in the file.", + "description_kind": "plain", + "optional": true + }, + "title": { + "type": "string", + "description": "Title for the expression, i.e. 
a short string describing its purpose.\nThis can be used e.g. in UIs which allow to enter the expression.", + "description_kind": "plain", + "optional": true + } + }, + "description": "User defined CEVAL expression. A CEVAL expression is used to specify match criteria such as origin.ip, source.region_code and contents in the request header.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "A deny rule in an IAM deny policy.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "Rules to be applied.", + "description_kind": "plain" + }, + "min_items": 1 + }, + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, + "google_iam_folders_policy_binding": { + "version": 0, + "block": { + "attributes": { + "annotations": { + "type": [ + "map", + "string" + ], + "description": "Optional. User defined annotations. See https://google.aip.dev/148#annotations for more details such as format and size limitations\n\n\n**Note**: This field is non-authoritative, and will only manage the annotations present in your configuration.\nPlease refer to the field 'effective_annotations' for all of the annotations present on the resource.", + "description_kind": "plain", + "optional": true + }, + "create_time": { + "type": "string", + "description": "Output only. The time when the policy binding was created.", + "description_kind": "plain", + "computed": true + }, + "display_name": { + "type": "string", + "description": "Optional. The description of the policy binding. 
Must be less than or equal to 63 characters.", + "description_kind": "plain", + "optional": true + }, + "effective_annotations": { + "type": [ + "map", + "string" + ], + "description": "All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, + "etag": { + "type": "string", + "description": "Optional. The etag for the policy binding. If this is provided on update, it must match the server's etag.", + "description_kind": "plain", + "computed": true + }, + "folder": { + "type": "string", + "description": "The parent folder for the PolicyBinding.", + "description_kind": "plain", + "required": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "location": { + "type": "string", + "description": "The location of the PolicyBinding.", + "description_kind": "plain", + "required": true + }, + "name": { + "type": "string", + "description": "The name of the policy binding in the format '{binding_parent/locations/{location}/policyBindings/{policy_binding_id}'", + "description_kind": "plain", + "computed": true + }, + "policy": { + "type": "string", + "description": "Required. Immutable. The resource name of the policy to be bound. The binding parent and policy must belong to the same Organization (or Project).", + "description_kind": "plain", + "required": true + }, + "policy_binding_id": { + "type": "string", + "description": "The Policy Binding ID.", + "description_kind": "plain", + "required": true + }, + "policy_kind": { + "type": "string", + "description": "Immutable. The kind of the policy to attach in this binding. 
This\nfield must be one of the following: - Left empty (will be automatically set\nto the policy kind) - The input policy kind Possible values: POLICY_KIND_UNSPECIFIED PRINCIPAL_ACCESS_BOUNDARY ACCESS", + "description_kind": "plain", + "optional": true + }, + "policy_uid": { + "type": "string", + "description": "Output only. The globally unique ID of the policy to be bound.", + "description_kind": "plain", + "computed": true + }, + "uid": { + "type": "string", + "description": "Output only. The globally unique ID of the policy binding. Assigned when the policy binding is created.", + "description_kind": "plain", + "computed": true + }, + "update_time": { + "type": "string", + "description": "Output only. The time when the policy binding was most recently updated.", + "description_kind": "plain", + "computed": true + } + }, + "block_types": { + "condition": { + "nesting_mode": "list", + "block": { + "attributes": { + "description": { + "type": "string", + "description": "Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.", + "description_kind": "plain", + "optional": true + }, + "expression": { + "type": "string", + "description": "Textual representation of an expression in Common Expression Language syntax.", + "description_kind": "plain", + "optional": true + }, + "location": { + "type": "string", + "description": "Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.", + "description_kind": "plain", + "optional": true + }, + "title": { + "type": "string", + "description": "Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.", + "description_kind": "plain", + "optional": true + } + }, + "description": "Represents a textual expression in the Common Expression Language\n(CEL) syntax. CEL is a C-like expression language. 
The syntax and semantics of\nCEL are documented at https://github.com/google/cel-spec.\nExample (Comparison):\ntitle: \\\"Summary size limit\\\"\ndescription: \\\"Determines if a summary is less than 100 chars\\\"\nexpression: \\\"document.summary.size() < 100\\\"\nExample\n(Equality):\ntitle: \\\"Requestor is owner\\\"\ndescription: \\\"Determines if requestor is the document owner\\\"\nexpression: \\\"document.owner == request.auth.claims.email\\\" Example\n(Logic):\ntitle: \\\"Public documents\\\"\ndescription: \\\"Determine whether the document should be publicly visible\\\"\nexpression: \\\"document.type != 'private' && document.type != 'internal'\\\"\nExample (Data Manipulation):\ntitle: \\\"Notification string\\\"\ndescription: \\\"Create a notification string with a timestamp.\\\"\nexpression: \\\"'New message received at ' + string(document.create_time)\\\"\nThe exact variables and functions that may be referenced within an expression are\ndetermined by the service that evaluates it. See the service documentation for\nadditional information.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "target": { + "nesting_mode": "list", + "block": { + "attributes": { + "principal_set": { + "type": "string", + "description": "Required. Immutable. The resource name of the policy to be bound.\nThe binding parent and policy must belong to the same Organization (or Project).", + "description_kind": "plain", + "optional": true + } + }, + "description": "Target is the full resource name of the resource to which the policy will be bound. 
Immutable once set.", + "description_kind": "plain" + }, + "min_items": 1, + "max_items": 1 + }, + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, + "google_iam_organizations_policy_binding": { + "version": 0, + "block": { + "attributes": { + "annotations": { + "type": [ + "map", + "string" + ], + "description": "Optional. User defined annotations. See https://google.aip.dev/148#annotations for more details such as format and size limitations\n\n\n**Note**: This field is non-authoritative, and will only manage the annotations present in your configuration.\nPlease refer to the field 'effective_annotations' for all of the annotations present on the resource.", + "description_kind": "plain", + "optional": true + }, + "create_time": { + "type": "string", + "description": "Output only. The time when the policy binding was created.", + "description_kind": "plain", + "computed": true + }, + "display_name": { + "type": "string", + "description": "Optional. The description of the policy binding. Must be less than or equal to 63 characters.", + "description_kind": "plain", + "optional": true + }, + "effective_annotations": { + "type": [ + "map", + "string" + ], + "description": "All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, + "etag": { + "type": "string", + "description": "Optional. The etag for the policy binding. 
If this is provided on update, it must match the server's etag.", + "description_kind": "plain", + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "location": { + "type": "string", + "description": "The location of the Policy Binding", + "description_kind": "plain", + "required": true + }, + "name": { + "type": "string", + "description": "The name of the policy binding in the format '{binding_parent/locations/{location}/policyBindings/{policy_binding_id}'", + "description_kind": "plain", + "computed": true + }, + "organization": { + "type": "string", + "description": "The parent organization of the Policy Binding.", + "description_kind": "plain", + "required": true + }, + "policy": { + "type": "string", + "description": "Required. Immutable. The resource name of the policy to be bound. The binding parent and policy must belong to the same Organization (or Project).", + "description_kind": "plain", + "required": true + }, + "policy_binding_id": { + "type": "string", + "description": "The Policy Binding ID.", + "description_kind": "plain", + "required": true + }, + "policy_kind": { + "type": "string", + "description": "Immutable. The kind of the policy to attach in this binding. This\nfield must be one of the following: - Left empty (will be automatically set\nto the policy kind) - The input policy kind Possible values: POLICY_KIND_UNSPECIFIED PRINCIPAL_ACCESS_BOUNDARY ACCESS", + "description_kind": "plain", + "optional": true + }, + "policy_uid": { + "type": "string", + "description": "Output only. The globally unique ID of the policy to be bound.", + "description_kind": "plain", + "computed": true + }, + "uid": { + "type": "string", + "description": "Output only. The globally unique ID of the policy binding. Assigned when the policy binding is created.", + "description_kind": "plain", + "computed": true + }, + "update_time": { + "type": "string", + "description": "Output only. 
The time when the policy binding was most recently updated.", + "description_kind": "plain", + "computed": true + } + }, + "block_types": { + "condition": { + "nesting_mode": "list", + "block": { + "attributes": { + "description": { + "type": "string", + "description": "Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.", + "description_kind": "plain", + "optional": true + }, + "expression": { + "type": "string", + "description": "Textual representation of an expression in Common Expression Language syntax.", + "description_kind": "plain", + "optional": true + }, + "location": { + "type": "string", + "description": "Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.", + "description_kind": "plain", + "optional": true + }, + "title": { + "type": "string", + "description": "Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.", + "description_kind": "plain", + "optional": true + } + }, + "description": "Represents a textual expression in the Common Expression Language\n(CEL) syntax. CEL is a C-like expression language. 
The syntax and semantics of\nCEL are documented at https://github.com/google/cel-spec.\nExample (Comparison):\ntitle: \\\"Summary size limit\\\"\ndescription: \\\"Determines if a summary is less than 100 chars\\\"\nexpression: \\\"document.summary.size() < 100\\\"\nExample\n(Equality):\ntitle: \\\"Requestor is owner\\\"\ndescription: \\\"Determines if requestor is the document owner\\\"\nexpression: \\\"document.owner == request.auth.claims.email\\\" Example\n(Logic):\ntitle: \\\"Public documents\\\"\ndescription: \\\"Determine whether the document should be publicly visible\\\"\nexpression: \\\"document.type != 'private' && document.type != 'internal'\\\"\nExample (Data Manipulation):\ntitle: \\\"Notification string\\\"\ndescription: \\\"Create a notification string with a timestamp.\\\"\nexpression: \\\"'New message received at ' + string(document.create_time)\\\"\nThe exact variables and functions that may be referenced within an expression are\ndetermined by the service that evaluates it. See the service documentation for\nadditional information.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "target": { + "nesting_mode": "list", + "block": { + "attributes": { + "principal_set": { + "type": "string", + "description": "Required. Immutable. The resource name of the policy to be bound.\nThe binding parent and policy must belong to the same Organization (or Project).", + "description_kind": "plain", + "optional": true + } + }, + "description": "Target is the full resource name of the resource to which the policy will be bound. 
Immutable once set.", + "description_kind": "plain" + }, + "min_items": 1, + "max_items": 1 + }, + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, + "google_iam_principal_access_boundary_policy": { + "version": 0, + "block": { + "attributes": { + "annotations": { + "type": [ + "map", + "string" + ], + "description": "User defined annotations. See https://google.aip.dev/148#annotations\nfor more details such as format and size limitations\n\n\n**Note**: This field is non-authoritative, and will only manage the annotations present in your configuration.\nPlease refer to the field 'effective_annotations' for all of the annotations present on the resource.", + "description_kind": "plain", + "optional": true + }, + "create_time": { + "type": "string", + "description": "Output only. The time when the principal access boundary policy was created.", + "description_kind": "plain", + "computed": true + }, + "display_name": { + "type": "string", + "description": "The description of the principal access boundary policy. Must be less than or equal to 63 characters.", + "description_kind": "plain", + "optional": true + }, + "effective_annotations": { + "type": [ + "map", + "string" + ], + "description": "All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, + "etag": { + "type": "string", + "description": "The etag for the principal access boundary. 
If this is provided on update, it must match the server's etag.", + "description_kind": "plain", + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "location": { + "type": "string", + "description": "The location the principal access boundary policy is in.", + "description_kind": "plain", + "required": true + }, + "name": { + "type": "string", + "description": "Identifier. The resource name of the principal access boundary policy. The following format is supported:\n 'organizations/{organization_id}/locations/{location}/principalAccessBoundaryPolicies/{policy_id}'", + "description_kind": "plain", + "computed": true + }, + "organization": { + "type": "string", + "description": "The parent organization of the principal access boundary policy.", + "description_kind": "plain", + "required": true + }, + "principal_access_boundary_policy_id": { + "type": "string", + "description": "The ID to use to create the principal access boundary policy.\nThis value must start with a lowercase letter followed by up to 62 lowercase letters, numbers, hyphens, or dots. Pattern, /a-z{2,62}/.", + "description_kind": "plain", + "required": true + }, + "uid": { + "type": "string", + "description": "Output only. The globally unique ID of the principal access boundary policy.", + "description_kind": "plain", + "computed": true + }, + "update_time": { + "type": "string", + "description": "Output only. The time when the principal access boundary policy was most recently updated.", + "description_kind": "plain", + "computed": true + } + }, + "block_types": { + "details": { + "nesting_mode": "list", + "block": { + "attributes": { + "enforcement_version": { + "type": "string", + "description": "The version number that indicates which Google Cloud services\nare included in the enforcement (e.g. \\\"latest\\\", \\\"1\\\", ...). 
If empty, the\nPAB policy version will be set to the current latest version, and this version\nwon't get updated when new versions are released.", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "block_types": { + "rules": { + "nesting_mode": "list", + "block": { + "attributes": { + "description": { + "type": "string", + "description": "The description of the principal access boundary policy rule. Must be less than or equal to 256 characters.", + "description_kind": "plain", + "optional": true + }, + "effect": { + "type": "string", + "description": "The access relationship of principals to the resources in this rule.\nPossible values: ALLOW", + "description_kind": "plain", + "required": true + }, + "resources": { + "type": [ + "list", + "string" + ], + "description": "A list of Cloud Resource Manager resources. The resource\nand all the descendants are included. The number of resources in a policy\nis limited to 500 across all rules.\nThe following resource types are supported:\n* Organizations, such as '//cloudresourcemanager.googleapis.com/organizations/123'.\n* Folders, such as '//cloudresourcemanager.googleapis.com/folders/123'.\n* Projects, such as '//cloudresourcemanager.googleapis.com/projects/123'\nor '//cloudresourcemanager.googleapis.com/projects/my-project-id'.", + "description_kind": "plain", + "required": true + } + }, + "description": "A list of principal access boundary policy rules. 
The number of rules in a policy is limited to 500.", + "description_kind": "plain" + }, + "min_items": 1 + } + }, + "description": "Principal access boundary policy details", + "description_kind": "plain" + }, + "max_items": 1 + }, + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, + "google_iam_projects_policy_binding": { + "version": 0, + "block": { + "attributes": { + "annotations": { + "type": [ + "map", + "string" + ], + "description": "Optional. User defined annotations. See https://google.aip.dev/148#annotations for more details such as format and size limitations\n\n\n**Note**: This field is non-authoritative, and will only manage the annotations present in your configuration.\nPlease refer to the field 'effective_annotations' for all of the annotations present on the resource.", + "description_kind": "plain", + "optional": true + }, + "create_time": { + "type": "string", + "description": "Output only. The time when the policy binding was created.", + "description_kind": "plain", + "computed": true + }, + "display_name": { + "type": "string", + "description": "Optional. The description of the policy binding. Must be less than or equal to 63 characters.", + "description_kind": "plain", + "optional": true + }, + "effective_annotations": { + "type": [ + "map", + "string" + ], + "description": "All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, + "etag": { + "type": "string", + "description": "Optional. The etag for the policy binding. 
If this is provided on update, it must match the server's etag.", + "description_kind": "plain", + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "location": { + "type": "string", + "description": "The location of the Policy Binding", + "description_kind": "plain", + "required": true + }, + "name": { + "type": "string", + "description": "The name of the policy binding in the format '{binding_parent/locations/{location}/policyBindings/{policy_binding_id}'", + "description_kind": "plain", + "computed": true + }, + "policy": { + "type": "string", + "description": "Required. Immutable. The resource name of the policy to be bound. The binding parent and policy must belong to the same Organization (or Project).", + "description_kind": "plain", + "required": true + }, + "policy_binding_id": { + "type": "string", + "description": "The Policy Binding ID.", + "description_kind": "plain", + "required": true + }, + "policy_kind": { + "type": "string", + "description": "Immutable. The kind of the policy to attach in this binding. This\nfield must be one of the following: - Left empty (will be automatically set\nto the policy kind) - The input policy kind Possible values: POLICY_KIND_UNSPECIFIED PRINCIPAL_ACCESS_BOUNDARY ACCESS", + "description_kind": "plain", + "optional": true + }, + "policy_uid": { + "type": "string", + "description": "Output only. The globally unique ID of the policy to be bound.", + "description_kind": "plain", + "computed": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "uid": { + "type": "string", + "description": "Output only. The globally unique ID of the policy binding. Assigned when the policy binding is created.", + "description_kind": "plain", + "computed": true + }, + "update_time": { + "type": "string", + "description": "Output only. 
The time when the policy binding was most recently updated.", + "description_kind": "plain", + "computed": true + } + }, + "block_types": { + "condition": { + "nesting_mode": "list", + "block": { + "attributes": { + "description": { + "type": "string", + "description": "Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.", + "description_kind": "plain", + "optional": true + }, + "expression": { + "type": "string", + "description": "Textual representation of an expression in Common Expression Language syntax.", + "description_kind": "plain", + "optional": true + }, + "location": { + "type": "string", + "description": "Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.", + "description_kind": "plain", + "optional": true + }, + "title": { + "type": "string", + "description": "Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.", + "description_kind": "plain", + "optional": true + } + }, + "description": "Represents a textual expression in the Common Expression Language\n(CEL) syntax. CEL is a C-like expression language. 
The syntax and semantics of\nCEL are documented at https://github.com/google/cel-spec.\nExample (Comparison):\ntitle: \\\"Summary size limit\\\"\ndescription: \\\"Determines if a summary is less than 100 chars\\\"\nexpression: \\\"document.summary.size() < 100\\\"\nExample\n(Equality):\ntitle: \\\"Requestor is owner\\\"\ndescription: \\\"Determines if requestor is the document owner\\\"\nexpression: \\\"document.owner == request.auth.claims.email\\\" Example\n(Logic):\ntitle: \\\"Public documents\\\"\ndescription: \\\"Determine whether the document should be publicly visible\\\"\nexpression: \\\"document.type != 'private' && document.type != 'internal'\\\"\nExample (Data Manipulation):\ntitle: \\\"Notification string\\\"\ndescription: \\\"Create a notification string with a timestamp.\\\"\nexpression: \\\"'New message received at ' + string(document.create_time)\\\"\nThe exact variables and functions that may be referenced within an expression are\ndetermined by the service that evaluates it. See the service documentation for\nadditional information.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "target": { + "nesting_mode": "list", + "block": { + "attributes": { + "principal_set": { + "type": "string", + "description": "Required. Immutable. The resource name of the policy to be bound.\nThe binding parent and policy must belong to the same Organization (or Project).", + "description_kind": "plain", + "optional": true + } + }, + "description": "Target is the full resource name of the resource to which the policy will be bound. 
Immutable once set.", + "description_kind": "plain" + }, + "min_items": 1, + "max_items": 1 }, "timeouts": { "nesting_mode": "single", @@ -108908,26 +116171,78 @@ }, "description_kind": "plain" } - } - }, - "description_kind": "plain" - } - }, - "google_iap_app_engine_service_iam_binding": { - "version": 0, - "block": { - "attributes": { - "app_id": { - "type": "string", - "description_kind": "plain", - "required": true - }, - "etag": { - "type": "string", - "description_kind": "plain", - "computed": true }, - "id": { + "x509": { + "nesting_mode": "list", + "block": { + "block_types": { + "trust_store": { + "nesting_mode": "list", + "block": { + "block_types": { + "intermediate_cas": { + "nesting_mode": "list", + "block": { + "attributes": { + "pem_certificate": { + "type": "string", + "description": "PEM certificate of the PKI used for validation. Must only contain one\nca certificate(either root or intermediate cert).", + "description_kind": "plain", + "optional": true + } + }, + "description": "Set of intermediate CA certificates used for building the trust chain to\ntrust anchor.\nIMPORTANT: Intermediate CAs are only supported when configuring x509 federation.", + "description_kind": "plain" + } + }, + "trust_anchors": { + "nesting_mode": "list", + "block": { + "attributes": { + "pem_certificate": { + "type": "string", + "description": "PEM certificate of the PKI used for validation. Must only contain one\nca certificate(either root or intermediate cert).", + "description_kind": "plain", + "optional": true + } + }, + "description": "List of Trust Anchors to be used while performing validation\nagainst a given TrustStore. 
The incoming end entity's certificate\nmust be chained up to one of the trust anchors here.", + "description_kind": "plain" + }, + "min_items": 1 + } + }, + "description": "A Trust store, use this trust store as a wrapper to config the trust\nanchor and optional intermediate cas to help build the trust chain for\nthe incoming end entity certificate. Follow the x509 guidelines to\ndefine those PEM encoded certs. Only 1 trust store is currently\nsupported.", + "description_kind": "plain" + }, + "min_items": 1, + "max_items": 1 + } + }, + "description": "An X.509-type identity provider represents a CA. It is trusted to assert a\nclient identity if the client has a certificate that chains up to this CA.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description_kind": "plain" + } + }, + "google_iap_app_engine_service_iam_binding": { + "version": 0, + "block": { + "attributes": { + "app_id": { + "type": "string", + "description_kind": "plain", + "required": true + }, + "etag": { + "type": "string", + "description_kind": "plain", + "computed": true + }, + "id": { "type": "string", "description_kind": "plain", "optional": true, @@ -109421,6 +116736,334 @@ "description_kind": "plain" } }, + "google_iap_settings": { + "version": 0, + "block": { + "attributes": { + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "name": { + "type": "string", + "description": "The resource name of the IAP protected resource. 
Name can have below resources:\n* organizations/{organization_id}\n* folders/{folder_id}\n* projects/{project_id}\n* projects/{project_id}/iap_web\n* projects/{project_id}/iap_web/compute\n* projects/{project_id}/iap_web/compute-{region}\n* projects/{project_id}/iap_web/compute/services/{service_id}\n* projects/{project_id}/iap_web/compute-{region}/services/{service_id}\n* projects/{project_id}/iap_web/appengine-{app_id}\n* projects/{project_id}/iap_web/appengine-{app_id}/services/{service_id}\n* projects/{project_id}/iap_web/appengine-{app_id}/services/{service_id}/version/{version_id}", + "description_kind": "plain", + "required": true + } + }, + "block_types": { + "access_settings": { + "nesting_mode": "list", + "block": { + "attributes": { + "identity_sources": { + "type": [ + "list", + "string" + ], + "description": "Identity sources that IAP can use to authenticate the end user. Only one identity source\ncan be configured. The possible values are:\n\n* 'WORKFORCE_IDENTITY_FEDERATION': Use external identities set up on Google Cloud Workforce\n \t\t\t\t Identity Federation. 
Possible values: [\"WORKFORCE_IDENTITY_FEDERATION\"]", + "description_kind": "plain", + "optional": true + } + }, + "block_types": { + "allowed_domains_settings": { + "nesting_mode": "list", + "block": { + "attributes": { + "domains": { + "type": [ + "list", + "string" + ], + "description": "List of trusted domains.", + "description_kind": "plain", + "optional": true + }, + "enable": { + "type": "bool", + "description": "Configuration for customers to opt in for the feature.", + "description_kind": "plain", + "optional": true + } + }, + "description": "Settings to configure and enable allowed domains.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "cors_settings": { + "nesting_mode": "list", + "block": { + "attributes": { + "allow_http_options": { + "type": "bool", + "description": "Configuration to allow HTTP OPTIONS calls to skip authorization.\nIf undefined, IAP will not apply any special logic to OPTIONS requests.", + "description_kind": "plain", + "optional": true + } + }, + "description": "Configuration to allow cross-origin requests via IAP.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "gcip_settings": { + "nesting_mode": "list", + "block": { + "attributes": { + "login_page_uri": { + "type": "string", + "description": "Login page URI associated with the GCIP tenants. Typically, all resources within\nthe same project share the same login page, though it could be overridden at the\nsub resource level.", + "description_kind": "plain", + "optional": true + }, + "tenant_ids": { + "type": [ + "list", + "string" + ], + "description": "GCIP tenant ids that are linked to the IAP resource. tenantIds could be a string\nbeginning with a number character to indicate authenticating with GCIP tenant flow,\nor in the format of _ to indicate authenticating with GCIP agent flow. 
If agent flow\nis used, tenantIds should only contain one single element, while for tenant flow,\ntenantIds can contain multiple elements.", + "description_kind": "plain", + "optional": true + } + }, + "description": "GCIP claims and endpoint configurations for 3p identity providers.\n* Enabling gcipSetting significantly changes the way IAP authenticates users. Identity Platform does not support IAM, so IAP will not enforce any IAM policies for requests to your application.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "oauth_settings": { + "nesting_mode": "list", + "block": { + "attributes": { + "login_hint": { + "type": "string", + "description": "Domain hint to send as hd=? parameter in OAuth request flow.\nEnables redirect to primary IDP by skipping Google's login screen.\n(https://developers.google.com/identity/protocols/OpenIDConnect#hd-param)\nNote: IAP does not verify that the id token's hd claim matches this value\nsince access behavior is managed by IAM policies.\n* loginHint setting is not a replacement for access control. Always enforce an appropriate access policy if you want to restrict access to users outside your domain.", + "description_kind": "plain", + "optional": true + }, + "programmatic_clients": { + "type": [ + "list", + "string" + ], + "description": "List of client ids allowed to use IAP programmatically.", + "description_kind": "plain", + "optional": true + } + }, + "description": "Settings to configure IAP's OAuth behavior.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "reauth_settings": { + "nesting_mode": "list", + "block": { + "attributes": { + "max_age": { + "type": "string", + "description": "Reauth session lifetime, how long before a user has to reauthenticate again.\nA duration in seconds with up to nine fractional digits, ending with 's'.\nExample: \"3.5s\".", + "description_kind": "plain", + "required": true + }, + "method": { + "type": "string", + "description": "Reauth method requested. 
The possible values are:\n\n* 'LOGIN': Prompts the user to log in again.\n* 'SECURE_KEY': User must use their secure key 2nd factor device.\n* 'ENROLLED_SECOND_FACTORS': User can use any enabled 2nd factor. Possible values: [\"LOGIN\", \"SECURE_KEY\", \"ENROLLED_SECOND_FACTORS\"]", + "description_kind": "plain", + "required": true + }, + "policy_type": { + "type": "string", + "description": "How IAP determines the effective policy in cases of hierarchical policies.\nPolicies are merged from higher in the hierarchy to lower in the hierarchy.\nThe possible values are:\n\n* 'MINIMUM': This policy acts as a minimum to other policies, lower in the hierarchy.\n\t\t Effective policy may only be the same or stricter.\n* 'DEFAULT': This policy acts as a default if no other reauth policy is set. Possible values: [\"MINIMUM\", \"DEFAULT\"]", + "description_kind": "plain", + "required": true + } + }, + "description": "Settings to configure reauthentication policies in IAP.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "workforce_identity_settings": { + "nesting_mode": "list", + "block": { + "attributes": { + "workforce_pools": { + "type": [ + "list", + "string" + ], + "description": "The workforce pool resources. Only one workforce pool is accepted.", + "description_kind": "plain", + "optional": true + } + }, + "block_types": { + "oauth2": { + "nesting_mode": "list", + "block": { + "attributes": { + "client_id": { + "type": "string", + "description": "The OAuth 2.0 client ID registered in the workforce identity\nfederation OAuth 2.0 Server.", + "description_kind": "plain", + "optional": true + }, + "client_secret": { + "type": "string", + "description": "Input only. The OAuth 2.0 client secret created while registering\nthe client ID.", + "description_kind": "plain", + "optional": true, + "sensitive": true + }, + "client_secret_sha256": { + "type": "string", + "description": "Output only. SHA256 hash value for the client secret. 
This field\nis returned by IAP when the settings are retrieved.", + "description_kind": "plain", + "computed": true + } + }, + "description": "OAuth 2.0 settings for IAP to perform OIDC flow with workforce identity\nfederation services.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "Settings to configure the workforce identity federation, including workforce pools\nand OAuth 2.0 settings.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "Top level wrapper for all access related setting in IAP.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "application_settings": { + "nesting_mode": "list", + "block": { + "attributes": { + "cookie_domain": { + "type": "string", + "description": "The Domain value to set for cookies generated by IAP. This value is not validated by the API,\nbut will be ignored at runtime if invalid.", + "description_kind": "plain", + "optional": true + } + }, + "block_types": { + "access_denied_page_settings": { + "nesting_mode": "list", + "block": { + "attributes": { + "access_denied_page_uri": { + "type": "string", + "description": "The URI to be redirected to when access is denied.", + "description_kind": "plain", + "optional": true + }, + "generate_troubleshooting_uri": { + "type": "bool", + "description": "Whether to generate a troubleshooting URL on access denied events to this application.", + "description_kind": "plain", + "optional": true + }, + "remediation_token_generation_enabled": { + "type": "bool", + "description": "Whether to generate remediation token on access denied events to this application.", + "description_kind": "plain", + "optional": true + } + }, + "description": "Customization for Access Denied page. IAP allows customers to define a custom URI\nto use as the error page when access is denied to users. 
If IAP prevents access\nto this page, the default IAP error page will be displayed instead.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "attribute_propagation_settings": { + "nesting_mode": "list", + "block": { + "attributes": { + "enable": { + "type": "bool", + "description": "Whether the provided attribute propagation settings should be evaluated on user requests.\nIf set to true, attributes returned from the expression will be propagated in the set output credentials.", + "description_kind": "plain", + "optional": true + }, + "expression": { + "type": "string", + "description": "Raw string CEL expression. Must return a list of attributes. A maximum of 45 attributes can\nbe selected. Expressions can select different attribute types from attributes:\nattributes.saml_attributes, attributes.iap_attributes.", + "description_kind": "plain", + "optional": true + }, + "output_credentials": { + "type": [ + "list", + "string" + ], + "description": "Which output credentials attributes selected by the CEL expression should be propagated in.\nAll attributes will be fully duplicated in each selected output credential.\nPossible values are:\n\n* 'HEADER': Propagate attributes in the headers with \"x-goog-iap-attr-\" prefix.\n* 'JWT': Propagate attributes in the JWT of the form:\n \"additional_claims\": { \"my_attribute\": [\"value1\", \"value2\"] }\n* 'RCTOKEN': Propagate attributes in the RCToken of the form: \"\n additional_claims\": { \"my_attribute\": [\"value1\", \"value2\"] } Possible values: [\"HEADER\", \"JWT\", \"RCTOKEN\"]", + "description_kind": "plain", + "optional": true + } + }, + "description": "Settings to configure attribute propagation.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "csm_settings": { + "nesting_mode": "list", + "block": { + "attributes": { + "rctoken_aud": { + "type": "string", + "description": "Audience claim set in the generated RCToken. 
This value is not validated by IAP.", + "description_kind": "plain", + "optional": true + } + }, + "description": "Settings to configure IAP's behavior for a service mesh.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "Top level wrapper for all application related settings in IAP.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, "google_iap_tunnel_dest_group": { "version": 0, "block": { @@ -111144,7 +118787,7 @@ "attributes": { "quota": { "type": "number", - "description": "A sign up APIs quota that customers can override temporarily.", + "description": "A sign up APIs quota that customers can override temporarily. Value can be in between 1 and 1000.", "description_kind": "plain", "optional": true }, @@ -111161,7 +118804,7 @@ "optional": true } }, - "description": "Quota for the Signup endpoint, if overwritten. Signup quota is measured in sign ups per project per hour per IP.", + "description": "Quota for the Signup endpoint, if overwritten. Signup quota is measured in sign ups per project per hour per IP. None of quota, startTime, or quotaDuration can be skipped.", "description_kind": "plain" }, "max_items": 1 @@ -111649,157 +119292,6 @@ "description_kind": "plain" } }, - "google_identity_platform_project_default_config": { - "version": 0, - "block": { - "attributes": { - "id": { - "type": "string", - "description_kind": "plain", - "optional": true, - "computed": true - }, - "name": { - "type": "string", - "description": "The name of the Config resource. 
Example: \"projects/my-awesome-project/config\"", - "description_kind": "plain", - "computed": true - }, - "project": { - "type": "string", - "description_kind": "plain", - "optional": true, - "computed": true - } - }, - "block_types": { - "sign_in": { - "nesting_mode": "list", - "block": { - "attributes": { - "allow_duplicate_emails": { - "type": "bool", - "description": "Whether to allow more than one account to have the same email.", - "description_kind": "plain", - "optional": true - }, - "hash_config": { - "type": [ - "list", - [ - "object", - { - "algorithm": "string", - "memory_cost": "number", - "rounds": "number", - "salt_separator": "string", - "signer_key": "string" - } - ] - ], - "description": "Output only. Hash config information.", - "description_kind": "plain", - "computed": true - } - }, - "block_types": { - "anonymous": { - "nesting_mode": "list", - "block": { - "attributes": { - "enabled": { - "type": "bool", - "description": "Whether anonymous user auth is enabled for the project or not.", - "description_kind": "plain", - "required": true - } - }, - "description": "Configuration options related to authenticating an anonymous user.", - "description_kind": "plain" - }, - "max_items": 1 - }, - "email": { - "nesting_mode": "list", - "block": { - "attributes": { - "enabled": { - "type": "bool", - "description": "Whether email auth is enabled for the project or not.", - "description_kind": "plain", - "optional": true - }, - "password_required": { - "type": "bool", - "description": "Whether a password is required for email auth or not. If true, both an email and\npassword must be provided to sign in. 
If false, a user may sign in via either\nemail/password or email link.", - "description_kind": "plain", - "optional": true - } - }, - "description": "Configuration options related to authenticating a user by their email address.", - "description_kind": "plain" - }, - "max_items": 1 - }, - "phone_number": { - "nesting_mode": "list", - "block": { - "attributes": { - "enabled": { - "type": "bool", - "description": "Whether phone number auth is enabled for the project or not.", - "description_kind": "plain", - "optional": true - }, - "test_phone_numbers": { - "type": [ - "map", - "string" - ], - "description": "A map of that can be used for phone auth testing.", - "description_kind": "plain", - "optional": true - } - }, - "description": "Configuration options related to authenticated a user by their phone number.", - "description_kind": "plain" - }, - "max_items": 1 - } - }, - "description": "Configuration related to local sign in methods.", - "description_kind": "plain" - }, - "max_items": 1 - }, - "timeouts": { - "nesting_mode": "single", - "block": { - "attributes": { - "create": { - "type": "string", - "description_kind": "plain", - "optional": true - }, - "delete": { - "type": "string", - "description_kind": "plain", - "optional": true - }, - "update": { - "type": "string", - "description_kind": "plain", - "optional": true - } - }, - "description_kind": "plain" - } - } - }, - "description_kind": "plain", - "deprecated": true - } - }, "google_identity_platform_tenant": { "version": 0, "block": { @@ -112240,7 +119732,7 @@ }, "description": { "type": "string", - "description": "An arbitrary description for the Conection.", + "description": "An arbitrary description for the Connection.", "description_kind": "plain", "optional": true }, @@ -112386,7 +119878,7 @@ }, "auth_type": { "type": "string", - "description": "authType of the Connection Possible values: [\"USER_PASSWORD\", \"OAUTH2_JWT_BEARER\", \"OAUTH2_CLIENT_CREDENTIALS\", \"SSH_PUBLIC_KEY\", 
\"OAUTH2_AUTH_CODE_FLOW\"]", + "description": "authType of the Connection Possible values: [\"AUTH_TYPE_UNSPECIFIED\", \"USER_PASSWORD\", \"OAUTH2_JWT_BEARER\", \"OAUTH2_CLIENT_CREDENTIALS\", \"SSH_PUBLIC_KEY\", \"OAUTH2_AUTH_CODE_FLOW\"]", "description_kind": "plain", "required": true } @@ -112434,12 +119926,12 @@ }, "type": { "type": "string", - "description": "Type of Encription Key Possible values: [\"GOOGLE_MANAGED\", \"CUSTOMER_MANAGED\"]", + "description": "Type of Encryption Key Possible values: [\"GOOGLE_MANAGED\", \"CUSTOMER_MANAGED\"]", "description_kind": "plain", "required": true } }, - "description": "Encription key value of configVariable.", + "description": "Encryption key value of configVariable.", "description_kind": "plain" }, "max_items": 1 @@ -112746,12 +120238,12 @@ }, "type": { "type": "string", - "description": "Type of Encription Key Possible values: [\"GOOGLE_MANAGED\", \"CUSTOMER_MANAGED\"]", + "description": "Type of Encryption Key Possible values: [\"GOOGLE_MANAGED\", \"CUSTOMER_MANAGED\"]", "description_kind": "plain", "required": true } }, - "description": "Encription key value of configVariable.", + "description": "Encryption key value of configVariable.", "description_kind": "plain" }, "max_items": 1 @@ -112880,7 +120372,7 @@ "optional": true } }, - "description": "Encription key value of configVariable.", + "description": "Encryption key value of configVariable.", "description_kind": "plain" }, "max_items": 1 @@ -112966,12 +120458,12 @@ }, "type": { "type": "string", - "description": "Type of Encription Key Possible values: [\"GOOGLE_MANAGED\", \"CUSTOMER_MANAGED\"]", + "description": "Type of Encryption Key Possible values: [\"GOOGLE_MANAGED\", \"CUSTOMER_MANAGED\"]", "description_kind": "plain", "optional": true } }, - "description": "Encription key value of configVariable", + "description": "Encryption key value of configVariable", "description_kind": "plain" }, "max_items": 1 @@ -113230,12 +120722,12 @@ }, "type": { "type": 
"string", - "description": "Type of Encription Key Possible values: [\"GOOGLE_MANAGED\", \"CUSTOMER_MANAGED\"]", + "description": "Type of Encryption Key Possible values: [\"GOOGLE_MANAGED\", \"CUSTOMER_MANAGED\"]", "description_kind": "plain", "optional": true } }, - "description": "Encription key value of configVariable", + "description": "Encryption key value of configVariable", "description_kind": "plain" }, "max_items": 1 @@ -114085,13 +121577,6 @@ "description_kind": "plain", "optional": true }, - "create_sample_workflows": { - "type": "bool", - "description": "Indicates if sample workflow should be created along with provisioning.", - "description_kind": "plain", - "deprecated": true, - "optional": true - }, "id": { "type": "string", "description_kind": "plain", @@ -114110,13 +121595,6 @@ "optional": true, "computed": true }, - "provision_gmek": { - "type": "bool", - "description": "Indicates provision with GMEK or CMEK.", - "description_kind": "plain", - "deprecated": true, - "optional": true - }, "run_as_service_account": { "type": "string", "description": "User input run-as service account, if empty, will bring up a new default service account.", @@ -116100,6 +123578,92 @@ "description_kind": "plain" } }, + "google_logging_log_scope": { + "version": 0, + "block": { + "attributes": { + "create_time": { + "type": "string", + "description": "Output only. The creation timestamp of the log scopes.", + "description_kind": "plain", + "computed": true + }, + "description": { + "type": "string", + "description": "Describes this log scopes.", + "description_kind": "plain", + "optional": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "location": { + "type": "string", + "description": "The location of the resource. 
The only supported location is global so far.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "name": { + "type": "string", + "description": "The resource name of the log scope. For example: \\'projects/my-project/locations/global/logScopes/my-log-scope\\'", + "description_kind": "plain", + "required": true + }, + "parent": { + "type": "string", + "description": "The parent of the resource.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "resource_names": { + "type": [ + "list", + "string" + ], + "description": "Names of one or more parent resources : * \\'projects/[PROJECT_ID]\\' May alternatively be one or more views : * \\'projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]\\' A log scope can include a maximum of 50 projects and a maximum of 100 resources in total.", + "description_kind": "plain", + "required": true + }, + "update_time": { + "type": "string", + "description": "Output only. The last update timestamp of the log scopes.", + "description_kind": "plain", + "computed": true + } + }, + "block_types": { + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, "google_logging_log_view": { "version": 0, "block": { @@ -117295,12 +124859,24 @@ "description_kind": "plain", "computed": true }, + "deletion_policy": { + "type": "string", + "description": "Policy to determine if the cluster should be deleted forcefully.\nIf setting deletion_policy = \"FORCE\", the Looker instance will be deleted regardless\nof its nested resources. 
If set to \"DEFAULT\", Looker instances that still have\nnested resources will return an error. Possible values: DEFAULT, FORCE", + "description_kind": "plain", + "optional": true + }, "egress_public_ip": { "type": "string", "description": "Public Egress IP (IPv4).", "description_kind": "plain", "computed": true }, + "fips_enabled": { + "type": "bool", + "description": "FIPS 140-2 Encryption enablement for Looker (Google Cloud Core).", + "description_kind": "plain", + "optional": true + }, "id": { "type": "string", "description_kind": "plain", @@ -117339,7 +124915,7 @@ }, "platform_edition": { "type": "string", - "description": "Platform editions for a Looker instance. Each edition maps to a set of instance features, like its size. Must be one of these values:\n- LOOKER_CORE_TRIAL: trial instance (Currently Unavailable)\n- LOOKER_CORE_STANDARD: pay as you go standard instance (Currently Unavailable)\n- LOOKER_CORE_STANDARD_ANNUAL: subscription standard instance\n- LOOKER_CORE_ENTERPRISE_ANNUAL: subscription enterprise instance\n- LOOKER_CORE_EMBED_ANNUAL: subscription embed instance Default value: \"LOOKER_CORE_TRIAL\" Possible values: [\"LOOKER_CORE_TRIAL\", \"LOOKER_CORE_STANDARD\", \"LOOKER_CORE_STANDARD_ANNUAL\", \"LOOKER_CORE_ENTERPRISE_ANNUAL\", \"LOOKER_CORE_EMBED_ANNUAL\"]", + "description": "Platform editions for a Looker instance. Each edition maps to a set of instance features, like its size. 
Must be one of these values:\n- LOOKER_CORE_TRIAL: trial instance (Currently Unavailable)\n- LOOKER_CORE_STANDARD: pay as you go standard instance (Currently Unavailable)\n- LOOKER_CORE_STANDARD_ANNUAL: subscription standard instance\n- LOOKER_CORE_ENTERPRISE_ANNUAL: subscription enterprise instance\n- LOOKER_CORE_EMBED_ANNUAL: subscription embed instance\n- LOOKER_CORE_NONPROD_STANDARD_ANNUAL: nonprod subscription standard instance\n- LOOKER_CORE_NONPROD_ENTERPRISE_ANNUAL: nonprod subscription enterprise instance\n- LOOKER_CORE_NONPROD_EMBED_ANNUAL: nonprod subscription embed instance Default value: \"LOOKER_CORE_TRIAL\" Possible values: [\"LOOKER_CORE_TRIAL\", \"LOOKER_CORE_STANDARD\", \"LOOKER_CORE_STANDARD_ANNUAL\", \"LOOKER_CORE_ENTERPRISE_ANNUAL\", \"LOOKER_CORE_EMBED_ANNUAL\", \"LOOKER_CORE_NONPROD_STANDARD_ANNUAL\", \"LOOKER_CORE_NONPROD_ENTERPRISE_ANNUAL\", \"LOOKER_CORE_NONPROD_EMBED_ANNUAL\"]", "description_kind": "plain", "optional": true }, @@ -117355,6 +124931,12 @@ "optional": true, "computed": true }, + "psc_enabled": { + "type": "bool", + "description": "Whether Public Service Connect (PSC) is enabled on the Looker instance", + "description_kind": "plain", + "optional": true + }, "public_ip_enabled": { "type": "bool", "description": "Whether public IP is enabled on the Looker instance.", @@ -117627,6 +125209,61 @@ "description": "Looker Instance OAuth login settings.", "description_kind": "plain" }, + "min_items": 1, + "max_items": 1 + }, + "psc_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "allowed_vpcs": { + "type": [ + "list", + "string" + ], + "description": "List of VPCs that are allowed ingress into the Looker instance.", + "description_kind": "plain", + "optional": true + }, + "looker_service_attachment_uri": { + "type": "string", + "description": "URI of the Looker service attachment.", + "description_kind": "plain", + "computed": true + } + }, + "block_types": { + "service_attachments": { + "nesting_mode": "list", + 
"block": { + "attributes": { + "connection_status": { + "type": "string", + "description": "Status of the service attachment connection.", + "description_kind": "plain", + "computed": true + }, + "local_fqdn": { + "type": "string", + "description": "Fully qualified domain name that will be used in the private DNS record created for the service attachment.", + "description_kind": "plain", + "optional": true + }, + "target_service_attachment_uri": { + "type": "string", + "description": "URI of the service attachment to connect to.", + "description_kind": "plain", + "optional": true + } + }, + "description": "List of egress service attachment configurations.", + "description_kind": "plain" + } + } + }, + "description": "Information for Private Service Connect (PSC) setup for a Looker instance.", + "description_kind": "plain" + }, "max_items": 1 }, "timeouts": { @@ -117684,6 +125321,289 @@ "description_kind": "plain" } }, + "google_managed_kafka_cluster": { + "version": 0, + "block": { + "attributes": { + "cluster_id": { + "type": "string", + "description": "The ID to use for the cluster, which will become the final component of the cluster's name. The ID must be 1-63 characters long, and match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' to comply with RFC 1035. 
This value is structured like: 'my-cluster-id'.", + "description_kind": "plain", + "required": true + }, + "create_time": { + "type": "string", + "description": "The time when the cluster was created.", + "description_kind": "plain", + "computed": true + }, + "effective_labels": { + "type": [ + "map", + "string" + ], + "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "labels": { + "type": [ + "map", + "string" + ], + "description": "List of label KEY=VALUE pairs to add. Keys must start with a lowercase character and contain only hyphens (-), underscores ( ), lowercase characters, and numbers. Values must contain only hyphens (-), underscores ( ), lowercase characters, and numbers.\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", + "description_kind": "plain", + "optional": true + }, + "location": { + "type": "string", + "description": "ID of the location of the Kafka resource. See https://cloud.google.com/managed-kafka/docs/locations for a list of supported locations.", + "description_kind": "plain", + "required": true + }, + "name": { + "type": "string", + "description": "The name of the cluster. Structured like: 'projects/PROJECT_ID/locations/LOCATION/clusters/CLUSTER_ID'.", + "description_kind": "plain", + "computed": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "state": { + "type": "string", + "description": "The current state of the cluster. 
Possible values: 'STATE_UNSPECIFIED', 'CREATING', 'ACTIVE', 'DELETING'.", + "description_kind": "plain", + "computed": true + }, + "terraform_labels": { + "type": [ + "map", + "string" + ], + "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", + "description_kind": "plain", + "computed": true + }, + "update_time": { + "type": "string", + "description": "The time when the cluster was last updated.", + "description_kind": "plain", + "computed": true + } + }, + "block_types": { + "capacity_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "memory_bytes": { + "type": "string", + "description": "The memory to provision for the cluster in bytes. The value must be between 1 GiB and 8 GiB per vCPU. Ex. 1024Mi, 4Gi.", + "description_kind": "plain", + "required": true + }, + "vcpu_count": { + "type": "string", + "description": "The number of vCPUs to provision for the cluster. The minimum is 3.", + "description_kind": "plain", + "required": true + } + }, + "description": "A capacity configuration of a Kafka cluster.", + "description_kind": "plain" + }, + "min_items": 1, + "max_items": 1 + }, + "gcp_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "kms_key": { + "type": "string", + "description": "The Cloud KMS Key name to use for encryption. The key must be located in the same region as the cluster and cannot be changed. Must be in the format 'projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY'.", + "description_kind": "plain", + "optional": true + } + }, + "block_types": { + "access_config": { + "nesting_mode": "list", + "block": { + "block_types": { + "network_configs": { + "nesting_mode": "list", + "block": { + "attributes": { + "subnet": { + "type": "string", + "description": "Name of the VPC subnet from which the cluster is accessible. 
Both broker and bootstrap server IP addresses and DNS entries are automatically created in the subnet. The subnet must be located in the same region as the cluster. The project may differ. The name of the subnet must be in the format 'projects/PROJECT_ID/regions/REGION/subnetworks/SUBNET'.", + "description_kind": "plain", + "required": true + } + }, + "description": "Virtual Private Cloud (VPC) subnets where IP addresses for the Kafka cluster are allocated. To make the cluster available in a VPC, you must specify at least one subnet per network. You must specify between 1 and 10 subnets. Additional subnets may be specified with additional 'network_configs' blocks.", + "description_kind": "plain" + }, + "min_items": 1 + } + }, + "description": "The configuration of access to the Kafka cluster.", + "description_kind": "plain" + }, + "min_items": 1, + "max_items": 1 + } + }, + "description": "Configuration properties for a Kafka cluster deployed to Google Cloud Platform.", + "description_kind": "plain" + }, + "min_items": 1, + "max_items": 1 + }, + "rebalance_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "mode": { + "type": "string", + "description": "The rebalance behavior for the cluster. When not specified, defaults to 'NO_REBALANCE'. 
Possible values: 'MODE_UNSPECIFIED', 'NO_REBALANCE', 'AUTO_REBALANCE_ON_SCALE_UP'.", + "description_kind": "plain", + "optional": true + } + }, + "description": "Defines rebalancing behavior of a Kafka cluster.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, + "google_managed_kafka_topic": { + "version": 0, + "block": { + "attributes": { + "cluster": { + "type": "string", + "description": "The cluster name.", + "description_kind": "plain", + "required": true + }, + "configs": { + "type": [ + "map", + "string" + ], + "description": "Configuration for the topic that are overridden from the cluster defaults. The key of the map is a Kafka topic property name, for example: 'cleanup.policy=compact', 'compression.type=producer'.", + "description_kind": "plain", + "optional": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "location": { + "type": "string", + "description": "ID of the location of the Kafka resource. See https://cloud.google.com/managed-kafka/docs/locations for a list of supported locations.", + "description_kind": "plain", + "required": true + }, + "name": { + "type": "string", + "description": "The name of the topic. The 'topic' segment is used when connecting directly to the cluster. Must be in the format 'projects/PROJECT_ID/locations/LOCATION/clusters/CLUSTER_ID/topics/TOPIC_ID'.", + "description_kind": "plain", + "computed": true + }, + "partition_count": { + "type": "number", + "description": "The number of partitions in a topic. 
You can increase the partition count for a topic, but you cannot decrease it. Increasing partitions for a topic that uses a key might change how messages are distributed.", + "description_kind": "plain", + "optional": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "replication_factor": { + "type": "number", + "description": "The number of replicas of each partition. A replication factor of 3 is recommended for high availability.", + "description_kind": "plain", + "required": true + }, + "topic_id": { + "type": "string", + "description": "The ID to use for the topic, which will become the final component of the topic's name. This value is structured like: 'my-topic-name'.", + "description_kind": "plain", + "required": true + } + }, + "block_types": { + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, "google_memcache_instance": { "version": 0, "block": { @@ -118003,6 +125923,375 @@ "description_kind": "plain" } }, + "google_memorystore_instance": { + "version": 0, + "block": { + "attributes": { + "authorization_mode": { + "type": "string", + "description": "Optional. Immutable. Authorization mode of the instance. Possible values:\n AUTH_DISABLED\nIAM_AUTH", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "create_time": { + "type": "string", + "description": "Output only. Creation timestamp of the instance.", + "description_kind": "plain", + "computed": true + }, + "deletion_protection_enabled": { + "type": "bool", + "description": "Optional. 
If set to true deletion of the instance will fail.", + "description_kind": "plain", + "optional": true + }, + "discovery_endpoints": { + "type": [ + "list", + [ + "object", + { + "address": "string", + "network": "string", + "port": "number" + } + ] + ], + "description": "Output only. Endpoints clients can connect to the instance through. Currently only one\ndiscovery endpoint is supported.", + "description_kind": "plain", + "computed": true + }, + "effective_labels": { + "type": [ + "map", + "string" + ], + "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, + "endpoints": { + "type": [ + "list", + [ + "list", + "string" + ] + ], + "description": "Endpoints for the instance.", + "description_kind": "plain", + "computed": true + }, + "engine_configs": { + "type": [ + "map", + "string" + ], + "description": "Optional. User-provided engine configurations for the instance.", + "description_kind": "plain", + "optional": true + }, + "engine_version": { + "type": "string", + "description": "Optional. Immutable. Engine version of the instance.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "instance_id": { + "type": "string", + "description": "Required. The ID to use for the instance, which will become the final component of\nthe instance's resource name.\n\nThis value is subject to the following restrictions:\n\n* Must be 4-63 characters in length\n* Must begin with a letter or digit\n* Must contain only lowercase letters, digits, and hyphens\n* Must not end with a hyphen\n* Must be unique within a location", + "description_kind": "plain", + "required": true + }, + "labels": { + "type": [ + "map", + "string" + ], + "description": "Optional. 
Labels to represent user-provided metadata. \n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", + "description_kind": "plain", + "optional": true + }, + "location": { + "type": "string", + "description": "Resource ID segment making up resource 'name'. It identifies the resource within its parent collection as described in https://google.aip.dev/122. See documentation for resource type 'memorystore.googleapis.com/CertificateAuthority'.", + "description_kind": "plain", + "required": true + }, + "mode": { + "type": "string", + "description": "Optional. Standalone or cluster. \n Possible values:\n CLUSTER\nSTANDALONE Possible values: [\"CLUSTER\", \"STANDALONE\"]", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "name": { + "type": "string", + "description": "Identifier. Unique name of the instance.\nFormat: projects/{project}/locations/{location}/instances/{instance}", + "description_kind": "plain", + "computed": true + }, + "node_config": { + "type": [ + "list", + [ + "object", + { + "size_gb": "number" + } + ] + ], + "description": "Represents configuration for nodes of the instance.", + "description_kind": "plain", + "computed": true + }, + "node_type": { + "type": "string", + "description": "Optional. Immutable. Machine type for individual nodes of the instance. 
\n Possible values:\n SHARED_CORE_NANO\nHIGHMEM_MEDIUM\nHIGHMEM_XLARGE\nSTANDARD_SMALL", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "psc_auto_connections": { + "type": [ + "list", + [ + "object", + { + "connection_type": "string", + "forwarding_rule": "string", + "ip_address": "string", + "network": "string", + "port": "number", + "project_id": "string", + "psc_connection_id": "string", + "psc_connection_status": "string", + "service_attachment": "string" + } + ] + ], + "description": "Output only. User inputs and resource details of the auto-created PSC connections.", + "description_kind": "plain", + "computed": true + }, + "replica_count": { + "type": "number", + "description": "Optional. Number of replica nodes per shard. If omitted the default is 0 replicas.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "shard_count": { + "type": "number", + "description": "Required. Number of shards for the instance.", + "description_kind": "plain", + "required": true + }, + "state": { + "type": "string", + "description": "Output only. Current state of the instance. 
\n Possible values:\n CREATING\nACTIVE\nUPDATING\nDELETING", + "description_kind": "plain", + "computed": true + }, + "state_info": { + "type": [ + "list", + [ + "object", + { + "update_info": [ + "list", + [ + "object", + { + "target_replica_count": "number", + "target_shard_count": "number" + } + ] + ] + } + ] + ], + "description": "Additional information about the state of the instance.", + "description_kind": "plain", + "computed": true + }, + "terraform_labels": { + "type": [ + "map", + "string" + ], + "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", + "description_kind": "plain", + "computed": true + }, + "transit_encryption_mode": { + "type": "string", + "description": "Optional. Immutable. In-transit encryption mode of the instance. \n Possible values:\n TRANSIT_ENCRYPTION_DISABLED\nSERVER_AUTHENTICATION", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "uid": { + "type": "string", + "description": "Output only. System assigned, unique identifier for the instance.", + "description_kind": "plain", + "computed": true + }, + "update_time": { + "type": "string", + "description": "Output only. Latest update timestamp of the instance.", + "description_kind": "plain", + "computed": true + } + }, + "block_types": { + "desired_psc_auto_connections": { + "nesting_mode": "list", + "block": { + "attributes": { + "network": { + "type": "string", + "description": "Required. The consumer network where the IP address resides, in the form of\nprojects/{project_id}/global/networks/{network_id}.", + "description_kind": "plain", + "required": true + }, + "project_id": { + "type": "string", + "description": "Required. The consumer project_id where the forwarding rule is created from.", + "description_kind": "plain", + "required": true + } + }, + "description": "Required. Immutable. 
User inputs for the auto-created PSC connections.", + "description_kind": "plain" + }, + "min_items": 1 + }, + "persistence_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "mode": { + "type": "string", + "description": "Optional. Current persistence mode. \n Possible values:\nDISABLED\nRDB\nAOF Possible values: [\"DISABLED\", \"RDB\", \"AOF\"]", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "block_types": { + "aof_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "append_fsync": { + "type": "string", + "description": "Optional. The fsync mode. \n Possible values:\n NEVER\nEVERY_SEC\nALWAYS", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "description": "Configuration for AOF based persistence.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "rdb_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "rdb_snapshot_period": { + "type": "string", + "description": "Optional. Period between RDB snapshots. \n Possible values:\n ONE_HOUR\nSIX_HOURS\nTWELVE_HOURS\nTWENTY_FOUR_HOURS", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "rdb_snapshot_start_time": { + "type": "string", + "description": "Optional. Time that the first snapshot was/will be attempted, and to which future\nsnapshots will be aligned. 
If not provided, the current time will be\nused.", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "description": "Configuration for RDB based persistence.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "Represents persistence configuration for a instance.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + }, + "zone_distribution_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "mode": { + "type": "string", + "description": "Optional. Current zone distribution mode. Defaults to MULTI_ZONE. \n Possible values:\n MULTI_ZONE\nSINGLE_ZONE Possible values: [\"MULTI_ZONE\", \"SINGLE_ZONE\"]", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "zone": { + "type": "string", + "description": "Optional. Defines zone where all resources will be allocated with SINGLE_ZONE mode.\nIgnored for MULTI_ZONE mode.", + "description_kind": "plain", + "optional": true + } + }, + "description": "Zone distribution configuration for allocation of instance resources.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description_kind": "plain" + } + }, "google_migration_center_group": { "version": 0, "block": { @@ -118586,6 +126875,15 @@ "description": "If an alert policy that was active has no data for this long, any open incidents will close.", "description_kind": "plain", "optional": true + }, + "notification_prompts": { + "type": [ + "list", + "string" + ], + "description": "Control when notifications will be sent out. 
Possible values: [\"NOTIFICATION_PROMPT_UNSPECIFIED\", \"OPENED\", \"CLOSED\"]", + "description_kind": "plain", + "optional": true } }, "block_types": { @@ -118822,6 +127120,12 @@ "description_kind": "plain", "optional": true }, + "disable_metric_validation": { + "type": "bool", + "description": "Whether to disable metric existence validation for this condition.\n\nThis allows alerting policies to be defined on metrics that do not yet\nexist, improving advanced customer workflows such as configuring\nalerting policies using Terraform.\n\nUsers with the 'monitoring.alertPolicyViewer' role are able to see the\nname of the non-existent metric in the alerting policy condition.", + "description_kind": "plain", + "optional": true + }, "duration": { "type": "string", "description": "Alerts are considered firing once their PromQL expression evaluated\nto be \"true\" for this long. Alerts whose PromQL expression was not\nevaluated to be \"true\" for long enough are considered pending. The\ndefault value is zero. Must be zero or positive.", @@ -120771,7 +129075,7 @@ }, "kdc_hostname": { "type": "string", - "description": "Hostname of the Active Directory server used as Kerberos Key Distribution Center. Only requried for volumes using kerberized NFSv4.1", + "description": "Hostname of the Active Directory server used as Kerberos Key Distribution Center. Only required for volumes using kerberized NFSv4.1", "description_kind": "plain", "optional": true }, @@ -121401,6 +129705,12 @@ "description_kind": "plain", "optional": true }, + "allow_auto_tiering": { + "type": "bool", + "description": "Optional. True if the storage pool supports Auto Tiering enabled volumes. 
Default is false.\nAuto-tiering can be enabled after storage pool creation but it can't be disabled once enabled.", + "description_kind": "plain", + "optional": true + }, "capacity_gib": { "type": "string", "description": "Capacity of the storage pool (in GiB).", @@ -121479,6 +129789,12 @@ "optional": true, "computed": true }, + "replica_zone": { + "type": "string", + "description": "Specifies the replica zone for regional Flex pools. 'zone' and 'replica_zone' values can be swapped to initiate a\n[zone switch](https://cloud.google.com/netapp/volumes/docs/configure-and-use/storage-pools/edit-or-delete-storage-pool#switch_active_and_replica_zones).", + "description_kind": "plain", + "optional": true + }, "service_level": { "type": "string", "description": "Service level of the storage pool. Possible values: [\"PREMIUM\", \"EXTREME\", \"STANDARD\", \"FLEX\"]", @@ -121505,6 +129821,12 @@ "description": "Number of volume in the storage pool.", "description_kind": "plain", "computed": true + }, + "zone": { + "type": "string", + "description": "Specifies the active zone for regional Flex pools. 'zone' and 'replica_zone' values can be swapped to initiate a\n[zone switch](https://cloud.google.com/netapp/volumes/docs/configure-and-use/storage-pools/edit-or-delete-storage-pool#switch_active_and_replica_zones).\nIf you want to create a zonal Flex pool, specify a zone name for 'location' and omit 'zone'.", + "description_kind": "plain", + "optional": true } }, "block_types": { @@ -121551,6 +129873,12 @@ "description_kind": "plain", "required": true }, + "cold_tier_size_gib": { + "type": "string", + "description": "Output only. Size of the volume cold tier data in GiB.", + "description_kind": "plain", + "computed": true + }, "create_time": { "type": "string", "description": "Create time of the volume. A timestamp in RFC3339 UTC \"Zulu\" format. 
Examples: \"2023-06-22T09:13:01.617Z\".", @@ -121559,7 +129887,7 @@ }, "deletion_policy": { "type": "string", - "description": "Policy to determine if the volume should be deleted forcefully.\nVolumes may have nested snapshot resources. Deleting such a volume will fail.\nSetting this parameter to FORCE will delete volumes including nested snapshots.", + "description": "Policy to determine if the volume should be deleted forcefully.\nVolumes may have nested snapshot resources. Deleting such a volume will fail.\nSetting this parameter to FORCE will delete volumes including nested snapshots.\nPossible values: DEFAULT, FORCE.", "description_kind": "plain", "optional": true }, @@ -121617,6 +129945,12 @@ "description_kind": "plain", "optional": true }, + "large_capacity": { + "type": "bool", + "description": "Optional. Flag indicating if the volume will be a large capacity volume or a regular volume.", + "description_kind": "plain", + "optional": true + }, "ldap_enabled": { "type": "bool", "description": "Flag indicating if the volume is NFS LDAP enabled or not. Inherited from storage pool.", @@ -121646,6 +129980,12 @@ "description_kind": "plain", "computed": true }, + "multiple_endpoints": { + "type": "bool", + "description": "Optional. Flag indicating if the volume will have an IP address per node for volumes supporting multiple IP endpoints.\nOnly the volume with largeCapacity will be allowed to have multiple endpoints.", + "description_kind": "plain", + "optional": true + }, "name": { "type": "string", "description": "The name of the volume. Needs to be unique per location.", @@ -121679,6 +130019,12 @@ "description_kind": "plain", "computed": true }, + "replica_zone": { + "type": "string", + "description": "Specifies the replica zone for regional volume.", + "description_kind": "plain", + "computed": true + }, "restricted_actions": { "type": [ "list", @@ -121697,7 +130043,7 @@ }, "service_level": { "type": "string", - "description": "Service level of the volume. 
Inherited from storage pool. Supported values are : PREMIUM, EXTERME, STANDARD, FLEX.", + "description": "Service level of the volume. Inherited from storage pool. Supported values are : PREMIUM, EXTREME, STANDARD, FLEX.", "description_kind": "plain", "computed": true }, @@ -121737,7 +130083,7 @@ }, "storage_pool": { "type": "string", - "description": "Name of the storage pool to create the volume in. Pool needs enough spare capacity to accomodate the volume.", + "description": "Name of the storage pool to create the volume in. Pool needs enough spare capacity to accommodate the volume.", "description_kind": "plain", "required": true }, @@ -121762,6 +130108,12 @@ "description": "Used capacity of the volume (in GiB). This is computed periodically and it does not represent the realtime usage.", "description_kind": "plain", "computed": true + }, + "zone": { + "type": "string", + "description": "Specifies the active zone for regional volume.", + "description_kind": "plain", + "computed": true } }, "block_types": { @@ -121812,7 +130164,7 @@ }, "allowed_clients": { "type": "string", - "description": "Defines the client ingress specification (allowed clients) as a comma seperated list with IPv4 CIDRs or IPv4 host addresses.", + "description": "Defines the client ingress specification (allowed clients) as a comma separated list with IPv4 CIDRs or IPv4 host addresses.", "description_kind": "plain", "optional": true }, @@ -122040,6 +130392,28 @@ }, "max_items": 1 }, + "tiering_policy": { + "nesting_mode": "list", + "block": { + "attributes": { + "cooling_threshold_days": { + "type": "number", + "description": "Optional. Time in days to mark the volume's data block as cold and make it eligible for tiering, can be range from 7-183.\nDefault is 31.", + "description_kind": "plain", + "optional": true + }, + "tier_action": { + "type": "string", + "description": "Optional. Flag indicating if the volume has tiering policy enable/pause. Default is PAUSED. 
Default value: \"PAUSED\" Possible values: [\"ENABLED\", \"PAUSED\"]", + "description_kind": "plain", + "optional": true + } + }, + "description": "Tiering policy for the volume.", + "description_kind": "plain" + }, + "max_items": 1 + }, "timeouts": { "nesting_mode": "single", "block": { @@ -122400,6 +130774,145 @@ "description_kind": "plain" } }, + "google_network_connectivity_group": { + "version": 0, + "block": { + "attributes": { + "create_time": { + "type": "string", + "description": "Output only. The time the hub was created.", + "description_kind": "plain", + "computed": true + }, + "description": { + "type": "string", + "description": "An optional description of the group.", + "description_kind": "plain", + "optional": true + }, + "effective_labels": { + "type": [ + "map", + "string" + ], + "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, + "hub": { + "type": "string", + "description": "The name of the hub. Hub names must be unique. They use the following form: projects/{projectNumber}/locations/global/hubs/{hubId}", + "description_kind": "plain", + "required": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "labels": { + "type": [ + "map", + "string" + ], + "description": "Optional labels in key:value format. For more information about labels, see [Requirements for labels](https://cloud.google.com/resource-manager/docs/creating-managing-labels#requirements).\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", + "description_kind": "plain", + "optional": true + }, + "name": { + "type": "string", + "description": "The name of the group. Group names must be unique. 
Possible values: [\"default\", \"center\", \"edge\"]", + "description_kind": "plain", + "required": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "route_table": { + "type": "string", + "description": "Output only. The name of the route table that corresponds to this group. They use the following form: 'projects/{projectNumber}/locations/global/hubs/{hubId}/routeTables/{route_table_id}'", + "description_kind": "plain", + "computed": true + }, + "state": { + "type": "string", + "description": "Output only. The current lifecycle state of this hub.", + "description_kind": "plain", + "computed": true + }, + "terraform_labels": { + "type": [ + "map", + "string" + ], + "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", + "description_kind": "plain", + "computed": true + }, + "uid": { + "type": "string", + "description": "Output only. The Google-generated UUID for the group. This value is unique across all group resources. If a group is deleted and another with the same name is created, the new route table is assigned a different uniqueId.", + "description_kind": "plain", + "computed": true + }, + "update_time": { + "type": "string", + "description": "Output only. The time the hub was last updated.", + "description_kind": "plain", + "computed": true + } + }, + "block_types": { + "auto_accept": { + "nesting_mode": "list", + "block": { + "attributes": { + "auto_accept_projects": { + "type": [ + "list", + "string" + ], + "description": "A list of project ids or project numbers for which you want to enable auto-accept. The auto-accept setting is applied to spokes being created or updated in these projects.", + "description_kind": "plain", + "required": true + } + }, + "description": "Optional. 
The auto-accept setting for this group.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, "google_network_connectivity_hub": { "version": 0, "block": { @@ -122454,6 +130967,13 @@ "optional": true, "computed": true }, + "preset_topology": { + "type": "string", + "description": "Optional. The topology implemented in this hub. Currently, this field is only used when policyMode = PRESET. The available preset topologies are MESH and STAR. If presetTopology is unspecified and policyMode = PRESET, the presetTopology defaults to MESH. When policyMode = CUSTOM, the presetTopology is set to PRESET_TOPOLOGY_UNSPECIFIED. Possible values: [\"MESH\", \"STAR\"]", + "description_kind": "plain", + "optional": true, + "computed": true + }, "project": { "type": "string", "description_kind": "plain", @@ -122630,7 +131150,7 @@ }, "usage": { "type": "string", - "description": "The type of usage set for this InternalRange. Possible values: [\"FOR_VPC\", \"EXTERNAL_TO_VPC\"]", + "description": "The type of usage set for this InternalRange. 
Possible values: [\"FOR_VPC\", \"EXTERNAL_TO_VPC\", \"FOR_MIGRATION\"]", "description_kind": "plain", "required": true }, @@ -122645,6 +131165,28 @@ } }, "block_types": { + "migration": { + "nesting_mode": "list", + "block": { + "attributes": { + "source": { + "type": "string", + "description": "Resource path as an URI of the source resource, for example a subnet.\nThe project for the source resource should match the project for the\nInternalRange.\nAn example /projects/{project}/regions/{region}/subnetworks/{subnet}", + "description_kind": "plain", + "required": true + }, + "target": { + "type": "string", + "description": "Resource path of the target resource. The target project can be\ndifferent, as in the cases when migrating to peer networks. The resource\nmay not exist yet.\nFor example /projects/{project}/regions/{region}/subnetworks/{subnet}", + "description_kind": "plain", + "required": true + } + }, + "description": "Specification for migration with source and target resource names.", + "description_kind": "plain" + }, + "max_items": 1 + }, "timeouts": { "nesting_mode": "single", "block": { @@ -123255,6 +131797,13 @@ "description_kind": "plain", "computed": true }, + "group": { + "type": "string", + "description": "The name of the group that this spoke is associated with.", + "description_kind": "plain", + "optional": true, + "computed": true + }, "hub": { "type": "string", "description": "Immutable. 
The URI of the hub that this spoke is attached to.", @@ -123327,6 +131876,15 @@ "nesting_mode": "list", "block": { "attributes": { + "include_import_ranges": { + "type": [ + "list", + "string" + ], + "description": "IP ranges allowed to be included during import from hub (does not control transit connectivity).\nThe only allowed value for now is \"ALL_IPV4_RANGES\".", + "description_kind": "plain", + "optional": true + }, "site_to_site_data_transfer": { "type": "bool", "description": "A value that controls whether site-to-site data transfer is enabled for these resources. Note that data transfer is available only in supported locations.", @@ -123348,10 +131906,65 @@ }, "max_items": 1 }, + "linked_producer_vpc_network": { + "nesting_mode": "list", + "block": { + "attributes": { + "exclude_export_ranges": { + "type": [ + "list", + "string" + ], + "description": "IP ranges encompassing the subnets to be excluded from peering.", + "description_kind": "plain", + "optional": true + }, + "include_export_ranges": { + "type": [ + "list", + "string" + ], + "description": "IP ranges allowed to be included from peering.", + "description_kind": "plain", + "optional": true + }, + "network": { + "type": "string", + "description": "The URI of the Service Consumer VPC that the Producer VPC is peered with.", + "description_kind": "plain", + "required": true + }, + "peering": { + "type": "string", + "description": "The name of the VPC peering between the Service Consumer VPC and the Producer VPC (defined in the Tenant project) which is added to the NCC hub. 
This peering must be in ACTIVE state.", + "description_kind": "plain", + "required": true + }, + "producer_network": { + "type": "string", + "description": "The URI of the Producer VPC.", + "description_kind": "plain", + "computed": true + } + }, + "description": "Producer VPC network that is associated with the spoke.", + "description_kind": "plain" + }, + "max_items": 1 + }, "linked_router_appliance_instances": { "nesting_mode": "list", "block": { "attributes": { + "include_import_ranges": { + "type": [ + "list", + "string" + ], + "description": "IP ranges allowed to be included during import from hub (does not control transit connectivity).\nThe only allowed value for now is \"ALL_IPV4_RANGES\".", + "description_kind": "plain", + "optional": true + }, "site_to_site_data_transfer": { "type": "bool", "description": "A value that controls whether site-to-site data transfer is enabled for these resources. Note that data transfer is available only in supported locations.", @@ -123368,13 +131981,13 @@ "type": "string", "description": "The IP address on the VM to use for peering.", "description_kind": "plain", - "optional": true + "required": true }, "virtual_machine": { "type": "string", "description": "The URI of the virtual machine resource", "description_kind": "plain", - "optional": true + "required": true } }, "description": "The list of router appliance instances", @@ -123401,6 +132014,15 @@ "description_kind": "plain", "optional": true }, + "include_export_ranges": { + "type": [ + "list", + "string" + ], + "description": "IP ranges allowed to be included from peering.", + "description_kind": "plain", + "optional": true + }, "uri": { "type": "string", "description": "The URI of the VPC network resource.", @@ -123417,6 +132039,15 @@ "nesting_mode": "list", "block": { "attributes": { + "include_import_ranges": { + "type": [ + "list", + "string" + ], + "description": "IP ranges allowed to be included during import from hub (does not control transit 
connectivity).\nThe only allowed value for now is \"ALL_IPV4_RANGES\".", + "description_kind": "plain", + "optional": true + }, "site_to_site_data_transfer": { "type": "bool", "description": "A value that controls whether site-to-site data transfer is enabled for these resources. Note that data transfer is available only in supported locations.", @@ -123652,6 +132283,169 @@ "description_kind": "plain" } }, + "google_network_management_vpc_flow_logs_config": { + "version": 0, + "block": { + "attributes": { + "aggregation_interval": { + "type": "string", + "description": "Optional. The aggregation interval for the logs. Default value is\nINTERVAL_5_SEC. Possible values: AGGREGATION_INTERVAL_UNSPECIFIED INTERVAL_5_SEC INTERVAL_30_SEC INTERVAL_1_MIN INTERVAL_5_MIN INTERVAL_10_MIN INTERVAL_15_MIN\"", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "create_time": { + "type": "string", + "description": "Output only. The time the config was created.", + "description_kind": "plain", + "computed": true + }, + "description": { + "type": "string", + "description": "Optional. The user-supplied description of the VPC Flow Logs configuration. Maximum\nof 512 characters.", + "description_kind": "plain", + "optional": true + }, + "effective_labels": { + "type": [ + "map", + "string" + ], + "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, + "filter_expr": { + "type": "string", + "description": "Optional. Export filter used to define which VPC Flow Logs should be logged.", + "description_kind": "plain", + "optional": true + }, + "flow_sampling": { + "type": "number", + "description": "Optional. The value of the field must be in (0, 1]. The sampling rate\nof VPC Flow Logs where 1.0 means all collected logs are reported. Setting the\nsampling rate to 0.0 is not allowed. 
If you want to disable VPC Flow Logs, use\nthe state field instead. Default value is 1.0.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "interconnect_attachment": { + "type": "string", + "description": "Traffic will be logged from the Interconnect Attachment. Format: projects/{project_id}/regions/{region}/interconnectAttachments/{name}", + "description_kind": "plain", + "optional": true + }, + "labels": { + "type": [ + "map", + "string" + ], + "description": "Optional. Resource labels to represent user-provided metadata.\n\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", + "description_kind": "plain", + "optional": true + }, + "location": { + "type": "string", + "description": "Resource ID segment making up resource 'name'. It identifies the resource\nwithin its parent collection as described in https://google.aip.dev/122. See documentation\nfor resource type 'networkmanagement.googleapis.com/VpcFlowLogsConfig'.", + "description_kind": "plain", + "required": true + }, + "metadata": { + "type": "string", + "description": "Optional. Configures whether all, none or a subset of metadata fields\nshould be added to the reported VPC flow logs. Default value is INCLUDE_ALL_METADATA.\n Possible values: METADATA_UNSPECIFIED INCLUDE_ALL_METADATA EXCLUDE_ALL_METADATA CUSTOM_METADATA", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "metadata_fields": { + "type": [ + "list", + "string" + ], + "description": "Optional. Custom metadata fields to include in the reported VPC flow\nlogs. 
Can only be specified if \\\"metadata\\\" was set to CUSTOM_METADATA.", + "description_kind": "plain", + "optional": true + }, + "name": { + "type": "string", + "description": "Identifier. Unique name of the configuration using the form: 'projects/{project_id}/locations/global/vpcFlowLogsConfigs/{vpc_flow_logs_config_id}'", + "description_kind": "plain", + "computed": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "state": { + "type": "string", + "description": "Optional. The state of the VPC Flow Log configuration. Default value\nis ENABLED. When creating a new configuration, it must be enabled. Possible", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "terraform_labels": { + "type": [ + "map", + "string" + ], + "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", + "description_kind": "plain", + "computed": true + }, + "update_time": { + "type": "string", + "description": "Output only. The time the config was updated.", + "description_kind": "plain", + "computed": true + }, + "vpc_flow_logs_config_id": { + "type": "string", + "description": "Required. ID of the 'VpcFlowLogsConfig'.", + "description_kind": "plain", + "required": true + }, + "vpn_tunnel": { + "type": "string", + "description": "Traffic will be logged from the VPN Tunnel. 
Format: projects/{project_id}/regions/{region}/vpnTunnels/{name}", + "description_kind": "plain", + "optional": true + } + }, + "block_types": { + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, "google_network_security_address_group": { "version": 0, "block": { @@ -123963,6 +132757,810 @@ "description_kind": "plain" } }, + "google_network_security_authz_policy": { + "version": 0, + "block": { + "attributes": { + "action": { + "type": "string", + "description": "When the action is CUSTOM, customProvider must be specified.\nWhen the action is ALLOW, only requests matching the policy will be allowed.\nWhen the action is DENY, only requests matching the policy will be denied.\n\nWhen a request arrives, the policies are evaluated in the following order:\n1. If there is a CUSTOM policy that matches the request, the CUSTOM policy is evaluated using the custom authorization providers and the request is denied if the provider rejects the request.\n2. If there are any DENY policies that match the request, the request is denied.\n3. If there are no ALLOW policies for the resource or if any of the ALLOW policies match the request, the request is allowed.\n4. Else the request is denied by default if none of the configured AuthzPolicies with ALLOW action match the request. 
Possible values: [\"ALLOW\", \"DENY\", \"CUSTOM\"]", + "description_kind": "plain", + "required": true + }, + "create_time": { + "type": "string", + "description": "The timestamp when the resource was created.", + "description_kind": "plain", + "computed": true + }, + "description": { + "type": "string", + "description": "A human-readable description of the resource.", + "description_kind": "plain", + "optional": true + }, + "effective_labels": { + "type": [ + "map", + "string" + ], + "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "labels": { + "type": [ + "map", + "string" + ], + "description": "Set of labels associated with the AuthzExtension resource.\n\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", + "description_kind": "plain", + "optional": true + }, + "location": { + "type": "string", + "description": "The location of the resource.", + "description_kind": "plain", + "required": true + }, + "name": { + "type": "string", + "description": "Identifier. 
Name of the AuthzPolicy resource.", + "description_kind": "plain", + "required": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "terraform_labels": { + "type": [ + "map", + "string" + ], + "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", + "description_kind": "plain", + "computed": true + }, + "update_time": { + "type": "string", + "description": "The timestamp when the resource was updated.", + "description_kind": "plain", + "computed": true + } + }, + "block_types": { + "custom_provider": { + "nesting_mode": "list", + "block": { + "block_types": { + "authz_extension": { + "nesting_mode": "list", + "block": { + "attributes": { + "resources": { + "type": [ + "list", + "string" + ], + "description": "A list of references to authorization extensions that will be invoked for requests matching this policy. Limited to 1 custom provider.", + "description_kind": "plain", + "required": true + } + }, + "description": "Delegate authorization decision to user authored Service Extension. Only one of cloudIap or authzExtension can be specified.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "cloud_iap": { + "nesting_mode": "list", + "block": { + "attributes": { + "enabled": { + "type": "bool", + "description": "Enable Cloud IAP at the AuthzPolicy level.", + "description_kind": "plain", + "required": true + } + }, + "description": "Delegates authorization decisions to Cloud IAP. Applicable only for managed load balancers. Enabling Cloud IAP at the AuthzPolicy level is not compatible with Cloud IAP settings in the BackendService. Enabling IAP in both places will result in request failure. Ensure that IAP is enabled in either the AuthzPolicy or the BackendService but not in both places.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "Required if the action is CUSTOM. 
Allows delegating authorization decisions to Cloud IAP or to Service Extensions. One of cloudIap or authzExtension must be specified.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "http_rules": { + "nesting_mode": "list", + "block": { + "attributes": { + "when": { + "type": "string", + "description": "CEL expression that describes the conditions to be satisfied for the action. The result of the CEL expression is ANDed with the from and to. Refer to the CEL language reference for a list of available attributes.", + "description_kind": "plain", + "optional": true + } + }, + "block_types": { + "from": { + "nesting_mode": "list", + "block": { + "block_types": { + "not_sources": { + "nesting_mode": "list", + "block": { + "block_types": { + "principals": { + "nesting_mode": "list", + "block": { + "attributes": { + "contains": { + "type": "string", + "description": "The input string must have the substring specified here. Note: empty contains match is not allowed, please use regex instead.\nExamples:\n* abc matches the value xyz.abc.def", + "description_kind": "plain", + "optional": true + }, + "exact": { + "type": "string", + "description": "The input string must match exactly the string specified here.\nExamples:\n* abc only matches the value abc.", + "description_kind": "plain", + "optional": true + }, + "ignore_case": { + "type": "bool", + "description": "If true, indicates the exact/prefix/suffix/contains matching should be case insensitive. For example, the matcher data will match both input string Data and data if set to true.", + "description_kind": "plain", + "optional": true + }, + "prefix": { + "type": "string", + "description": "The input string must have the prefix specified here. Note: empty prefix is not allowed, please use regex instead.\nExamples:\n* abc matches the value abc.xyz", + "description_kind": "plain", + "optional": true + }, + "suffix": { + "type": "string", + "description": "The input string must have the suffix specified here. 
Note: empty prefix is not allowed, please use regex instead.\nExamples:\n* abc matches the value xyz.abc", + "description_kind": "plain", + "optional": true + } + }, + "description": "A list of identities derived from the client's certificate. This field will not match on a request unless mutual TLS is enabled for the Forwarding rule or Gateway. Each identity is a string whose value is matched against the URI SAN, or DNS SAN or the subject field in the client's certificate. The match can be exact, prefix, suffix or a substring match. One of exact, prefix, suffix or contains must be specified.\nLimited to 5 principals.", + "description_kind": "plain" + } + }, + "resources": { + "nesting_mode": "list", + "block": { + "block_types": { + "iam_service_account": { + "nesting_mode": "list", + "block": { + "attributes": { + "contains": { + "type": "string", + "description": "The input string must have the substring specified here. Note: empty contains match is not allowed, please use regex instead.\nExamples:\n* abc matches the value xyz.abc.def", + "description_kind": "plain", + "optional": true + }, + "exact": { + "type": "string", + "description": "The input string must match exactly the string specified here.\nExamples:\n* abc only matches the value abc.", + "description_kind": "plain", + "optional": true + }, + "ignore_case": { + "type": "bool", + "description": "If true, indicates the exact/prefix/suffix/contains matching should be case insensitive. For example, the matcher data will match both input string Data and data if set to true.", + "description_kind": "plain", + "optional": true + }, + "prefix": { + "type": "string", + "description": "The input string must have the prefix specified here. Note: empty prefix is not allowed, please use regex instead.\nExamples:\n* abc matches the value abc.xyz", + "description_kind": "plain", + "optional": true + }, + "suffix": { + "type": "string", + "description": "The input string must have the suffix specified here. 
Note: empty prefix is not allowed, please use regex instead.\nExamples:\n* abc matches the value xyz.abc", + "description_kind": "plain", + "optional": true + } + }, + "description": "An IAM service account to match against the source service account of the VM sending the request.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "tag_value_id_set": { + "nesting_mode": "list", + "block": { + "attributes": { + "ids": { + "type": [ + "list", + "string" + ], + "description": "A list of resource tag value permanent IDs to match against the resource manager tags value associated with the source VM of a request. The match follows AND semantics which means all the ids must match.\nLimited to 5 matches.", + "description_kind": "plain", + "optional": true + } + }, + "description": "A list of resource tag value permanent IDs to match against the resource manager tags value associated with the source VM of a request.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "A list of resources to match against the resource of the source VM of a request.\nLimited to 5 resources.", + "description_kind": "plain" + } + } + }, + "description": "Describes the properties of a request's sources. At least one of sources or notSources must be specified. Limited to 5 sources. A match occurs when ANY source (in sources or notSources) matches the request. Within a single source, the match follows AND semantics across fields and OR semantics within a single field, i.e. a match occurs when ANY principal matches AND ANY ipBlocks match.", + "description_kind": "plain" + } + }, + "sources": { + "nesting_mode": "list", + "block": { + "block_types": { + "principals": { + "nesting_mode": "list", + "block": { + "attributes": { + "contains": { + "type": "string", + "description": "The input string must have the substring specified here. 
Note: empty contains match is not allowed, please use regex instead.\nExamples:\n* abc matches the value xyz.abc.def", + "description_kind": "plain", + "optional": true + }, + "exact": { + "type": "string", + "description": "The input string must match exactly the string specified here.\nExamples:\n* abc only matches the value abc.", + "description_kind": "plain", + "optional": true + }, + "ignore_case": { + "type": "bool", + "description": "If true, indicates the exact/prefix/suffix/contains matching should be case insensitive. For example, the matcher data will match both input string Data and data if set to true.", + "description_kind": "plain", + "optional": true + }, + "prefix": { + "type": "string", + "description": "The input string must have the prefix specified here. Note: empty prefix is not allowed, please use regex instead.\nExamples:\n* abc matches the value abc.xyz", + "description_kind": "plain", + "optional": true + }, + "suffix": { + "type": "string", + "description": "The input string must have the suffix specified here. Note: empty prefix is not allowed, please use regex instead.\nExamples:\n* abc matches the value xyz.abc", + "description_kind": "plain", + "optional": true + } + }, + "description": "A list of identities derived from the client's certificate. This field will not match on a request unless mutual TLS is enabled for the Forwarding rule or Gateway. Each identity is a string whose value is matched against the URI SAN, or DNS SAN or the subject field in the client's certificate. The match can be exact, prefix, suffix or a substring match. One of exact, prefix, suffix or contains must be specified.\nLimited to 5 principals.", + "description_kind": "plain" + } + }, + "resources": { + "nesting_mode": "list", + "block": { + "block_types": { + "iam_service_account": { + "nesting_mode": "list", + "block": { + "attributes": { + "contains": { + "type": "string", + "description": "The input string must have the substring specified here. 
Note: empty contains match is not allowed, please use regex instead.\nExamples:\n* abc matches the value xyz.abc.def", + "description_kind": "plain", + "optional": true + }, + "exact": { + "type": "string", + "description": "The input string must match exactly the string specified here.\nExamples:\n* abc only matches the value abc.", + "description_kind": "plain", + "optional": true + }, + "ignore_case": { + "type": "bool", + "description": "If true, indicates the exact/prefix/suffix/contains matching should be case insensitive. For example, the matcher data will match both input string Data and data if set to true.", + "description_kind": "plain", + "optional": true + }, + "prefix": { + "type": "string", + "description": "The input string must have the prefix specified here. Note: empty prefix is not allowed, please use regex instead.\nExamples:\n* abc matches the value abc.xyz", + "description_kind": "plain", + "optional": true + }, + "suffix": { + "type": "string", + "description": "The input string must have the suffix specified here. Note: empty prefix is not allowed, please use regex instead.\nExamples:\n* abc matches the value xyz.abc", + "description_kind": "plain", + "optional": true + } + }, + "description": "An IAM service account to match against the source service account of the VM sending the request.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "tag_value_id_set": { + "nesting_mode": "list", + "block": { + "attributes": { + "ids": { + "type": [ + "list", + "string" + ], + "description": "A list of resource tag value permanent IDs to match against the resource manager tags value associated with the source VM of a request. 
The match follows AND semantics which means all the ids must match.\nLimited to 5 matches.", + "description_kind": "plain", + "optional": true + } + }, + "description": "A list of resource tag value permanent IDs to match against the resource manager tags value associated with the source VM of a request.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "A list of resources to match against the resource of the source VM of a request.\nLimited to 5 resources.", + "description_kind": "plain" + } + } + }, + "description": "Describes the properties of a request's sources. At least one of sources or notSources must be specified. Limited to 5 sources. A match occurs when ANY source (in sources or notSources) matches the request. Within a single source, the match follows AND semantics across fields and OR semantics within a single field, i.e. a match occurs when ANY principal matches AND ANY ipBlocks match.", + "description_kind": "plain" + } + } + }, + "description": "Describes properties of one or more sources of a request.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "to": { + "nesting_mode": "list", + "block": { + "block_types": { + "operations": { + "nesting_mode": "list", + "block": { + "attributes": { + "methods": { + "type": [ + "list", + "string" + ], + "description": "A list of HTTP methods to match against. Each entry must be a valid HTTP method name (GET, PUT, POST, HEAD, PATCH, DELETE, OPTIONS). 
It only allows exact match and is always case sensitive.", + "description_kind": "plain", + "optional": true + } + }, + "block_types": { + "header_set": { + "nesting_mode": "list", + "block": { + "block_types": { + "headers": { + "nesting_mode": "list", + "block": { + "attributes": { + "name": { + "type": "string", + "description": "Specifies the name of the header in the request.", + "description_kind": "plain", + "optional": true + } + }, + "block_types": { + "value": { + "nesting_mode": "list", + "block": { + "attributes": { + "contains": { + "type": "string", + "description": "The input string must have the substring specified here. Note: empty contains match is not allowed, please use regex instead.\nExamples:\n* abc matches the value xyz.abc.def", + "description_kind": "plain", + "optional": true + }, + "exact": { + "type": "string", + "description": "The input string must match exactly the string specified here.\nExamples:\n* abc only matches the value abc.", + "description_kind": "plain", + "optional": true + }, + "ignore_case": { + "type": "bool", + "description": "If true, indicates the exact/prefix/suffix/contains matching should be case insensitive. For example, the matcher data will match both input string Data and data if set to true.", + "description_kind": "plain", + "optional": true + }, + "prefix": { + "type": "string", + "description": "The input string must have the prefix specified here. Note: empty prefix is not allowed, please use regex instead.\nExamples:\n* abc matches the value abc.xyz", + "description_kind": "plain", + "optional": true + }, + "suffix": { + "type": "string", + "description": "The input string must have the suffix specified here. 
Note: empty prefix is not allowed, please use regex instead.\nExamples:\n* abc matches the value xyz.abc", + "description_kind": "plain", + "optional": true + } + }, + "description": "Specifies how the header match will be performed.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "A list of headers to match against in http header. The match can be one of exact, prefix, suffix, or contains (substring match). The match follows AND semantics which means all the headers must match. Matches are always case sensitive unless the ignoreCase is set. Limited to 5 matches.", + "description_kind": "plain" + } + } + }, + "description": "A list of headers to match against in http header.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "hosts": { + "nesting_mode": "list", + "block": { + "attributes": { + "contains": { + "type": "string", + "description": "The input string must have the substring specified here. Note: empty contains match is not allowed, please use regex instead.\nExamples:\n* abc matches the value xyz.abc.def", + "description_kind": "plain", + "optional": true + }, + "exact": { + "type": "string", + "description": "The input string must match exactly the string specified here.\nExamples:\n* abc only matches the value abc.", + "description_kind": "plain", + "optional": true + }, + "ignore_case": { + "type": "bool", + "description": "If true, indicates the exact/prefix/suffix/contains matching should be case insensitive. For example, the matcher data will match both input string Data and data if set to true.", + "description_kind": "plain", + "optional": true + }, + "prefix": { + "type": "string", + "description": "The input string must have the prefix specified here. 
Note: empty prefix is not allowed, please use regex instead.\nExamples:\n* abc matches the value abc.xyz", + "description_kind": "plain", + "optional": true + }, + "suffix": { + "type": "string", + "description": "The input string must have the suffix specified here. Note: empty prefix is not allowed, please use regex instead.\nExamples:\n* abc matches the value xyz.abc", + "description_kind": "plain", + "optional": true + } + }, + "description": "A list of HTTP Hosts to match against. The match can be one of exact, prefix, suffix, or contains (substring match). Matches are always case sensitive unless the ignoreCase is set.\nLimited to 5 matches.", + "description_kind": "plain" + } + }, + "paths": { + "nesting_mode": "list", + "block": { + "attributes": { + "contains": { + "type": "string", + "description": "The input string must have the substring specified here. Note: empty contains match is not allowed, please use regex instead.\nExamples:\n* abc matches the value xyz.abc.def", + "description_kind": "plain", + "optional": true + }, + "exact": { + "type": "string", + "description": "The input string must match exactly the string specified here.\nExamples:\n* abc only matches the value abc.", + "description_kind": "plain", + "optional": true + }, + "ignore_case": { + "type": "bool", + "description": "If true, indicates the exact/prefix/suffix/contains matching should be case insensitive. For example, the matcher data will match both input string Data and data if set to true.", + "description_kind": "plain", + "optional": true + }, + "prefix": { + "type": "string", + "description": "The input string must have the prefix specified here. Note: empty prefix is not allowed, please use regex instead.\nExamples:\n* abc matches the value abc.xyz", + "description_kind": "plain", + "optional": true + }, + "suffix": { + "type": "string", + "description": "The input string must have the suffix specified here. 
Note: empty prefix is not allowed, please use regex instead.\nExamples:\n* abc matches the value xyz.abc", + "description_kind": "plain", + "optional": true + } + }, + "description": "A list of paths to match against. The match can be one of exact, prefix, suffix, or contains (substring match). Matches are always case sensitive unless the ignoreCase is set.\nLimited to 5 matches.\nNote that this path match includes the query parameters. For gRPC services, this should be a fully-qualified name of the form /package.service/method.", + "description_kind": "plain" + } + } + }, + "description": "Describes properties of one or more targets of a request. At least one of operations or notOperations must be specified. Limited to 5 operations. A match occurs when ANY operation (in operations or notOperations) matches. Within an operation, the match follows AND semantics across fields and OR semantics within a field, i.e. a match occurs when ANY path matches AND ANY header matches and ANY method matches.", + "description_kind": "plain" + } + } + }, + "description": "Describes properties of one or more targets of a request", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "A list of authorization HTTP rules to match against the incoming request.A policy match occurs when at least one HTTP rule matches the request or when no HTTP rules are specified in the policy. At least one HTTP Rule is required for Allow or Deny Action.\nLimited to 5 rules.", + "description_kind": "plain" + } + }, + "target": { + "nesting_mode": "list", + "block": { + "attributes": { + "load_balancing_scheme": { + "type": "string", + "description": "All gateways and forwarding rules referenced by this policy and extensions must share the same load balancing scheme.\nFor more information, refer to [Backend services overview](https://cloud.google.com/load-balancing/docs/backend-service). 
Possible values: [\"INTERNAL_MANAGED\", \"EXTERNAL_MANAGED\", \"INTERNAL_SELF_MANAGED\"]", + "description_kind": "plain", + "required": true + }, + "resources": { + "type": [ + "list", + "string" + ], + "description": "A list of references to the Forwarding Rules on which this policy will be applied.", + "description_kind": "plain", + "optional": true + } + }, + "description": "Specifies the set of resources to which this policy should be applied to.", + "description_kind": "plain" + }, + "min_items": 1, + "max_items": 1 + }, + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, + "google_network_security_client_tls_policy": { + "version": 0, + "block": { + "attributes": { + "create_time": { + "type": "string", + "description": "Time the ClientTlsPolicy was created in UTC.", + "description_kind": "plain", + "computed": true + }, + "description": { + "type": "string", + "description": "A free-text description of the resource. 
Max length 1024 characters.", + "description_kind": "plain", + "optional": true + }, + "effective_labels": { + "type": [ + "map", + "string" + ], + "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "labels": { + "type": [ + "map", + "string" + ], + "description": "Set of label tags associated with the ClientTlsPolicy resource.\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", + "description_kind": "plain", + "optional": true + }, + "location": { + "type": "string", + "description": "The location of the client tls policy.\nThe default value is 'global'.", + "description_kind": "plain", + "optional": true + }, + "name": { + "type": "string", + "description": "Name of the ClientTlsPolicy resource.", + "description_kind": "plain", + "required": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "sni": { + "type": "string", + "description": "Server Name Indication string to present to the server during TLS handshake. 
E.g: \"secure.example.com\".", + "description_kind": "plain", + "optional": true + }, + "terraform_labels": { + "type": [ + "map", + "string" + ], + "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", + "description_kind": "plain", + "computed": true + }, + "update_time": { + "type": "string", + "description": "Time the ClientTlsPolicy was updated in UTC.", + "description_kind": "plain", + "computed": true + } + }, + "block_types": { + "client_certificate": { + "nesting_mode": "list", + "block": { + "block_types": { + "certificate_provider_instance": { + "nesting_mode": "list", + "block": { + "attributes": { + "plugin_instance": { + "type": "string", + "description": "Plugin instance name, used to locate and load CertificateProvider instance configuration. Set to \"google_cloud_private_spiffe\" to use Certificate Authority Service certificate provider instance.", + "description_kind": "plain", + "required": true + } + }, + "description": "The certificate provider instance specification that will be passed to the data plane, which will be used to load necessary credential information.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "grpc_endpoint": { + "nesting_mode": "list", + "block": { + "attributes": { + "target_uri": { + "type": "string", + "description": "The target URI of the gRPC endpoint. Only UDS path is supported, and should start with \"unix:\".", + "description_kind": "plain", + "required": true + } + }, + "description": "gRPC specific configuration to access the gRPC server to obtain the cert and private key.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "Defines a mechanism to provision client identity (public and private keys) for peer to peer authentication. 
The presence of this dictates mTLS.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "server_validation_ca": { + "nesting_mode": "list", + "block": { + "block_types": { + "certificate_provider_instance": { + "nesting_mode": "list", + "block": { + "attributes": { + "plugin_instance": { + "type": "string", + "description": "Plugin instance name, used to locate and load CertificateProvider instance configuration. Set to \"google_cloud_private_spiffe\" to use Certificate Authority Service certificate provider instance.", + "description_kind": "plain", + "required": true + } + }, + "description": "The certificate provider instance specification that will be passed to the data plane, which will be used to load necessary credential information.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "grpc_endpoint": { + "nesting_mode": "list", + "block": { + "attributes": { + "target_uri": { + "type": "string", + "description": "The target URI of the gRPC endpoint. Only UDS path is supported, and should start with \"unix:\".", + "description_kind": "plain", + "required": true + } + }, + "description": "gRPC specific configuration to access the gRPC server to obtain the cert and private key.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "Defines the mechanism to obtain the Certificate Authority certificate to validate the server certificate. 
If empty, client does not validate the server certificate.", + "description_kind": "plain" + } + }, + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, "google_network_security_firewall_endpoint": { "version": 0, "block": { @@ -124276,6 +133874,12 @@ "description_kind": "plain", "computed": true }, + "tls_inspection_policy": { + "type": "string", + "description": "Name of a TlsInspectionPolicy resource that defines how TLS inspection is performed for any rule that enables it.\nNote: google_network_security_tls_inspection_policy resource is still in [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html) therefore it will need to import the provider.", + "description_kind": "plain", + "optional": true + }, "update_time": { "type": "string", "description": "The timestamp when the resource was updated.\nA timestamp in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits.\nExamples: \"2014-10-02T15:01:23Z\" and \"2014-10-02T15:01:23.045123456Z\".", @@ -124726,6 +134330,221 @@ "description_kind": "plain" } }, + "google_network_security_server_tls_policy": { + "version": 0, + "block": { + "attributes": { + "allow_open": { + "type": "bool", + "description": "This field applies only for Traffic Director policies. It is must be set to false for external HTTPS load balancer policies.\nDetermines if server allows plaintext connections. If set to true, server allows plain text connections. By default, it is set to false. This setting is not exclusive of other encryption modes. 
For example, if allowOpen and mtlsPolicy are set, server allows both plain text and mTLS connections. See documentation of other encryption modes to confirm compatibility.\nConsider using it if you wish to upgrade in place your deployment to TLS while having mixed TLS and non-TLS traffic reaching port :80.", + "description_kind": "plain", + "optional": true + }, + "create_time": { + "type": "string", + "description": "Time the ServerTlsPolicy was created in UTC.", + "description_kind": "plain", + "computed": true + }, + "description": { + "type": "string", + "description": "A free-text description of the resource. Max length 1024 characters.", + "description_kind": "plain", + "optional": true + }, + "effective_labels": { + "type": [ + "map", + "string" + ], + "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "labels": { + "type": [ + "map", + "string" + ], + "description": "Set of label tags associated with the ServerTlsPolicy resource.\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", + "description_kind": "plain", + "optional": true + }, + "location": { + "type": "string", + "description": "The location of the server tls policy.\nThe default value is 'global'.", + "description_kind": "plain", + "optional": true + }, + "name": { + "type": "string", + "description": "Name of the ServerTlsPolicy resource.", + "description_kind": "plain", + "required": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "terraform_labels": { + "type": [ + "map", + "string" + ], + "description": 
"The combination of labels configured directly on the resource\n and default labels configured on the provider.", + "description_kind": "plain", + "computed": true + }, + "update_time": { + "type": "string", + "description": "Time the ServerTlsPolicy was updated in UTC.", + "description_kind": "plain", + "computed": true + } + }, + "block_types": { + "mtls_policy": { + "nesting_mode": "list", + "block": { + "attributes": { + "client_validation_mode": { + "type": "string", + "description": "When the client presents an invalid certificate or no certificate to the load balancer, the clientValidationMode specifies how the client connection is handled.\nRequired if the policy is to be used with the external HTTPS load balancing. For Traffic Director it must be empty. Possible values: [\"CLIENT_VALIDATION_MODE_UNSPECIFIED\", \"ALLOW_INVALID_OR_MISSING_CLIENT_CERT\", \"REJECT_INVALID\"]", + "description_kind": "plain", + "optional": true + }, + "client_validation_trust_config": { + "type": "string", + "description": "Reference to the TrustConfig from certificatemanager.googleapis.com namespace.\nIf specified, the chain validation will be performed against certificates configured in the given TrustConfig.\nAllowed only if the policy is to be used with external HTTPS load balancers.", + "description_kind": "plain", + "optional": true + } + }, + "block_types": { + "client_validation_ca": { + "nesting_mode": "list", + "block": { + "block_types": { + "certificate_provider_instance": { + "nesting_mode": "list", + "block": { + "attributes": { + "plugin_instance": { + "type": "string", + "description": "Plugin instance name, used to locate and load CertificateProvider instance configuration. Set to \"google_cloud_private_spiffe\" to use Certificate Authority Service certificate provider instance.", + "description_kind": "plain", + "required": true + } + }, + "description": "Optional if policy is to be used with Traffic Director. 
For external HTTPS load balancer must be empty.\nDefines a mechanism to provision server identity (public and private keys). Cannot be combined with allowOpen as a permissive mode that allows both plain text and TLS is not supported.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "grpc_endpoint": { + "nesting_mode": "list", + "block": { + "attributes": { + "target_uri": { + "type": "string", + "description": "The target URI of the gRPC endpoint. Only UDS path is supported, and should start with \"unix:\".", + "description_kind": "plain", + "required": true + } + }, + "description": "gRPC specific configuration to access the gRPC server to obtain the cert and private key.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "Required if the policy is to be used with Traffic Director. For external HTTPS load balancers it must be empty.\nDefines the mechanism to obtain the Certificate Authority certificate to validate the client certificate.", + "description_kind": "plain" + } + } + }, + "description": "This field is required if the policy is used with external HTTPS load balancers. This field can be empty for Traffic Director.\nDefines a mechanism to provision peer validation certificates for peer to peer authentication (Mutual TLS - mTLS). If not specified, client certificate will not be requested. The connection is treated as TLS and not mTLS. If allowOpen and mtlsPolicy are set, server allows both plain text and mTLS connections.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "server_certificate": { + "nesting_mode": "list", + "block": { + "block_types": { + "certificate_provider_instance": { + "nesting_mode": "list", + "block": { + "attributes": { + "plugin_instance": { + "type": "string", + "description": "Plugin instance name, used to locate and load CertificateProvider instance configuration. 
Set to \"google_cloud_private_spiffe\" to use Certificate Authority Service certificate provider instance.", + "description_kind": "plain", + "required": true + } + }, + "description": "Optional if policy is to be used with Traffic Director. For external HTTPS load balancer must be empty.\nDefines a mechanism to provision server identity (public and private keys). Cannot be combined with allowOpen as a permissive mode that allows both plain text and TLS is not supported.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "grpc_endpoint": { + "nesting_mode": "list", + "block": { + "attributes": { + "target_uri": { + "type": "string", + "description": "The target URI of the gRPC endpoint. Only UDS path is supported, and should start with \"unix:\".", + "description_kind": "plain", + "required": true + } + }, + "description": "gRPC specific configuration to access the gRPC server to obtain the cert and private key.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "Defines a mechanism to provision client identity (public and private keys) for peer to peer authentication. 
The presence of this dictates mTLS.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, "google_network_security_tls_inspection_policy": { "version": 0, "block": { @@ -124924,10 +134743,22 @@ "description_kind": "plain" } }, - "google_network_services_edge_cache_keyset": { + "google_network_services_authz_extension": { "version": 0, "block": { "attributes": { + "authority": { + "type": "string", + "description": "The :authority header in the gRPC request sent from Envoy to the extension service.", + "description_kind": "plain", + "required": true + }, + "create_time": { + "type": "string", + "description": "The timestamp when the resource was created.", + "description_kind": "plain", + "computed": true + }, "description": { "type": "string", "description": "A human-readable description of the resource.", @@ -124943,6 +134774,22 @@ "description_kind": "plain", "computed": true }, + "fail_open": { + "type": "bool", + "description": "Determines how the proxy behaves if the call to the extension fails or times out.\nWhen set to TRUE, request or response processing continues without error. Any subsequent extensions in the extension chain are also executed. When set to FALSE or the default setting of FALSE is used, one of the following happens:\n* If response headers have not been delivered to the downstream client, a generic 500 error is returned to the client. 
The error response can be tailored by configuring a custom error response in the load balancer.\n* If response headers have been delivered, then the HTTP stream to the downstream client is reset.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "forward_headers": { + "type": [ + "list", + "string" + ], + "description": "List of the HTTP headers to forward to the extension (from the client). If omitted, all headers are sent. Each element is a string indicating the header name.", + "description_kind": "plain", + "optional": true + }, "id": { "type": "string", "description_kind": "plain", @@ -124954,11 +134801,140 @@ "map", "string" ], - "description": "Set of label tags associated with the EdgeCache resource.\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", + "description": "Set of labels associated with the AuthzExtension resource.\n\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", "description_kind": "plain", "optional": true }, - "name": { + "load_balancing_scheme": { + "type": "string", + "description": "All backend services and forwarding rules referenced by this extension must share the same load balancing scheme.\nFor more information, refer to [Backend services overview](https://cloud.google.com/load-balancing/docs/backend-service). 
Possible values: [\"INTERNAL_MANAGED\", \"EXTERNAL_MANAGED\"]", + "description_kind": "plain", + "required": true + }, + "location": { + "type": "string", + "description": "The location of the resource.", + "description_kind": "plain", + "required": true + }, + "metadata": { + "type": [ + "map", + "string" + ], + "description": "The metadata provided here is included as part of the metadata_context (of type google.protobuf.Struct) in the ProcessingRequest message sent to the extension server. The metadata is available under the namespace com.google.authz_extension.. The following variables are supported in the metadata Struct:\n\n{forwarding_rule_id} - substituted with the forwarding rule's fully qualified resource name.", + "description_kind": "plain", + "optional": true + }, + "name": { + "type": "string", + "description": "Identifier. Name of the AuthzExtension resource.", + "description_kind": "plain", + "required": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "service": { + "type": "string", + "description": "The reference to the service that runs the extension.\nTo configure a callout extension, service must be a fully-qualified reference to a [backend service](https://cloud.google.com/compute/docs/reference/rest/v1/backendServices) in the format:\nhttps://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/backendServices/{backendService} or https://www.googleapis.com/compute/v1/projects/{project}/global/backendServices/{backendService}.", + "description_kind": "plain", + "required": true + }, + "terraform_labels": { + "type": [ + "map", + "string" + ], + "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", + "description_kind": "plain", + "computed": true + }, + "timeout": { + "type": "string", + "description": "Specifies the timeout for each individual message on the stream. 
The timeout must be between 10-10000 milliseconds.", + "description_kind": "plain", + "required": true + }, + "update_time": { + "type": "string", + "description": "The timestamp when the resource was updated.", + "description_kind": "plain", + "computed": true + }, + "wire_format": { + "type": "string", + "description": "The format of communication supported by the callout extension. Default value: \"EXT_PROC_GRPC\" Possible values: [\"WIRE_FORMAT_UNSPECIFIED\", \"EXT_PROC_GRPC\"]", + "description_kind": "plain", + "optional": true + } + }, + "block_types": { + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, + "google_network_services_edge_cache_keyset": { + "version": 0, + "block": { + "attributes": { + "description": { + "type": "string", + "description": "A human-readable description of the resource.", + "description_kind": "plain", + "optional": true + }, + "effective_labels": { + "type": [ + "map", + "string" + ], + "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "labels": { + "type": [ + "map", + "string" + ], + "description": "Set of label tags associated with the EdgeCache resource.\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", + "description_kind": "plain", + 
"optional": true + }, + "name": { "type": "string", "description": "Name of the resource; provided by the client when the resource is created.\nThe name must be 1-64 characters long, and match the regular expression [a-zA-Z][a-zA-Z0-9_-]* which means the first character must be a letter,\nand all following characters must be a dash, underscore, letter or digit.", "description_kind": "plain", @@ -125497,7 +135473,7 @@ "description_kind": "plain" }, "min_items": 1, - "max_items": 10 + "max_items": 50 }, "path_matcher": { "nesting_mode": "list", @@ -126128,7 +136104,7 @@ "description_kind": "plain" }, "min_items": 1, - "max_items": 10 + "max_items": 50 } }, "description": "Defines how requests are routed, modified, cached and/or which origin content is filled from.", @@ -126268,6 +136244,12 @@ "optional": true, "computed": true }, + "routing_mode": { + "type": "string", + "description": "The routing mode of the Gateway. This field is configurable only for gateways of type SECURE_WEB_GATEWAY. This field is required for gateways of type SECURE_WEB_GATEWAY. Possible values: [\"NEXT_HOP_ROUTING_MODE\"]", + "description_kind": "plain", + "optional": true + }, "scope": { "type": "string", "description": "Immutable. Scope determines how configuration across multiple Gateway instances are merged.\nThe configuration for multiple Gateway instances with the same scope will be merged as presented as\na single coniguration to the proxy/load balancer.\nMax length 64 characters. Scope should start with a letter and can only have letters, numbers, hyphens.", @@ -128245,33 +138227,67 @@ "description_kind": "plain" } }, - "google_org_policy_custom_constraint": { + "google_oracle_database_autonomous_database": { "version": 0, "block": { "attributes": { - "action_type": { + "admin_password": { "type": "string", - "description": "The action to take if the condition is met. 
Possible values: [\"ALLOW\", \"DENY\"]", + "description": "The password for the default ADMIN user.", + "description_kind": "plain", + "optional": true + }, + "autonomous_database_id": { + "type": "string", + "description": "The ID of the Autonomous Database to create. This value is restricted\nto (^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$) and must be a maximum of 63\ncharacters in length. The value must start with a letter and end with\na letter or a number.", "description_kind": "plain", "required": true }, - "condition": { + "cidr": { "type": "string", - "description": "A CEL condition that refers to a supported service resource, for example 'resource.management.autoUpgrade == false'. For details about CEL usage, see [Common Expression Language](https://cloud.google.com/resource-manager/docs/organization-policy/creating-managing-custom-constraints#common_expression_language).", + "description": "The subnet CIDR range for the Autonmous Database.", "description_kind": "plain", "required": true }, - "description": { + "create_time": { "type": "string", - "description": "A human-friendly description of the constraint to display as an error message when the policy is violated.", + "description": "The date and time that the Autonomous Database was created.", + "description_kind": "plain", + "computed": true + }, + "database": { + "type": "string", + "description": "The name of the Autonomous Database. The database name must be unique in\nthe project. The name must begin with a letter and can\ncontain a maximum of 30 alphanumeric characters.", + "description_kind": "plain", + "required": true + }, + "deletion_protection": { + "type": "bool", + "description": "Whether or not to allow Terraform to destroy the instance. 
Unless this field is set to false in Terraform state, a terraform destroy or terraform apply that would delete the instance will fail.", "description_kind": "plain", "optional": true }, "display_name": { "type": "string", - "description": "A human-friendly name for the constraint.", + "description": "The display name for the Autonomous Database. The name does not have to\nbe unique within your project.", "description_kind": "plain", - "optional": true + "optional": true, + "computed": true + }, + "effective_labels": { + "type": [ + "map", + "string" + ], + "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, + "entitlement_id": { + "type": "string", + "description": "The ID of the subscription entitlement associated with the Autonomous\nDatabase.", + "description_kind": "plain", + "computed": true }, "id": { "type": "string", @@ -128279,39 +138295,1392 @@ "optional": true, "computed": true }, - "method_types": { + "labels": { "type": [ - "list", + "map", "string" ], - "description": "A list of RESTful methods for which to enforce the constraint. Can be 'CREATE', 'UPDATE', or both. Not all Google Cloud services support both methods. To see supported methods for each service, find the service in [Supported services](https://cloud.google.com/resource-manager/docs/organization-policy/custom-constraint-supported-services).", + "description": "The labels or tags associated with the Autonomous Database. \n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", + "description_kind": "plain", + "optional": true + }, + "location": { + "type": "string", + "description": "Resource ID segment making up resource 'name'. 
See documentation for resource type 'oracledatabase.googleapis.com/AutonomousDatabaseBackup'.", "description_kind": "plain", "required": true }, "name": { "type": "string", - "description": "Immutable. The name of the custom constraint. This is unique within the organization.", + "description": "Identifier. The name of the Autonomous Database resource in the following format:\nprojects/{project}/locations/{region}/autonomousDatabases/{autonomous_database}", "description_kind": "plain", - "required": true + "computed": true }, - "parent": { + "network": { "type": "string", - "description": "The parent of the resource, an organization. Format should be 'organizations/{organization_id}'.", + "description": "The name of the VPC network used by the Autonomous Database.\nFormat: projects/{project}/global/networks/{network}", "description_kind": "plain", "required": true }, - "resource_types": { + "project": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "terraform_labels": { "type": [ - "list", + "map", "string" ], - "description": "Immutable. The fully qualified name of the Google Cloud REST resource containing the object and field you want to restrict. For example, 'container.googleapis.com/NodePool'.", - "description_kind": "plain", - "required": true - }, - "update_time": { - "type": "string", - "description": "Output only. 
The timestamp representing when the constraint was last updated.", + "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", + "description_kind": "plain", + "computed": true + } + }, + "block_types": { + "properties": { + "nesting_mode": "list", + "block": { + "attributes": { + "actual_used_data_storage_size_tb": { + "type": "number", + "description": "The amount of storage currently being used for user and system data, in\nterabytes.", + "description_kind": "plain", + "computed": true + }, + "allocated_storage_size_tb": { + "type": "number", + "description": "The amount of storage currently allocated for the database tables and\nbilled for, rounded up in terabytes.", + "description_kind": "plain", + "computed": true + }, + "apex_details": { + "type": [ + "list", + [ + "object", + { + "apex_version": "string", + "ords_version": "string" + } + ] + ], + "description": "Oracle APEX Application Development.\nhttps://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/datatypes/AutonomousDatabaseApex", + "description_kind": "plain", + "computed": true + }, + "are_primary_allowlisted_ips_used": { + "type": "bool", + "description": "This field indicates the status of Data Guard and Access control for the\nAutonomous Database. The field's value is null if Data Guard is disabled\nor Access Control is disabled. The field's value is TRUE if both Data Guard\nand Access Control are enabled, and the Autonomous Database is using\nprimary IP access control list (ACL) for standby. 
The field's value is\nFALSE if both Data Guard and Access Control are enabled, and the Autonomous\nDatabase is using a different IP access control list (ACL) for standby\ncompared to primary.", + "description_kind": "plain", + "computed": true + }, + "autonomous_container_database_id": { + "type": "string", + "description": "The Autonomous Container Database OCID.", + "description_kind": "plain", + "computed": true + }, + "available_upgrade_versions": { + "type": [ + "list", + "string" + ], + "description": "The list of available Oracle Database upgrade versions for an Autonomous\nDatabase.", + "description_kind": "plain", + "computed": true + }, + "backup_retention_period_days": { + "type": "number", + "description": "The retention period for the Autonomous Database. This field is specified\nin days, can range from 1 day to 60 days, and has a default value of\n60 days.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "character_set": { + "type": "string", + "description": "The character set for the Autonomous Database. 
The default is AL32UTF8.", + "description_kind": "plain", + "optional": true + }, + "compute_count": { + "type": "number", + "description": "The number of compute servers for the Autonomous Database.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "connection_strings": { + "type": [ + "list", + [ + "object", + { + "all_connection_strings": [ + "list", + [ + "object", + { + "high": "string", + "low": "string", + "medium": "string" + } + ] + ], + "dedicated": "string", + "high": "string", + "low": "string", + "medium": "string", + "profiles": [ + "list", + [ + "object", + { + "consumer_group": "string", + "display_name": "string", + "host_format": "string", + "is_regional": "bool", + "protocol": "string", + "session_mode": "string", + "syntax_format": "string", + "tls_authentication": "string", + "value": "string" + } + ] + ] + } + ] + ], + "description": "The connection string used to connect to the Autonomous Database.\nhttps://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/datatypes/AutonomousDatabaseConnectionStrings", + "description_kind": "plain", + "computed": true + }, + "connection_urls": { + "type": [ + "list", + [ + "object", + { + "apex_uri": "string", + "database_transforms_uri": "string", + "graph_studio_uri": "string", + "machine_learning_notebook_uri": "string", + "machine_learning_user_management_uri": "string", + "mongo_db_uri": "string", + "ords_uri": "string", + "sql_dev_web_uri": "string" + } + ] + ], + "description": "The URLs for accessing Oracle Application Express (APEX) and SQL Developer\nWeb with a browser from a Compute instance.\nhttps://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/datatypes/AutonomousDatabaseConnectionUrls", + "description_kind": "plain", + "computed": true + }, + "data_safe_state": { + "type": "string", + "description": "The current state of the Data Safe registration for the\nAutonomous Database. 
\n Possible values:\n DATA_SAFE_STATE_UNSPECIFIED\nREGISTERING\nREGISTERED\nDEREGISTERING\nNOT_REGISTERED\nFAILED", + "description_kind": "plain", + "computed": true + }, + "data_storage_size_gb": { + "type": "number", + "description": "The size of the data stored in the database, in gigabytes.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "data_storage_size_tb": { + "type": "number", + "description": "The size of the data stored in the database, in terabytes.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "database_management_state": { + "type": "string", + "description": "The current state of database management for the Autonomous Database. \n Possible values:\n DATABASE_MANAGEMENT_STATE_UNSPECIFIED\nENABLING\nENABLED\nDISABLING\nNOT_ENABLED\nFAILED_ENABLING\nFAILED_DISABLING", + "description_kind": "plain", + "computed": true + }, + "db_edition": { + "type": "string", + "description": "The edition of the Autonomous Databases. 
\n Possible values:\n DATABASE_EDITION_UNSPECIFIED\nSTANDARD_EDITION\nENTERPRISE_EDITION", + "description_kind": "plain", + "optional": true + }, + "db_version": { + "type": "string", + "description": "The Oracle Database version for the Autonomous Database.", + "description_kind": "plain", + "optional": true + }, + "db_workload": { + "type": "string", + "description": "Possible values:\n DB_WORKLOAD_UNSPECIFIED\nOLTP\nDW\nAJD\nAPEX", + "description_kind": "plain", + "required": true + }, + "failed_data_recovery_duration": { + "type": "string", + "description": "This field indicates the number of seconds of data loss during a Data\nGuard failover.", + "description_kind": "plain", + "computed": true + }, + "is_auto_scaling_enabled": { + "type": "bool", + "description": "This field indicates if auto scaling is enabled for the Autonomous Database\nCPU core count.", + "description_kind": "plain", + "optional": true + }, + "is_local_data_guard_enabled": { + "type": "bool", + "description": "This field indicates whether the Autonomous Database has local (in-region)\nData Guard enabled.", + "description_kind": "plain", + "computed": true + }, + "is_storage_auto_scaling_enabled": { + "type": "bool", + "description": "This field indicates if auto scaling is enabled for the Autonomous Database\nstorage.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "license_type": { + "type": "string", + "description": "The license type used for the Autonomous Database. 
\n Possible values:\n LICENSE_TYPE_UNSPECIFIED\nLICENSE_INCLUDED\nBRING_YOUR_OWN_LICENSE", + "description_kind": "plain", + "required": true + }, + "lifecycle_details": { + "type": "string", + "description": "The details of the current lifestyle state of the Autonomous Database.", + "description_kind": "plain", + "computed": true + }, + "local_adg_auto_failover_max_data_loss_limit": { + "type": "number", + "description": "This field indicates the maximum data loss limit for an Autonomous\nDatabase, in seconds.", + "description_kind": "plain", + "computed": true + }, + "local_disaster_recovery_type": { + "type": "string", + "description": "This field indicates the local disaster recovery (DR) type of an\nAutonomous Database. \n Possible values:\n LOCAL_DISASTER_RECOVERY_TYPE_UNSPECIFIED\nADG\nBACKUP_BASED", + "description_kind": "plain", + "computed": true + }, + "local_standby_db": { + "type": [ + "list", + [ + "object", + { + "data_guard_role_changed_time": "string", + "disaster_recovery_role_changed_time": "string", + "lag_time_duration": "string", + "lifecycle_details": "string", + "state": "string" + } + ] + ], + "description": "Autonomous Data Guard standby database details.\nhttps://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/datatypes/AutonomousDatabaseStandbySummary", + "description_kind": "plain", + "computed": true + }, + "maintenance_begin_time": { + "type": "string", + "description": "The date and time when maintenance will begin.", + "description_kind": "plain", + "computed": true + }, + "maintenance_end_time": { + "type": "string", + "description": "The date and time when maintenance will end.", + "description_kind": "plain", + "computed": true + }, + "maintenance_schedule_type": { + "type": "string", + "description": "The maintenance schedule of the Autonomous Database. 
\n Possible values:\n MAINTENANCE_SCHEDULE_TYPE_UNSPECIFIED\nEARLY\nREGULAR", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "memory_per_oracle_compute_unit_gbs": { + "type": "number", + "description": "The amount of memory enabled per ECPU, in gigabytes.", + "description_kind": "plain", + "computed": true + }, + "memory_table_gbs": { + "type": "number", + "description": "The memory assigned to in-memory tables in an Autonomous Database.", + "description_kind": "plain", + "computed": true + }, + "mtls_connection_required": { + "type": "bool", + "description": "This field specifies if the Autonomous Database requires mTLS connections.", + "description_kind": "plain", + "optional": true + }, + "n_character_set": { + "type": "string", + "description": "The national character set for the Autonomous Database. The default is\nAL16UTF16.", + "description_kind": "plain", + "optional": true + }, + "next_long_term_backup_time": { + "type": "string", + "description": "The long term backup schedule of the Autonomous Database.", + "description_kind": "plain", + "computed": true + }, + "oci_url": { + "type": "string", + "description": "The Oracle Cloud Infrastructure link for the Autonomous Database.", + "description_kind": "plain", + "computed": true + }, + "ocid": { + "type": "string", + "description": "OCID of the Autonomous Database.\nhttps://docs.oracle.com/en-us/iaas/Content/General/Concepts/identifiers.htm#Oracle", + "description_kind": "plain", + "computed": true + }, + "open_mode": { + "type": "string", + "description": "This field indicates the current mode of the Autonomous Database. 
\n Possible values:\n OPEN_MODE_UNSPECIFIED\nREAD_ONLY\nREAD_WRITE", + "description_kind": "plain", + "computed": true + }, + "operations_insights_state": { + "type": "string", + "description": "Possible values:\n OPERATIONS_INSIGHTS_STATE_UNSPECIFIED\nENABLING\nENABLED\nDISABLING\nNOT_ENABLED\nFAILED_ENABLING\nFAILED_DISABLING", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "peer_db_ids": { + "type": [ + "list", + "string" + ], + "description": "The list of OCIDs of standby databases located in Autonomous Data Guard\nremote regions that are associated with the source database.", + "description_kind": "plain", + "computed": true + }, + "permission_level": { + "type": "string", + "description": "The permission level of the Autonomous Database. \n Possible values:\n PERMISSION_LEVEL_UNSPECIFIED\nRESTRICTED\nUNRESTRICTED", + "description_kind": "plain", + "computed": true + }, + "private_endpoint": { + "type": "string", + "description": "The private endpoint for the Autonomous Database.", + "description_kind": "plain", + "computed": true + }, + "private_endpoint_ip": { + "type": "string", + "description": "The private endpoint IP address for the Autonomous Database.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "private_endpoint_label": { + "type": "string", + "description": "The private endpoint label for the Autonomous Database.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "refreshable_mode": { + "type": "string", + "description": "The refresh mode of the cloned Autonomous Database. \n Possible values:\n REFRESHABLE_MODE_UNSPECIFIED\nAUTOMATIC\nMANUAL", + "description_kind": "plain", + "computed": true + }, + "refreshable_state": { + "type": "string", + "description": "The refresh State of the clone. 
\n Possible values:\n REFRESHABLE_STATE_UNSPECIFIED\nREFRESHING\nNOT_REFRESHING", + "description_kind": "plain", + "computed": true + }, + "role": { + "type": "string", + "description": "The Data Guard role of the Autonomous Database. \n Possible values:\n ROLE_UNSPECIFIED\nPRIMARY\nSTANDBY\nDISABLED_STANDBY\nBACKUP_COPY\nSNAPSHOT_STANDBY", + "description_kind": "plain", + "computed": true + }, + "scheduled_operation_details": { + "type": [ + "list", + [ + "object", + { + "day_of_week": "string", + "start_time": [ + "list", + [ + "object", + { + "hours": "number", + "minutes": "number", + "nanos": "number", + "seconds": "number" + } + ] + ], + "stop_time": [ + "list", + [ + "object", + { + "hours": "number", + "minutes": "number", + "nanos": "number", + "seconds": "number" + } + ] + ] + } + ] + ], + "description": "The list and details of the scheduled operations of the Autonomous\nDatabase.", + "description_kind": "plain", + "computed": true + }, + "sql_web_developer_url": { + "type": "string", + "description": "The SQL Web Developer URL for the Autonomous Database.", + "description_kind": "plain", + "computed": true + }, + "state": { + "type": "string", + "description": "Possible values:\n STATE_UNSPECIFIED\nPROVISIONING\nAVAILABLE\nSTOPPING\nSTOPPED\nSTARTING\nTERMINATING\nTERMINATED\nUNAVAILABLE\nRESTORE_IN_PROGRESS\nRESTORE_FAILED\nBACKUP_IN_PROGRESS\nSCALE_IN_PROGRESS\nAVAILABLE_NEEDS_ATTENTION\nUPDATING\nMAINTENANCE_IN_PROGRESS\nRESTARTING\nRECREATING\nROLE_CHANGE_IN_PROGRESS\nUPGRADING\nINACCESSIBLE\nSTANDBY", + "description_kind": "plain", + "computed": true + }, + "supported_clone_regions": { + "type": [ + "list", + "string" + ], + "description": "The list of available regions that can be used to create a clone for the\nAutonomous Database.", + "description_kind": "plain", + "computed": true + }, + "total_auto_backup_storage_size_gbs": { + "type": "number", + "description": "The storage space used by automatic backups of Autonomous Database, 
in\ngigabytes.", + "description_kind": "plain", + "computed": true + }, + "used_data_storage_size_tbs": { + "type": "number", + "description": "The storage space used by Autonomous Database, in gigabytes.", + "description_kind": "plain", + "computed": true + } + }, + "block_types": { + "customer_contacts": { + "nesting_mode": "list", + "block": { + "attributes": { + "email": { + "type": "string", + "description": "The email address used by Oracle to send notifications regarding databases\nand infrastructure.", + "description_kind": "plain", + "required": true + } + }, + "description": "The list of customer contacts.", + "description_kind": "plain" + } + } + }, + "description": "The properties of an Autonomous Database.", + "description_kind": "plain" + }, + "min_items": 1, + "max_items": 1 + }, + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, + "google_oracle_database_cloud_exadata_infrastructure": { + "version": 0, + "block": { + "attributes": { + "cloud_exadata_infrastructure_id": { + "type": "string", + "description": "The ID of the Exadata Infrastructure to create. This value is restricted\nto (^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$) and must be a maximum of 63\ncharacters in length. 
The value must start with a letter and end with\na letter or a number.", + "description_kind": "plain", + "required": true + }, + "create_time": { + "type": "string", + "description": "The date and time that the Exadata Infrastructure was created.", + "description_kind": "plain", + "computed": true + }, + "deletion_protection": { + "type": "bool", + "description": "Whether or not to allow Terraform to destroy the instance. Unless this field is set to false in Terraform state, a terraform destroy or terraform apply that would delete the instance will fail.", + "description_kind": "plain", + "optional": true + }, + "display_name": { + "type": "string", + "description": "User friendly name for this resource.", + "description_kind": "plain", + "optional": true + }, + "effective_labels": { + "type": [ + "map", + "string" + ], + "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, + "entitlement_id": { + "type": "string", + "description": "Entitlement ID of the private offer against which this infrastructure\nresource is provisioned.", + "description_kind": "plain", + "computed": true + }, + "gcp_oracle_zone": { + "type": "string", + "description": "GCP location where Oracle Exadata is hosted.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "labels": { + "type": [ + "map", + "string" + ], + "description": "Labels or tags associated with the resource. 
\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", + "description_kind": "plain", + "optional": true + }, + "location": { + "type": "string", + "description": "Resource ID segment making up resource 'name'. See documentation for resource type 'oracledatabase.googleapis.com/DbServer'.", + "description_kind": "plain", + "required": true + }, + "name": { + "type": "string", + "description": "Identifier. The name of the Exadata Infrastructure resource with the following format:\nprojects/{project}/locations/{region}/cloudExadataInfrastructures/{cloud_exadata_infrastructure}", + "description_kind": "plain", + "computed": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "terraform_labels": { + "type": [ + "map", + "string" + ], + "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", + "description_kind": "plain", + "computed": true + } + }, + "block_types": { + "properties": { + "nesting_mode": "list", + "block": { + "attributes": { + "activated_storage_count": { + "type": "number", + "description": "The requested number of additional storage servers activated for the\nExadata Infrastructure.", + "description_kind": "plain", + "computed": true + }, + "additional_storage_count": { + "type": "number", + "description": "The requested number of additional storage servers for the Exadata\nInfrastructure.", + "description_kind": "plain", + "computed": true + }, + "available_storage_size_gb": { + "type": "number", + "description": "The available storage can be allocated to the Exadata Infrastructure\nresource, in gigabytes (GB).", + "description_kind": "plain", + "computed": true + }, + "compute_count": { + "type": "number", + "description": "The number of compute servers for the 
Exadata Infrastructure.", + "description_kind": "plain", + "optional": true + }, + "cpu_count": { + "type": "number", + "description": "The number of enabled CPU cores.", + "description_kind": "plain", + "computed": true + }, + "data_storage_size_tb": { + "type": "number", + "description": "Size, in terabytes, of the DATA disk group.", + "description_kind": "plain", + "computed": true + }, + "db_node_storage_size_gb": { + "type": "number", + "description": "The local node storage allocated in GBs.", + "description_kind": "plain", + "computed": true + }, + "db_server_version": { + "type": "string", + "description": "The software version of the database servers (dom0) in the Exadata\nInfrastructure.", + "description_kind": "plain", + "computed": true + }, + "max_cpu_count": { + "type": "number", + "description": "The total number of CPU cores available.", + "description_kind": "plain", + "computed": true + }, + "max_data_storage_tb": { + "type": "number", + "description": "The total available DATA disk group size.", + "description_kind": "plain", + "computed": true + }, + "max_db_node_storage_size_gb": { + "type": "number", + "description": "The total local node storage available in GBs.", + "description_kind": "plain", + "computed": true + }, + "max_memory_gb": { + "type": "number", + "description": "The total memory available in GBs.", + "description_kind": "plain", + "computed": true + }, + "memory_size_gb": { + "type": "number", + "description": "The memory allocated in GBs.", + "description_kind": "plain", + "computed": true + }, + "monthly_db_server_version": { + "type": "string", + "description": "The monthly software version of the database servers (dom0)\nin the Exadata Infrastructure. Example: 20.1.15", + "description_kind": "plain", + "computed": true + }, + "monthly_storage_server_version": { + "type": "string", + "description": "The monthly software version of the storage servers (cells)\nin the Exadata Infrastructure. 
Example: 20.1.15", + "description_kind": "plain", + "computed": true + }, + "next_maintenance_run_id": { + "type": "string", + "description": "The OCID of the next maintenance run.", + "description_kind": "plain", + "computed": true + }, + "next_maintenance_run_time": { + "type": "string", + "description": "The time when the next maintenance run will occur.", + "description_kind": "plain", + "computed": true + }, + "next_security_maintenance_run_time": { + "type": "string", + "description": "The time when the next security maintenance run will occur.", + "description_kind": "plain", + "computed": true + }, + "oci_url": { + "type": "string", + "description": "Deep link to the OCI console to view this resource.", + "description_kind": "plain", + "computed": true + }, + "ocid": { + "type": "string", + "description": "OCID of created infra.\nhttps://docs.oracle.com/en-us/iaas/Content/General/Concepts/identifiers.htm#Oracle", + "description_kind": "plain", + "computed": true + }, + "shape": { + "type": "string", + "description": "The shape of the Exadata Infrastructure. The shape determines the\namount of CPU, storage, and memory resources allocated to the instance.", + "description_kind": "plain", + "required": true + }, + "state": { + "type": "string", + "description": "The current lifecycle state of the Exadata Infrastructure. 
\n Possible values:\n STATE_UNSPECIFIED\nPROVISIONING\nAVAILABLE\nUPDATING\nTERMINATING\nTERMINATED\nFAILED\nMAINTENANCE_IN_PROGRESS", + "description_kind": "plain", + "computed": true + }, + "storage_count": { + "type": "number", + "description": "The number of Cloud Exadata storage servers for the Exadata Infrastructure.", + "description_kind": "plain", + "optional": true + }, + "storage_server_version": { + "type": "string", + "description": "The software version of the storage servers (cells) in the Exadata\nInfrastructure.", + "description_kind": "plain", + "computed": true + }, + "total_storage_size_gb": { + "type": "number", + "description": "The total storage allocated to the Exadata Infrastructure\nresource, in gigabytes (GB).", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "block_types": { + "customer_contacts": { + "nesting_mode": "list", + "block": { + "attributes": { + "email": { + "type": "string", + "description": "The email address used by Oracle to send notifications regarding databases\nand infrastructure.", + "description_kind": "plain", + "required": true + } + }, + "description": "The list of customer contacts.", + "description_kind": "plain" + } + }, + "maintenance_window": { + "nesting_mode": "list", + "block": { + "attributes": { + "custom_action_timeout_mins": { + "type": "number", + "description": "Determines the amount of time the system will wait before the start of each\ndatabase server patching operation. 
Custom action timeout is in minutes and\nvalid value is between 15 to 120 (inclusive).", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "days_of_week": { + "type": [ + "list", + "string" + ], + "description": "Days during the week when maintenance should be performed.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "hours_of_day": { + "type": [ + "list", + "number" + ], + "description": "The window of hours during the day when maintenance should be performed.\nThe window is a 4 hour slot. Valid values are:\n 0 - represents time slot 0:00 - 3:59 UTC\n 4 - represents time slot 4:00 - 7:59 UTC\n 8 - represents time slot 8:00 - 11:59 UTC\n 12 - represents time slot 12:00 - 15:59 UTC\n 16 - represents time slot 16:00 - 19:59 UTC\n 20 - represents time slot 20:00 - 23:59 UTC", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "is_custom_action_timeout_enabled": { + "type": "bool", + "description": "If true, enables the configuration of a custom action timeout (waiting\nperiod) between database server patching operations.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "lead_time_week": { + "type": "number", + "description": "Lead time window allows user to set a lead time to prepare for a down time.\nThe lead time is in weeks and valid value is between 1 to 4.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "months": { + "type": [ + "list", + "string" + ], + "description": "Months during the year when maintenance should be performed.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "patching_mode": { + "type": "string", + "description": "Cloud CloudExadataInfrastructure node patching method, either \"ROLLING\"\n or \"NONROLLING\". Default value is ROLLING. 
\n Possible values:\n PATCHING_MODE_UNSPECIFIED\nROLLING\nNON_ROLLING", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "preference": { + "type": "string", + "description": "The maintenance window scheduling preference. \n Possible values:\n MAINTENANCE_WINDOW_PREFERENCE_UNSPECIFIED\nCUSTOM_PREFERENCE\nNO_PREFERENCE", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "weeks_of_month": { + "type": [ + "list", + "number" + ], + "description": "Weeks during the month when maintenance should be performed. Weeks start on\nthe 1st, 8th, 15th, and 22nd days of the month, and have a duration of 7\ndays. Weeks start and end based on calendar dates, not days of the week.", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "description": "Maintenance window as defined by Oracle.\nhttps://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/datatypes/MaintenanceWindow", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "Various properties of Exadata Infrastructure.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, + "google_oracle_database_cloud_vm_cluster": { + "version": 0, + "block": { + "attributes": { + "backup_subnet_cidr": { + "type": "string", + "description": "CIDR range of the backup subnet.", + "description_kind": "plain", + "required": true + }, + "cidr": { + "type": "string", + "description": "Network settings. 
CIDR to use for cluster IP allocation.", + "description_kind": "plain", + "required": true + }, + "cloud_vm_cluster_id": { + "type": "string", + "description": "The ID of the VM Cluster to create. This value is restricted\nto (^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$) and must be a maximum of 63\ncharacters in length. The value must start with a letter and end with\na letter or a number.", + "description_kind": "plain", + "required": true + }, + "create_time": { + "type": "string", + "description": "The date and time that the VM cluster was created.", + "description_kind": "plain", + "computed": true + }, + "deletion_protection": { + "type": "bool", + "description": "Whether Terraform will be prevented from destroying the cluster. Deleting this cluster via terraform destroy or terraform apply will only succeed if this field is false in the Terraform state.", + "description_kind": "plain", + "optional": true + }, + "display_name": { + "type": "string", + "description": "User friendly name for this resource.", + "description_kind": "plain", + "optional": true + }, + "effective_labels": { + "type": [ + "map", + "string" + ], + "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, + "exadata_infrastructure": { + "type": "string", + "description": "The name of the Exadata Infrastructure resource on which VM cluster\nresource is created, in the following format:\nprojects/{project}/locations/{region}/cloudExadataInfrastuctures/{cloud_extradata_infrastructure}", + "description_kind": "plain", + "required": true + }, + "gcp_oracle_zone": { + "type": "string", + "description": "GCP location where Oracle Exadata is hosted. 
It is same as GCP Oracle zone\nof Exadata infrastructure.", + "description_kind": "plain", + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "labels": { + "type": [ + "map", + "string" + ], + "description": "Labels or tags associated with the VM Cluster. \n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", + "description_kind": "plain", + "optional": true + }, + "location": { + "type": "string", + "description": "Resource ID segment making up resource 'name'. See documentation for resource type 'oracledatabase.googleapis.com/DbNode'.", + "description_kind": "plain", + "required": true + }, + "name": { + "type": "string", + "description": "Identifier. The name of the VM Cluster resource with the format:\nprojects/{project}/locations/{region}/cloudVmClusters/{cloud_vm_cluster}", + "description_kind": "plain", + "computed": true + }, + "network": { + "type": "string", + "description": "The name of the VPC network.\nFormat: projects/{project}/global/networks/{network}", + "description_kind": "plain", + "required": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "terraform_labels": { + "type": [ + "map", + "string" + ], + "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", + "description_kind": "plain", + "computed": true + } + }, + "block_types": { + "properties": { + "nesting_mode": "list", + "block": { + "attributes": { + "cluster_name": { + "type": "string", + "description": "OCI Cluster name.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "compartment_id": { + "type": "string", + "description": "Compartment ID of cluster.", + "description_kind": "plain", + 
"computed": true + }, + "cpu_core_count": { + "type": "number", + "description": "Number of enabled CPU cores.", + "description_kind": "plain", + "required": true + }, + "data_storage_size_tb": { + "type": "number", + "description": "The data disk group size to be allocated in TBs.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "db_node_storage_size_gb": { + "type": "number", + "description": "Local storage per VM", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "db_server_ocids": { + "type": [ + "list", + "string" + ], + "description": "OCID of database servers.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "disk_redundancy": { + "type": "string", + "description": "The type of redundancy. \n Possible values:\n DISK_REDUNDANCY_UNSPECIFIED\nHIGH\nNORMAL", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "dns_listener_ip": { + "type": "string", + "description": "DNS listener IP.", + "description_kind": "plain", + "computed": true + }, + "domain": { + "type": "string", + "description": "Parent DNS domain where SCAN DNS and hosts names are qualified.\nex: ocispdelegated.ocisp10jvnet.oraclevcn.com", + "description_kind": "plain", + "computed": true + }, + "gi_version": { + "type": "string", + "description": "Grid Infrastructure Version.", + "description_kind": "plain", + "optional": true + }, + "hostname": { + "type": "string", + "description": "host name without domain.\nformat: \"-\" with some suffix.\nex: sp2-yi0xq where \"sp2\" is the hostname_prefix.", + "description_kind": "plain", + "computed": true + }, + "hostname_prefix": { + "type": "string", + "description": "Prefix for VM cluster host names.", + "description_kind": "plain", + "optional": true + }, + "license_type": { + "type": "string", + "description": "License type of VM Cluster. 
\n Possible values:\n LICENSE_TYPE_UNSPECIFIED\nLICENSE_INCLUDED\nBRING_YOUR_OWN_LICENSE", + "description_kind": "plain", + "required": true + }, + "local_backup_enabled": { + "type": "bool", + "description": "Use local backup.", + "description_kind": "plain", + "optional": true + }, + "memory_size_gb": { + "type": "number", + "description": "Memory allocated in GBs.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "node_count": { + "type": "number", + "description": "Number of database servers.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "oci_url": { + "type": "string", + "description": "Deep link to the OCI console to view this resource.", + "description_kind": "plain", + "computed": true + }, + "ocid": { + "type": "string", + "description": "Oracle Cloud Infrastructure ID of VM Cluster.", + "description_kind": "plain", + "computed": true + }, + "ocpu_count": { + "type": "number", + "description": "OCPU count per VM. Minimum is 0.1.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "scan_dns": { + "type": "string", + "description": "SCAN DNS name.\nex: sp2-yi0xq-scan.ocispdelegated.ocisp10jvnet.oraclevcn.com", + "description_kind": "plain", + "computed": true + }, + "scan_dns_record_id": { + "type": "string", + "description": "OCID of scan DNS record.", + "description_kind": "plain", + "computed": true + }, + "scan_ip_ids": { + "type": [ + "list", + "string" + ], + "description": "OCIDs of scan IPs.", + "description_kind": "plain", + "computed": true + }, + "scan_listener_port_tcp": { + "type": "number", + "description": "SCAN listener port - TCP", + "description_kind": "plain", + "computed": true + }, + "scan_listener_port_tcp_ssl": { + "type": "number", + "description": "SCAN listener port - TLS", + "description_kind": "plain", + "computed": true + }, + "shape": { + "type": "string", + "description": "Shape of VM Cluster.", + "description_kind": "plain", + "computed": 
true + }, + "sparse_diskgroup_enabled": { + "type": "bool", + "description": "Use exadata sparse snapshots.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "ssh_public_keys": { + "type": [ + "list", + "string" + ], + "description": "SSH public keys to be stored with cluster.", + "description_kind": "plain", + "optional": true + }, + "state": { + "type": "string", + "description": "State of the cluster. \n Possible values:\n STATE_UNSPECIFIED\nPROVISIONING\nAVAILABLE\nUPDATING\nTERMINATING\nTERMINATED\nFAILED\nMAINTENANCE_IN_PROGRESS", + "description_kind": "plain", + "computed": true + }, + "storage_size_gb": { + "type": "number", + "description": "The storage allocation for the disk group, in gigabytes (GB).", + "description_kind": "plain", + "computed": true + }, + "system_version": { + "type": "string", + "description": "Operating system version of the image.", + "description_kind": "plain", + "computed": true + } + }, + "block_types": { + "diagnostics_data_collection_options": { + "nesting_mode": "list", + "block": { + "attributes": { + "diagnostics_events_enabled": { + "type": "bool", + "description": "Indicates whether diagnostic collection is enabled for the VM cluster", + "description_kind": "plain", + "optional": true + }, + "health_monitoring_enabled": { + "type": "bool", + "description": "Indicates whether health monitoring is enabled for the VM cluster", + "description_kind": "plain", + "optional": true + }, + "incident_logs_enabled": { + "type": "bool", + "description": "Indicates whether incident logs and trace collection are enabled for the VM\ncluster", + "description_kind": "plain", + "optional": true + } + }, + "description": "Data collection options for diagnostics.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "time_zone": { + "nesting_mode": "list", + "block": { + "attributes": { + "id": { + "type": "string", + "description": "IANA Time Zone Database time zone, e.g. 
\"America/New_York\".", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "description": "Represents a time zone from the\n[IANA Time Zone Database](https://www.iana.org/time-zones).", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "Various properties and settings associated with Exadata VM cluster.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, + "google_org_policy_custom_constraint": { + "version": 0, + "block": { + "attributes": { + "action_type": { + "type": "string", + "description": "The action to take if the condition is met. Possible values: [\"ALLOW\", \"DENY\"]", + "description_kind": "plain", + "required": true + }, + "condition": { + "type": "string", + "description": "A CEL condition that refers to a supported service resource, for example 'resource.management.autoUpgrade == false'. 
For details about CEL usage, see [Common Expression Language](https://cloud.google.com/resource-manager/docs/organization-policy/creating-managing-custom-constraints#common_expression_language).", + "description_kind": "plain", + "required": true + }, + "description": { + "type": "string", + "description": "A human-friendly description of the constraint to display as an error message when the policy is violated.", + "description_kind": "plain", + "optional": true + }, + "display_name": { + "type": "string", + "description": "A human-friendly name for the constraint.", + "description_kind": "plain", + "optional": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "method_types": { + "type": [ + "list", + "string" + ], + "description": "A list of RESTful methods for which to enforce the constraint. Can be 'CREATE', 'UPDATE', or both. Not all Google Cloud services support both methods. To see supported methods for each service, find the service in [Supported services](https://cloud.google.com/resource-manager/docs/organization-policy/custom-constraint-supported-services).", + "description_kind": "plain", + "required": true + }, + "name": { + "type": "string", + "description": "Immutable. The name of the custom constraint. This is unique within the organization.", + "description_kind": "plain", + "required": true + }, + "parent": { + "type": "string", + "description": "The parent of the resource, an organization. Format should be 'organizations/{organization_id}'.", + "description_kind": "plain", + "required": true + }, + "resource_types": { + "type": [ + "list", + "string" + ], + "description": "Immutable. The fully qualified name of the Google Cloud REST resource containing the object and field you want to restrict. For example, 'container.googleapis.com/NodePool'.", + "description_kind": "plain", + "required": true + }, + "update_time": { + "type": "string", + "description": "Output only. 
The timestamp representing when the constraint was last updated.", "description_kind": "plain", "computed": true } @@ -128425,6 +139794,12 @@ "description": "If '\"TRUE\"', then the 'Policy' is enforced. If '\"FALSE\"', then any configuration is acceptable. This field can be set only in Policies for boolean constraints.", "description_kind": "plain", "optional": true + }, + "parameters": { + "type": "string", + "description": "Optional. Required for Managed Constraints if parameters defined in constraints. Pass parameter values when policy enforcement is enabled. Ensure that parameter value types match those defined in the constraint definition. For example: { \\\"allowedLocations\\\" : [\\\"us-east1\\\", \\\"us-west1\\\"], \\\"allowAll\\\" : true }", + "description_kind": "plain", + "optional": true } }, "block_types": { @@ -128552,6 +139927,12 @@ "description": "If '\"TRUE\"', then the 'Policy' is enforced. If '\"FALSE\"', then any configuration is acceptable. This field can be set only in Policies for boolean constraints.", "description_kind": "plain", "optional": true + }, + "parameters": { + "type": "string", + "description": "Optional. Required for Managed Constraints if parameters defined in constraints. Pass parameter values when policy enforcement is enabled. Ensure that parameter value types match those defined in the constraint definition. For example: { \\\"allowedLocations\\\" : [\\\"us-east1\\\", \\\"us-west1\\\"], \\\"allowAll\\\" : true }", + "description_kind": "plain", + "optional": true } }, "block_types": { @@ -128618,7 +139999,7 @@ "max_items": 1 } }, - "description": "Up to 10 PolicyRules are allowed. In Policies for boolean constraints, the following requirements apply: - There must be one and only one PolicyRule where condition is unset. - BooleanPolicyRules with conditions must set 'enforced' to the opposite of the PolicyRule without a condition. 
- During policy evaluation, PolicyRules with conditions that are true for a target resource take precedence.", + "description": "In Policies for boolean constraints, the following requirements apply: - There must be one and only one PolicyRule where condition is unset. - BooleanPolicyRules with conditions must set 'enforced' to the opposite of the PolicyRule without a condition. - During policy evaluation, PolicyRules with conditions that are true for a target resource take precedence.", "description_kind": "plain" } } @@ -131304,6 +142685,177 @@ "description_kind": "plain" } }, + "google_parallelstore_instance": { + "version": 0, + "block": { + "attributes": { + "access_points": { + "type": [ + "list", + "string" + ], + "description": "Output only. List of access_points.\nContains a list of IPv4 addresses used for client side configuration.", + "description_kind": "plain", + "computed": true + }, + "capacity_gib": { + "type": "string", + "description": "Required. Immutable. Storage capacity of Parallelstore instance in Gibibytes (GiB).", + "description_kind": "plain", + "required": true + }, + "create_time": { + "type": "string", + "description": "The time when the instance was created.", + "description_kind": "plain", + "computed": true + }, + "daos_version": { + "type": "string", + "description": "The version of DAOS software running in the instance.", + "description_kind": "plain", + "computed": true + }, + "deployment_type": { + "type": "string", + "description": "Parallelstore Instance deployment type.\n Possible values:\n DEPLOYMENT_TYPE_UNSPECIFIED\n SCRATCH\n PERSISTENT", + "description_kind": "plain", + "optional": true + }, + "description": { + "type": "string", + "description": "The description of the instance. 
2048 characters or less.", + "description_kind": "plain", + "optional": true + }, + "directory_stripe_level": { + "type": "string", + "description": "Stripe level for directories.\nMIN when directory has a small number of files.\nMAX when directory has a large number of files.\n Possible values:\n DIRECTORY_STRIPE_LEVEL_UNSPECIFIED\n DIRECTORY_STRIPE_LEVEL_MIN\n DIRECTORY_STRIPE_LEVEL_BALANCED\n DIRECTORY_STRIPE_LEVEL_MAX", + "description_kind": "plain", + "optional": true + }, + "effective_labels": { + "type": [ + "map", + "string" + ], + "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, + "effective_reserved_ip_range": { + "type": "string", + "description": "Immutable. Contains the id of the allocated IP address\nrange associated with the private service access connection for example, \\\"test-default\\\"\nassociated with IP range 10.0.0.0/29. 
This field is populated by the service\nand contains the value currently used by the service.", + "description_kind": "plain", + "computed": true + }, + "file_stripe_level": { + "type": "string", + "description": "Stripe level for files.\nMIN better suited for small size files.\nMAX higher throughput performance for larger files.\n Possible values:\n FILE_STRIPE_LEVEL_UNSPECIFIED\n FILE_STRIPE_LEVEL_MIN\n FILE_STRIPE_LEVEL_BALANCED\n FILE_STRIPE_LEVEL_MAX", + "description_kind": "plain", + "optional": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "instance_id": { + "type": "string", + "description": "The logical name of the Parallelstore instance in the user project with the following restrictions:\n * Must contain only lowercase letters, numbers, and hyphens.\n * Must start with a letter.\n * Must be between 1-63 characters.\n * Must end with a number or a letter.\n * Must be unique within the customer project/ location", + "description_kind": "plain", + "required": true + }, + "labels": { + "type": [ + "map", + "string" + ], + "description": "Cloud Labels are a flexible and lightweight mechanism for\norganizing cloud resources into groups that reflect a customer's organizational\nneeds and deployment strategies. Cloud Labels can be used to filter collections\nof resources. They can be used to control how resource metrics are aggregated.\nAnd they can be used as arguments to policy management rules (e.g. 
route, firewall,\nload balancing, etc.).\n\n* Label keys must be between 1 and 63 characters long and must conform to\n the following regular expression: 'a-z{0,62}'.\n* Label values must be between 0 and 63 characters long and must conform\n to the regular expression '[a-z0-9_-]{0,63}'.\n* No more than 64 labels can be associated with a given resource.\n\nSee https://goo.gl/xmQnxf for more information on and examples of labels.\n\nIf you plan to use labels in your own code, please note that additional\ncharacters may be allowed in the future. Therefore, you are advised to use\nan internal label representation, such as JSON, which doesn't rely upon\nspecific characters being disallowed. For example, representing labels\nas the string: 'name + \"_\" + value' would prove problematic if we were to\nallow '\"_\"' in a future release. \"\n\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", + "description_kind": "plain", + "optional": true + }, + "location": { + "type": "string", + "description": "Part of 'parent'. See documentation of 'projectsId'.", + "description_kind": "plain", + "required": true + }, + "name": { + "type": "string", + "description": "Identifier. The resource name of the instance, in the format\n'projects/{project}/locations/{location}/instances/{instance_id}'", + "description_kind": "plain", + "computed": true + }, + "network": { + "type": "string", + "description": "Immutable. The name of the Google Compute Engine [VPC network](https://cloud.google.com/vpc/docs/vpc)\nto which the instance is connected.", + "description_kind": "plain", + "optional": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "reserved_ip_range": { + "type": "string", + "description": "Immutable. 
Contains the id of the allocated IP address range\nassociated with the private service access connection for example, \\\"test-default\\\"\nassociated with IP range 10.0.0.0/29. If no range id is provided all ranges will\nbe considered.", + "description_kind": "plain", + "optional": true + }, + "state": { + "type": "string", + "description": "The instance state.\n Possible values:\n STATE_UNSPECIFIED\n CREATING\n ACTIVE\n DELETING\n FAILED\n UPGRADING", + "description_kind": "plain", + "computed": true + }, + "terraform_labels": { + "type": [ + "map", + "string" + ], + "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", + "description_kind": "plain", + "computed": true + }, + "update_time": { + "type": "string", + "description": "The time when the instance was updated.", + "description_kind": "plain", + "computed": true + } + }, + "block_types": { + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, "google_privateca_ca_pool": { "version": 0, "block": { @@ -132665,7 +144217,7 @@ "attributes": { "key_id": { "type": "string", - "description": "The value of the KeyId in lowercase hexidecimal.", + "description": "The value of the KeyId in lowercase hexadecimal.", "description_kind": "plain", "optional": true } @@ -133096,7 +144648,7 @@ }, "desired_state": { "type": "string", - "description": "Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA.", + "description": "Desired state of the CertificateAuthority. 
Set this field to 'STAGED' to create a 'STAGED' root CA.\nPossible values: ENABLED, DISABLED, STAGED.", "description_kind": "plain", "optional": true }, @@ -133342,7 +144894,7 @@ "attributes": { "key_id": { "type": "string", - "description": "The value of the KeyId in lowercase hexidecimal.", + "description": "The value of the KeyId in lowercase hexadecimal.", "description_kind": "plain", "optional": true } @@ -134805,205 +146357,206 @@ "description_kind": "plain", "required": true }, - "skip_delete": { - "type": "bool", - "description": "If true, the Terraform resource can be deleted without deleting the Project via the Google API.", - "description_kind": "plain", - "deprecated": true, - "optional": true, - "computed": true - }, - "terraform_labels": { + "tags": { "type": [ "map", "string" ], - "description": "(ReadOnly) The combination of labels configured directly on the resource and default labels configured on the provider.", - "description_kind": "plain", - "computed": true - } - }, - "block_types": { - "timeouts": { - "nesting_mode": "single", - "block": { - "attributes": { - "create": { - "type": "string", - "description_kind": "plain", - "optional": true - }, - "delete": { - "type": "string", - "description_kind": "plain", - "optional": true - }, - "read": { - "type": "string", - "description_kind": "plain", - "optional": true - }, - "update": { - "type": "string", - "description_kind": "plain", - "optional": true - } - }, - "description_kind": "plain" - } - } - }, - "description_kind": "plain" - } - }, - "google_project_access_approval_settings": { - "version": 0, - "block": { - "attributes": { - "active_key_version": { - "type": "string", - "description": "The asymmetric crypto key version to use for signing approval requests.\nEmpty active_key_version indicates that a Google-managed key should be used for signing.\nThis property will be ignored if set by an ancestor of the resource, and new non-empty values may not be set.", - "description_kind": 
"plain", - "optional": true - }, - "ancestor_has_active_key_version": { - "type": "bool", - "description": "If the field is true, that indicates that an ancestor of this Project has set active_key_version.", - "description_kind": "plain", - "computed": true - }, - "enrolled_ancestor": { - "type": "bool", - "description": "If the field is true, that indicates that at least one service is enrolled for Access Approval in one or more ancestors of the Project.", - "description_kind": "plain", - "computed": true - }, - "id": { - "type": "string", - "description_kind": "plain", - "optional": true, - "computed": true - }, - "invalid_key_version": { - "type": "bool", - "description": "If the field is true, that indicates that there is some configuration issue with the active_key_version\nconfigured on this Project (e.g. it doesn't exist or the Access Approval service account doesn't have the\ncorrect permissions on it, etc.) This key version is not necessarily the effective key version at this level,\nas key versions are inherited top-down.", - "description_kind": "plain", - "computed": true - }, - "name": { - "type": "string", - "description": "The resource name of the settings. Format is \"projects/{project_id}/accessApprovalSettings\"", - "description_kind": "plain", - "computed": true - }, - "notification_emails": { - "type": [ - "set", - "string" - ], - "description": "A list of email addresses to which notifications relating to approval requests should be sent.\nNotifications relating to a resource will be sent to all emails in the settings of ancestor\nresources of that resource. A maximum of 50 email addresses are allowed.", + "description": "A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored when empty. 
This field is only set at create time and modifying this field after creation will trigger recreation. To apply tags to an existing resource, see the google_tags_tag_value resource.", "description_kind": "plain", - "optional": true, - "computed": true - }, - "project": { - "type": "string", - "description": "Project id.", - "description_kind": "plain", - "deprecated": true, "optional": true }, - "project_id": { - "type": "string", - "description": "ID of the project of the access approval settings.", - "description_kind": "plain", - "required": true - } - }, - "block_types": { - "enrolled_services": { - "nesting_mode": "set", - "block": { - "attributes": { - "cloud_product": { - "type": "string", - "description": "The product for which Access Approval will be enrolled. Allowed values are listed (case-sensitive):\n all\n appengine.googleapis.com\n bigquery.googleapis.com\n bigtable.googleapis.com\n cloudkms.googleapis.com\n compute.googleapis.com\n dataflow.googleapis.com\n iam.googleapis.com\n pubsub.googleapis.com\n storage.googleapis.com", - "description_kind": "plain", - "required": true - }, - "enrollment_level": { - "type": "string", - "description": "The enrollment level of the service. Default value: \"BLOCK_ALL\" Possible values: [\"BLOCK_ALL\"]", - "description_kind": "plain", - "optional": true - } - }, - "description": "A list of Google Cloud Services for which the given resource has Access Approval enrolled.\nAccess requests for the resource given by name against any of these services contained here will be required\nto have explicit approval. 
Enrollment can only be done on an all or nothing basis.\n\nA maximum of 10 enrolled services will be enforced, to be expanded as the set of supported services is expanded.", - "description_kind": "plain" - }, - "min_items": 1 - }, - "timeouts": { - "nesting_mode": "single", - "block": { - "attributes": { - "create": { - "type": "string", - "description_kind": "plain", - "optional": true - }, - "delete": { - "type": "string", - "description_kind": "plain", - "optional": true - }, - "update": { - "type": "string", - "description_kind": "plain", - "optional": true - } - }, - "description_kind": "plain" - } - } - }, - "description_kind": "plain" - } - }, - "google_project_default_service_accounts": { - "version": 0, - "block": { - "attributes": { - "action": { - "type": "string", - "description": "The action to be performed in the default service accounts. Valid values are: DEPRIVILEGE, DELETE, DISABLE.\n\t\t\t\tNote that DEPRIVILEGE action will ignore the REVERT configuration in the restore_policy.", - "description_kind": "plain", - "required": true - }, - "id": { - "type": "string", - "description_kind": "plain", - "optional": true, - "computed": true - }, - "project": { - "type": "string", - "description": "The project ID where service accounts are created.", - "description_kind": "plain", - "required": true - }, - "restore_policy": { - "type": "string", - "description": "The action to be performed in the default service accounts on the resource destroy.\n\t\t\t\tValid values are NONE, REVERT and REVERT_AND_IGNORE_FAILURE. It is applied for any action but in the DEPRIVILEGE.", - "description_kind": "plain", - "optional": true - }, - "service_accounts": { + "terraform_labels": { "type": [ "map", "string" ], - "description": "The Service Accounts changed by this resource. 
It is used for revert the action on the destroy.", + "description": "(ReadOnly) The combination of labels configured directly on the resource and default labels configured on the provider.", + "description_kind": "plain", + "computed": true + } + }, + "block_types": { + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "read": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, + "google_project_access_approval_settings": { + "version": 0, + "block": { + "attributes": { + "active_key_version": { + "type": "string", + "description": "The asymmetric crypto key version to use for signing approval requests.\nEmpty active_key_version indicates that a Google-managed key should be used for signing.\nThis property will be ignored if set by an ancestor of the resource, and new non-empty values may not be set.", + "description_kind": "plain", + "optional": true + }, + "ancestor_has_active_key_version": { + "type": "bool", + "description": "If the field is true, that indicates that an ancestor of this Project has set active_key_version.", + "description_kind": "plain", + "computed": true + }, + "enrolled_ancestor": { + "type": "bool", + "description": "If the field is true, that indicates that at least one service is enrolled for Access Approval in one or more ancestors of the Project.", + "description_kind": "plain", + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "invalid_key_version": { + "type": "bool", + "description": "If the field is true, that indicates that there is some configuration issue 
with the active_key_version\nconfigured on this Project (e.g. it doesn't exist or the Access Approval service account doesn't have the\ncorrect permissions on it, etc.) This key version is not necessarily the effective key version at this level,\nas key versions are inherited top-down.", + "description_kind": "plain", + "computed": true + }, + "name": { + "type": "string", + "description": "The resource name of the settings. Format is \"projects/{project_id}/accessApprovalSettings\"", + "description_kind": "plain", + "computed": true + }, + "notification_emails": { + "type": [ + "set", + "string" + ], + "description": "A list of email addresses to which notifications relating to approval requests should be sent.\nNotifications relating to a resource will be sent to all emails in the settings of ancestor\nresources of that resource. A maximum of 50 email addresses are allowed.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "project": { + "type": "string", + "description": "Project id.", + "description_kind": "plain", + "deprecated": true, + "optional": true + }, + "project_id": { + "type": "string", + "description": "ID of the project of the access approval settings.", + "description_kind": "plain", + "required": true + } + }, + "block_types": { + "enrolled_services": { + "nesting_mode": "set", + "block": { + "attributes": { + "cloud_product": { + "type": "string", + "description": "The product for which Access Approval will be enrolled. Allowed values are listed (case-sensitive):\n all\n appengine.googleapis.com\n bigquery.googleapis.com\n bigtable.googleapis.com\n cloudkms.googleapis.com\n compute.googleapis.com\n dataflow.googleapis.com\n iam.googleapis.com\n pubsub.googleapis.com\n storage.googleapis.com", + "description_kind": "plain", + "required": true + }, + "enrollment_level": { + "type": "string", + "description": "The enrollment level of the service. 
Default value: \"BLOCK_ALL\" Possible values: [\"BLOCK_ALL\"]", + "description_kind": "plain", + "optional": true + } + }, + "description": "A list of Google Cloud Services for which the given resource has Access Approval enrolled.\nAccess requests for the resource given by name against any of these services contained here will be required\nto have explicit approval. Enrollment can only be done on an all or nothing basis.\n\nA maximum of 10 enrolled services will be enforced, to be expanded as the set of supported services is expanded.", + "description_kind": "plain" + }, + "min_items": 1 + }, + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, + "google_project_default_service_accounts": { + "version": 0, + "block": { + "attributes": { + "action": { + "type": "string", + "description": "The action to be performed in the default service accounts. Valid values are: DEPRIVILEGE, DELETE, DISABLE.\n\t\t\t\tNote that DEPRIVILEGE action will ignore the REVERT configuration in the restore_policy.", + "description_kind": "plain", + "required": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "project": { + "type": "string", + "description": "The project ID where service accounts are created.", + "description_kind": "plain", + "required": true + }, + "restore_policy": { + "type": "string", + "description": "The action to be performed in the default service accounts on the resource destroy.\n\t\t\t\tValid values are NONE, REVERT and REVERT_AND_IGNORE_FAILURE. 
It is applied for any action but in the DEPRIVILEGE.", + "description_kind": "plain", + "optional": true + }, + "service_accounts": { + "type": [ + "map", + "string" + ], + "description": "The Service Accounts changed by this resource. It is used for revert the action on the destroy.", "description_kind": "plain", "computed": true } @@ -136287,7 +147840,7 @@ }, "message_retention_duration": { "type": "string", - "description": "How long to retain unacknowledged messages in the subscription's\nbacklog, from the moment a message is published. If\nretain_acked_messages is true, then this also configures the retention\nof acknowledged messages, and thus configures how far back in time a\nsubscriptions.seek can be done. Defaults to 7 days. Cannot be more\nthan 7 days ('\"604800s\"') or less than 10 minutes ('\"600s\"').\n\nA duration in seconds with up to nine fractional digits, terminated\nby 's'. Example: '\"600.5s\"'.", + "description": "How long to retain unacknowledged messages in the subscription's\nbacklog, from the moment a message is published. If\nretain_acked_messages is true, then this also configures the retention\nof acknowledged messages, and thus configures how far back in time a\nsubscriptions.seek can be done. Defaults to 7 days. Cannot be more\nthan 31 days ('\"2678400s\"') or less than 10 minutes ('\"600s\"').\n\nA duration in seconds with up to nine fractional digits, terminated\nby 's'. Example: '\"600.5s\"'.", "description_kind": "plain", "optional": true }, @@ -136412,6 +147965,12 @@ "description_kind": "plain", "optional": true }, + "max_messages": { + "type": "number", + "description": "The maximum messages that can be written to a Cloud Storage file before a new file is created. Min 1000 messages.", + "description_kind": "plain", + "optional": true + }, "service_account_email": { "type": "string", "description": "The service account to use to write to Cloud Storage. 
If not specified, the Pub/Sub\n[service agent](https://cloud.google.com/iam/docs/service-agents),\nservice-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com, is used.", @@ -136430,6 +147989,12 @@ "nesting_mode": "list", "block": { "attributes": { + "use_topic_schema": { + "type": "bool", + "description": "When true, the output Cloud Storage file will be serialized using the topic schema, if it exists.", + "description_kind": "plain", + "optional": true + }, "write_metadata": { "type": "bool", "description": "When true, write the subscription name, messageId, publishTime, attributes, and orderingKey as additional fields in the output.", @@ -136872,6 +148437,84 @@ "description_kind": "plain" }, "max_items": 1 + }, + "cloud_storage": { + "nesting_mode": "list", + "block": { + "attributes": { + "bucket": { + "type": "string", + "description": "Cloud Storage bucket. The bucket name must be without any\nprefix like \"gs://\". See the bucket naming requirements:\nhttps://cloud.google.com/storage/docs/buckets#naming.", + "description_kind": "plain", + "required": true + }, + "match_glob": { + "type": "string", + "description": "Glob pattern used to match objects that will be ingested. If unset, all\nobjects will be ingested. See the supported patterns:\nhttps://cloud.google.com/storage/docs/json_api/v1/objects/list#list-objects-and-prefixes-using-glob", + "description_kind": "plain", + "optional": true + }, + "minimum_object_create_time": { + "type": "string", + "description": "The timestamp set in RFC3339 text format. If set, only objects with a\nlarger or equal timestamp will be ingested. Unset by default, meaning\nall objects will be ingested.", + "description_kind": "plain", + "optional": true + } + }, + "block_types": { + "avro_format": { + "nesting_mode": "list", + "block": { + "description": "Configuration for reading Cloud Storage data in Avro binary format. 
The\nbytes of each object will be set to the 'data' field of a Pub/Sub message.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "pubsub_avro_format": { + "nesting_mode": "list", + "block": { + "description": "Configuration for reading Cloud Storage data written via Cloud Storage\nsubscriptions(See https://cloud.google.com/pubsub/docs/cloudstorage). The\ndata and attributes fields of the originally exported Pub/Sub message\nwill be restored when publishing.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "text_format": { + "nesting_mode": "list", + "block": { + "attributes": { + "delimiter": { + "type": "string", + "description": "The delimiter to use when using the 'text' format. Each line of text as\nspecified by the delimiter will be set to the 'data' field of a Pub/Sub\nmessage. When unset, '\\n' is used.", + "description_kind": "plain", + "optional": true + } + }, + "description": "Configuration for reading Cloud Storage data in text format. Each line of\ntext as specified by the delimiter will be set to the 'data' field of a\nPub/Sub message.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "Settings for ingestion from Cloud Storage.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "platform_logs_settings": { + "nesting_mode": "list", + "block": { + "attributes": { + "severity": { + "type": "string", + "description": "The minimum severity level of Platform Logs that will be written. If unspecified,\nno Platform Logs will be written. Default value: \"SEVERITY_UNSPECIFIED\" Possible values: [\"SEVERITY_UNSPECIFIED\", \"DISABLED\", \"DEBUG\", \"INFO\", \"WARNING\", \"ERROR\"]", + "description_kind": "plain", + "optional": true + } + }, + "description": "Settings for Platform Logs regarding ingestion to Pub/Sub. 
If unset,\nno Platform Logs will be generated.'", + "description_kind": "plain" + }, + "max_items": 1 } }, "description": "Settings for ingestion from a data source into this topic.", @@ -137363,6 +149006,12 @@ "description_kind": "plain", "computed": true }, + "deletion_protection_enabled": { + "type": "bool", + "description": "Optional. Indicates if the cluster is deletion protected or not.\nIf the value if set to true, any delete cluster operation will fail.\nDefault value is true.", + "description_kind": "plain", + "optional": true + }, "discovery_endpoints": { "type": [ "list", @@ -137393,6 +149042,22 @@ "optional": true, "computed": true }, + "maintenance_schedule": { + "type": [ + "list", + [ + "object", + { + "end_time": "string", + "schedule_deadline_time": "string", + "start_time": "string" + } + ] + ], + "description": "Upcoming maintenance schedule.", + "description_kind": "plain", + "computed": true + }, "name": { "type": "string", "description": "Unique name of the resource in this scope including project and location using the form:\nprojects/{projectId}/locations/{locationId}/clusters/{clusterId}", @@ -137514,6 +149179,248 @@ } }, "block_types": { + "cross_cluster_replication_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "cluster_role": { + "type": "string", + "description": "The role of the cluster in cross cluster replication. Supported values are:\n\n1. 'CLUSTER_ROLE_UNSPECIFIED': This is an independent cluster that has never participated in cross cluster replication. It allows both reads and writes.\n\n1. 'NONE': This is an independent cluster that previously participated in cross cluster replication(either as a 'PRIMARY' or 'SECONDARY' cluster). It allows both reads and writes.\n\n1. 'PRIMARY': This cluster serves as the replication source for secondary clusters that are replicating from it. Any data written to it is automatically replicated to its secondary clusters. It allows both reads and writes.\n\n1. 
'SECONDARY': This cluster replicates data from the primary cluster. It allows only reads. Possible values: [\"CLUSTER_ROLE_UNSPECIFIED\", \"NONE\", \"PRIMARY\", \"SECONDARY\"]", + "description_kind": "plain", + "optional": true + }, + "membership": { + "type": [ + "list", + [ + "object", + { + "primary_cluster": [ + "list", + [ + "object", + { + "cluster": "string", + "uid": "string" + } + ] + ], + "secondary_clusters": [ + "list", + [ + "object", + { + "cluster": "string", + "uid": "string" + } + ] + ] + } + ] + ], + "description": "An output only view of all the member clusters participating in cross cluster replication. This field is populated for all the member clusters irrespective of their cluster role.", + "description_kind": "plain", + "computed": true + }, + "update_time": { + "type": "string", + "description": "The last time cross cluster replication config was updated.", + "description_kind": "plain", + "computed": true + } + }, + "block_types": { + "primary_cluster": { + "nesting_mode": "list", + "block": { + "attributes": { + "cluster": { + "type": "string", + "description": "The full resource path of the primary cluster in the format: projects/{project}/locations/{region}/clusters/{cluster-id}", + "description_kind": "plain", + "optional": true + }, + "uid": { + "type": "string", + "description": "The unique id of the primary cluster.", + "description_kind": "plain", + "computed": true + } + }, + "description": "Details of the primary cluster that is used as the replication source for this secondary cluster. 
This is allowed to be set only for clusters whose cluster role is of type 'SECONDARY'.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "secondary_clusters": { + "nesting_mode": "list", + "block": { + "attributes": { + "cluster": { + "type": "string", + "description": "The full resource path of the secondary cluster in the format: projects/{project}/locations/{region}/clusters/{cluster-id}", + "description_kind": "plain", + "optional": true + }, + "uid": { + "type": "string", + "description": "The unique id of the secondary cluster.", + "description_kind": "plain", + "computed": true + } + }, + "description": "List of secondary clusters that are replicating from this primary cluster. This is allowed to be set only for clusters whose cluster role is of type 'PRIMARY'.", + "description_kind": "plain" + } + } + }, + "description": "Cross cluster replication config", + "description_kind": "plain" + }, + "max_items": 1 + }, + "maintenance_policy": { + "nesting_mode": "list", + "block": { + "attributes": { + "create_time": { + "type": "string", + "description": "Output only. The time when the policy was created.\nA timestamp in RFC3339 UTC \"Zulu\" format, with nanosecond\nresolution and up to nine fractional digits.", + "description_kind": "plain", + "computed": true + }, + "update_time": { + "type": "string", + "description": "Output only. The time when the policy was last updated.\nA timestamp in RFC3339 UTC \"Zulu\" format, with nanosecond\nresolution and up to nine fractional digits.", + "description_kind": "plain", + "computed": true + } + }, + "block_types": { + "weekly_maintenance_window": { + "nesting_mode": "list", + "block": { + "attributes": { + "day": { + "type": "string", + "description": "Required. 
The day of week that maintenance updates occur.\n\n- DAY_OF_WEEK_UNSPECIFIED: The day of the week is unspecified.\n- MONDAY: Monday\n- TUESDAY: Tuesday\n- WEDNESDAY: Wednesday\n- THURSDAY: Thursday\n- FRIDAY: Friday\n- SATURDAY: Saturday\n- SUNDAY: Sunday Possible values: [\"DAY_OF_WEEK_UNSPECIFIED\", \"MONDAY\", \"TUESDAY\", \"WEDNESDAY\", \"THURSDAY\", \"FRIDAY\", \"SATURDAY\", \"SUNDAY\"]", + "description_kind": "plain", + "required": true + }, + "duration": { + "type": "string", + "description": "Output only. Duration of the maintenance window.\nThe current window is fixed at 1 hour.\nA duration in seconds with up to nine fractional digits,\nterminated by 's'. Example: \"3.5s\".", + "description_kind": "plain", + "computed": true + } + }, + "block_types": { + "start_time": { + "nesting_mode": "list", + "block": { + "attributes": { + "hours": { + "type": "number", + "description": "Hours of day in 24 hour format. Should be from 0 to 23.\nAn API may choose to allow the value \"24:00:00\" for scenarios like business closing time.", + "description_kind": "plain", + "optional": true + }, + "minutes": { + "type": "number", + "description": "Minutes of hour of day. Must be from 0 to 59.", + "description_kind": "plain", + "optional": true + }, + "nanos": { + "type": "number", + "description": "Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.", + "description_kind": "plain", + "optional": true + }, + "seconds": { + "type": "number", + "description": "Seconds of minutes of the time. Must normally be from 0 to 59.\nAn API may allow the value 60 if it allows leap-seconds.", + "description_kind": "plain", + "optional": true + } + }, + "description": "Required. Start time of the window in UTC time.", + "description_kind": "plain" + }, + "min_items": 1, + "max_items": 1 + } + }, + "description": "Optional. Maintenance window that is applied to resources covered by this policy.\nMinimum 1. 
For the current version, the maximum number\nof weekly_window is expected to be one.", + "description_kind": "plain" + } + } + }, + "description": "Maintenance policy for a cluster", + "description_kind": "plain" + }, + "max_items": 1 + }, + "persistence_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "mode": { + "type": "string", + "description": "Optional. Controls whether Persistence features are enabled. If not provided, the existing value will be used.\n\n- DISABLED: \tPersistence (both backup and restore) is disabled for the cluster.\n- RDB: RDB based Persistence is enabled.\n- AOF: AOF based Persistence is enabled. Possible values: [\"PERSISTENCE_MODE_UNSPECIFIED\", \"DISABLED\", \"RDB\", \"AOF\"]", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "block_types": { + "aof_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "append_fsync": { + "type": "string", + "description": "Optional. Available fsync modes.\n\n- NO - Do not explicitly call fsync(). Rely on OS defaults.\n- EVERYSEC - Call fsync() once per second in a background thread. A balance between performance and durability.\n- ALWAYS - Call fsync() for earch write command. Possible values: [\"APPEND_FSYNC_UNSPECIFIED\", \"NO\", \"EVERYSEC\", \"ALWAYS\"]", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "description": "AOF configuration. This field will be ignored if mode is not AOF.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "rdb_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "rdb_snapshot_period": { + "type": "string", + "description": "Optional. Available snapshot periods for scheduling.\n\n- ONE_HOUR:\tSnapshot every 1 hour.\n- SIX_HOURS:\tSnapshot every 6 hours.\n- TWELVE_HOURS:\tSnapshot every 12 hours.\n- TWENTY_FOUR_HOURS:\tSnapshot every 24 hours. 
Possible values: [\"SNAPSHOT_PERIOD_UNSPECIFIED\", \"ONE_HOUR\", \"SIX_HOURS\", \"TWELVE_HOURS\", \"TWENTY_FOUR_HOURS\"]", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "rdb_snapshot_start_time": { + "type": "string", + "description": "The time that the first snapshot was/will be attempted, and to which\nfuture snapshots will be aligned.\nIf not provided, the current time will be used.", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "description": "RDB configuration. This field will be ignored if mode is not RDB.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "Persistence config (RDB, AOF) for the cluster.", + "description_kind": "plain" + }, + "max_items": 1 + }, "psc_configs": { "nesting_mode": "list", "block": { @@ -138481,6 +150388,105 @@ "description_kind": "plain" } }, + "google_scc_folder_scc_big_query_export": { + "version": 0, + "block": { + "attributes": { + "big_query_export_id": { + "type": "string", + "description": "This must be unique within the organization.", + "description_kind": "plain", + "required": true + }, + "create_time": { + "type": "string", + "description": "The time at which the BigQuery export was created.\nA timestamp in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits.\nExamples: \"2014-10-02T15:01:23Z\" and \"2014-10-02T15:01:23.045123456Z\".", + "description_kind": "plain", + "computed": true + }, + "dataset": { + "type": "string", + "description": "The dataset to write findings' updates to.\nIts format is \"projects/[projectId]/datasets/[bigquery_dataset_id]\".\nBigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_).", + "description_kind": "plain", + "required": true + }, + "description": { + "type": "string", + "description": "The description of the export (max of 1024 characters).", + "description_kind": "plain", + "required": true + }, + "filter": 
{ + "type": "string", + "description": "Expression that defines the filter to apply across create/update\nevents of findings. The\nexpression is a list of zero or more restrictions combined via\nlogical operators AND and OR. Parentheses are supported, and OR\nhas higher precedence than AND.\n\nRestrictions have the form and may have\na - character in front of them to indicate negation. The fields\nmap to those defined in the corresponding resource.\n\nThe supported operators are:\n\n* = for all value types.\n* >, <, >=, <= for integer values.\n* :, meaning substring matching, for strings.\n\nThe supported value types are:\n\n* string literals in quotes.\n* integer literals without quotes.\n* boolean literals true and false without quotes.\n\nSee\n[Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications)\nfor information on how to write a filter.", + "description_kind": "plain", + "required": true + }, + "folder": { + "type": "string", + "description": "The folder where Cloud Security Command Center Big Query Export\nConfig lives in.", + "description_kind": "plain", + "required": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "most_recent_editor": { + "type": "string", + "description": "Email address of the user who last edited the BigQuery export.", + "description_kind": "plain", + "computed": true + }, + "name": { + "type": "string", + "description": "The resource name of this export, in the format\n'projects/{{project}}/bigQueryExports/{{big_query_export_id}}'.\nThis field is provided in responses, and is ignored when provided in create requests.", + "description_kind": "plain", + "computed": true + }, + "principal": { + "type": "string", + "description": "The service account that needs permission to create table and upload data to the BigQuery dataset.", + "description_kind": "plain", + "computed": true + }, + "update_time": { + "type": 
"string", + "description": "The most recent time at which the BigQuery export was updated.\nA timestamp in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits.\nExamples: \"2014-10-02T15:01:23Z\" and \"2014-10-02T15:01:23.045123456Z\".", + "description_kind": "plain", + "computed": true + } + }, + "block_types": { + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, "google_scc_management_folder_security_health_analytics_custom_module": { "version": 0, "block": { @@ -139678,6 +151684,105 @@ "description_kind": "plain" } }, + "google_scc_organization_scc_big_query_export": { + "version": 0, + "block": { + "attributes": { + "big_query_export_id": { + "type": "string", + "description": "This must be unique within the organization.", + "description_kind": "plain", + "required": true + }, + "create_time": { + "type": "string", + "description": "The time at which the BigQuery export was created. 
This field is set by the server and will be ignored if provided on export on creation.\nA timestamp in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits.\nExamples: \"2014-10-02T15:01:23Z\" and \"2014-10-02T15:01:23.045123456Z\".", + "description_kind": "plain", + "computed": true + }, + "dataset": { + "type": "string", + "description": "The dataset to write findings' updates to.\nIts format is \"projects/[projectId]/datasets/[bigquery_dataset_id]\".\nBigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_).", + "description_kind": "plain", + "optional": true + }, + "description": { + "type": "string", + "description": "The description of the notification config (max of 1024 characters).", + "description_kind": "plain", + "optional": true + }, + "filter": { + "type": "string", + "description": "Expression that defines the filter to apply across create/update\nevents of findings. The\nexpression is a list of zero or more restrictions combined via\nlogical operators AND and OR. Parentheses are supported, and OR\nhas higher precedence than AND.\n\nRestrictions have the form and may have\na - character in front of them to indicate negation. 
The fields\nmap to those defined in the corresponding resource.\n\nThe supported operators are:\n\n* = for all value types.\n* \\>, <, >=, <= for integer values.\n* :, meaning substring matching, for strings.\n\nThe supported value types are:\n\n* string literals in quotes.\n* integer literals without quotes.\n* boolean literals true and false without quotes.\n\nSee\n[Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications)\nfor information on how to write a filter.", + "description_kind": "plain", + "optional": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "most_recent_editor": { + "type": "string", + "description": "Email address of the user who last edited the BigQuery export.\nThis field is set by the server and will be ignored if provided on export creation or update.", + "description_kind": "plain", + "computed": true + }, + "name": { + "type": "string", + "description": "The resource name of this export, in the format\n'organizations/{{organization}}/bigQueryExports/{{big_query_export_id}}'.\nThis field is provided in responses, and is ignored when provided in create requests.", + "description_kind": "plain", + "computed": true + }, + "organization": { + "type": "string", + "description": "The organization whose Cloud Security Command Center the Big Query Export\nConfig lives in.", + "description_kind": "plain", + "required": true + }, + "principal": { + "type": "string", + "description": "The service account that needs permission to create table and upload data to the BigQuery dataset.", + "description_kind": "plain", + "computed": true + }, + "update_time": { + "type": "string", + "description": "The most recent time at which the BigQuery export was updated. 
This field is set by the server and will be ignored if provided on export creation or update.\nA timestamp in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits.\nExamples: \"2014-10-02T15:01:23Z\" and \"2014-10-02T15:01:23.045123456Z\".", + "description_kind": "plain", + "computed": true + } + }, + "block_types": { + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, "google_scc_project_custom_module": { "version": 0, "block": { @@ -139998,6 +152103,105 @@ "description_kind": "plain" } }, + "google_scc_project_scc_big_query_export": { + "version": 0, + "block": { + "attributes": { + "big_query_export_id": { + "type": "string", + "description": "This must be unique within the organization.", + "description_kind": "plain", + "required": true + }, + "create_time": { + "type": "string", + "description": "The time at which the BigQuery export was created. 
This field is set by the server and will be ignored if provided on export on creation.\nA timestamp in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits.\nExamples: \"2014-10-02T15:01:23Z\" and \"2014-10-02T15:01:23.045123456Z\".", + "description_kind": "plain", + "computed": true + }, + "dataset": { + "type": "string", + "description": "The dataset to write findings' updates to.\nIts format is \"projects/[projectId]/datasets/[bigquery_dataset_id]\".\nBigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_).", + "description_kind": "plain", + "optional": true + }, + "description": { + "type": "string", + "description": "The description of the notification config (max of 1024 characters).", + "description_kind": "plain", + "optional": true + }, + "filter": { + "type": "string", + "description": "Expression that defines the filter to apply across create/update\nevents of findings. The\nexpression is a list of zero or more restrictions combined via\nlogical operators AND and OR. Parentheses are supported, and OR\nhas higher precedence than AND.\n\nRestrictions have the form and may have\na - character in front of them to indicate negation. 
The fields\nmap to those defined in the corresponding resource.\n\nThe supported operators are:\n\n* = for all value types.\n* \\>, <, >=, <= for integer values.\n* :, meaning substring matching, for strings.\n\nThe supported value types are:\n\n* string literals in quotes.\n* integer literals without quotes.\n* boolean literals true and false without quotes.\n\nSee\n[Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications)\nfor information on how to write a filter.", + "description_kind": "plain", + "optional": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "most_recent_editor": { + "type": "string", + "description": "Email address of the user who last edited the BigQuery export.\nThis field is set by the server and will be ignored if provided on export creation or update.", + "description_kind": "plain", + "computed": true + }, + "name": { + "type": "string", + "description": "The resource name of this export, in the format\n'projects/{{project}}/bigQueryExports/{{big_query_export_id}}'.\nThis field is provided in responses, and is ignored when provided in create requests.", + "description_kind": "plain", + "computed": true + }, + "principal": { + "type": "string", + "description": "The service account that needs permission to create table and upload data to the BigQuery dataset.", + "description_kind": "plain", + "computed": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "update_time": { + "type": "string", + "description": "The most recent time at which the BigQuery export was updated. 
This field is set by the server and will be ignored if provided on export creation or update.\nA timestamp in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits.\nExamples: \"2014-10-02T15:01:23Z\" and \"2014-10-02T15:01:23.045123456Z\".", + "description_kind": "plain", + "computed": true + } + }, + "block_types": { + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, "google_scc_source": { "version": 0, "block": { @@ -140425,6 +152629,111 @@ "description_kind": "plain" } }, + "google_scc_v2_folder_scc_big_query_export": { + "version": 0, + "block": { + "attributes": { + "big_query_export_id": { + "type": "string", + "description": "This must be unique within the organization. It must consist of only lowercase letters,\nnumbers, and hyphens, must start with a letter, must end with either a letter or a number,\nand must be 63 characters or less.", + "description_kind": "plain", + "required": true + }, + "create_time": { + "type": "string", + "description": "The time at which the BigQuery export was created. 
This field is set by the server and will be ignored if provided on export on creation.\nA timestamp in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits.\nExamples: \"2014-10-02T15:01:23Z\" and \"2014-10-02T15:01:23.045123456Z\".", + "description_kind": "plain", + "computed": true + }, + "dataset": { + "type": "string", + "description": "The dataset to write findings' updates to.\nIts format is \"projects/[projectId]/datasets/[bigquery_dataset_id]\".\nBigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_).", + "description_kind": "plain", + "optional": true + }, + "description": { + "type": "string", + "description": "The description of the notification config (max of 1024 characters).", + "description_kind": "plain", + "optional": true + }, + "filter": { + "type": "string", + "description": "Expression that defines the filter to apply across create/update\nevents of findings. The\nexpression is a list of zero or more restrictions combined via\nlogical operators AND and OR. Parentheses are supported, and OR\nhas higher precedence than AND.\n\nRestrictions have the form and may have\na - character in front of them to indicate negation. 
The fields\nmap to those defined in the corresponding resource.\n\nThe supported operators are:\n\n* = for all value types.\n* >, <, >=, <= for integer values.\n* :, meaning substring matching, for strings.\n\nThe supported value types are:\n\n* string literals in quotes.\n* integer literals without quotes.\n* boolean literals true and false without quotes.\n\nSee\n[Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications)\nfor information on how to write a filter.", + "description_kind": "plain", + "optional": true + }, + "folder": { + "type": "string", + "description": "The folder where Cloud Security Command Center Big Query Export\nConfig lives in.", + "description_kind": "plain", + "required": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "location": { + "type": "string", + "description": "The BigQuery export configuration is stored in this location. If not provided, Use global as default.", + "description_kind": "plain", + "optional": true + }, + "most_recent_editor": { + "type": "string", + "description": "Email address of the user who last edited the BigQuery export.\nThis field is set by the server and will be ignored if provided on export creation or update.", + "description_kind": "plain", + "computed": true + }, + "name": { + "type": "string", + "description": "The resource name of this export, in the format\n'folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}'.\nThis field is provided in responses, and is ignored when provided in create requests.", + "description_kind": "plain", + "computed": true + }, + "principal": { + "type": "string", + "description": "The service account that needs permission to create table and upload data to the BigQuery dataset.", + "description_kind": "plain", + "computed": true + }, + "update_time": { + "type": "string", + "description": "The most recent time at which the 
BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update.\nA timestamp in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits.\nExamples: \"2014-10-02T15:01:23Z\" and \"2014-10-02T15:01:23.045123456Z\".", + "description_kind": "plain", + "computed": true + } + }, + "block_types": { + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, "google_scc_v2_organization_mute_config": { "version": 0, "block": { @@ -140622,7 +152931,7 @@ "description_kind": "plain" } }, - "google_scc_v2_organization_scc_big_query_exports": { + "google_scc_v2_organization_scc_big_query_export": { "version": 0, "block": { "attributes": { @@ -140727,6 +153036,112 @@ "description_kind": "plain" } }, + "google_scc_v2_organization_scc_big_query_exports": { + "version": 0, + "block": { + "attributes": { + "big_query_export_id": { + "type": "string", + "description": "This must be unique within the organization.", + "description_kind": "plain", + "required": true + }, + "create_time": { + "type": "string", + "description": "The time at which the BigQuery export was created. 
This field is set by the server and will be ignored if provided on export on creation.\nA timestamp in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits.\nExamples: \"2014-10-02T15:01:23Z\" and \"2014-10-02T15:01:23.045123456Z\".", + "description_kind": "plain", + "computed": true + }, + "dataset": { + "type": "string", + "description": "The dataset to write findings' updates to.\nIts format is \"projects/[projectId]/datasets/[bigquery_dataset_id]\".\nBigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_).", + "description_kind": "plain", + "optional": true + }, + "description": { + "type": "string", + "description": "The description of the notification config (max of 1024 characters).", + "description_kind": "plain", + "optional": true + }, + "filter": { + "type": "string", + "description": "Expression that defines the filter to apply across create/update\nevents of findings. The\nexpression is a list of zero or more restrictions combined via\nlogical operators AND and OR. Parentheses are supported, and OR\nhas higher precedence than AND.\n\nRestrictions have the form and may have\na - character in front of them to indicate negation. 
The fields\nmap to those defined in the corresponding resource.\n\nThe supported operators are:\n\n* = for all value types.\n* >, <, >=, <= for integer values.\n* :, meaning substring matching, for strings.\n\nThe supported value types are:\n\n* string literals in quotes.\n* integer literals without quotes.\n* boolean literals true and false without quotes.\n\nSee\n[Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications)\nfor information on how to write a filter.", + "description_kind": "plain", + "optional": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "location": { + "type": "string", + "description": "location Id is provided by organization. If not provided, Use global as default.", + "description_kind": "plain", + "optional": true + }, + "most_recent_editor": { + "type": "string", + "description": "Email address of the user who last edited the BigQuery export.\nThis field is set by the server and will be ignored if provided on export creation or update.", + "description_kind": "plain", + "computed": true + }, + "name": { + "type": "string", + "description": "The resource name of this export, in the format\n'organizations/{{organization}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}'.\nThis field is provided in responses, and is ignored when provided in create requests.", + "description_kind": "plain", + "optional": true + }, + "organization": { + "type": "string", + "description": "The organization whose Cloud Security Command Center the Big Query Export\nConfig lives in.", + "description_kind": "plain", + "required": true + }, + "principal": { + "type": "string", + "description": "The service account that needs permission to create table and upload data to the BigQuery dataset.", + "description_kind": "plain", + "computed": true + }, + "update_time": { + "type": "string", + "description": "The most recent time at 
which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update.\nA timestamp in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits.\nExamples: \"2014-10-02T15:01:23Z\" and \"2014-10-02T15:01:23.045123456Z\".", + "description_kind": "plain", + "computed": true + } + }, + "block_types": { + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain", + "deprecated": true + } + }, "google_scc_v2_organization_source": { "version": 0, "block": { @@ -141154,49 +153569,39 @@ "description_kind": "plain" } }, - "google_secret_manager_secret": { + "google_scc_v2_project_scc_big_query_export": { "version": 0, "block": { "attributes": { - "annotations": { - "type": [ - "map", - "string" - ], - "description": "Custom metadata about the secret.\n\nAnnotations are distinct from various forms of labels. Annotations exist to allow\nclient tools to store their own state information without requiring a database.\n\nAnnotation keys must be between 1 and 63 characters long, have a UTF-8 encoding of\nmaximum 128 bytes, begin and end with an alphanumeric character ([a-z0-9A-Z]), and\nmay have dashes (-), underscores (_), dots (.), and alphanumerics in between these\nsymbols.\n\nThe total size of annotation keys and values must be less than 16KiB.\n\nAn object containing a list of \"key\": value pairs. 
Example:\n{ \"name\": \"wrench\", \"mass\": \"1.3kg\", \"count\": \"3\" }.\n\n\n**Note**: This field is non-authoritative, and will only manage the annotations present in your configuration.\nPlease refer to the field 'effective_annotations' for all of the annotations present on the resource.", + "big_query_export_id": { + "type": "string", + "description": "This must be unique within the organization.", "description_kind": "plain", - "optional": true + "required": true }, "create_time": { "type": "string", - "description": "The time at which the Secret was created.", + "description": "The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export on creation.\nA timestamp in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits.\nExamples: \"2014-10-02T15:01:23Z\" and \"2014-10-02T15:01:23.045123456Z\".", "description_kind": "plain", "computed": true }, - "effective_annotations": { - "type": [ - "map", - "string" - ], - "description": "All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services.", + "dataset": { + "type": "string", + "description": "The dataset to write findings' updates to.\nIts format is \"projects/[projectId]/datasets/[bigquery_dataset_id]\".\nBigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_).", "description_kind": "plain", - "computed": true + "optional": true }, - "effective_labels": { - "type": [ - "map", - "string" - ], - "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + "description": { + "type": "string", + "description": "The description of the notification config (max of 1024 characters).", "description_kind": "plain", - "computed": true + "optional": true }, - "expire_time": { + "filter": { 
"type": "string", - "description": "Timestamp in UTC when the Secret is scheduled to expire. This is always provided on output, regardless of what was sent on input.\nA timestamp in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits. Examples: \"2014-10-02T15:01:23Z\" and \"2014-10-02T15:01:23.045123456Z\".\nOnly one of 'expire_time' or 'ttl' can be provided.", + "description": "Expression that defines the filter to apply across create/update\nevents of findings. The\nexpression is a list of zero or more restrictions combined via\nlogical operators AND and OR. Parentheses are supported, and OR\nhas higher precedence than AND.\n\nRestrictions have the form and may have\na - character in front of them to indicate negation. The fields\nmap to those defined in the corresponding resource.\n\nThe supported operators are:\n\n* = for all value types.\n* >, <, >=, <= for integer values.\n* :, meaning substring matching, for strings.\n\nThe supported value types are:\n\n* string literals in quotes.\n* integer literals without quotes.\n* boolean literals true and false without quotes.\n\nSee\n[Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications)\nfor information on how to write a filter.", "description_kind": "plain", - "optional": true, - "computed": true + "optional": true }, "id": { "type": "string", @@ -141204,168 +153609,780 @@ "optional": true, "computed": true }, - "labels": { - "type": [ - "map", - "string" - ], - "description": "The labels assigned to this Secret.\n\nLabel keys must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes,\nand must conform to the following PCRE regular expression: [\\p{Ll}\\p{Lo}][\\p{Ll}\\p{Lo}\\p{N}_-]{0,62}\n\nLabel values must be between 0 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes,\nand must conform to the following PCRE regular expression: [\\p{Ll}\\p{Lo}\\p{N}_-]{0,63}\n\nNo more than 
64 labels can be assigned to a given resource.\n\nAn object containing a list of \"key\": value pairs. Example:\n{ \"name\": \"wrench\", \"mass\": \"1.3kg\", \"count\": \"3\" }.\n\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", + "location": { + "type": "string", + "description": "location Id is provided by organization. If not provided, Use global as default.", "description_kind": "plain", "optional": true }, - "name": { + "most_recent_editor": { "type": "string", - "description": "The resource name of the Secret. Format:\n'projects/{{project}}/secrets/{{secret_id}}'", + "description": "Email address of the user who last edited the BigQuery export.\nThis field is set by the server and will be ignored if provided on export creation or update.", "description_kind": "plain", "computed": true }, - "project": { + "name": { "type": "string", + "description": "The resource name of this export, in the format\n'projects/{{project}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}'.\nThis field is provided in responses, and is ignored when provided in create requests.", "description_kind": "plain", - "optional": true, "computed": true }, - "secret_id": { + "principal": { "type": "string", - "description": "This must be unique within the project.", - "description_kind": "plain", - "required": true - }, - "terraform_labels": { - "type": [ - "map", - "string" - ], - "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", + "description": "The service account that needs permission to create table and upload data to the BigQuery dataset.", "description_kind": "plain", "computed": true }, - "ttl": { + "project": { "type": "string", - "description": "The TTL for the Secret.\nA duration in seconds with up to nine fractional digits, terminated 
by 's'. Example: \"3.5s\".\nOnly one of 'ttl' or 'expire_time' can be provided.", - "description_kind": "plain", - "optional": true - }, - "version_aliases": { - "type": [ - "map", - "string" - ], - "description": "Mapping from version alias to version name.\n\nA version alias is a string with a maximum length of 63 characters and can contain\nuppercase and lowercase letters, numerals, and the hyphen (-) and underscore ('_')\ncharacters. An alias string must start with a letter and cannot be the string\n'latest' or 'NEW'. No more than 50 aliases can be assigned to a given secret.\n\nAn object containing a list of \"key\": value pairs. Example:\n{ \"name\": \"wrench\", \"mass\": \"1.3kg\", \"count\": \"3\" }.", "description_kind": "plain", - "optional": true + "optional": true, + "computed": true }, - "version_destroy_ttl": { + "update_time": { "type": "string", - "description": "Secret Version TTL after destruction request.\nThis is a part of the delayed delete feature on Secret Version.\nFor secret with versionDestroyTtl>0, version destruction doesn't happen immediately\non calling destroy instead the version goes to a disabled state and\nthe actual destruction happens after this TTL expires.", + "description": "The most recent time at which the BigQuery export was updated. 
This field is set by the server and will be ignored if provided on export creation or update.\nA timestamp in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits.\nExamples: \"2014-10-02T15:01:23Z\" and \"2014-10-02T15:01:23.045123456Z\".", "description_kind": "plain", - "optional": true + "computed": true } }, "block_types": { - "replication": { - "nesting_mode": "list", - "block": { - "block_types": { - "auto": { - "nesting_mode": "list", - "block": { - "block_types": { - "customer_managed_encryption": { - "nesting_mode": "list", - "block": { - "attributes": { - "kms_key_name": { - "type": "string", - "description": "The resource name of the Cloud KMS CryptoKey used to encrypt secret payloads.", - "description_kind": "plain", - "required": true - } - }, - "description": "The customer-managed encryption configuration of the Secret.\nIf no configuration is provided, Google-managed default\nencryption is used.", - "description_kind": "plain" - }, - "max_items": 1 - } - }, - "description": "The Secret will automatically be replicated without any restrictions.", - "description_kind": "plain" - }, - "max_items": 1 - }, - "user_managed": { - "nesting_mode": "list", - "block": { - "block_types": { - "replicas": { - "nesting_mode": "list", - "block": { - "attributes": { - "location": { - "type": "string", - "description": "The canonical IDs of the location to replicate data. 
For example: \"us-east1\".", - "description_kind": "plain", - "required": true - } - }, - "block_types": { - "customer_managed_encryption": { - "nesting_mode": "list", - "block": { - "attributes": { - "kms_key_name": { - "type": "string", - "description": "Describes the Cloud KMS encryption key that will be used to protect destination secret.", - "description_kind": "plain", - "required": true - } - }, - "description": "Customer Managed Encryption for the secret.", - "description_kind": "plain" - }, - "max_items": 1 - } - }, - "description": "The list of Replicas for this Secret. Cannot be empty.", - "description_kind": "plain" - }, - "min_items": 1 - } - }, - "description": "The Secret will be replicated to the regions specified by the user.", - "description_kind": "plain" - }, - "max_items": 1 - } - }, - "description": "The replication policy of the secret data attached to the Secret. It cannot be changed\nafter the Secret has been created.", - "description_kind": "plain" - }, - "min_items": 1, - "max_items": 1 - }, - "rotation": { - "nesting_mode": "list", - "block": { - "attributes": { - "next_rotation_time": { - "type": "string", - "description": "Timestamp in UTC at which the Secret is scheduled to rotate.\nA timestamp in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits. Examples: \"2014-10-02T15:01:23Z\" and \"2014-10-02T15:01:23.045123456Z\".", - "description_kind": "plain", - "optional": true - }, - "rotation_period": { - "type": "string", - "description": "The Duration between rotation notifications. Must be in seconds and at least 3600s (1h) and at most 3153600000s (100 years).\nIf rotationPeriod is set, 'next_rotation_time' must be set. 'next_rotation_time' will be advanced by this period when the service automatically sends rotation notifications.", - "description_kind": "plain", - "optional": true - } - }, - "description": "The rotation time and period for a Secret. 
At 'next_rotation_time', Secret Manager will send a Pub/Sub notification to the topics configured on the Secret. 'topics' must be set to configure rotation.", - "description_kind": "plain" - }, - "max_items": 1 - }, + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, + "google_secret_manager_regional_secret": { + "version": 0, + "block": { + "attributes": { + "annotations": { + "type": [ + "map", + "string" + ], + "description": "Custom metadata about the regional secret.\n\nAnnotations are distinct from various forms of labels. Annotations exist to allow\nclient tools to store their own state information without requiring a database.\n\nAnnotation keys must be between 1 and 63 characters long, have a UTF-8 encoding of\nmaximum 128 bytes, begin and end with an alphanumeric character ([a-z0-9A-Z]), and\nmay have dashes (-), underscores (_), dots (.), and alphanumerics in between these\nsymbols.\n\nThe total size of annotation keys and values must be less than 16KiB.\n\nAn object containing a list of \"key\": value pairs. 
Example:\n{ \"name\": \"wrench\", \"mass\": \"1.3kg\", \"count\": \"3\" }.\n\n\n**Note**: This field is non-authoritative, and will only manage the annotations present in your configuration.\nPlease refer to the field 'effective_annotations' for all of the annotations present on the resource.", + "description_kind": "plain", + "optional": true + }, + "create_time": { + "type": "string", + "description": "The time at which the regional secret was created.", + "description_kind": "plain", + "computed": true + }, + "effective_annotations": { + "type": [ + "map", + "string" + ], + "description": "All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, + "effective_labels": { + "type": [ + "map", + "string" + ], + "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, + "expire_time": { + "type": "string", + "description": "Timestamp in UTC when the regional secret is scheduled to expire. This is always provided on\noutput, regardless of what was sent on input. A timestamp in RFC3339 UTC \"Zulu\" format, with\nnanosecond resolution and up to nine fractional digits. Examples: \"2014-10-02T15:01:23Z\" and\n\"2014-10-02T15:01:23.045123456Z\". 
Only one of 'expire_time' or 'ttl' can be provided.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "labels": { + "type": [ + "map", + "string" + ], + "description": "The labels assigned to this regional secret.\n\nLabel keys must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes,\nand must conform to the following PCRE regular expression: [\\p{Ll}\\p{Lo}][\\p{Ll}\\p{Lo}\\p{N}_-]{0,62}\n\nLabel values must be between 0 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes,\nand must conform to the following PCRE regular expression: [\\p{Ll}\\p{Lo}\\p{N}_-]{0,63}\n\nNo more than 64 labels can be assigned to a given resource.\n\nAn object containing a list of \"key\": value pairs. Example:\n{ \"name\": \"wrench\", \"mass\": \"1.3kg\", \"count\": \"3\" }.\n\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", + "description_kind": "plain", + "optional": true + }, + "location": { + "type": "string", + "description": "The location of the regional secret. eg us-central1", + "description_kind": "plain", + "required": true + }, + "name": { + "type": "string", + "description": "The resource name of the regional secret. 
Format:\n'projects/{{project}}/locations/{{location}}/secrets/{{secret_id}}'", + "description_kind": "plain", + "computed": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "secret_id": { + "type": "string", + "description": "This must be unique within the project.", + "description_kind": "plain", + "required": true + }, + "terraform_labels": { + "type": [ + "map", + "string" + ], + "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", + "description_kind": "plain", + "computed": true + }, + "ttl": { + "type": "string", + "description": "The TTL for the regional secret. A duration in seconds with up to nine fractional digits,\nterminated by 's'. Example: \"3.5s\". Only one of 'ttl' or 'expire_time' can be provided.", + "description_kind": "plain", + "optional": true + }, + "version_aliases": { + "type": [ + "map", + "string" + ], + "description": "Mapping from version alias to version name.\n\nA version alias is a string with a maximum length of 63 characters and can contain\nuppercase and lowercase letters, numerals, and the hyphen (-) and underscore ('_')\ncharacters. An alias string must start with a letter and cannot be the string\n'latest' or 'NEW'. No more than 50 aliases can be assigned to a given secret.\n\nAn object containing a list of \"key\": value pairs. Example:\n{ \"name\": \"wrench\", \"mass\": \"1.3kg\", \"count\": \"3\" }.", + "description_kind": "plain", + "optional": true + }, + "version_destroy_ttl": { + "type": "string", + "description": "Secret Version TTL after destruction request.\nThis is a part of the delayed delete feature on Secret Version.\nFor secret with versionDestroyTtl>0, version destruction doesn't happen immediately\non calling destroy instead the version goes to a disabled state and\nthe actual destruction happens after this TTL expires. 
It must be atleast 24h.", + "description_kind": "plain", + "optional": true + } + }, + "block_types": { + "customer_managed_encryption": { + "nesting_mode": "list", + "block": { + "attributes": { + "kms_key_name": { + "type": "string", + "description": "The resource name of the Cloud KMS CryptoKey used to encrypt secret payloads.", + "description_kind": "plain", + "required": true + } + }, + "description": "The customer-managed encryption configuration of the regional secret.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "rotation": { + "nesting_mode": "list", + "block": { + "attributes": { + "next_rotation_time": { + "type": "string", + "description": "Timestamp in UTC at which the Secret is scheduled to rotate.\nA timestamp in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine\nfractional digits. Examples: \"2014-10-02T15:01:23Z\" and \"2014-10-02T15:01:23.045123456Z\".", + "description_kind": "plain", + "optional": true + }, + "rotation_period": { + "type": "string", + "description": "The Duration between rotation notifications. Must be in seconds and at least 3600s (1h)\nand at most 3153600000s (100 years). If rotationPeriod is set, 'next_rotation_time' must\nbe set. 'next_rotation_time' will be advanced by this period when the service\nautomatically sends rotation notifications.", + "description_kind": "plain", + "optional": true + } + }, + "description": "The rotation time and period for a regional secret. At 'next_rotation_time', Secret Manager\nwill send a Pub/Sub notification to the topics configured on the Secret. 
'topics' must be\nset to configure rotation.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + }, + "topics": { + "nesting_mode": "list", + "block": { + "attributes": { + "name": { + "type": "string", + "description": "The resource name of the Pub/Sub topic that will be published to, in the following format:\nprojects/*/topics/*. For publication to succeed, the Secret Manager Service\nAgent service account must have pubsub.publisher permissions on the topic.", + "description_kind": "plain", + "required": true + } + }, + "description": "A list of up to 10 Pub/Sub topics to which messages are published when control plane\noperations are called on the regional secret or its versions.", + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, + "google_secret_manager_regional_secret_iam_binding": { + "version": 0, + "block": { + "attributes": { + "etag": { + "type": "string", + "description_kind": "plain", + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "location": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "members": { + "type": [ + "set", + "string" + ], + "description_kind": "plain", + "required": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "role": { + "type": "string", + "description_kind": "plain", + "required": true + }, + "secret_id": { + "type": "string", + "description_kind": "plain", + "required": true + } + }, + "block_types": { + 
"condition": { + "nesting_mode": "list", + "block": { + "attributes": { + "description": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "expression": { + "type": "string", + "description_kind": "plain", + "required": true + }, + "title": { + "type": "string", + "description_kind": "plain", + "required": true + } + }, + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description_kind": "plain" + } + }, + "google_secret_manager_regional_secret_iam_member": { + "version": 0, + "block": { + "attributes": { + "etag": { + "type": "string", + "description_kind": "plain", + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "location": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "member": { + "type": "string", + "description_kind": "plain", + "required": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "role": { + "type": "string", + "description_kind": "plain", + "required": true + }, + "secret_id": { + "type": "string", + "description_kind": "plain", + "required": true + } + }, + "block_types": { + "condition": { + "nesting_mode": "list", + "block": { + "attributes": { + "description": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "expression": { + "type": "string", + "description_kind": "plain", + "required": true + }, + "title": { + "type": "string", + "description_kind": "plain", + "required": true + } + }, + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description_kind": "plain" + } + }, + "google_secret_manager_regional_secret_iam_policy": { + "version": 0, + "block": { + "attributes": { + "etag": { + "type": "string", + "description_kind": "plain", + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": 
true + }, + "location": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "policy_data": { + "type": "string", + "description_kind": "plain", + "required": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "secret_id": { + "type": "string", + "description_kind": "plain", + "required": true + } + }, + "description_kind": "plain" + } + }, + "google_secret_manager_regional_secret_version": { + "version": 0, + "block": { + "attributes": { + "create_time": { + "type": "string", + "description": "The time at which the regional secret version was created.", + "description_kind": "plain", + "computed": true + }, + "customer_managed_encryption": { + "type": [ + "list", + [ + "object", + { + "kms_key_version_name": "string" + } + ] + ], + "description": "The customer-managed encryption configuration of the regional secret.", + "description_kind": "plain", + "computed": true + }, + "deletion_policy": { + "type": "string", + "description": "The deletion policy for the regional secret version. Setting 'ABANDON' allows the resource\nto be abandoned rather than deleted. Setting 'DISABLE' allows the resource to be\ndisabled rather than deleted. Default is 'DELETE'. Possible values are:\n * DELETE\n * DISABLE\n * ABANDON", + "description_kind": "plain", + "optional": true + }, + "destroy_time": { + "type": "string", + "description": "The time at which the regional secret version was destroyed. 
Only present if state is DESTROYED.", + "description_kind": "plain", + "computed": true + }, + "enabled": { + "type": "bool", + "description": "The current state of the regional secret version.", + "description_kind": "plain", + "optional": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "is_secret_data_base64": { + "type": "bool", + "description": "If set to 'true', the secret data is expected to be base64-encoded string and would be sent as is.", + "description_kind": "plain", + "optional": true + }, + "location": { + "type": "string", + "description": "Location of Secret Manager regional secret resource.", + "description_kind": "plain", + "computed": true + }, + "name": { + "type": "string", + "description": "The resource name of the regional secret version. Format:\n'projects/{{project}}/locations/{{location}}/secrets/{{secret_id}}/versions/{{version}}'", + "description_kind": "plain", + "computed": true + }, + "secret": { + "type": "string", + "description": "Secret Manager regional secret resource.", + "description_kind": "plain", + "required": true + }, + "secret_data": { + "type": "string", + "description": "The secret data. 
Must be no larger than 64KiB.", + "description_kind": "plain", + "required": true, + "sensitive": true + }, + "version": { + "type": "string", + "description": "The version of the Regional Secret.", + "description_kind": "plain", + "computed": true + } + }, + "block_types": { + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, + "google_secret_manager_secret": { + "version": 0, + "block": { + "attributes": { + "annotations": { + "type": [ + "map", + "string" + ], + "description": "Custom metadata about the secret.\n\nAnnotations are distinct from various forms of labels. Annotations exist to allow\nclient tools to store their own state information without requiring a database.\n\nAnnotation keys must be between 1 and 63 characters long, have a UTF-8 encoding of\nmaximum 128 bytes, begin and end with an alphanumeric character ([a-z0-9A-Z]), and\nmay have dashes (-), underscores (_), dots (.), and alphanumerics in between these\nsymbols.\n\nThe total size of annotation keys and values must be less than 16KiB.\n\nAn object containing a list of \"key\": value pairs. 
Example:\n{ \"name\": \"wrench\", \"mass\": \"1.3kg\", \"count\": \"3\" }.\n\n\n**Note**: This field is non-authoritative, and will only manage the annotations present in your configuration.\nPlease refer to the field 'effective_annotations' for all of the annotations present on the resource.", + "description_kind": "plain", + "optional": true + }, + "create_time": { + "type": "string", + "description": "The time at which the Secret was created.", + "description_kind": "plain", + "computed": true + }, + "effective_annotations": { + "type": [ + "map", + "string" + ], + "description": "All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, + "effective_labels": { + "type": [ + "map", + "string" + ], + "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, + "expire_time": { + "type": "string", + "description": "Timestamp in UTC when the Secret is scheduled to expire. This is always provided on output, regardless of what was sent on input.\nA timestamp in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits. 
Examples: \"2014-10-02T15:01:23Z\" and \"2014-10-02T15:01:23.045123456Z\".\nOnly one of 'expire_time' or 'ttl' can be provided.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "labels": { + "type": [ + "map", + "string" + ], + "description": "The labels assigned to this Secret.\n\nLabel keys must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes,\nand must conform to the following PCRE regular expression: [\\p{Ll}\\p{Lo}][\\p{Ll}\\p{Lo}\\p{N}_-]{0,62}\n\nLabel values must be between 0 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes,\nand must conform to the following PCRE regular expression: [\\p{Ll}\\p{Lo}\\p{N}_-]{0,63}\n\nNo more than 64 labels can be assigned to a given resource.\n\nAn object containing a list of \"key\": value pairs. Example:\n{ \"name\": \"wrench\", \"mass\": \"1.3kg\", \"count\": \"3\" }.\n\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", + "description_kind": "plain", + "optional": true + }, + "name": { + "type": "string", + "description": "The resource name of the Secret. 
Format:\n'projects/{{project}}/secrets/{{secret_id}}'", + "description_kind": "plain", + "computed": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "secret_id": { + "type": "string", + "description": "This must be unique within the project.", + "description_kind": "plain", + "required": true + }, + "terraform_labels": { + "type": [ + "map", + "string" + ], + "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", + "description_kind": "plain", + "computed": true + }, + "ttl": { + "type": "string", + "description": "The TTL for the Secret.\nA duration in seconds with up to nine fractional digits, terminated by 's'. Example: \"3.5s\".\nOnly one of 'ttl' or 'expire_time' can be provided.", + "description_kind": "plain", + "optional": true + }, + "version_aliases": { + "type": [ + "map", + "string" + ], + "description": "Mapping from version alias to version name.\n\nA version alias is a string with a maximum length of 63 characters and can contain\nuppercase and lowercase letters, numerals, and the hyphen (-) and underscore ('_')\ncharacters. An alias string must start with a letter and cannot be the string\n'latest' or 'NEW'. No more than 50 aliases can be assigned to a given secret.\n\nAn object containing a list of \"key\": value pairs. 
Example:\n{ \"name\": \"wrench\", \"mass\": \"1.3kg\", \"count\": \"3\" }.", + "description_kind": "plain", + "optional": true + }, + "version_destroy_ttl": { + "type": "string", + "description": "Secret Version TTL after destruction request.\nThis is a part of the delayed delete feature on Secret Version.\nFor secret with versionDestroyTtl>0, version destruction doesn't happen immediately\non calling destroy instead the version goes to a disabled state and\nthe actual destruction happens after this TTL expires.", + "description_kind": "plain", + "optional": true + } + }, + "block_types": { + "replication": { + "nesting_mode": "list", + "block": { + "block_types": { + "auto": { + "nesting_mode": "list", + "block": { + "block_types": { + "customer_managed_encryption": { + "nesting_mode": "list", + "block": { + "attributes": { + "kms_key_name": { + "type": "string", + "description": "The resource name of the Cloud KMS CryptoKey used to encrypt secret payloads.", + "description_kind": "plain", + "required": true + } + }, + "description": "The customer-managed encryption configuration of the Secret.\nIf no configuration is provided, Google-managed default\nencryption is used.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "The Secret will automatically be replicated without any restrictions.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "user_managed": { + "nesting_mode": "list", + "block": { + "block_types": { + "replicas": { + "nesting_mode": "list", + "block": { + "attributes": { + "location": { + "type": "string", + "description": "The canonical IDs of the location to replicate data. 
For example: \"us-east1\".", + "description_kind": "plain", + "required": true + } + }, + "block_types": { + "customer_managed_encryption": { + "nesting_mode": "list", + "block": { + "attributes": { + "kms_key_name": { + "type": "string", + "description": "Describes the Cloud KMS encryption key that will be used to protect destination secret.", + "description_kind": "plain", + "required": true + } + }, + "description": "Customer Managed Encryption for the secret.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "The list of Replicas for this Secret. Cannot be empty.", + "description_kind": "plain" + }, + "min_items": 1 + } + }, + "description": "The Secret will be replicated to the regions specified by the user.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "The replication policy of the secret data attached to the Secret. It cannot be changed\nafter the Secret has been created.", + "description_kind": "plain" + }, + "min_items": 1, + "max_items": 1 + }, + "rotation": { + "nesting_mode": "list", + "block": { + "attributes": { + "next_rotation_time": { + "type": "string", + "description": "Timestamp in UTC at which the Secret is scheduled to rotate.\nA timestamp in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits. Examples: \"2014-10-02T15:01:23Z\" and \"2014-10-02T15:01:23.045123456Z\".", + "description_kind": "plain", + "optional": true + }, + "rotation_period": { + "type": "string", + "description": "The Duration between rotation notifications. Must be in seconds and at least 3600s (1h) and at most 3153600000s (100 years).\nIf rotationPeriod is set, 'next_rotation_time' must be set. 'next_rotation_time' will be advanced by this period when the service automatically sends rotation notifications.", + "description_kind": "plain", + "optional": true + } + }, + "description": "The rotation time and period for a Secret. 
At 'next_rotation_time', Secret Manager will send a Pub/Sub notification to the topics configured on the Secret. 'topics' must be set to configure rotation.", + "description_kind": "plain" + }, + "max_items": 1 + }, "timeouts": { "nesting_mode": "single", "block": { @@ -141672,6 +154689,141 @@ "description_kind": "plain" } }, + "google_secure_source_manager_branch_rule": { + "version": 0, + "block": { + "attributes": { + "allow_stale_reviews": { + "type": "bool", + "description": "Determines if allow stale reviews or approvals before merging to the branch.", + "description_kind": "plain", + "optional": true + }, + "branch_rule_id": { + "type": "string", + "description": "The ID for the BranchRule.", + "description_kind": "plain", + "required": true + }, + "create_time": { + "type": "string", + "description": "Time the BranchRule was created in UTC.", + "description_kind": "plain", + "computed": true + }, + "disabled": { + "type": "bool", + "description": "Determines if the branch rule is disabled or not.", + "description_kind": "plain", + "optional": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "include_pattern": { + "type": "string", + "description": "The BranchRule matches branches based on the specified regular expression. 
Use .* to match all branches.", + "description_kind": "plain", + "required": true + }, + "location": { + "type": "string", + "description": "The location for the Repository.", + "description_kind": "plain", + "required": true + }, + "minimum_approvals_count": { + "type": "number", + "description": "The minimum number of approvals required for the branch rule to be matched.", + "description_kind": "plain", + "optional": true + }, + "minimum_reviews_count": { + "type": "number", + "description": "The minimum number of reviews required for the branch rule to be matched.", + "description_kind": "plain", + "optional": true + }, + "name": { + "type": "string", + "description": "The resource name for the BranchRule.", + "description_kind": "plain", + "computed": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "repository_id": { + "type": "string", + "description": "The ID for the Repository.", + "description_kind": "plain", + "required": true + }, + "require_comments_resolved": { + "type": "bool", + "description": "Determines if require comments resolved before merging to the branch.", + "description_kind": "plain", + "optional": true + }, + "require_linear_history": { + "type": "bool", + "description": "Determines if require linear history before merging to the branch.", + "description_kind": "plain", + "optional": true + }, + "require_pull_request": { + "type": "bool", + "description": "Determines if the branch rule requires a pull request or not.", + "description_kind": "plain", + "optional": true + }, + "uid": { + "type": "string", + "description": "Unique identifier of the BranchRule.", + "description_kind": "plain", + "computed": true + }, + "update_time": { + "type": "string", + "description": "Time the BranchRule was updated in UTC.", + "description_kind": "plain", + "computed": true + } + }, + "block_types": { + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + 
"create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, "google_secure_source_manager_instance": { "version": 0, "block": { @@ -141838,6 +154990,22 @@ }, "description_kind": "plain" } + }, + "workforce_identity_federation_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "enabled": { + "type": "bool", + "description": "'Whether Workforce Identity Federation is enabled.'", + "description_kind": "plain", + "required": true + } + }, + "description": "Configuration for Workforce Identity Federation to support third party identity provider.\nIf unset, defaults to the Google OIDC IdP.", + "description_kind": "plain" + }, + "max_items": 1 } }, "description_kind": "plain" @@ -143688,10 +156856,140 @@ "description_kind": "plain" } }, + "google_site_verification_owner": { + "version": 0, + "block": { + "attributes": { + "email": { + "type": "string", + "description": "The email address of the owner.", + "description_kind": "plain", + "required": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "web_resource_id": { + "type": "string", + "description": "The id of the Web Resource to add this owner to, in the form \"webResource/\".", + "description_kind": "plain", + "required": true + } + }, + "block_types": { + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, + "google_site_verification_web_resource": { + "version": 0, 
+ "block": { + "attributes": { + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "owners": { + "type": [ + "list", + "string" + ], + "description": "The email addresses of all direct, verified owners of this exact property. Indirect owners —\nfor example verified owners of the containing domain—are not included in this list.", + "description_kind": "plain", + "computed": true + }, + "verification_method": { + "type": "string", + "description": "The verification method for the Site Verification system to use to verify\nthis site or domain. Possible values: [\"ANALYTICS\", \"DNS_CNAME\", \"DNS_TXT\", \"FILE\", \"META\", \"TAG_MANAGER\"]", + "description_kind": "plain", + "required": true + }, + "web_resource_id": { + "type": "string", + "description": "The string used to identify this web resource.", + "description_kind": "plain", + "computed": true + } + }, + "block_types": { + "site": { + "nesting_mode": "list", + "block": { + "attributes": { + "identifier": { + "type": "string", + "description": "The site identifier. If the type is set to SITE, the identifier is a URL. If the type is\nset to INET_DOMAIN, the identifier is a domain name.", + "description_kind": "plain", + "required": true + }, + "type": { + "type": "string", + "description": "The type of resource to be verified. 
Possible values: [\"INET_DOMAIN\", \"SITE\"]", + "description_kind": "plain", + "required": true + } + }, + "description": "Container for the address and type of a site for which a verification token will be verified.", + "description_kind": "plain" + }, + "min_items": 1, + "max_items": 1 + }, + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, "google_sourcerepo_repository": { "version": 0, "block": { "attributes": { + "create_ignore_already_exists": { + "type": "bool", + "description": "If set to true, skip repository creation if a repository with the same name already exists.", + "description_kind": "plain", + "optional": true + }, "id": { "type": "string", "description_kind": "plain", @@ -143948,6 +157246,117 @@ "description_kind": "plain" } }, + "google_spanner_backup_schedule": { + "version": 0, + "block": { + "attributes": { + "database": { + "type": "string", + "description": "The database to create the backup schedule on.", + "description_kind": "plain", + "required": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "instance": { + "type": "string", + "description": "The instance to create the database on.", + "description_kind": "plain", + "required": true + }, + "name": { + "type": "string", + "description": "A unique identifier for the backup schedule, which cannot be changed after\nthe backup schedule is created. 
Values are of the form [a-z][-a-z0-9]*[a-z0-9].", + "description_kind": "plain", + "optional": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "retention_duration": { + "type": "string", + "description": "At what relative time in the future, compared to its creation time, the backup should be deleted, e.g. keep backups for 7 days.\nA duration in seconds with up to nine fractional digits, ending with 's'. Example: '3.5s'.\nYou can set this to a value up to 366 days.", + "description_kind": "plain", + "required": true + } + }, + "block_types": { + "full_backup_spec": { + "nesting_mode": "list", + "block": { + "description": "The schedule creates only full backups..", + "description_kind": "plain" + }, + "max_items": 1 + }, + "incremental_backup_spec": { + "nesting_mode": "list", + "block": { + "description": "The schedule creates incremental backup chains.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "spec": { + "nesting_mode": "list", + "block": { + "block_types": { + "cron_spec": { + "nesting_mode": "list", + "block": { + "attributes": { + "text": { + "type": "string", + "description": "Textual representation of the crontab. User can customize the\nbackup frequency and the backup version time using the cron\nexpression. The version time must be in UTC timzeone.\nThe backup will contain an externally consistent copy of the\ndatabase at the version time. Allowed frequencies are 12 hour, 1 day,\n1 week and 1 month. 
Examples of valid cron specifications:\n 0 2/12 * * * : every 12 hours at (2, 14) hours past midnight in UTC.\n 0 2,14 * * * : every 12 hours at (2,14) hours past midnight in UTC.\n 0 2 * * * : once a day at 2 past midnight in UTC.\n 0 2 * * 0 : once a week every Sunday at 2 past midnight in UTC.\n 0 2 8 * * : once a month on 8th day at 2 past midnight in UTC.", + "description_kind": "plain", + "optional": true + } + }, + "description": "Cron style schedule specification..", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "Defines specifications of the backup schedule.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, "google_spanner_database": { "version": 0, "block": { @@ -143994,7 +157403,7 @@ }, "name": { "type": "string", - "description": "A unique identifier for the database, which cannot be changed after\nthe instance is created. Values are of the form [a-z][-a-z0-9]*[a-z0-9].", + "description": "A unique identifier for the database, which cannot be changed after the\ninstance is created. Values are of the form '[a-z][-_a-z0-9]*[a-z0-9]'.", "description_kind": "plain", "required": true }, @@ -144027,7 +157436,16 @@ "type": "string", "description": "Fully qualified name of the KMS key to use to encrypt this database. This key must exist\nin the same location as the Spanner Database.", "description_kind": "plain", - "required": true + "optional": true + }, + "kms_key_names": { + "type": [ + "list", + "string" + ], + "description": "Fully qualified name of the KMS keys to use to encrypt this database. 
The keys must exist\nin the same locations as the Spanner Database.", + "description_kind": "plain", + "optional": true } }, "description": "Encryption configuration for the database", @@ -144270,6 +157688,13 @@ "description_kind": "plain", "required": true }, + "edition": { + "type": "string", + "description": "The edition selected for this instance. Different editions provide different capabilities at different price points. Possible values: [\"EDITION_UNSPECIFIED\", \"STANDARD\", \"ENTERPRISE\", \"ENTERPRISE_PLUS\"]", + "description_kind": "plain", + "optional": true, + "computed": true + }, "effective_labels": { "type": [ "map", @@ -144348,6 +157773,66 @@ "nesting_mode": "list", "block": { "block_types": { + "asymmetric_autoscaling_options": { + "nesting_mode": "list", + "block": { + "block_types": { + "overrides": { + "nesting_mode": "list", + "block": { + "block_types": { + "autoscaling_limits": { + "nesting_mode": "list", + "block": { + "attributes": { + "max_nodes": { + "type": "number", + "description": "The maximum number of nodes for this specific replica.", + "description_kind": "plain", + "required": true + }, + "min_nodes": { + "type": "number", + "description": "The minimum number of nodes for this specific replica.", + "description_kind": "plain", + "required": true + } + }, + "description": "A nested object resource.", + "description_kind": "plain" + }, + "min_items": 1, + "max_items": 1 + } + }, + "description": "A nested object resource.", + "description_kind": "plain" + }, + "min_items": 1, + "max_items": 1 + }, + "replica_selection": { + "nesting_mode": "list", + "block": { + "attributes": { + "location": { + "type": "string", + "description": "The location of the replica to apply asymmetric autoscaling options.", + "description_kind": "plain", + "required": true + } + }, + "description": "A nested object resource.", + "description_kind": "plain" + }, + "min_items": 1, + "max_items": 1 + } + }, + "description": "Asymmetric autoscaling options 
for specific replicas.", + "description_kind": "plain" + } + }, "autoscaling_limits": { "nesting_mode": "list", "block": { @@ -144836,7 +158321,7 @@ }, "database_version": { "type": "string", - "description": "The MySQL, PostgreSQL or SQL Server (beta) version to use. Supported values include MYSQL_5_6, MYSQL_5_7, MYSQL_8_0, POSTGRES_9_6, POSTGRES_10, POSTGRES_11, POSTGRES_12, POSTGRES_13, POSTGRES_14, POSTGRES_15, SQLSERVER_2017_STANDARD, SQLSERVER_2017_ENTERPRISE, SQLSERVER_2017_EXPRESS, SQLSERVER_2017_WEB. Database Version Policies includes an up-to-date reference of supported versions.", + "description": "The MySQL, PostgreSQL or SQL Server (beta) version to use. Supported values include MYSQL_5_6, MYSQL_5_7, MYSQL_8_0, MYSQL_8_4, POSTGRES_9_6, POSTGRES_10, POSTGRES_11, POSTGRES_12, POSTGRES_13, POSTGRES_14, POSTGRES_15, POSTGRES_16, POSTGRES_17, SQLSERVER_2017_STANDARD, SQLSERVER_2017_ENTERPRISE, SQLSERVER_2017_EXPRESS, SQLSERVER_2017_WEB. Database Version Policies includes an up-to-date reference of supported versions.", "description_kind": "plain", "required": true }, @@ -144945,6 +158430,16 @@ "optional": true, "computed": true }, + "replica_names": { + "type": [ + "list", + "string" + ], + "description": "The replicas of the instance.", + "description_kind": "plain", + "optional": true, + "computed": true + }, "root_password": { "type": "string", "description": "Initial root password. Required for MS SQL Server.", @@ -145037,6 +158532,12 @@ "description_kind": "plain", "optional": true }, + "cascadable_replica": { + "type": "bool", + "description": "Specifies if a SQL Server replica is a cascadable replica. 
A cascadable replica is a SQL Server cross region replica that supports replica(s) under it.", + "description_kind": "plain", + "optional": true + }, "client_certificate": { "type": "string", "description": "PEM representation of the replica's x509 certificate.", @@ -145195,7 +158696,8 @@ "type": "string", "description": "The edition of the instance, can be ENTERPRISE or ENTERPRISE_PLUS.", "description_kind": "plain", - "optional": true + "optional": true, + "computed": true }, "enable_dataplex_integration": { "type": "bool", @@ -145477,13 +158979,6 @@ "description_kind": "plain", "optional": true }, - "require_ssl": { - "type": "bool", - "description": "Whether SSL connections over IP are enforced or not. To change this field, also set the corresponding value in ssl_mode if it has been set too.", - "description_kind": "plain", - "deprecated": true, - "optional": true - }, "server_ca_mode": { "type": "string", "description": "Specify how the server certificate's Certificate Authority is hosted.", @@ -145491,9 +158986,15 @@ "optional": true, "computed": true }, + "server_ca_pool": { + "type": "string", + "description": "The resource name of the server CA pool for an instance with \"CUSTOMER_MANAGED_CAS_CA\" as the \"server_ca_mode\".", + "description_kind": "plain", + "optional": true + }, "ssl_mode": { "type": "string", - "description": "Specify how SSL connection should be enforced in DB connections. This field provides more SSL enforcement options compared to require_ssl. 
To change this field, also set the correspoding value in require_ssl until next major release.", + "description": "Specify how SSL connection should be enforced in DB connections.", "description_kind": "plain", "optional": true, "computed": true @@ -145543,6 +159044,29 @@ "optional": true } }, + "block_types": { + "psc_auto_connections": { + "nesting_mode": "list", + "block": { + "attributes": { + "consumer_network": { + "type": "string", + "description": "The consumer network of this consumer endpoint. This must be a resource path that includes both the host project and the network name. The consumer host project of this network might be different from the consumer service project.", + "description_kind": "plain", + "required": true + }, + "consumer_service_project_id": { + "type": "string", + "description": "The project ID of consumer service project of this consumer endpoint.", + "description_kind": "plain", + "optional": true + } + }, + "description": "A comma-separated list of networks or a comma-separated list of network-project pairs. Each project in this list is represented by a project number (numeric) or by a project ID (alphanumeric). 
This allows Private Service Connect connections to be created automatically for the specified networks.", + "description_kind": "plain" + } + } + }, "description": "PSC settings for a Cloud SQL instance.", "description_kind": "plain" } @@ -146067,7 +159591,7 @@ } }, "google_storage_bucket": { - "version": 2, + "version": 3, "block": { "attributes": { "default_event_based_hold": { @@ -146293,6 +159817,22 @@ }, "max_items": 1 }, + "hierarchical_namespace": { + "nesting_mode": "list", + "block": { + "attributes": { + "enabled": { + "type": "bool", + "description": "Set this field true to organize bucket with logical file system structure.", + "description_kind": "plain", + "required": true + } + }, + "description": "The bucket's HNS configuration, which defines bucket can organize folders in logical file system structure.", + "description_kind": "plain" + }, + "max_items": 1 + }, "lifecycle_rule": { "nesting_mode": "list", "block": { @@ -146381,13 +159921,6 @@ "description_kind": "plain", "optional": true }, - "no_age": { - "type": "bool", - "description": "While set true, age value will be omitted.Required to set true when age is unset in the config file.", - "description_kind": "plain", - "deprecated": true, - "optional": true - }, "noncurrent_time_before": { "type": "string", "description": "Creation date of an object in RFC 3339 (e.g. 
2017-06-13) to satisfy this condition.", @@ -147186,6 +160719,86 @@ "description_kind": "plain" } }, + "google_storage_folder": { + "version": 0, + "block": { + "attributes": { + "bucket": { + "type": "string", + "description": "The name of the bucket that contains the folder.", + "description_kind": "plain", + "required": true + }, + "create_time": { + "type": "string", + "description": "The timestamp at which this folder was created.", + "description_kind": "plain", + "computed": true + }, + "force_destroy": { + "type": "bool", + "description": "If set to true, items within folder if any will be force destroyed.", + "description_kind": "plain", + "optional": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "metageneration": { + "type": "string", + "description": "The metadata generation of the folder.", + "description_kind": "plain", + "computed": true + }, + "name": { + "type": "string", + "description": "The name of the folder expressed as a path. Must include\ntrailing '/'. 
For example, 'example_dir/example_dir2/', 'example@#/', 'a-b/d-f/'.", + "description_kind": "plain", + "required": true + }, + "self_link": { + "type": "string", + "description_kind": "plain", + "computed": true + }, + "update_time": { + "type": "string", + "description": "The timestamp at which this folder was most recently updated.", + "description_kind": "plain", + "computed": true + } + }, + "block_types": { + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, "google_storage_hmac_key": { "version": 0, "block": { @@ -147458,7 +161071,7 @@ "optional": true } }, - "description": "A nested object resource", + "description": "A nested object resource.", "description_kind": "plain" }, "max_items": 1 @@ -148144,6 +161757,148 @@ }, "max_items": 1 }, + "replication_spec": { + "nesting_mode": "list", + "block": { + "block_types": { + "gcs_data_sink": { + "nesting_mode": "list", + "block": { + "attributes": { + "bucket_name": { + "type": "string", + "description": "Google Cloud Storage bucket name.", + "description_kind": "plain", + "required": true + }, + "path": { + "type": "string", + "description": "Google Cloud Storage path in bucket to transfer", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "description": "A Google Cloud Storage data sink.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "gcs_data_source": { + "nesting_mode": "list", + "block": { + "attributes": { + "bucket_name": { + "type": "string", + "description": "Google Cloud Storage bucket name.", + "description_kind": "plain", + "required": true + }, + "path": { + "type": "string", + 
"description": "Google Cloud Storage path in bucket to transfer", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "description": "A Google Cloud Storage data source.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "object_conditions": { + "nesting_mode": "list", + "block": { + "attributes": { + "exclude_prefixes": { + "type": [ + "list", + "string" + ], + "description": "exclude_prefixes must follow the requirements described for include_prefixes.", + "description_kind": "plain", + "optional": true + }, + "include_prefixes": { + "type": [ + "list", + "string" + ], + "description": "If include_refixes is specified, objects that satisfy the object conditions must have names that start with one of the include_prefixes and that do not start with any of the exclude_prefixes. If include_prefixes is not specified, all objects except those that have names starting with one of the exclude_prefixes must satisfy the object conditions.", + "description_kind": "plain", + "optional": true + }, + "last_modified_before": { + "type": "string", + "description": "If specified, only objects with a \"last modification time\" before this timestamp and objects that don't have a \"last modification time\" are transferred. A timestamp in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits. Examples: \"2014-10-02T15:01:23Z\" and \"2014-10-02T15:01:23.045123456Z\".", + "description_kind": "plain", + "optional": true + }, + "last_modified_since": { + "type": "string", + "description": "If specified, only objects with a \"last modification time\" on or after this timestamp and objects that don't have a \"last modification time\" are transferred. A timestamp in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits. 
Examples: \"2014-10-02T15:01:23Z\" and \"2014-10-02T15:01:23.045123456Z\".", + "description_kind": "plain", + "optional": true + }, + "max_time_elapsed_since_last_modification": { + "type": "string", + "description": "A duration in seconds with up to nine fractional digits, terminated by 's'. Example: \"3.5s\".", + "description_kind": "plain", + "optional": true + }, + "min_time_elapsed_since_last_modification": { + "type": "string", + "description": "A duration in seconds with up to nine fractional digits, terminated by 's'. Example: \"3.5s\".", + "description_kind": "plain", + "optional": true + } + }, + "description": "Only objects that satisfy these object conditions are included in the set of data source and data sink objects. Object conditions based on objects' last_modification_time do not exclude objects in a data sink.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "transfer_options": { + "nesting_mode": "list", + "block": { + "attributes": { + "delete_objects_from_source_after_transfer": { + "type": "bool", + "description": "Whether objects should be deleted from the source after they are transferred to the sink. Note that this option and delete_objects_unique_in_sink are mutually exclusive.", + "description_kind": "plain", + "optional": true + }, + "delete_objects_unique_in_sink": { + "type": "bool", + "description": "Whether objects that exist only in the sink should be deleted. Note that this option and delete_objects_from_source_after_transfer are mutually exclusive.", + "description_kind": "plain", + "optional": true + }, + "overwrite_objects_already_existing_in_sink": { + "type": "bool", + "description": "Whether overwriting objects that already exist in the sink is allowed.", + "description_kind": "plain", + "optional": true + }, + "overwrite_when": { + "type": "string", + "description": "When to overwrite objects that already exist in the sink. 
If not set, overwrite behavior is determined by overwriteObjectsAlreadyExistingInSink.", + "description_kind": "plain", + "optional": true + } + }, + "description": "Characteristics of how to treat files from datasource and sink during job. If the option delete_objects_unique_in_sink is true, object conditions based on objects' last_modification_time are ignored and do not exclude objects in a data source or a data sink.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "Replication specification.", + "description_kind": "plain" + }, + "max_items": 1 + }, "schedule": { "nesting_mode": "list", "block": { @@ -148422,6 +162177,22 @@ }, "max_items": 1 }, + "hdfs_data_source": { + "nesting_mode": "list", + "block": { + "attributes": { + "path": { + "type": "string", + "description": "Directory path to the filesystem.", + "description_kind": "plain", + "required": true + } + }, + "description": "An HDFS Storage data source.", + "description_kind": "plain" + }, + "max_items": 1 + }, "http_data_source": { "nesting_mode": "list", "block": { @@ -148560,7 +162331,6 @@ "description": "Transfer specification.", "description_kind": "plain" }, - "min_items": 1, "max_items": 1 } }, @@ -148734,7 +162504,7 @@ }, "short_name": { "type": "string", - "description": "Input only. The user friendly name for a TagKey. The short name should be unique for TagKeys within the same tag namespace.\n\nThe short name must be 1-63 characters, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between.", + "description": "Input only. The user friendly name for a TagKey. The short name should be unique for TagKeys within the same tag namespace.\n\nThe short name can have a maximum length of 256 characters. 
The permitted character set for the shortName includes all UTF-8 encoded Unicode characters except single quotes ('), double quotes (\"), backslashes (\\\\), and forward slashes (/).", "description_kind": "plain", "required": true }, @@ -148967,7 +162737,7 @@ }, "short_name": { "type": "string", - "description": "Input only. User-assigned short name for TagValue. The short name should be unique for TagValues within the same parent TagKey.\n\nThe short name must be 63 characters or less, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between.", + "description": "Input only. User-assigned short name for TagValue. The short name should be unique for TagValues within the same parent TagKey.\n\nThe short name can have a maximum length of 256 characters. The permitted character set for the shortName includes all UTF-8 encoded Unicode characters except single quotes ('), double quotes (\"), backslashes (\\\\), and forward slashes (/).", "description_kind": "plain", "required": true }, @@ -149318,22 +163088,16 @@ "description_kind": "plain" } }, - "google_vertex_ai_dataset": { + "google_transcoder_job": { "version": 0, "block": { "attributes": { "create_time": { "type": "string", - "description": "The timestamp of when the dataset was created in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits.", + "description": "The time the job was created.", "description_kind": "plain", "computed": true }, - "display_name": { - "type": "string", - "description": "The user-defined name of the Dataset. 
The name can be up to 128 characters long and can be consist of any UTF-8 characters.", - "description_kind": "plain", - "required": true - }, "effective_labels": { "type": [ "map", @@ -149343,6 +163107,12 @@ "description_kind": "plain", "computed": true }, + "end_time": { + "type": "string", + "description": "The time the transcoding finished.", + "description_kind": "plain", + "computed": true + }, "id": { "type": "string", "description_kind": "plain", @@ -149354,19 +163124,19 @@ "map", "string" ], - "description": "A set of key/value label pairs to assign to this Workflow.\n\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", + "description": "The labels associated with this job. You can use these to organize and group your jobs.\n\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", "description_kind": "plain", "optional": true }, - "metadata_schema_uri": { + "location": { "type": "string", - "description": "Points to a YAML file stored on Google Cloud Storage describing additional information about the Dataset. The schema is defined as an OpenAPI 3.0.2 Schema Object. The schema files that can be used here are found in gs://google-cloud-aiplatform/schema/dataset/metadata/.", + "description": "The location of the transcoding job resource.", "description_kind": "plain", "required": true }, "name": { "type": "string", - "description": "The resource name of the Dataset. This value is set by Google.", + "description": "The resource name of the job.", "description_kind": "plain", "computed": true }, @@ -149376,543 +163146,635 @@ "optional": true, "computed": true }, - "region": { - "type": "string", - "description": "The region of the dataset. 
eg us-central1", - "description_kind": "plain", - "optional": true, - "computed": true - }, - "terraform_labels": { - "type": [ - "map", - "string" - ], - "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", - "description_kind": "plain", - "computed": true - }, - "update_time": { + "start_time": { "type": "string", - "description": "The timestamp of when the dataset was last updated in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits.", + "description": "The time the transcoding started.", "description_kind": "plain", "computed": true - } - }, - "block_types": { - "encryption_spec": { - "nesting_mode": "list", - "block": { - "attributes": { - "kms_key_name": { - "type": "string", - "description": "Required. The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource.\nHas the form: projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key. The key needs to be in the same region as where the resource is created.", - "description_kind": "plain", - "optional": true - } - }, - "description": "Customer-managed encryption key spec for a Dataset. 
If set, this Dataset and all sub-resources of this Dataset will be secured by this key.", - "description_kind": "plain" - }, - "max_items": 1 }, - "timeouts": { - "nesting_mode": "single", - "block": { - "attributes": { - "create": { - "type": "string", - "description_kind": "plain", - "optional": true - }, - "delete": { - "type": "string", - "description_kind": "plain", - "optional": true - }, - "update": { - "type": "string", - "description_kind": "plain", - "optional": true - } - }, - "description_kind": "plain" - } - } - }, - "description_kind": "plain" - } - }, - "google_vertex_ai_deployment_resource_pool": { - "version": 0, - "block": { - "attributes": { - "create_time": { + "state": { "type": "string", - "description": "A timestamp in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits.", + "description": "The current state of the job.", "description_kind": "plain", "computed": true }, - "id": { + "template_id": { "type": "string", + "description": "Specify the templateId to use for populating Job.config.\nThe default is preset/web-hd, which is the only supported preset.", "description_kind": "plain", "optional": true, "computed": true }, - "name": { - "type": "string", - "description": "The resource name of deployment resource pool. The maximum length is 63 characters, and valid characters are '/^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$/'.", - "description_kind": "plain", - "required": true - }, - "project": { - "type": "string", + "terraform_labels": { + "type": [ + "map", + "string" + ], + "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", "description_kind": "plain", - "optional": true, "computed": true - }, - "region": { - "type": "string", - "description": "The region of deployment resource pool. 
eg us-central1", - "description_kind": "plain", - "optional": true } }, "block_types": { - "dedicated_resources": { + "config": { "nesting_mode": "list", "block": { - "attributes": { - "max_replica_count": { - "type": "number", - "description": "The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, will use min_replica_count as the default value. The value of this field impacts the charge against Vertex CPU and GPU quotas. Specifically, you will be charged for max_replica_count * number of cores in the selected machine type) and (max_replica_count * number of GPUs per replica in the selected machine type).", - "description_kind": "plain", - "optional": true - }, - "min_replica_count": { - "type": "number", - "description": "The minimum number of machine replicas this DeployedModel will be always deployed on. This value must be greater than or equal to 1. If traffic against the DeployedModel increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed.", - "description_kind": "plain", - "required": true - } - }, "block_types": { - "autoscaling_metric_specs": { + "ad_breaks": { "nesting_mode": "list", "block": { "attributes": { - "metric_name": { + "start_time_offset": { "type": "string", - "description": "The resource metric name. 
Supported metrics: For Online Prediction: * 'aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle' * 'aiplatform.googleapis.com/prediction/online/cpu/utilization'", + "description": "Start time in seconds for the ad break, relative to the output file timeline", "description_kind": "plain", - "required": true + "optional": true, + "computed": true + } + }, + "description": "Ad break.", + "description_kind": "plain" + } + }, + "edit_list": { + "nesting_mode": "list", + "block": { + "attributes": { + "inputs": { + "type": [ + "list", + "string" + ], + "description": "List of values identifying files that should be used in this atom.", + "description_kind": "plain", + "optional": true, + "computed": true }, - "target": { - "type": "number", - "description": "The target resource utilization in percentage (1% - 100%) for the given metric; once the real usage deviates from the target by a certain percentage, the machine replicas change. The default value is 60 (representing 60%) if not provided.", + "key": { + "type": "string", + "description": "A unique key for this atom.", "description_kind": "plain", - "optional": true + "optional": true, + "computed": true + }, + "start_time_offset": { + "type": "string", + "description": "Start time in seconds for the atom, relative to the input file timeline. 
The default is '0s'.", + "description_kind": "plain", + "optional": true, + "computed": true } }, - "description": "A list of the metric specifications that overrides a resource utilization metric.", + "description": "List of input assets stored in Cloud Storage.", "description_kind": "plain" } }, - "machine_spec": { + "elementary_streams": { "nesting_mode": "list", "block": { "attributes": { - "accelerator_count": { - "type": "number", - "description": "The number of accelerators to attach to the machine.", + "key": { + "type": "string", + "description": "A unique key for this atom.", "description_kind": "plain", - "optional": true + "optional": true, + "computed": true + } + }, + "block_types": { + "audio_stream": { + "nesting_mode": "list", + "block": { + "attributes": { + "bitrate_bps": { + "type": "number", + "description": "Audio bitrate in bits per second.", + "description_kind": "plain", + "required": true + }, + "channel_count": { + "type": "number", + "description": "Number of audio channels. The default is '2'.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "channel_layout": { + "type": [ + "list", + "string" + ], + "description": "A list of channel names specifying layout of the audio channels. The default is [\"fl\", \"fr\"].", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "codec": { + "type": "string", + "description": "The codec for this audio stream. The default is 'aac'.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "sample_rate_hertz": { + "type": "number", + "description": "The audio sample rate in Hertz. 
The default is '48000'.", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "description": "Encoding of an audio stream.", + "description_kind": "plain" + }, + "max_items": 1 }, - "accelerator_type": { + "video_stream": { + "nesting_mode": "list", + "block": { + "block_types": { + "h264": { + "nesting_mode": "list", + "block": { + "attributes": { + "bitrate_bps": { + "type": "number", + "description": "The video bitrate in bits per second.", + "description_kind": "plain", + "required": true + }, + "crf_level": { + "type": "number", + "description": "Target CRF level. The default is '21'.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "entropy_coder": { + "type": "string", + "description": "The entropy coder to use. The default is 'cabac'.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "frame_rate": { + "type": "number", + "description": "The target video frame rate in frames per second (FPS).", + "description_kind": "plain", + "required": true + }, + "gop_duration": { + "type": "string", + "description": "Select the GOP size based on the specified duration. The default is '3s'.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "height_pixels": { + "type": "number", + "description": "The height of the video in pixels.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "pixel_format": { + "type": "string", + "description": "Pixel format to use. The default is 'yuv420p'.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "preset": { + "type": "string", + "description": "Enforces the specified codec preset. 
The default is 'veryfast'.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "profile": { + "type": "string", + "description": "Enforces the specified codec profile.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "rate_control_mode": { + "type": "string", + "description": "Specify the mode. The default is 'vbr'.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "vbv_fullness_bits": { + "type": "number", + "description": "Initial fullness of the Video Buffering Verifier (VBV) buffer in bits.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "vbv_size_bits": { + "type": "number", + "description": "Size of the Video Buffering Verifier (VBV) buffer in bits.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "width_pixels": { + "type": "number", + "description": "The width of the video in pixels.", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "block_types": { + "hlg": { + "nesting_mode": "list", + "block": { + "description": "HLG color format setting for H264.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "sdr": { + "nesting_mode": "list", + "block": { + "description": "SDR color format setting for H264.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "H264 codec settings", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "Encoding of a video stream.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "List of input assets stored in Cloud Storage.", + "description_kind": "plain" + } + }, + "encryptions": { + "nesting_mode": "list", + "block": { + "attributes": { + "id": { "type": "string", - "description": "The type of accelerator(s) that may be attached to the machine as per accelerator_count. 
See possible values [here](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec#AcceleratorType).", + "description": "Identifier for this set of encryption options.", "description_kind": "plain", - "optional": true + "required": true + } + }, + "block_types": { + "aes128": { + "nesting_mode": "list", + "block": { + "description": "Configuration for AES-128 encryption.", + "description_kind": "plain" + }, + "max_items": 1 }, - "machine_type": { + "drm_systems": { + "nesting_mode": "list", + "block": { + "block_types": { + "clearkey": { + "nesting_mode": "list", + "block": { + "description": "Clearkey configuration.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "fairplay": { + "nesting_mode": "list", + "block": { + "description": "Fairplay configuration.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "playready": { + "nesting_mode": "list", + "block": { + "description": "Playready configuration.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "widevine": { + "nesting_mode": "list", + "block": { + "description": "Widevine configuration.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "DRM system(s) to use; at least one must be specified. 
If a DRM system is omitted, it is considered disabled.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "mpeg_cenc": { + "nesting_mode": "list", + "block": { + "attributes": { + "scheme": { + "type": "string", + "description": "Specify the encryption scheme.", + "description_kind": "plain", + "required": true + } + }, + "description": "Configuration for MPEG Common Encryption (MPEG-CENC).", + "description_kind": "plain" + }, + "max_items": 1 + }, + "sample_aes": { + "nesting_mode": "list", + "block": { + "description": "Configuration for SAMPLE-AES encryption.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "secret_manager_key_source": { + "nesting_mode": "list", + "block": { + "attributes": { + "secret_version": { + "type": "string", + "description": "The name of the Secret Version containing the encryption key in the following format: projects/{project}/secrets/{secret_id}/versions/{version_number}.", + "description_kind": "plain", + "required": true + } + }, + "description": "Configuration for secrets stored in Google Secret Manager.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "List of encryption configurations for the content.", + "description_kind": "plain" + } + }, + "inputs": { + "nesting_mode": "list", + "block": { + "attributes": { + "key": { "type": "string", - "description": "The type of the machine. See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types).", + "description": "A unique key for this input. Must be specified when using advanced mapping and edit lists.", "description_kind": "plain", - "optional": true + "optional": true, + "computed": true + }, + "uri": { + "type": "string", + "description": "URI of the media. 
Input files must be at least 5 seconds in duration and stored in Cloud Storage (for example, gs://bucket/inputs/file.mp4).\nIf empty, the value is populated from Job.input_uri.", + "description_kind": "plain", + "optional": true, + "computed": true } }, - "description": "The specification of a single machine used by the prediction", + "description": "List of input assets stored in Cloud Storage.", "description_kind": "plain" - }, - "min_items": 1, - "max_items": 1 - } - }, - "description": "The underlying dedicated resources that the deployment resource pool uses.", - "description_kind": "plain" - }, - "max_items": 1 - }, - "timeouts": { - "nesting_mode": "single", - "block": { - "attributes": { - "create": { - "type": "string", - "description_kind": "plain", - "optional": true + } }, - "delete": { - "type": "string", - "description_kind": "plain", - "optional": true - } - }, - "description_kind": "plain" - } - } - }, - "description_kind": "plain" - } - }, - "google_vertex_ai_endpoint": { - "version": 0, - "block": { - "attributes": { - "create_time": { - "type": "string", - "description": "Output only. Timestamp when this Endpoint was created.", - "description_kind": "plain", - "computed": true - }, - "deployed_models": { - "type": [ - "list", - [ - "object", - { - "automatic_resources": [ - "list", - [ - "object", - { - "max_replica_count": "number", - "min_replica_count": "number" - } - ] - ], - "create_time": "string", - "dedicated_resources": [ - "list", - [ - "object", - { - "autoscaling_metric_specs": [ + "manifests": { + "nesting_mode": "list", + "block": { + "attributes": { + "file_name": { + "type": "string", + "description": "The name of the generated file. 
The default is 'manifest'.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "mux_streams": { + "type": [ "list", - [ - "object", - { - "metric_name": "string", - "target": "number" - } - ] + "string" ], - "machine_spec": [ + "description": "List of user supplied MuxStream.key values that should appear in this manifest.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "type": { + "type": "string", + "description": "Type of the manifest. Possible values: [\"MANIFEST_TYPE_UNSPECIFIED\", \"HLS\", \"DASH\"]", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "description": "Manifest configuration.", + "description_kind": "plain" + } + }, + "mux_streams": { + "nesting_mode": "list", + "block": { + "attributes": { + "container": { + "type": "string", + "description": "The container format. The default is 'mp4'.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "elementary_streams": { + "type": [ "list", - [ - "object", - { - "accelerator_count": "number", - "accelerator_type": "string", - "machine_type": "string" - } - ] + "string" ], - "max_replica_count": "number", - "min_replica_count": "number" + "description": "List of ElementaryStream.key values multiplexed in this stream.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "encryption_id": { + "type": "string", + "description": "Identifier of the encryption configuration to use.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "file_name": { + "type": "string", + "description": "The name of the generated file.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "key": { + "type": "string", + "description": "A unique key for this multiplexed stream.", + "description_kind": "plain", + "optional": true, + "computed": true } - ] - ], - "display_name": "string", - "enable_access_logging": "bool", - 
"enable_container_logging": "bool", - "id": "string", - "model": "string", - "model_version_id": "string", - "private_endpoints": [ - "list", - [ - "object", - { - "explain_http_uri": "string", - "health_http_uri": "string", - "predict_http_uri": "string", - "service_attachment": "string" + }, + "block_types": { + "segment_settings": { + "nesting_mode": "list", + "block": { + "attributes": { + "segment_duration": { + "type": "string", + "description": "Duration of the segments in seconds. The default is '6.0s'.", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "description": "Segment settings for ts, fmp4 and vtt.", + "description_kind": "plain" + }, + "max_items": 1 } - ] - ], - "service_account": "string", - "shared_resources": "string" - } - ] - ], - "description": "Output only. The models deployed in this Endpoint. To add or remove DeployedModels use EndpointService.DeployModel and EndpointService.UndeployModel respectively. Models can also be deployed and undeployed using the [Cloud Console](https://console.cloud.google.com/vertex-ai/).", - "description_kind": "plain", - "computed": true - }, - "description": { - "type": "string", - "description": "The description of the Endpoint.", - "description_kind": "plain", - "optional": true - }, - "display_name": { - "type": "string", - "description": "Required. The display name of the Endpoint. The name can be up to 128 characters long and can consist of any UTF-8 characters.", - "description_kind": "plain", - "required": true - }, - "effective_labels": { - "type": [ - "map", - "string" - ], - "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", - "description_kind": "plain", - "computed": true - }, - "etag": { - "type": "string", - "description": "Used to perform consistent read-modify-write updates. 
If not set, a blind \"overwrite\" update happens.", - "description_kind": "plain", - "computed": true - }, - "id": { - "type": "string", - "description_kind": "plain", - "optional": true, - "computed": true - }, - "labels": { - "type": [ - "map", - "string" - ], - "description": "The labels with user-defined metadata to organize your Endpoints. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels.\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", - "description_kind": "plain", - "optional": true - }, - "location": { - "type": "string", - "description": "The location for the resource", - "description_kind": "plain", - "required": true - }, - "model_deployment_monitoring_job": { - "type": "string", - "description": "Output only. Resource name of the Model Monitoring job associated with this Endpoint if monitoring is enabled by CreateModelDeploymentMonitoringJob. Format: 'projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}'", - "description_kind": "plain", - "computed": true - }, - "name": { - "type": "string", - "description": "The resource name of the Endpoint. The name must be numeric with no leading zeros and can be at most 10 digits.", - "description_kind": "plain", - "required": true - }, - "network": { - "type": "string", - "description": "The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) to which the Endpoint should be peered. Private services access must already be configured for the network. If left unspecified, the Endpoint is not peered with any network. 
Only one of the fields, network or enable_private_service_connect, can be set. [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert): 'projects/{project}/global/networks/{network}'. Where '{project}' is a project number, as in '12345', and '{network}' is network name.", - "description_kind": "plain", - "optional": true - }, - "project": { - "type": "string", - "description_kind": "plain", - "optional": true, - "computed": true - }, - "region": { - "type": "string", - "description": "The region for the resource", - "description_kind": "plain", - "optional": true - }, - "terraform_labels": { - "type": [ - "map", - "string" - ], - "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", - "description_kind": "plain", - "computed": true - }, - "update_time": { - "type": "string", - "description": "Output only. Timestamp when this Endpoint was last updated.", - "description_kind": "plain", - "computed": true - } - }, - "block_types": { - "encryption_spec": { - "nesting_mode": "list", - "block": { - "attributes": { - "kms_key_name": { - "type": "string", - "description": "Required. The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. Has the form: 'projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key'. The key needs to be in the same region as where the compute resource is created.", - "description_kind": "plain", - "required": true - } - }, - "description": "Customer-managed encryption key spec for an Endpoint. 
If set, this Endpoint and all sub-resources of this Endpoint will be secured by this key.", - "description_kind": "plain" - }, - "max_items": 1 - }, - "timeouts": { - "nesting_mode": "single", - "block": { - "attributes": { - "create": { - "type": "string", - "description_kind": "plain", - "optional": true + }, + "description": "Multiplexing settings for output stream.", + "description_kind": "plain" + } }, - "delete": { - "type": "string", - "description_kind": "plain", - "optional": true + "output": { + "nesting_mode": "list", + "block": { + "attributes": { + "uri": { + "type": "string", + "description": "URI for the output file(s). For example, gs://my-bucket/outputs/.", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "description": "Location of output file(s) in a Cloud Storage bucket.", + "description_kind": "plain" + }, + "max_items": 1 }, - "update": { - "type": "string", - "description_kind": "plain", - "optional": true - } - }, - "description_kind": "plain" - } - } - }, - "description_kind": "plain" - } - }, - "google_vertex_ai_feature_group": { - "version": 0, - "block": { - "attributes": { - "create_time": { - "type": "string", - "description": "The timestamp of when the FeatureGroup was created in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits.", - "description_kind": "plain", - "computed": true - }, - "description": { - "type": "string", - "description": "The description of the FeatureGroup.", - "description_kind": "plain", - "optional": true - }, - "effective_labels": { - "type": [ - "map", - "string" - ], - "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", - "description_kind": "plain", - "computed": true - }, - "etag": { - "type": "string", - "description": "Used to perform consistent read-modify-write updates.", - "description_kind": "plain", - "computed": true - }, 
- "id": { - "type": "string", - "description_kind": "plain", - "optional": true, - "computed": true - }, - "labels": { - "type": [ - "map", - "string" - ], - "description": "The labels with user-defined metadata to organize your FeatureGroup.\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", - "description_kind": "plain", - "optional": true - }, - "name": { - "type": "string", - "description": "The resource name of the Feature Group.", - "description_kind": "plain", - "optional": true - }, - "project": { - "type": "string", - "description_kind": "plain", - "optional": true, - "computed": true - }, - "region": { - "type": "string", - "description": "The region of feature group. eg us-central1", - "description_kind": "plain", - "optional": true - }, - "terraform_labels": { - "type": [ - "map", - "string" - ], - "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", - "description_kind": "plain", - "computed": true - }, - "update_time": { - "type": "string", - "description": "The timestamp of when the FeatureGroup was last updated in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits.", - "description_kind": "plain", - "computed": true - } - }, - "block_types": { - "big_query": { - "nesting_mode": "list", - "block": { - "attributes": { - "entity_id_columns": { - "type": [ - "list", - "string" - ], - "description": "Columns to construct entityId / row keys. 
If not provided defaults to entityId.", - "description_kind": "plain", - "optional": true - } - }, - "block_types": { - "big_query_source": { + "overlays": { + "nesting_mode": "list", + "block": { + "block_types": { + "animations": { + "nesting_mode": "list", + "block": { + "block_types": { + "animation_fade": { + "nesting_mode": "list", + "block": { + "attributes": { + "end_time_offset": { + "type": "string", + "description": "The time to end the fade animation, in seconds.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "fade_type": { + "type": "string", + "description": "Required. Type of fade animation: 'FADE_IN' or 'FADE_OUT'.\nThe possible values are:\n\n* 'FADE_TYPE_UNSPECIFIED': The fade type is not specified.\n\n* 'FADE_IN': Fade the overlay object into view.\n\n* 'FADE_OUT': Fade the overlay object out of view. Possible values: [\"FADE_TYPE_UNSPECIFIED\", \"FADE_IN\", \"FADE_OUT\"]", + "description_kind": "plain", + "required": true + }, + "start_time_offset": { + "type": "string", + "description": "The time to start the fade animation, in seconds.", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "block_types": { + "xy": { + "nesting_mode": "list", + "block": { + "attributes": { + "x": { + "type": "number", + "description": "Normalized x coordinate.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "y": { + "type": "number", + "description": "Normalized y coordinate.", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "description": "Normalized coordinates based on output video resolution.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "Display overlay object with fade animation.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "List of animations. 
The list should be chronological, without any time overlap.", + "description_kind": "plain" + } + }, + "image": { + "nesting_mode": "list", + "block": { + "attributes": { + "uri": { + "type": "string", + "description": "URI of the image in Cloud Storage. For example, gs://bucket/inputs/image.png.", + "description_kind": "plain", + "required": true + } + }, + "description": "Image overlay.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "List of overlays on the output video, in descending Z-order.", + "description_kind": "plain" + } + }, + "pubsub_destination": { "nesting_mode": "list", "block": { "attributes": { - "input_uri": { + "topic": { "type": "string", - "description": "BigQuery URI to a table, up to 2000 characters long. For example: 'bq://projectId.bqDatasetId.bqTableId.'", + "description": "The name of the Pub/Sub topic to publish job completion notification to. For example: projects/{project}/topics/{topic}.", "description_kind": "plain", - "required": true + "optional": true } }, - "description": "The BigQuery source URI that points to either a BigQuery Table or View.", + "description": "Pub/Sub destination.", "description_kind": "plain" }, - "min_items": 1, "max_items": 1 } }, - "description": "Indicates that features for this group come from BigQuery Table/View. 
By default treats the source as a sparse time series source, which is required to have an entityId and a feature_timestamp column in the source.", + "description": "The configuration for this template.", "description_kind": "plain" }, "max_items": 1 @@ -149944,22 +163806,10 @@ "description_kind": "plain" } }, - "google_vertex_ai_feature_group_feature": { + "google_transcoder_job_template": { "version": 0, "block": { "attributes": { - "create_time": { - "type": "string", - "description": "The timestamp of when the FeatureGroup was created in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits.", - "description_kind": "plain", - "computed": true - }, - "description": { - "type": "string", - "description": "The description of the FeatureGroup.", - "description_kind": "plain", - "optional": true - }, "effective_labels": { "type": [ "map", @@ -149969,44 +163819,44 @@ "description_kind": "plain", "computed": true }, - "feature_group": { - "type": "string", - "description": "The name of the Feature Group.", - "description_kind": "plain", - "required": true - }, "id": { "type": "string", "description_kind": "plain", "optional": true, "computed": true }, + "job_template_id": { + "type": "string", + "description": "ID to use for the Transcoding job template.", + "description_kind": "plain", + "required": true + }, "labels": { "type": [ "map", "string" ], - "description": "The labels with user-defined metadata to organize your FeatureGroup.\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", + "description": "The labels associated with this job template. 
You can use these to organize and group your job templates.\n\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", "description_kind": "plain", "optional": true }, - "name": { + "location": { "type": "string", - "description": "The resource name of the Feature Group Feature.", + "description": "The location of the transcoding job template resource.", "description_kind": "plain", "required": true }, - "project": { + "name": { "type": "string", + "description": "The resource name of the job template.", "description_kind": "plain", - "optional": true, "computed": true }, - "region": { + "project": { "type": "string", - "description": "The region for the resource. It should be the same as the feature group's region.", "description_kind": "plain", - "required": true + "optional": true, + "computed": true }, "terraform_labels": { "type": [ @@ -150016,35 +163866,625 @@ "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", "description_kind": "plain", "computed": true - }, - "update_time": { - "type": "string", - "description": "The timestamp of when the FeatureGroup was last updated in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits.", - "description_kind": "plain", - "computed": true - }, - "version_column_name": { - "type": "string", - "description": "The name of the BigQuery Table/View column hosting data for this version. 
If no value is provided, will use featureId.", - "description_kind": "plain", - "optional": true, - "computed": true } }, "block_types": { - "timeouts": { - "nesting_mode": "single", + "config": { + "nesting_mode": "list", "block": { - "attributes": { - "create": { - "type": "string", - "description_kind": "plain", - "optional": true + "block_types": { + "ad_breaks": { + "nesting_mode": "list", + "block": { + "attributes": { + "start_time_offset": { + "type": "string", + "description": "Start time in seconds for the ad break, relative to the output file timeline", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "description": "Ad break.", + "description_kind": "plain" + } }, - "delete": { - "type": "string", - "description_kind": "plain", - "optional": true + "edit_list": { + "nesting_mode": "list", + "block": { + "attributes": { + "inputs": { + "type": [ + "list", + "string" + ], + "description": "List of values identifying files that should be used in this atom.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "key": { + "type": "string", + "description": "A unique key for this atom.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "start_time_offset": { + "type": "string", + "description": "Start time in seconds for the atom, relative to the input file timeline. 
The default is '0s'.", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "description": "List of input assets stored in Cloud Storage.", + "description_kind": "plain" + } + }, + "elementary_streams": { + "nesting_mode": "list", + "block": { + "attributes": { + "key": { + "type": "string", + "description": "A unique key for this atom.", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "block_types": { + "audio_stream": { + "nesting_mode": "list", + "block": { + "attributes": { + "bitrate_bps": { + "type": "number", + "description": "Audio bitrate in bits per second.", + "description_kind": "plain", + "required": true + }, + "channel_count": { + "type": "number", + "description": "Number of audio channels. The default is '2'.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "channel_layout": { + "type": [ + "list", + "string" + ], + "description": "A list of channel names specifying layout of the audio channels. The default is [\"fl\", \"fr\"].", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "codec": { + "type": "string", + "description": "The codec for this audio stream. The default is 'aac'.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "sample_rate_hertz": { + "type": "number", + "description": "The audio sample rate in Hertz. The default is '48000'.", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "description": "Encoding of an audio stream.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "video_stream": { + "nesting_mode": "list", + "block": { + "block_types": { + "h264": { + "nesting_mode": "list", + "block": { + "attributes": { + "bitrate_bps": { + "type": "number", + "description": "The video bitrate in bits per second.", + "description_kind": "plain", + "required": true + }, + "crf_level": { + "type": "number", + "description": "Target CRF level. 
The default is '21'.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "entropy_coder": { + "type": "string", + "description": "The entropy coder to use. The default is 'cabac'.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "frame_rate": { + "type": "number", + "description": "The target video frame rate in frames per second (FPS).", + "description_kind": "plain", + "required": true + }, + "gop_duration": { + "type": "string", + "description": "Select the GOP size based on the specified duration. The default is '3s'.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "height_pixels": { + "type": "number", + "description": "The height of the video in pixels.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "pixel_format": { + "type": "string", + "description": "Pixel format to use. The default is 'yuv420p'.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "preset": { + "type": "string", + "description": "Enforces the specified codec preset. The default is 'veryfast'.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "profile": { + "type": "string", + "description": "Enforces the specified codec profile.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "rate_control_mode": { + "type": "string", + "description": "Specify the mode. 
The default is 'vbr'.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "vbv_fullness_bits": { + "type": "number", + "description": "Initial fullness of the Video Buffering Verifier (VBV) buffer in bits.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "vbv_size_bits": { + "type": "number", + "description": "Size of the Video Buffering Verifier (VBV) buffer in bits.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "width_pixels": { + "type": "number", + "description": "The width of the video in pixels.", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "block_types": { + "hlg": { + "nesting_mode": "list", + "block": { + "description": "HLG color format setting for H264.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "sdr": { + "nesting_mode": "list", + "block": { + "description": "SDR color format setting for H264.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "H264 codec settings", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "Encoding of a video stream.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "List of input assets stored in Cloud Storage.", + "description_kind": "plain" + } + }, + "encryptions": { + "nesting_mode": "list", + "block": { + "attributes": { + "id": { + "type": "string", + "description": "Identifier for this set of encryption options.", + "description_kind": "plain", + "required": true + } + }, + "block_types": { + "aes128": { + "nesting_mode": "list", + "block": { + "description": "Configuration for AES-128 encryption.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "drm_systems": { + "nesting_mode": "list", + "block": { + "block_types": { + "clearkey": { + "nesting_mode": "list", + "block": { + "description": "Clearkey configuration.", + "description_kind": "plain" + }, + "max_items": 1 + }, + 
"fairplay": { + "nesting_mode": "list", + "block": { + "description": "Fairplay configuration.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "playready": { + "nesting_mode": "list", + "block": { + "description": "Playready configuration.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "widevine": { + "nesting_mode": "list", + "block": { + "description": "Widevine configuration.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "DRM system(s) to use; at least one must be specified. If a DRM system is omitted, it is considered disabled.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "mpeg_cenc": { + "nesting_mode": "list", + "block": { + "attributes": { + "scheme": { + "type": "string", + "description": "Specify the encryption scheme.", + "description_kind": "plain", + "required": true + } + }, + "description": "Configuration for MPEG Common Encryption (MPEG-CENC).", + "description_kind": "plain" + }, + "max_items": 1 + }, + "sample_aes": { + "nesting_mode": "list", + "block": { + "description": "Configuration for SAMPLE-AES encryption.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "secret_manager_key_source": { + "nesting_mode": "list", + "block": { + "attributes": { + "secret_version": { + "type": "string", + "description": "The name of the Secret Version containing the encryption key in the following format: projects/{project}/secrets/{secret_id}/versions/{version_number}.", + "description_kind": "plain", + "required": true + } + }, + "description": "Configuration for secrets stored in Google Secret Manager.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "List of encryption configurations for the content.", + "description_kind": "plain" + } + }, + "inputs": { + "nesting_mode": "list", + "block": { + "attributes": { + "key": { + "type": "string", + "description": "A unique key for this input. 
Must be specified when using advanced mapping and edit lists.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "uri": { + "type": "string", + "description": "URI of the media. Input files must be at least 5 seconds in duration and stored in Cloud Storage (for example, gs://bucket/inputs/file.mp4).\nIf empty, the value is populated from Job.input_uri.", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "description": "List of input assets stored in Cloud Storage.", + "description_kind": "plain" + } + }, + "manifests": { + "nesting_mode": "list", + "block": { + "attributes": { + "file_name": { + "type": "string", + "description": "The name of the generated file. The default is 'manifest'.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "mux_streams": { + "type": [ + "list", + "string" + ], + "description": "List of user supplied MuxStream.key values that should appear in this manifest.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "type": { + "type": "string", + "description": "Type of the manifest. Possible values: [\"MANIFEST_TYPE_UNSPECIFIED\", \"HLS\", \"DASH\"]", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "description": "Manifest configuration.", + "description_kind": "plain" + } + }, + "mux_streams": { + "nesting_mode": "list", + "block": { + "attributes": { + "container": { + "type": "string", + "description": "The container format. 
The default is 'mp4'.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "elementary_streams": { + "type": [ + "list", + "string" + ], + "description": "List of ElementaryStream.key values multiplexed in this stream.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "encryption_id": { + "type": "string", + "description": "Identifier of the encryption configuration to use.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "file_name": { + "type": "string", + "description": "The name of the generated file.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "key": { + "type": "string", + "description": "A unique key for this multiplexed stream.", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "block_types": { + "segment_settings": { + "nesting_mode": "list", + "block": { + "attributes": { + "segment_duration": { + "type": "string", + "description": "Duration of the segments in seconds. The default is '6.0s'.", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "description": "Segment settings for ts, fmp4 and vtt.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "Multiplexing settings for output stream.", + "description_kind": "plain" + } + }, + "output": { + "nesting_mode": "list", + "block": { + "attributes": { + "uri": { + "type": "string", + "description": "URI for the output file(s). 
For example, gs://my-bucket/outputs/.", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "description": "Location of output file(s) in a Cloud Storage bucket.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "overlays": { + "nesting_mode": "list", + "block": { + "block_types": { + "animations": { + "nesting_mode": "list", + "block": { + "block_types": { + "animation_fade": { + "nesting_mode": "list", + "block": { + "attributes": { + "end_time_offset": { + "type": "string", + "description": "The time to end the fade animation, in seconds.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "fade_type": { + "type": "string", + "description": "Required. Type of fade animation: 'FADE_IN' or 'FADE_OUT'.\nThe possible values are:\n\n* 'FADE_TYPE_UNSPECIFIED': The fade type is not specified.\n\n* 'FADE_IN': Fade the overlay object into view.\n\n* 'FADE_OUT': Fade the overlay object out of view. Possible values: [\"FADE_TYPE_UNSPECIFIED\", \"FADE_IN\", \"FADE_OUT\"]", + "description_kind": "plain", + "required": true + }, + "start_time_offset": { + "type": "string", + "description": "The time to start the fade animation, in seconds.", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "block_types": { + "xy": { + "nesting_mode": "list", + "block": { + "attributes": { + "x": { + "type": "number", + "description": "Normalized x coordinate.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "y": { + "type": "number", + "description": "Normalized y coordinate.", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "description": "Normalized coordinates based on output video resolution.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "Display overlay object with fade animation.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "List of animations. 
The list should be chronological, without any time overlap.", + "description_kind": "plain" + } + }, + "image": { + "nesting_mode": "list", + "block": { + "attributes": { + "uri": { + "type": "string", + "description": "URI of the image in Cloud Storage. For example, gs://bucket/inputs/image.png.", + "description_kind": "plain", + "required": true + } + }, + "description": "Image overlay.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "List of overlays on the output video, in descending Z-order.", + "description_kind": "plain" + } + }, + "pubsub_destination": { + "nesting_mode": "list", + "block": { + "attributes": { + "topic": { + "type": "string", + "description": "The name of the Pub/Sub topic to publish job completion notification to. For example: projects/{project}/topics/{topic}.", + "description_kind": "plain", + "optional": true + } + }, + "description": "Pub/Sub destination.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "The configuration for this template.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true }, "update": { "type": "string", @@ -150059,16 +164499,22 @@ "description_kind": "plain" } }, - "google_vertex_ai_feature_online_store": { + "google_vertex_ai_dataset": { "version": 0, "block": { "attributes": { "create_time": { "type": "string", - "description": "The timestamp of when the feature online store was created in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits.", + "description": "The timestamp of when the dataset was created in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits.", "description_kind": "plain", "computed": true }, + "display_name": 
{ + "type": "string", + "description": "The user-defined name of the Dataset. The name can be up to 128 characters long and can be consist of any UTF-8 characters.", + "description_kind": "plain", + "required": true + }, "effective_labels": { "type": [ "map", @@ -150078,18 +164524,6 @@ "description_kind": "plain", "computed": true }, - "etag": { - "type": "string", - "description": "Used to perform consistent read-modify-write updates.", - "description_kind": "plain", - "computed": true - }, - "force_destroy": { - "type": "bool", - "description": "If set to true, any FeatureViews and Features for this FeatureOnlineStore will also be deleted.", - "description_kind": "plain", - "optional": true - }, "id": { "type": "string", "description_kind": "plain", @@ -150101,33 +164535,33 @@ "map", "string" ], - "description": "The labels with user-defined metadata to organize your feature online stores.\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", + "description": "A set of key/value label pairs to assign to this Workflow.\n\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", "description_kind": "plain", "optional": true }, - "name": { + "metadata_schema_uri": { "type": "string", - "description": "The resource name of the Feature Online Store. This value may be up to 60 characters, and valid characters are [a-z0-9_]. The first character cannot be a number.", + "description": "Points to a YAML file stored on Google Cloud Storage describing additional information about the Dataset. The schema is defined as an OpenAPI 3.0.2 Schema Object. 
The schema files that can be used here are found in gs://google-cloud-aiplatform/schema/dataset/metadata/.", "description_kind": "plain", "required": true }, - "project": { + "name": { "type": "string", + "description": "The resource name of the Dataset. This value is set by Google.", "description_kind": "plain", - "optional": true, "computed": true }, - "region": { + "project": { "type": "string", - "description": "The region of feature online store. eg us-central1", "description_kind": "plain", "optional": true, "computed": true }, - "state": { + "region": { "type": "string", - "description": "The state of the Feature Online Store. See the possible states in [this link](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.featureOnlineStores#state).", + "description": "The region of the dataset. eg us-central1", "description_kind": "plain", + "optional": true, "computed": true }, "terraform_labels": { @@ -150141,105 +164575,161 @@ }, "update_time": { "type": "string", - "description": "The timestamp of when the feature online store was last updated in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits.", + "description": "The timestamp of when the dataset was last updated in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits.", "description_kind": "plain", "computed": true } }, "block_types": { - "bigtable": { + "encryption_spec": { "nesting_mode": "list", "block": { - "block_types": { - "auto_scaling": { - "nesting_mode": "list", - "block": { - "attributes": { - "cpu_utilization_target": { - "type": "number", - "description": "A percentage of the cluster's CPU capacity. Can be from 10% to 80%. When a cluster's CPU utilization exceeds the target that you have set, Bigtable immediately adds nodes to the cluster. When CPU utilization is substantially lower than the target, Bigtable removes nodes. 
If not set will default to 50%.", - "description_kind": "plain", - "optional": true, - "computed": true - }, - "max_node_count": { - "type": "number", - "description": "The maximum number of nodes to scale up to. Must be greater than or equal to minNodeCount, and less than or equal to 10 times of 'minNodeCount'.", - "description_kind": "plain", - "required": true - }, - "min_node_count": { - "type": "number", - "description": "The minimum number of nodes to scale down to. Must be greater than or equal to 1.", - "description_kind": "plain", - "required": true - } - }, - "description": "Autoscaling config applied to Bigtable Instance.", - "description_kind": "plain" - }, - "min_items": 1, - "max_items": 1 + "attributes": { + "kms_key_name": { + "type": "string", + "description": "Required. The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource.\nHas the form: projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key. The key needs to be in the same region as where the resource is created.", + "description_kind": "plain", + "optional": true } }, - "description": "Settings for Cloud Bigtable instance that will be created to serve featureValues for all FeatureViews under this FeatureOnlineStore.", + "description": "Customer-managed encryption key spec for a Dataset. If set, this Dataset and all sub-resources of this Dataset will be secured by this key.", "description_kind": "plain" }, "max_items": 1 }, - "dedicated_serving_endpoint": { - "nesting_mode": "list", + "timeouts": { + "nesting_mode": "single", "block": { "attributes": { - "public_endpoint_domain_name": { + "create": { "type": "string", - "description": "Domain name to use for this FeatureOnlineStore", "description_kind": "plain", - "computed": true + "optional": true }, - "service_attachment": { + "delete": { "type": "string", - "description": "Name of the service attachment resource. 
Applicable only if private service connect is enabled and after FeatureViewSync is created.", "description_kind": "plain", - "computed": true + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, + "google_vertex_ai_deployment_resource_pool": { + "version": 0, + "block": { + "attributes": { + "create_time": { + "type": "string", + "description": "A timestamp in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits.", + "description_kind": "plain", + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "name": { + "type": "string", + "description": "The resource name of deployment resource pool. The maximum length is 63 characters, and valid characters are '/^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$/'.", + "description_kind": "plain", + "required": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "region": { + "type": "string", + "description": "The region of deployment resource pool. eg us-central1", + "description_kind": "plain", + "optional": true + } + }, + "block_types": { + "dedicated_resources": { + "nesting_mode": "list", + "block": { + "attributes": { + "max_replica_count": { + "type": "number", + "description": "The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, will use min_replica_count as the default value. 
The value of this field impacts the charge against Vertex CPU and GPU quotas. Specifically, you will be charged for max_replica_count * number of cores in the selected machine type) and (max_replica_count * number of GPUs per replica in the selected machine type).", + "description_kind": "plain", + "optional": true + }, + "min_replica_count": { + "type": "number", + "description": "The minimum number of machine replicas this DeployedModel will be always deployed on. This value must be greater than or equal to 1. If traffic against the DeployedModel increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed.", + "description_kind": "plain", + "required": true } }, "block_types": { - "private_service_connect_config": { + "autoscaling_metric_specs": { "nesting_mode": "list", "block": { "attributes": { - "enable_private_service_connect": { - "type": "bool", - "description": "If set to true, customers will use private service connection to send request. Otherwise, the connection will set to public endpoint.", + "metric_name": { + "type": "string", + "description": "The resource metric name. Supported metrics: For Online Prediction: * 'aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle' * 'aiplatform.googleapis.com/prediction/online/cpu/utilization'", "description_kind": "plain", "required": true }, - "project_allowlist": { - "type": [ - "list", - "string" - ], - "description": "A list of Projects from which the forwarding rule will target the service attachment.", + "target": { + "type": "number", + "description": "The target resource utilization in percentage (1% - 100%) for the given metric; once the real usage deviates from the target by a certain percentage, the machine replicas change. 
The default value is 60 (representing 60%) if not provided.", "description_kind": "plain", "optional": true } }, - "description": "Private service connect config.", + "description": "A list of the metric specifications that overrides a resource utilization metric.", + "description_kind": "plain" + } + }, + "machine_spec": { + "nesting_mode": "list", + "block": { + "attributes": { + "accelerator_count": { + "type": "number", + "description": "The number of accelerators to attach to the machine.", + "description_kind": "plain", + "optional": true + }, + "accelerator_type": { + "type": "string", + "description": "The type of accelerator(s) that may be attached to the machine as per accelerator_count. See possible values [here](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec#AcceleratorType).", + "description_kind": "plain", + "optional": true + }, + "machine_type": { + "type": "string", + "description": "The type of the machine. See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types).", + "description_kind": "plain", + "optional": true + } + }, + "description": "The specification of a single machine used by the prediction", "description_kind": "plain" }, + "min_items": 1, "max_items": 1 } }, - "description": "The dedicated serving endpoint for this FeatureOnlineStore, which is different from common vertex service endpoint. Only need to be set when you choose Optimized storage type or enable EmbeddingManagement. 
Will use public endpoint by default.", - "description_kind": "plain" - }, - "max_items": 1 - }, - "optimized": { - "nesting_mode": "list", - "block": { - "description": "Settings for the Optimized store that will be created to serve featureValues for all FeatureViews under this FeatureOnlineStore", + "description": "The underlying dedicated resources that the deployment resource pool uses.", "description_kind": "plain" }, "max_items": 1 @@ -150257,11 +164747,6 @@ "type": "string", "description_kind": "plain", "optional": true - }, - "update": { - "type": "string", - "description_kind": "plain", - "optional": true } }, "description_kind": "plain" @@ -150271,16 +164756,115 @@ "description_kind": "plain" } }, - "google_vertex_ai_feature_online_store_featureview": { + "google_vertex_ai_endpoint": { "version": 0, "block": { "attributes": { "create_time": { "type": "string", - "description": "The timestamp of when the featureOnlinestore was created in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits.", + "description": "Output only. Timestamp when this Endpoint was created.", + "description_kind": "plain", + "computed": true + }, + "dedicated_endpoint_dns": { + "type": "string", + "description": "Output only. DNS of the dedicated endpoint. Will only be populated if dedicatedEndpointEnabled is true. Format: 'https://{endpointId}.{region}-{projectNumber}.prediction.vertexai.goog'.", + "description_kind": "plain", + "computed": true + }, + "dedicated_endpoint_enabled": { + "type": "bool", + "description": "If true, the endpoint will be exposed through a dedicated DNS [Endpoint.dedicated_endpoint_dns]. Your request to the dedicated DNS will be isolated from other users' traffic and will have better performance and reliability. Note: Once you enabled dedicated endpoint, you won't be able to send request to the shared DNS {region}-aiplatform.googleapis.com. 
The limitation will be removed soon.", + "description_kind": "plain", + "optional": true + }, + "deployed_models": { + "type": [ + "list", + [ + "object", + { + "automatic_resources": [ + "list", + [ + "object", + { + "max_replica_count": "number", + "min_replica_count": "number" + } + ] + ], + "create_time": "string", + "dedicated_resources": [ + "list", + [ + "object", + { + "autoscaling_metric_specs": [ + "list", + [ + "object", + { + "metric_name": "string", + "target": "number" + } + ] + ], + "machine_spec": [ + "list", + [ + "object", + { + "accelerator_count": "number", + "accelerator_type": "string", + "machine_type": "string" + } + ] + ], + "max_replica_count": "number", + "min_replica_count": "number" + } + ] + ], + "display_name": "string", + "enable_access_logging": "bool", + "enable_container_logging": "bool", + "id": "string", + "model": "string", + "model_version_id": "string", + "private_endpoints": [ + "list", + [ + "object", + { + "explain_http_uri": "string", + "health_http_uri": "string", + "predict_http_uri": "string", + "service_attachment": "string" + } + ] + ], + "service_account": "string", + "shared_resources": "string" + } + ] + ], + "description": "Output only. The models deployed in this Endpoint. To add or remove DeployedModels use EndpointService.DeployModel and EndpointService.UndeployModel respectively. Models can also be deployed and undeployed using the [Cloud Console](https://console.cloud.google.com/vertex-ai/).", "description_kind": "plain", "computed": true }, + "description": { + "type": "string", + "description": "The description of the Endpoint.", + "description_kind": "plain", + "optional": true + }, + "display_name": { + "type": "string", + "description": "Required. The display name of the Endpoint. 
The name can be up to 128 characters long and can consist of any UTF-8 characters.", + "description_kind": "plain", + "required": true + }, "effective_labels": { "type": [ "map", @@ -150290,11 +164874,11 @@ "description_kind": "plain", "computed": true }, - "feature_online_store": { + "etag": { "type": "string", - "description": "The name of the FeatureOnlineStore to use for the featureview.", + "description": "Used to perform consistent read-modify-write updates. If not set, a blind \"overwrite\" update happens.", "description_kind": "plain", - "required": true + "computed": true }, "id": { "type": "string", @@ -150307,13 +164891,31 @@ "map", "string" ], - "description": "A set of key/value label pairs to assign to this FeatureView.\n\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", + "description": "The labels with user-defined metadata to organize your Endpoints. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels.\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", "description_kind": "plain", "optional": true }, + "location": { + "type": "string", + "description": "The location for the resource", + "description_kind": "plain", + "required": true + }, + "model_deployment_monitoring_job": { + "type": "string", + "description": "Output only. Resource name of the Model Monitoring job associated with this Endpoint if monitoring is enabled by CreateModelDeploymentMonitoringJob. 
Format: 'projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}'", + "description_kind": "plain", + "computed": true + }, "name": { "type": "string", - "description": "Name of the FeatureView. This value may be up to 60 characters, and valid characters are [a-z0-9_]. The first character cannot be a number.", + "description": "The resource name of the Endpoint. The name must be numeric with no leading zeros and can be at most 10 digits.", + "description_kind": "plain", + "required": true + }, + "network": { + "type": "string", + "description": "The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) to which the Endpoint should be peered. Private services access must already be configured for the network. If left unspecified, the Endpoint is not peered with any network. Only one of the fields, network or enable_private_service_connect, can be set. [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert): 'projects/{project}/global/networks/{network}'. Where '{project}' is a project number, as in '12345', and '{network}' is network name. Only one of the fields, 'network' or 'privateServiceConnectConfig', can be set.", "description_kind": "plain", "optional": true }, @@ -150325,9 +164927,9 @@ }, "region": { "type": "string", - "description": "The region for the resource. 
It should be the same as the featureonlinestore region.", + "description": "The region for the resource", "description_kind": "plain", - "required": true + "optional": true }, "terraform_labels": { "type": [ @@ -150338,95 +164940,104 @@ "description_kind": "plain", "computed": true }, + "traffic_split": { + "type": "string", + "description": "A map from a DeployedModel's id to the percentage of this Endpoint's traffic that should be forwarded to that DeployedModel.\nIf a DeployedModel's id is not listed in this map, then it receives no traffic.\nThe traffic percentage values must add up to 100, or map must be empty if the Endpoint is to not accept any traffic at a moment. See\nthe 'deployModel' [example](https://cloud.google.com/vertex-ai/docs/general/deployment#deploy_a_model_to_an_endpoint) and\n[documentation](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/projects.locations.endpoints/deployModel) for more information.\n\n~> **Note:** To set the map to empty, set '\"{}\"', apply, and then remove the field from your config.", + "description_kind": "plain", + "optional": true, + "computed": true + }, "update_time": { "type": "string", - "description": "The timestamp of when the featureOnlinestore was last updated in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits.", + "description": "Output only. Timestamp when this Endpoint was last updated.", "description_kind": "plain", "computed": true } }, "block_types": { - "big_query_source": { + "encryption_spec": { "nesting_mode": "list", "block": { "attributes": { - "entity_id_columns": { - "type": [ - "list", - "string" - ], - "description": "Columns to construct entityId / row keys. Start by supporting 1 only.", - "description_kind": "plain", - "required": true - }, - "uri": { + "kms_key_name": { "type": "string", - "description": "The BigQuery view URI that will be materialized on each sync trigger based on FeatureView.SyncConfig.", + "description": "Required. 
The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. Has the form: 'projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key'. The key needs to be in the same region as where the compute resource is created.", "description_kind": "plain", "required": true } }, - "description": "Configures how data is supposed to be extracted from a BigQuery source to be loaded onto the FeatureOnlineStore.", + "description": "Customer-managed encryption key spec for an Endpoint. If set, this Endpoint and all sub-resources of this Endpoint will be secured by this key.", "description_kind": "plain" }, "max_items": 1 }, - "feature_registry_source": { + "predict_request_response_logging_config": { "nesting_mode": "list", "block": { "attributes": { - "project_number": { - "type": "string", - "description": "The project number of the parent project of the feature Groups.", + "enabled": { + "type": "bool", + "description": "If logging is enabled or not.", + "description_kind": "plain", + "optional": true + }, + "sampling_rate": { + "type": "number", + "description": "Percentage of requests to be logged, expressed as a fraction in range(0,1]", "description_kind": "plain", "optional": true } }, "block_types": { - "feature_groups": { + "bigquery_destination": { "nesting_mode": "list", "block": { "attributes": { - "feature_group_id": { + "output_uri": { "type": "string", - "description": "Identifier of the feature group.", - "description_kind": "plain", - "required": true - }, - "feature_ids": { - "type": [ - "list", - "string" - ], - "description": "Identifiers of features under the feature group.", + "description": "BigQuery URI to a project or table, up to 2000 characters long. When only the project is specified, the Dataset and Table is created. When the full table reference is specified, the Dataset must exist and table must not exist. Accepted forms: - BigQuery path. 
For example: 'bq://projectId' or 'bq://projectId.bqDatasetId' or 'bq://projectId.bqDatasetId.bqTableId'.", "description_kind": "plain", - "required": true + "optional": true } }, - "description": "List of features that need to be synced to Online Store.", + "description": "BigQuery table for logging. If only given a project, a new dataset will be created with name 'logging__' where will be made BigQuery-dataset-name compatible (e.g. most special characters will become underscores). If no table name is given, a new table will be created with name 'request_response_logging'", "description_kind": "plain" }, - "min_items": 1 + "max_items": 1 } }, - "description": "Configures the features from a Feature Registry source that need to be loaded onto the FeatureOnlineStore.", + "description": "Configures the request-response logging for online prediction.", "description_kind": "plain" }, "max_items": 1 }, - "sync_config": { + "private_service_connect_config": { "nesting_mode": "list", "block": { "attributes": { - "cron": { - "type": "string", - "description": "Cron schedule (https://en.wikipedia.org/wiki/Cron) to launch scheduled runs.\nTo explicitly set a timezone to the cron tab, apply a prefix in the cron tab: \"CRON_TZ=${IANA_TIME_ZONE}\" or \"TZ=${IANA_TIME_ZONE}\".", + "enable_private_service_connect": { + "type": "bool", + "description": "Required. If true, expose the IndexEndpoint via private service connect.", "description_kind": "plain", - "optional": true, - "computed": true + "required": true + }, + "enable_secure_private_service_connect": { + "type": "bool", + "description": "If set to true, enable secure private service connect with IAM authorization. Otherwise, private service connect will be done without authorization. 
Note latency will be slightly increased if authorization is enabled.", + "description_kind": "plain", + "optional": true + }, + "project_allowlist": { + "type": [ + "list", + "string" + ], + "description": "A list of Projects from which the forwarding rule will target the service attachment.", + "description_kind": "plain", + "optional": true } }, - "description": "Configures when data is to be synced/updated for this FeatureView. At the end of the sync the latest featureValues for each entityId of this FeatureView are made ready for online serving.", + "description": "Configuration for private service connect. 'network' and 'privateServiceConnectConfig' are mutually exclusive.", "description_kind": "plain" }, "max_items": 1 @@ -150458,16 +165069,22 @@ "description_kind": "plain" } }, - "google_vertex_ai_featurestore": { + "google_vertex_ai_feature_group": { "version": 0, "block": { "attributes": { "create_time": { "type": "string", - "description": "The timestamp of when the featurestore was created in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits.", + "description": "The timestamp of when the FeatureGroup was created in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits.", "description_kind": "plain", "computed": true }, + "description": { + "type": "string", + "description": "The description of the FeatureGroup.", + "description_kind": "plain", + "optional": true + }, "effective_labels": { "type": [ "map", @@ -150483,12 +165100,6 @@ "description_kind": "plain", "computed": true }, - "force_destroy": { - "type": "bool", - "description": "If set to true, any EntityTypes and Features for this Featurestore will also be deleted", - "description_kind": "plain", - "optional": true - }, "id": { "type": "string", "description_kind": "plain", @@ -150500,13 +165111,13 @@ "map", "string" ], - "description": "A set of key/value label pairs to assign to this Featurestore.\n\n\n**Note**: This 
field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", + "description": "The labels with user-defined metadata to organize your FeatureGroup.\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", "description_kind": "plain", "optional": true }, "name": { "type": "string", - "description": "The name of the Featurestore. This value may be up to 60 characters, and valid characters are [a-z0-9_]. The first character cannot be a number.", + "description": "The resource name of the Feature Group.", "description_kind": "plain", "optional": true }, @@ -150518,10 +165129,9 @@ }, "region": { "type": "string", - "description": "The region of the dataset. eg us-central1", + "description": "The region of feature group. eg us-central1", "description_kind": "plain", - "optional": true, - "computed": true + "optional": true }, "terraform_labels": { "type": [ @@ -150534,64 +165144,46 @@ }, "update_time": { "type": "string", - "description": "The timestamp of when the featurestore was last updated in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits.", + "description": "The timestamp of when the FeatureGroup was last updated in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits.", "description_kind": "plain", "computed": true } }, "block_types": { - "encryption_spec": { - "nesting_mode": "list", - "block": { - "attributes": { - "kms_key_name": { - "type": "string", - "description": "The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. Has the form: projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key. 
The key needs to be in the same region as where the compute resource is created.", - "description_kind": "plain", - "required": true - } - }, - "description": "If set, both of the online and offline data storage will be secured by this key.", - "description_kind": "plain" - }, - "max_items": 1 - }, - "online_serving_config": { + "big_query": { "nesting_mode": "list", "block": { "attributes": { - "fixed_node_count": { - "type": "number", - "description": "The number of nodes for each cluster. The number of nodes will not scale automatically but can be scaled manually by providing different values when updating.", + "entity_id_columns": { + "type": [ + "list", + "string" + ], + "description": "Columns to construct entityId / row keys. If not provided defaults to entityId.", "description_kind": "plain", "optional": true } }, "block_types": { - "scaling": { + "big_query_source": { "nesting_mode": "list", "block": { "attributes": { - "max_node_count": { - "type": "number", - "description": "The maximum number of nodes to scale up to. Must be greater than minNodeCount, and less than or equal to 10 times of 'minNodeCount'.", - "description_kind": "plain", - "required": true - }, - "min_node_count": { - "type": "number", - "description": "The minimum number of nodes to scale down to. Must be greater than or equal to 1.", + "input_uri": { + "type": "string", + "description": "BigQuery URI to a table, up to 2000 characters long. For example: 'bq://projectId.bqDatasetId.bqTableId.'", "description_kind": "plain", "required": true } }, - "description": "Online serving scaling configuration. Only one of fixedNodeCount and scaling can be set. Setting one will reset the other.", + "description": "The BigQuery source URI that points to either a BigQuery Table or View.", "description_kind": "plain" }, + "min_items": 1, "max_items": 1 } }, - "description": "Config for online serving resources.", + "description": "Indicates that features for this group come from BigQuery Table/View. 
By default treats the source as a sparse time series source, which is required to have an entityId and a feature_timestamp column in the source.", "description_kind": "plain" }, "max_items": 1 @@ -150623,19 +165215,19 @@ "description_kind": "plain" } }, - "google_vertex_ai_featurestore_entitytype": { + "google_vertex_ai_feature_group_feature": { "version": 0, "block": { "attributes": { "create_time": { "type": "string", - "description": "The timestamp of when the featurestore was created in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits.", + "description": "The timestamp of when the FeatureGroup was created in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits.", "description_kind": "plain", "computed": true }, "description": { "type": "string", - "description": "Optional. Description of the EntityType.", + "description": "The description of the FeatureGroup.", "description_kind": "plain", "optional": true }, @@ -150648,15 +165240,9 @@ "description_kind": "plain", "computed": true }, - "etag": { - "type": "string", - "description": "Used to perform consistent read-modify-write updates.", - "description_kind": "plain", - "computed": true - }, - "featurestore": { + "feature_group": { "type": "string", - "description": "The name of the Featurestore to use, in the format projects/{project}/locations/{location}/featurestores/{featurestore}.", + "description": "The name of the Feature Group.", "description_kind": "plain", "required": true }, @@ -150671,22 +165257,28 @@ "map", "string" ], - "description": "A set of key/value label pairs to assign to this EntityType.\n\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", + "description": "The labels with user-defined metadata to organize your FeatureGroup.\n\n**Note**: This field is 
non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", "description_kind": "plain", "optional": true }, "name": { "type": "string", - "description": "The name of the EntityType. This value may be up to 60 characters, and valid characters are [a-z0-9_]. The first character cannot be a number.", + "description": "The resource name of the Feature Group Feature.", "description_kind": "plain", - "optional": true + "required": true }, - "region": { + "project": { "type": "string", - "description": "The region of the EntityType.", "description_kind": "plain", + "optional": true, "computed": true }, + "region": { + "type": "string", + "description": "The region for the resource. It should be the same as the feature group's region.", + "description_kind": "plain", + "required": true + }, "terraform_labels": { "type": [ "map", @@ -150698,47 +165290,726 @@ }, "update_time": { "type": "string", - "description": "The timestamp of when the featurestore was last updated in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits.", + "description": "The timestamp of when the FeatureGroup was last updated in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits.", + "description_kind": "plain", + "computed": true + }, + "version_column_name": { + "type": "string", + "description": "The name of the BigQuery Table/View column hosting data for this version. 
If no value is provided, will use featureId.", "description_kind": "plain", + "optional": true, "computed": true } }, "block_types": { - "monitoring_config": { - "nesting_mode": "list", + "timeouts": { + "nesting_mode": "single", "block": { - "block_types": { - "categorical_threshold_config": { - "nesting_mode": "list", - "block": { - "attributes": { - "value": { - "type": "number", - "description": "Specify a threshold value that can trigger the alert. For categorical feature, the distribution distance is calculated by L-inifinity norm. Each feature must have a non-zero threshold if they need to be monitored. Otherwise no alert will be triggered for that feature. The default value is 0.3.", - "description_kind": "plain", - "required": true - } - }, - "description": "Threshold for categorical features of anomaly detection. This is shared by all types of Featurestore Monitoring for categorical features (i.e. Features with type (Feature.ValueType) BOOL or STRING).", - "description_kind": "plain" - }, - "max_items": 1 + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true }, - "import_features_analysis": { - "nesting_mode": "list", - "block": { - "attributes": { - "anomaly_detection_baseline": { - "type": "string", - "description": "Defines the baseline to do anomaly detection for feature values imported by each [entityTypes.importFeatureValues][] operation. The value must be one of the values below:\n* LATEST_STATS: Choose the later one statistics generated by either most recent snapshot analysis or previous import features analysis. 
If non of them exists, skip anomaly detection and only generate a statistics.\n* MOST_RECENT_SNAPSHOT_STATS: Use the statistics generated by the most recent snapshot analysis if exists.\n* PREVIOUS_IMPORT_FEATURES_STATS: Use the statistics generated by the previous import features analysis if exists.", - "description_kind": "plain", - "optional": true - }, - "state": { - "type": "string", - "description": "Whether to enable / disable / inherite default hebavior for import features analysis. The value must be one of the values below:\n* DEFAULT: The default behavior of whether to enable the monitoring. EntityType-level config: disabled.\n* ENABLED: Explicitly enables import features analysis. EntityType-level config: by default enables import features analysis for all Features under it.\n* DISABLED: Explicitly disables import features analysis. EntityType-level config: by default disables import features analysis for all Features under it.", - "description_kind": "plain", - "optional": true + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, + "google_vertex_ai_feature_online_store": { + "version": 0, + "block": { + "attributes": { + "create_time": { + "type": "string", + "description": "The timestamp of when the feature online store was created in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits.", + "description_kind": "plain", + "computed": true + }, + "effective_labels": { + "type": [ + "map", + "string" + ], + "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, + "etag": { + "type": "string", + "description": "Used to perform consistent 
read-modify-write updates.", + "description_kind": "plain", + "computed": true + }, + "force_destroy": { + "type": "bool", + "description": "If set to true, any FeatureViews and Features for this FeatureOnlineStore will also be deleted.", + "description_kind": "plain", + "optional": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "labels": { + "type": [ + "map", + "string" + ], + "description": "The labels with user-defined metadata to organize your feature online stores.\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", + "description_kind": "plain", + "optional": true + }, + "name": { + "type": "string", + "description": "The resource name of the Feature Online Store. This value may be up to 60 characters, and valid characters are [a-z0-9_]. The first character cannot be a number.", + "description_kind": "plain", + "required": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "region": { + "type": "string", + "description": "The region of feature online store. eg us-central1", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "state": { + "type": "string", + "description": "The state of the Feature Online Store. 
See the possible states in [this link](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.featureOnlineStores#state).", + "description_kind": "plain", + "computed": true + }, + "terraform_labels": { + "type": [ + "map", + "string" + ], + "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", + "description_kind": "plain", + "computed": true + }, + "update_time": { + "type": "string", + "description": "The timestamp of when the feature online store was last updated in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits.", + "description_kind": "plain", + "computed": true + } + }, + "block_types": { + "bigtable": { + "nesting_mode": "list", + "block": { + "block_types": { + "auto_scaling": { + "nesting_mode": "list", + "block": { + "attributes": { + "cpu_utilization_target": { + "type": "number", + "description": "A percentage of the cluster's CPU capacity. Can be from 10% to 80%. When a cluster's CPU utilization exceeds the target that you have set, Bigtable immediately adds nodes to the cluster. When CPU utilization is substantially lower than the target, Bigtable removes nodes. If not set will default to 50%.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "max_node_count": { + "type": "number", + "description": "The maximum number of nodes to scale up to. Must be greater than or equal to minNodeCount, and less than or equal to 10 times of 'minNodeCount'.", + "description_kind": "plain", + "required": true + }, + "min_node_count": { + "type": "number", + "description": "The minimum number of nodes to scale down to. 
Must be greater than or equal to 1.", + "description_kind": "plain", + "required": true + } + }, + "description": "Autoscaling config applied to Bigtable Instance.", + "description_kind": "plain" + }, + "min_items": 1, + "max_items": 1 + } + }, + "description": "Settings for Cloud Bigtable instance that will be created to serve featureValues for all FeatureViews under this FeatureOnlineStore.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "dedicated_serving_endpoint": { + "nesting_mode": "list", + "block": { + "attributes": { + "public_endpoint_domain_name": { + "type": "string", + "description": "Domain name to use for this FeatureOnlineStore", + "description_kind": "plain", + "computed": true + }, + "service_attachment": { + "type": "string", + "description": "Name of the service attachment resource. Applicable only if private service connect is enabled and after FeatureViewSync is created.", + "description_kind": "plain", + "computed": true + } + }, + "block_types": { + "private_service_connect_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "enable_private_service_connect": { + "type": "bool", + "description": "If set to true, customers will use private service connection to send request. Otherwise, the connection will set to public endpoint.", + "description_kind": "plain", + "required": true + }, + "project_allowlist": { + "type": [ + "list", + "string" + ], + "description": "A list of Projects from which the forwarding rule will target the service attachment.", + "description_kind": "plain", + "optional": true + } + }, + "description": "Private service connect config.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "The dedicated serving endpoint for this FeatureOnlineStore, which is different from common vertex service endpoint. Only need to be set when you choose Optimized storage type or enable EmbeddingManagement. 
Will use public endpoint by default.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "optimized": { + "nesting_mode": "list", + "block": { + "description": "Settings for the Optimized store that will be created to serve featureValues for all FeatureViews under this FeatureOnlineStore", + "description_kind": "plain" + }, + "max_items": 1 + }, + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, + "google_vertex_ai_feature_online_store_featureview": { + "version": 0, + "block": { + "attributes": { + "create_time": { + "type": "string", + "description": "The timestamp of when the featureOnlinestore was created in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits.", + "description_kind": "plain", + "computed": true + }, + "effective_labels": { + "type": [ + "map", + "string" + ], + "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, + "feature_online_store": { + "type": "string", + "description": "The name of the FeatureOnlineStore to use for the featureview.", + "description_kind": "plain", + "required": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "labels": { + "type": [ + "map", + "string" + ], + "description": "A set of key/value label pairs to assign to this FeatureView.\n\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 
'effective_labels' for all of the labels present on the resource.", + "description_kind": "plain", + "optional": true + }, + "name": { + "type": "string", + "description": "Name of the FeatureView. This value may be up to 60 characters, and valid characters are [a-z0-9_]. The first character cannot be a number.", + "description_kind": "plain", + "optional": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "region": { + "type": "string", + "description": "The region for the resource. It should be the same as the featureonlinestore region.", + "description_kind": "plain", + "required": true + }, + "terraform_labels": { + "type": [ + "map", + "string" + ], + "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", + "description_kind": "plain", + "computed": true + }, + "update_time": { + "type": "string", + "description": "The timestamp of when the featureOnlinestore was last updated in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits.", + "description_kind": "plain", + "computed": true + } + }, + "block_types": { + "big_query_source": { + "nesting_mode": "list", + "block": { + "attributes": { + "entity_id_columns": { + "type": [ + "list", + "string" + ], + "description": "Columns to construct entityId / row keys. 
Start by supporting 1 only.", + "description_kind": "plain", + "required": true + }, + "uri": { + "type": "string", + "description": "The BigQuery view URI that will be materialized on each sync trigger based on FeatureView.SyncConfig.", + "description_kind": "plain", + "required": true + } + }, + "description": "Configures how data is supposed to be extracted from a BigQuery source to be loaded onto the FeatureOnlineStore.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "feature_registry_source": { + "nesting_mode": "list", + "block": { + "attributes": { + "project_number": { + "type": "string", + "description": "The project number of the parent project of the feature Groups.", + "description_kind": "plain", + "optional": true + } + }, + "block_types": { + "feature_groups": { + "nesting_mode": "list", + "block": { + "attributes": { + "feature_group_id": { + "type": "string", + "description": "Identifier of the feature group.", + "description_kind": "plain", + "required": true + }, + "feature_ids": { + "type": [ + "list", + "string" + ], + "description": "Identifiers of features under the feature group.", + "description_kind": "plain", + "required": true + } + }, + "description": "List of features that need to be synced to Online Store.", + "description_kind": "plain" + }, + "min_items": 1 + } + }, + "description": "Configures the features from a Feature Registry source that need to be loaded onto the FeatureOnlineStore.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "sync_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "cron": { + "type": "string", + "description": "Cron schedule (https://en.wikipedia.org/wiki/Cron) to launch scheduled runs.\nTo explicitly set a timezone to the cron tab, apply a prefix in the cron tab: \"CRON_TZ=${IANA_TIME_ZONE}\" or \"TZ=${IANA_TIME_ZONE}\".", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "description": "Configures when data is to be 
synced/updated for this FeatureView. At the end of the sync the latest featureValues for each entityId of this FeatureView are made ready for online serving.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, + "google_vertex_ai_featurestore": { + "version": 0, + "block": { + "attributes": { + "create_time": { + "type": "string", + "description": "The timestamp of when the featurestore was created in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits.", + "description_kind": "plain", + "computed": true + }, + "effective_labels": { + "type": [ + "map", + "string" + ], + "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, + "etag": { + "type": "string", + "description": "Used to perform consistent read-modify-write updates.", + "description_kind": "plain", + "computed": true + }, + "force_destroy": { + "type": "bool", + "description": "If set to true, any EntityTypes and Features for this Featurestore will also be deleted", + "description_kind": "plain", + "optional": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "labels": { + "type": [ + "map", + "string" + ], + "description": "A set of key/value label pairs to assign to this Featurestore.\n\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the 
field 'effective_labels' for all of the labels present on the resource.", + "description_kind": "plain", + "optional": true + }, + "name": { + "type": "string", + "description": "The name of the Featurestore. This value may be up to 60 characters, and valid characters are [a-z0-9_]. The first character cannot be a number.", + "description_kind": "plain", + "optional": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "region": { + "type": "string", + "description": "The region of the dataset. eg us-central1", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "terraform_labels": { + "type": [ + "map", + "string" + ], + "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", + "description_kind": "plain", + "computed": true + }, + "update_time": { + "type": "string", + "description": "The timestamp of when the featurestore was last updated in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits.", + "description_kind": "plain", + "computed": true + } + }, + "block_types": { + "encryption_spec": { + "nesting_mode": "list", + "block": { + "attributes": { + "kms_key_name": { + "type": "string", + "description": "The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. Has the form: projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key. 
The key needs to be in the same region as where the compute resource is created.", + "description_kind": "plain", + "required": true + } + }, + "description": "If set, both of the online and offline data storage will be secured by this key.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "online_serving_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "fixed_node_count": { + "type": "number", + "description": "The number of nodes for each cluster. The number of nodes will not scale automatically but can be scaled manually by providing different values when updating.", + "description_kind": "plain", + "optional": true + } + }, + "block_types": { + "scaling": { + "nesting_mode": "list", + "block": { + "attributes": { + "max_node_count": { + "type": "number", + "description": "The maximum number of nodes to scale up to. Must be greater than minNodeCount, and less than or equal to 10 times of 'minNodeCount'.", + "description_kind": "plain", + "required": true + }, + "min_node_count": { + "type": "number", + "description": "The minimum number of nodes to scale down to. Must be greater than or equal to 1.", + "description_kind": "plain", + "required": true + } + }, + "description": "Online serving scaling configuration. Only one of fixedNodeCount and scaling can be set. 
Setting one will reset the other.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "Config for online serving resources.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "timeouts": { + "nesting_mode": "single", + "block": { + "attributes": { + "create": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "delete": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "update": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "description_kind": "plain" + } + }, + "google_vertex_ai_featurestore_entitytype": { + "version": 0, + "block": { + "attributes": { + "create_time": { + "type": "string", + "description": "The timestamp of when the featurestore was created in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits.", + "description_kind": "plain", + "computed": true + }, + "description": { + "type": "string", + "description": "Optional. 
Description of the EntityType.", + "description_kind": "plain", + "optional": true + }, + "effective_labels": { + "type": [ + "map", + "string" + ], + "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, + "etag": { + "type": "string", + "description": "Used to perform consistent read-modify-write updates.", + "description_kind": "plain", + "computed": true + }, + "featurestore": { + "type": "string", + "description": "The name of the Featurestore to use, in the format projects/{project}/locations/{location}/featurestores/{featurestore}.", + "description_kind": "plain", + "required": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "labels": { + "type": [ + "map", + "string" + ], + "description": "A set of key/value label pairs to assign to this EntityType.\n\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", + "description_kind": "plain", + "optional": true + }, + "name": { + "type": "string", + "description": "The name of the EntityType. This value may be up to 60 characters, and valid characters are [a-z0-9_]. 
The first character cannot be a number.", + "description_kind": "plain", + "optional": true + }, + "region": { + "type": "string", + "description": "The region of the EntityType.", + "description_kind": "plain", + "computed": true + }, + "terraform_labels": { + "type": [ + "map", + "string" + ], + "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", + "description_kind": "plain", + "computed": true + }, + "update_time": { + "type": "string", + "description": "The timestamp of when the featurestore was last updated in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits.", + "description_kind": "plain", + "computed": true + } + }, + "block_types": { + "monitoring_config": { + "nesting_mode": "list", + "block": { + "block_types": { + "categorical_threshold_config": { + "nesting_mode": "list", + "block": { + "attributes": { + "value": { + "type": "number", + "description": "Specify a threshold value that can trigger the alert. For categorical feature, the distribution distance is calculated by L-inifinity norm. Each feature must have a non-zero threshold if they need to be monitored. Otherwise no alert will be triggered for that feature. The default value is 0.3.", + "description_kind": "plain", + "required": true + } + }, + "description": "Threshold for categorical features of anomaly detection. This is shared by all types of Featurestore Monitoring for categorical features (i.e. Features with type (Feature.ValueType) BOOL or STRING).", + "description_kind": "plain" + }, + "max_items": 1 + }, + "import_features_analysis": { + "nesting_mode": "list", + "block": { + "attributes": { + "anomaly_detection_baseline": { + "type": "string", + "description": "Defines the baseline to do anomaly detection for feature values imported by each [entityTypes.importFeatureValues][] operation. 
The value must be one of the values below:\n* LATEST_STATS: Choose the later one statistics generated by either most recent snapshot analysis or previous import features analysis. If non of them exists, skip anomaly detection and only generate a statistics.\n* MOST_RECENT_SNAPSHOT_STATS: Use the statistics generated by the most recent snapshot analysis if exists.\n* PREVIOUS_IMPORT_FEATURES_STATS: Use the statistics generated by the previous import features analysis if exists.", + "description_kind": "plain", + "optional": true + }, + "state": { + "type": "string", + "description": "Whether to enable / disable / inherite default hebavior for import features analysis. The value must be one of the values below:\n* DEFAULT: The default behavior of whether to enable the monitoring. EntityType-level config: disabled.\n* ENABLED: Explicitly enables import features analysis. EntityType-level config: by default enables import features analysis for all Features under it.\n* DISABLED: Explicitly disables import features analysis. EntityType-level config: by default disables import features analysis for all Features under it.", + "description_kind": "plain", + "optional": true } }, "description": "The config for ImportFeatures Analysis Based Feature Monitoring.", @@ -151074,7 +166345,7 @@ "type": "string", "description": "Allows inserting, updating or deleting the contents of the Matching Engine Index.\nThe string must be a valid Cloud Storage directory path. 
If this\nfield is set when calling IndexService.UpdateIndex, then no other\nIndex field can be also updated as part of the same call.\nThe expected structure and format of the files this URI points to is\ndescribed at https://cloud.google.com/vertex-ai/docs/matching-engine/using-matching-engine#input-data-format", "description_kind": "plain", - "required": true + "optional": true }, "is_complete_overwrite": { "type": "bool", @@ -151768,6 +167039,131 @@ } }, "block_types": { + "autoscaling_settings": { + "nesting_mode": "list", + "block": { + "attributes": { + "cool_down_period": { + "type": "string", + "description": "The minimum duration between consecutive autoscale operations.\nIt starts once addition or removal of nodes is fully completed.\nMinimum cool down period is 30m.\nCool down period must be in whole minutes (for example, 30m, 31m, 50m).\nMandatory for successful addition of autoscaling settings in cluster.", + "description_kind": "plain", + "optional": true + }, + "max_cluster_node_count": { + "type": "number", + "description": "Maximum number of nodes of any type in a cluster.\nMandatory for successful addition of autoscaling settings in cluster.", + "description_kind": "plain", + "optional": true + }, + "min_cluster_node_count": { + "type": "number", + "description": "Minimum number of nodes of any type in a cluster.\nMandatory for successful addition of autoscaling settings in cluster.", + "description_kind": "plain", + "optional": true + } + }, + "block_types": { + "autoscaling_policies": { + "nesting_mode": "set", + "block": { + "attributes": { + "autoscale_policy_id": { + "type": "string", + "description_kind": "plain", + "required": true + }, + "node_type_id": { + "type": "string", + "description": "The canonical identifier of the node type to add or remove.", + "description_kind": "plain", + "required": true + }, + "scale_out_size": { + "type": "number", + "description": "Number of nodes to add to a cluster during a scale-out operation.\nMust be 
divisible by 2 for stretched clusters.", + "description_kind": "plain", + "required": true + } + }, + "block_types": { + "consumed_memory_thresholds": { + "nesting_mode": "list", + "block": { + "attributes": { + "scale_in": { + "type": "number", + "description": "The utilization triggering the scale-in operation in percent.", + "description_kind": "plain", + "required": true + }, + "scale_out": { + "type": "number", + "description": "The utilization triggering the scale-out operation in percent.", + "description_kind": "plain", + "required": true + } + }, + "description": "Utilization thresholds pertaining to amount of consumed memory.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "cpu_thresholds": { + "nesting_mode": "list", + "block": { + "attributes": { + "scale_in": { + "type": "number", + "description": "The utilization triggering the scale-in operation in percent.", + "description_kind": "plain", + "required": true + }, + "scale_out": { + "type": "number", + "description": "The utilization triggering the scale-out operation in percent.", + "description_kind": "plain", + "required": true + } + }, + "description": "Utilization thresholds pertaining to CPU utilization.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "storage_thresholds": { + "nesting_mode": "list", + "block": { + "attributes": { + "scale_in": { + "type": "number", + "description": "The utilization triggering the scale-in operation in percent.", + "description_kind": "plain", + "required": true + }, + "scale_out": { + "type": "number", + "description": "The utilization triggering the scale-out operation in percent.", + "description_kind": "plain", + "required": true + } + }, + "description": "Utilization thresholds pertaining to amount of consumed storage.", + "description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "The map with autoscaling policies applied to the cluster.\nThe key is the identifier of the policy.\nIt must meet the following 
requirements:\n * Only contains 1-63 alphanumeric characters and hyphens\n * Begins with an alphabetical character\n * Ends with a non-hyphen character\n * Not formatted as a UUID\n * Complies with [RFC 1034](https://datatracker.ietf.org/doc/html/rfc1034) (section 3.5)\n\nCurrently the map must contain only one element\nthat describes the autoscaling policy for compute nodes.", + "description_kind": "plain" + }, + "min_items": 1 + } + }, + "description": "Configuration of the autoscaling applied to this cluster", + "description_kind": "plain" + }, + "max_items": 1 + }, "node_type_configs": { "nesting_mode": "set", "block": { @@ -152578,6 +167974,131 @@ } }, "block_types": { + "autoscaling_settings": { + "nesting_mode": "list", + "block": { + "attributes": { + "cool_down_period": { + "type": "string", + "description": "The minimum duration between consecutive autoscale operations.\nIt starts once addition or removal of nodes is fully completed.\nMinimum cool down period is 30m.\nCool down period must be in whole minutes (for example, 30m, 31m, 50m).\nMandatory for successful addition of autoscaling settings in cluster.", + "description_kind": "plain", + "optional": true + }, + "max_cluster_node_count": { + "type": "number", + "description": "Maximum number of nodes of any type in a cluster.\nMandatory for successful addition of autoscaling settings in cluster.", + "description_kind": "plain", + "optional": true + }, + "min_cluster_node_count": { + "type": "number", + "description": "Minimum number of nodes of any type in a cluster.\nMandatory for successful addition of autoscaling settings in cluster.", + "description_kind": "plain", + "optional": true + } + }, + "block_types": { + "autoscaling_policies": { + "nesting_mode": "set", + "block": { + "attributes": { + "autoscale_policy_id": { + "type": "string", + "description_kind": "plain", + "required": true + }, + "node_type_id": { + "type": "string", + "description": "The canonical identifier of the node type to 
add or remove.", + "description_kind": "plain", + "required": true + }, + "scale_out_size": { + "type": "number", + "description": "Number of nodes to add to a cluster during a scale-out operation.\nMust be divisible by 2 for stretched clusters.", + "description_kind": "plain", + "required": true + } + }, + "block_types": { + "consumed_memory_thresholds": { + "nesting_mode": "list", + "block": { + "attributes": { + "scale_in": { + "type": "number", + "description": "The utilization triggering the scale-in operation in percent.", + "description_kind": "plain", + "required": true + }, + "scale_out": { + "type": "number", + "description": "The utilization triggering the scale-out operation in percent.", + "description_kind": "plain", + "required": true + } + }, + "description": "Utilization thresholds pertaining to amount of consumed memory.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "cpu_thresholds": { + "nesting_mode": "list", + "block": { + "attributes": { + "scale_in": { + "type": "number", + "description": "The utilization triggering the scale-in operation in percent.", + "description_kind": "plain", + "required": true + }, + "scale_out": { + "type": "number", + "description": "The utilization triggering the scale-out operation in percent.", + "description_kind": "plain", + "required": true + } + }, + "description": "Utilization thresholds pertaining to CPU utilization.", + "description_kind": "plain" + }, + "max_items": 1 + }, + "storage_thresholds": { + "nesting_mode": "list", + "block": { + "attributes": { + "scale_in": { + "type": "number", + "description": "The utilization triggering the scale-in operation in percent.", + "description_kind": "plain", + "required": true + }, + "scale_out": { + "type": "number", + "description": "The utilization triggering the scale-out operation in percent.", + "description_kind": "plain", + "required": true + } + }, + "description": "Utilization thresholds pertaining to amount of consumed storage.", + 
"description_kind": "plain" + }, + "max_items": 1 + } + }, + "description": "The map with autoscaling policies applied to the cluster.\nThe key is the identifier of the policy.\nIt must meet the following requirements:\n * Only contains 1-63 alphanumeric characters and hyphens\n * Begins with an alphabetical character\n * Ends with a non-hyphen character\n * Not formatted as a UUID\n * Complies with [RFC 1034](https://datatracker.ietf.org/doc/html/rfc1034) (section 3.5)\n\nCurrently the map must contain only one element\nthat describes the autoscaling policy for compute nodes.", + "description_kind": "plain" + }, + "min_items": 1 + } + }, + "description": "Configuration of the autoscaling applied to this cluster\nPrivate cloud must have a minimum of 3 nodes to add autoscale settings", + "description_kind": "plain" + }, + "max_items": 1 + }, "node_type_configs": { "nesting_mode": "set", "block": { @@ -152867,9 +168388,10 @@ }, "max_throughput": { "type": "number", - "description": "Maximum throughput of the connector in Mbps, must be greater than 'min_throughput'. Default is 300. Refers to the expected throughput\nwhen using an e2-micro machine type. Value must be a multiple of 100 from 300 through 1000. Must be higher than the value specified by\nmin_throughput. If both max_throughput and max_instances are provided, max_instances takes precedence over max_throughput. The use of\nmax_throughput is discouraged in favor of max_instances.", + "description": "Maximum throughput of the connector in Mbps, must be greater than 'min_throughput'. Default is 300. Refers to the expected throughput\nwhen using an e2-micro machine type. Value must be a multiple of 100 from 300 through 1000. Must be higher than the value specified by\nmin_throughput. Only one of 'max_throughput' and 'max_instances' can be specified. 
The use of max_throughput is discouraged in favor of max_instances.", "description_kind": "plain", - "optional": true + "optional": true, + "computed": true }, "min_instances": { "type": "number", @@ -152880,9 +168402,10 @@ }, "min_throughput": { "type": "number", - "description": "Minimum throughput of the connector in Mbps. Default and min is 200. Refers to the expected throughput when using an e2-micro machine type.\nValue must be a multiple of 100 from 200 through 900. Must be lower than the value specified by max_throughput. If both min_throughput and\nmin_instances are provided, min_instances takes precedence over min_throughput. The use of min_throughput is discouraged in favor of min_instances.", + "description": "Minimum throughput of the connector in Mbps. Default and min is 200. Refers to the expected throughput when using an e2-micro machine type.\nValue must be a multiple of 100 from 200 through 900. Must be lower than the value specified by max_throughput.\nOnly one of 'min_throughput' and 'min_instances' can be specified. The use of min_throughput is discouraged in favor of min_instances.", "description_kind": "plain", - "optional": true + "optional": true, + "computed": true }, "name": { "type": "string", @@ -153041,7 +168564,7 @@ "list", "string" ], - "description": "'Optional. Input only. The owner of this instance after creation. Format:\n'alias@example.com' Currently supports one owner only. If not specified, all of\nthe service account users of your VM instance''s service account can use the instance.'", + "description": "'Optional. Input only. The owner of this instance after creation. Format:\n'alias@example.com' Currently supports one owner only. If not specified, all of\nthe service account users of your VM instance''s service account can use the instance.\nIf specified, sets the access mode to 'Single user'. 
For more details, see\nhttps://cloud.google.com/vertex-ai/docs/workbench/instances/manage-access-jupyterlab'", "description_kind": "plain", "optional": true }, @@ -153655,6 +169178,12 @@ "description_kind": "plain", "optional": true }, + "deletion_protection": { + "type": "bool", + "description": "Whether Terraform will be prevented from destroying the workflow. Defaults to true.\nWhen a'terraform destroy' or 'terraform apply' would delete the workflow,\nthe command will fail if this field is not set to false in Terraform state.\nWhen the field is set to true or unset in Terraform state, a 'terraform apply'\nor 'terraform destroy' that would delete the workflow will fail.\nWhen the field is set to false, deleting the workflow is allowed.", + "description_kind": "plain", + "optional": true + }, "description": { "type": "string", "description": "Description of the workflow provided by the user. Must be at most 1000 unicode characters long.", @@ -153878,6 +169407,43 @@ "description_kind": "plain" } }, + "google_access_context_manager_access_policy": { + "version": 0, + "block": { + "attributes": { + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "name": { + "type": "string", + "description_kind": "plain", + "computed": true + }, + "parent": { + "type": "string", + "description_kind": "plain", + "required": true + }, + "scopes": { + "type": [ + "list", + "string" + ], + "description_kind": "plain", + "optional": true + }, + "title": { + "type": "string", + "description_kind": "plain", + "computed": true + } + }, + "description_kind": "plain" + } + }, "google_access_context_manager_access_policy_iam_policy": { "version": 0, "block": { @@ -154648,7 +170214,7 @@ }, "location": { "type": "string", - "description": "The name of the location this repository is located in.", + "description": "The name of the repository's location. 
In addition to specific regions,\nspecial values for multi-region locations are 'asia', 'europe', and 'us'.\nSee [here](https://cloud.google.com/artifact-registry/docs/repositories/repo-locations),\nor use the\n[google_artifact_registry_locations](https://registry.terraform.io/providers/hashicorp/google/latest/docs/data-sources/artifact_registry_locations)\ndata source for possible values.", "description_kind": "plain", "required": true }, @@ -154708,6 +170274,15 @@ } ] ], + "common_repository": [ + "list", + [ + "object", + { + "uri": "string" + } + ] + ], "description": "string", "disable_upstream_validation": "bool", "docker_repository": [ @@ -154869,351 +170444,20 @@ "description": "Configuration specific for a Virtual Repository.", "description_kind": "plain", "computed": true - } - }, - "description_kind": "plain" - } - }, - "google_artifact_registry_repository_iam_policy": { - "version": 0, - "block": { - "attributes": { - "etag": { - "type": "string", - "description_kind": "plain", - "computed": true - }, - "id": { - "type": "string", - "description_kind": "plain", - "optional": true, - "computed": true - }, - "location": { - "type": "string", - "description_kind": "plain", - "optional": true, - "computed": true - }, - "policy_data": { - "type": "string", - "description_kind": "plain", - "computed": true - }, - "project": { - "type": "string", - "description_kind": "plain", - "optional": true, - "computed": true - }, - "repository": { - "type": "string", - "description_kind": "plain", - "required": true - } - }, - "description_kind": "plain" - } - }, - "google_beyondcorp_app_connection": { - "version": 0, - "block": { - "attributes": { - "application_endpoint": { - "type": [ - "list", - [ - "object", - { - "host": "string", - "port": "number" - } - ] - ], - "description": "Address of the remote application endpoint for the BeyondCorp AppConnection.", - "description_kind": "plain", - "computed": true - }, - "connectors": { - "type": [ - "list", - "string" - 
], - "description": "List of AppConnectors that are authorised to be associated with this AppConnection", - "description_kind": "plain", - "computed": true - }, - "display_name": { - "type": "string", - "description": "An arbitrary user-provided name for the AppConnection.", - "description_kind": "plain", - "computed": true - }, - "effective_labels": { - "type": [ - "map", - "string" - ], - "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", - "description_kind": "plain", - "computed": true - }, - "gateway": { - "type": [ - "list", - [ - "object", - { - "app_gateway": "string", - "ingress_port": "number", - "type": "string", - "uri": "string" - } - ] - ], - "description": "Gateway used by the AppConnection.", - "description_kind": "plain", - "computed": true - }, - "id": { - "type": "string", - "description_kind": "plain", - "optional": true, - "computed": true - }, - "labels": { - "type": [ - "map", - "string" - ], - "description": "Resource labels to represent user provided metadata.\n\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", - "description_kind": "plain", - "computed": true - }, - "name": { - "type": "string", - "description": "ID of the AppConnection.", - "description_kind": "plain", - "required": true - }, - "project": { - "type": "string", - "description_kind": "plain", - "optional": true - }, - "region": { - "type": "string", - "description": "The region of the AppConnection.", - "description_kind": "plain", - "optional": true }, - "terraform_labels": { - "type": [ - "map", - "string" - ], - "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", - "description_kind": "plain", - "computed": true - }, - "type": { - 
"type": "string", - "description": "The type of network connectivity used by the AppConnection. Refer\nto https://cloud.google.com/beyondcorp/docs/reference/rest/v1/projects.locations.appConnections#type\nfor a list of possible values.", - "description_kind": "plain", - "computed": true - } - }, - "description_kind": "plain" - } - }, - "google_beyondcorp_app_connector": { - "version": 0, - "block": { - "attributes": { - "display_name": { - "type": "string", - "description": "An arbitrary user-provided name for the AppConnector.", - "description_kind": "plain", - "computed": true - }, - "effective_labels": { - "type": [ - "map", - "string" - ], - "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", - "description_kind": "plain", - "computed": true - }, - "id": { - "type": "string", - "description_kind": "plain", - "optional": true, - "computed": true - }, - "labels": { - "type": [ - "map", - "string" - ], - "description": "Resource labels to represent user provided metadata.\n\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", - "description_kind": "plain", - "computed": true - }, - "name": { - "type": "string", - "description": "ID of the AppConnector.", - "description_kind": "plain", - "required": true - }, - "principal_info": { + "vulnerability_scanning_config": { "type": [ "list", [ "object", { - "service_account": [ - "list", - [ - "object", - { - "email": "string" - } - ] - ] + "enablement_config": "string", + "enablement_state": "string", + "enablement_state_reason": "string" } ] ], - "description": "Principal information about the Identity of the AppConnector.", - "description_kind": "plain", - "computed": true - }, - "project": { - "type": "string", - "description_kind": "plain", - "optional": true - 
}, - "region": { - "type": "string", - "description": "The region of the AppConnector.", - "description_kind": "plain", - "optional": true - }, - "state": { - "type": "string", - "description": "Represents the different states of a AppConnector.", - "description_kind": "plain", - "computed": true - }, - "terraform_labels": { - "type": [ - "map", - "string" - ], - "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", - "description_kind": "plain", - "computed": true - } - }, - "description_kind": "plain" - } - }, - "google_beyondcorp_app_gateway": { - "version": 1, - "block": { - "attributes": { - "allocated_connections": { - "type": [ - "list", - [ - "object", - { - "ingress_port": "number", - "psc_uri": "string" - } - ] - ], - "description": "A list of connections allocated for the Gateway.", - "description_kind": "plain", - "computed": true - }, - "display_name": { - "type": "string", - "description": "An arbitrary user-provided name for the AppGateway.", - "description_kind": "plain", - "computed": true - }, - "effective_labels": { - "type": [ - "map", - "string" - ], - "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", - "description_kind": "plain", - "computed": true - }, - "host_type": { - "type": "string", - "description": "The type of hosting used by the AppGateway. 
Default value: \"HOST_TYPE_UNSPECIFIED\" Possible values: [\"HOST_TYPE_UNSPECIFIED\", \"GCP_REGIONAL_MIG\"]", - "description_kind": "plain", - "computed": true - }, - "id": { - "type": "string", - "description_kind": "plain", - "optional": true, - "computed": true - }, - "labels": { - "type": [ - "map", - "string" - ], - "description": "Resource labels to represent user provided metadata.\n\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", - "description_kind": "plain", - "computed": true - }, - "name": { - "type": "string", - "description": "ID of the AppGateway.", - "description_kind": "plain", - "required": true - }, - "project": { - "type": "string", - "description_kind": "plain", - "optional": true - }, - "region": { - "type": "string", - "description": "The region of the AppGateway.", - "description_kind": "plain", - "optional": true - }, - "state": { - "type": "string", - "description": "Represents the different states of a AppGateway.", - "description_kind": "plain", - "computed": true - }, - "terraform_labels": { - "type": [ - "map", - "string" - ], - "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", - "description_kind": "plain", - "computed": true - }, - "type": { - "type": "string", - "description": "The type of network connectivity used by the AppGateway. 
Default value: \"TYPE_UNSPECIFIED\" Possible values: [\"TYPE_UNSPECIFIED\", \"TCP_PROXY\"]", - "description_kind": "plain", - "computed": true - }, - "uri": { - "type": "string", - "description": "Server-defined URI for this resource.", + "description": "Configuration for vulnerability scanning of artifacts stored in this repository.", "description_kind": "plain", "computed": true } @@ -155221,15 +170465,799 @@ "description_kind": "plain" } }, - "google_bigquery_analytics_hub_data_exchange_iam_policy": { + "google_artifact_registry_repository_iam_policy": { "version": 0, "block": { "attributes": { - "data_exchange_id": { - "type": "string", - "description_kind": "plain", - "required": true - }, + "etag": { + "type": "string", + "description_kind": "plain", + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "location": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "policy_data": { + "type": "string", + "description_kind": "plain", + "computed": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "repository": { + "type": "string", + "description_kind": "plain", + "required": true + } + }, + "description_kind": "plain" + } + }, + "google_backup_dr_backup": { + "version": 0, + "block": { + "attributes": { + "backup_vault_id": { + "type": "string", + "description_kind": "plain", + "required": true + }, + "backups": { + "type": [ + "list", + [ + "object", + { + "backup_id": "string", + "backup_vault_id": "string", + "data_source_id": "string", + "location": "string", + "name": "string" + } + ] + ], + "description": "List of all backups under data source.", + "description_kind": "plain", + "computed": true + }, + "data_source_id": { + "type": "string", + "description_kind": "plain", + "required": true + }, + "id": { + "type": "string", + "description_kind": "plain", + 
"optional": true, + "computed": true + }, + "location": { + "type": "string", + "description_kind": "plain", + "required": true + }, + "name": { + "type": "string", + "description": "Name of resource", + "description_kind": "plain", + "computed": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "required": true + } + }, + "description_kind": "plain" + } + }, + "google_backup_dr_backup_vault": { + "version": 0, + "block": { + "attributes": { + "access_restriction": { + "type": "string", + "description": "Access restriction for the backup vault. Default value is 'WITHIN_ORGANIZATION' if not provided during creation. Default value: \"WITHIN_ORGANIZATION\" Possible values: [\"ACCESS_RESTRICTION_UNSPECIFIED\", \"WITHIN_PROJECT\", \"WITHIN_ORGANIZATION\", \"UNRESTRICTED\", \"WITHIN_ORG_BUT_UNRESTRICTED_FOR_BA\"]", + "description_kind": "plain", + "computed": true + }, + "allow_missing": { + "type": "bool", + "description": "Allow idempotent deletion of backup vault. The request will still succeed in case the backup vault does not exist.", + "description_kind": "plain", + "computed": true + }, + "annotations": { + "type": [ + "map", + "string" + ], + "description": "Optional. User annotations. See https://google.aip.dev/128#annotations\nStores small amounts of arbitrary data. \n\n**Note**: This field is non-authoritative, and will only manage the annotations present in your configuration.\nPlease refer to the field 'effective_annotations' for all of the annotations present on the resource.", + "description_kind": "plain", + "computed": true + }, + "backup_count": { + "type": "string", + "description": "Output only. The number of backups in this backup vault.", + "description_kind": "plain", + "computed": true + }, + "backup_minimum_enforced_retention_duration": { + "type": "string", + "description": "Required. The default and minimum enforced retention for each backup within the backup vault. 
The enforced retention for each backup can be extended.", + "description_kind": "plain", + "computed": true + }, + "backup_vault_id": { + "type": "string", + "description": "Required. ID of the requesting object.", + "description_kind": "plain", + "required": true + }, + "create_time": { + "type": "string", + "description": "Output only. The time when the instance was created.", + "description_kind": "plain", + "computed": true + }, + "deletable": { + "type": "bool", + "description": "Output only. Set to true when there are no backups nested under this resource.", + "description_kind": "plain", + "computed": true + }, + "description": { + "type": "string", + "description": "Optional. The description of the BackupVault instance (2048 characters or less).", + "description_kind": "plain", + "computed": true + }, + "effective_annotations": { + "type": [ + "map", + "string" + ], + "description": "All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, + "effective_labels": { + "type": [ + "map", + "string" + ], + "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, + "effective_time": { + "type": "string", + "description": "Optional. Time after which the BackupVault resource is locked.", + "description_kind": "plain", + "computed": true + }, + "etag": { + "type": "string", + "description": "Optional. 
Server specified ETag for the backup vault resource to prevent simultaneous updates from overwiting each other.", + "description_kind": "plain", + "computed": true + }, + "force_delete": { + "type": "bool", + "description": "If set, the following restrictions against deletion of the backup vault instance can be overridden:\n * deletion of a backup vault instance containing no backups, but still containing empty datasources.\n * deletion of a backup vault instance that is being referenced by an active backup plan.", + "description_kind": "plain", + "computed": true + }, + "force_update": { + "type": "bool", + "description": "If set, allow update to extend the minimum enforced retention for backup vault. This overrides\n the restriction against conflicting retention periods. This conflict may occur when the\n expiration schedule defined by the associated backup plan is shorter than the minimum\n retention set by the backup vault.", + "description_kind": "plain", + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "ignore_backup_plan_references": { + "type": "bool", + "description": "If set, the following restrictions against deletion of the backup vault instance can be overridden:\n * deletion of a backup vault instance that is being referenced by an active backup plan.", + "description_kind": "plain", + "computed": true + }, + "ignore_inactive_datasources": { + "type": "bool", + "description": "If set, the following restrictions against deletion of the backup vault instance can be overridden:\n * deletion of a backup vault instance containing no backups, but still containing empty datasources.", + "description_kind": "plain", + "computed": true + }, + "labels": { + "type": [ + "map", + "string" + ], + "description": "Optional. Resource labels to represent user provided metadata. 
\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", + "description_kind": "plain", + "computed": true + }, + "location": { + "type": "string", + "description": "The GCP location for the backup vault.", + "description_kind": "plain", + "required": true + }, + "name": { + "type": "string", + "description": "Output only. Identifier. The resource name.", + "description_kind": "plain", + "computed": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "service_account": { + "type": "string", + "description": "Output only. Service account used by the BackupVault Service for this BackupVault. The user should grant this account permissions in their workload project to enable the service to run backups and restores there.", + "description_kind": "plain", + "computed": true + }, + "state": { + "type": "string", + "description": "Output only. The BackupVault resource instance state. \n Possible values:\n STATE_UNSPECIFIED\n CREATING\n ACTIVE\n DELETING\n ERROR", + "description_kind": "plain", + "computed": true + }, + "terraform_labels": { + "type": [ + "map", + "string" + ], + "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", + "description_kind": "plain", + "computed": true + }, + "total_stored_bytes": { + "type": "string", + "description": "Output only. Total size of the storage used by all backup resources.", + "description_kind": "plain", + "computed": true + }, + "uid": { + "type": "string", + "description": "Output only. Output only Immutable after resource creation until resource deletion.", + "description_kind": "plain", + "computed": true + }, + "update_time": { + "type": "string", + "description": "Output only. 
The time when the instance was updated.", + "description_kind": "plain", + "computed": true + } + }, + "description_kind": "plain" + } + }, + "google_backup_dr_data_source": { + "version": 0, + "block": { + "attributes": { + "backup_config_info": { + "type": [ + "list", + [ + "object", + { + "backup_appliance_backup_config": [ + "list", + [ + "object", + { + "application_name": "string", + "backup_appliance_id": "string", + "backup_appliance_name": "string", + "host_name": "string", + "sla_id": "string", + "slp_name": "string", + "slt_name": "string" + } + ] + ], + "gcp_backup_config": [ + "list", + [ + "object", + { + "backup_plan": "string", + "backup_plan_association": "string", + "backup_plan_description": "string", + "backup_plan_rules": [ + "list", + "string" + ] + } + ] + ], + "last_backup_error": [ + "map", + "string" + ], + "last_backup_state": "string", + "last_successful_backup_consistency_time": "string" + } + ] + ], + "description": "Details of how the resource is configured for backup.", + "description_kind": "plain", + "computed": true + }, + "backup_count": { + "type": "string", + "description": "Number of backups in the data source.", + "description_kind": "plain", + "computed": true + }, + "backup_vault_id": { + "type": "string", + "description_kind": "plain", + "required": true + }, + "config_state": { + "type": "string", + "description": "The backup configuration state.", + "description_kind": "plain", + "computed": true + }, + "create_time": { + "type": "string", + "description": "The time when the instance was created.", + "description_kind": "plain", + "computed": true + }, + "data_source_backup_appliance_application": { + "type": [ + "list", + [ + "object", + { + "appliance_id": "string", + "application_id": "string", + "application_name": "string", + "backup_appliance": "string", + "host_id": "string", + "hostname": "string", + "type": "string" + } + ] + ], + "description": "The backed up resource is a backup appliance application.", + 
"description_kind": "plain", + "computed": true + }, + "data_source_gcp_resource": { + "type": [ + "list", + [ + "object", + { + "compute_instance_data_source_properties": [ + "list", + [ + "object", + { + "description": "string", + "machine_type": "string", + "name": "string", + "total_disk_count": "string", + "total_disk_size_gb": "string" + } + ] + ], + "gcp_resourcename": "string", + "location": "string", + "type": "string" + } + ] + ], + "description": "The backed up resource is a Google Cloud resource.\n\t\t\tThe word 'DataSource' was included in the names to indicate that this is\n\t\t\tthe representation of the Google Cloud resource used within the\n\t\t\tDataSource object.", + "description_kind": "plain", + "computed": true + }, + "data_source_id": { + "type": "string", + "description_kind": "plain", + "required": true + }, + "etag": { + "type": "string", + "description": "Server specified ETag for the ManagementServer resource to prevent simultaneous updates from overwiting each other.", + "description_kind": "plain", + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "labels": { + "type": [ + "map", + "string" + ], + "description": "Resource labels to represent user provided metadata.", + "description_kind": "plain", + "computed": true + }, + "location": { + "type": "string", + "description_kind": "plain", + "required": true + }, + "name": { + "type": "string", + "description": "Name of the datasource to create.\n\t\t\tIt must have the format \"projects/{project}/locations/{location}/backupVaults/{backupvault}/dataSources/{datasource}\".\n\t\t\t'{datasource}' cannot be changed after creation. 
It must be between 3-63 characters long and must be unique within the backup vault.", + "description_kind": "plain", + "computed": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "required": true + }, + "state": { + "type": "string", + "description": "The DataSource resource instance state.", + "description_kind": "plain", + "computed": true + }, + "total_stored_bytes": { + "type": "string", + "description": "The number of bytes (metadata and data) stored in this datasource.", + "description_kind": "plain", + "computed": true + }, + "update_time": { + "type": "string", + "description": "The time when the instance was updated.", + "description_kind": "plain", + "computed": true + } + }, + "description_kind": "plain" + } + }, + "google_beyondcorp_app_connection": { + "version": 0, + "block": { + "attributes": { + "application_endpoint": { + "type": [ + "list", + [ + "object", + { + "host": "string", + "port": "number" + } + ] + ], + "description": "Address of the remote application endpoint for the BeyondCorp AppConnection.", + "description_kind": "plain", + "computed": true + }, + "connectors": { + "type": [ + "list", + "string" + ], + "description": "List of AppConnectors that are authorised to be associated with this AppConnection", + "description_kind": "plain", + "computed": true + }, + "display_name": { + "type": "string", + "description": "An arbitrary user-provided name for the AppConnection.", + "description_kind": "plain", + "computed": true + }, + "effective_labels": { + "type": [ + "map", + "string" + ], + "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, + "gateway": { + "type": [ + "list", + [ + "object", + { + "app_gateway": "string", + "ingress_port": "number", + "type": "string", + "uri": "string" + } + ] + ], + "description": "Gateway used by the 
AppConnection.", + "description_kind": "plain", + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "labels": { + "type": [ + "map", + "string" + ], + "description": "Resource labels to represent user provided metadata.\n\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", + "description_kind": "plain", + "computed": true + }, + "name": { + "type": "string", + "description": "ID of the AppConnection.", + "description_kind": "plain", + "required": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "region": { + "type": "string", + "description": "The region of the AppConnection.", + "description_kind": "plain", + "optional": true + }, + "terraform_labels": { + "type": [ + "map", + "string" + ], + "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", + "description_kind": "plain", + "computed": true + }, + "type": { + "type": "string", + "description": "The type of network connectivity used by the AppConnection. 
Refer\nto https://cloud.google.com/beyondcorp/docs/reference/rest/v1/projects.locations.appConnections#type\nfor a list of possible values.", + "description_kind": "plain", + "computed": true + } + }, + "description_kind": "plain" + } + }, + "google_beyondcorp_app_connector": { + "version": 0, + "block": { + "attributes": { + "display_name": { + "type": "string", + "description": "An arbitrary user-provided name for the AppConnector.", + "description_kind": "plain", + "computed": true + }, + "effective_labels": { + "type": [ + "map", + "string" + ], + "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "labels": { + "type": [ + "map", + "string" + ], + "description": "Resource labels to represent user provided metadata.\n\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", + "description_kind": "plain", + "computed": true + }, + "name": { + "type": "string", + "description": "ID of the AppConnector.", + "description_kind": "plain", + "required": true + }, + "principal_info": { + "type": [ + "list", + [ + "object", + { + "service_account": [ + "list", + [ + "object", + { + "email": "string" + } + ] + ] + } + ] + ], + "description": "Principal information about the Identity of the AppConnector.", + "description_kind": "plain", + "computed": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "region": { + "type": "string", + "description": "The region of the AppConnector.", + "description_kind": "plain", + "optional": true + }, + "state": { + "type": "string", + "description": "Represents the different 
states of a AppConnector.", + "description_kind": "plain", + "computed": true + }, + "terraform_labels": { + "type": [ + "map", + "string" + ], + "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", + "description_kind": "plain", + "computed": true + } + }, + "description_kind": "plain" + } + }, + "google_beyondcorp_app_gateway": { + "version": 1, + "block": { + "attributes": { + "allocated_connections": { + "type": [ + "list", + [ + "object", + { + "ingress_port": "number", + "psc_uri": "string" + } + ] + ], + "description": "A list of connections allocated for the Gateway.", + "description_kind": "plain", + "computed": true + }, + "display_name": { + "type": "string", + "description": "An arbitrary user-provided name for the AppGateway.", + "description_kind": "plain", + "computed": true + }, + "effective_labels": { + "type": [ + "map", + "string" + ], + "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, + "host_type": { + "type": "string", + "description": "The type of hosting used by the AppGateway. 
Default value: \"HOST_TYPE_UNSPECIFIED\" Possible values: [\"HOST_TYPE_UNSPECIFIED\", \"GCP_REGIONAL_MIG\"]", + "description_kind": "plain", + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "labels": { + "type": [ + "map", + "string" + ], + "description": "Resource labels to represent user provided metadata.\n\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", + "description_kind": "plain", + "computed": true + }, + "name": { + "type": "string", + "description": "ID of the AppGateway.", + "description_kind": "plain", + "required": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "region": { + "type": "string", + "description": "The region of the AppGateway.", + "description_kind": "plain", + "optional": true + }, + "state": { + "type": "string", + "description": "Represents the different states of a AppGateway.", + "description_kind": "plain", + "computed": true + }, + "terraform_labels": { + "type": [ + "map", + "string" + ], + "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", + "description_kind": "plain", + "computed": true + }, + "type": { + "type": "string", + "description": "The type of network connectivity used by the AppGateway. 
Default value: \"TYPE_UNSPECIFIED\" Possible values: [\"TYPE_UNSPECIFIED\", \"TCP_PROXY\"]", + "description_kind": "plain", + "computed": true + }, + "uri": { + "type": "string", + "description": "Server-defined URI for this resource.", + "description_kind": "plain", + "computed": true + } + }, + "description_kind": "plain" + } + }, + "google_bigquery_analytics_hub_data_exchange_iam_policy": { + "version": 0, + "block": { + "attributes": { + "data_exchange_id": { + "type": "string", + "description_kind": "plain", + "required": true + }, "etag": { "type": "string", "description_kind": "plain", @@ -155400,6 +171428,18 @@ [ "object", { + "condition": [ + "list", + [ + "object", + { + "description": "string", + "expression": "string", + "location": "string", + "title": "string" + } + ] + ], "dataset": [ "list", [ @@ -155731,6 +171771,49 @@ "description_kind": "plain" } }, + "google_bigquery_tables": { + "version": 0, + "block": { + "attributes": { + "dataset_id": { + "type": "string", + "description": "The ID of the dataset containing the tables.", + "description_kind": "plain", + "required": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "project": { + "type": "string", + "description": "The ID of the project in which the dataset is located. 
If it is not provided, the provider project is used.", + "description_kind": "plain", + "optional": true + }, + "tables": { + "type": [ + "list", + [ + "object", + { + "labels": [ + "map", + "string" + ], + "table_id": "string" + } + ] + ], + "description_kind": "plain", + "computed": true + } + }, + "description_kind": "plain" + } + }, "google_bigtable_instance_iam_policy": { "version": 0, "block": { @@ -156017,6 +172100,103 @@ "description_kind": "plain" } }, + "google_certificate_manager_certificates": { + "version": 0, + "block": { + "attributes": { + "certificates": { + "type": [ + "list", + [ + "object", + { + "description": "string", + "effective_labels": [ + "map", + "string" + ], + "labels": [ + "map", + "string" + ], + "location": "string", + "managed": [ + "list", + [ + "object", + { + "authorization_attempt_info": [ + "list", + [ + "object", + { + "details": "string", + "domain": "string", + "failure_reason": "string", + "state": "string" + } + ] + ], + "dns_authorizations": [ + "list", + "string" + ], + "domains": [ + "list", + "string" + ], + "issuance_config": "string", + "provisioning_issue": [ + "list", + [ + "object", + { + "details": "string", + "reason": "string" + } + ] + ], + "state": "string" + } + ] + ], + "name": "string", + "project": "string", + "san_dnsnames": [ + "list", + "string" + ], + "scope": "string", + "terraform_labels": [ + "map", + "string" + ] + } + ] + ], + "description_kind": "plain", + "computed": true + }, + "filter": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "region": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + }, "google_client_config": { "version": 0, "block": { @@ -157016,7 +173196,42 @@ [ "object", { + "csi": [ + "list", + [ + "object", + { + "driver": "string", + "read_only": "bool", + 
"volume_attributes": [ + "map", + "string" + ] + } + ] + ], + "empty_dir": [ + "list", + [ + "object", + { + "medium": "string", + "size_limit": "string" + } + ] + ], "name": "string", + "nfs": [ + "list", + [ + "object", + { + "path": "string", + "read_only": "bool", + "server": "string" + } + ] + ], "secret": [ "list", [ @@ -157194,6 +173409,12 @@ "description_kind": "plain", "computed": true }, + "deletion_protection": { + "type": "bool", + "description": "Whether Terraform will be prevented from destroying the job. Defaults to true.\nWhen a'terraform destroy' or 'terraform apply' would delete the job,\nthe command will fail if this field is not set to false in Terraform state.\nWhen the field is set to true or unset in Terraform state, a 'terraform apply'\nor 'terraform destroy' that would delete the job will fail.\nWhen the field is set to false, deleting the job is allowed.", + "description_kind": "plain", + "computed": true + }, "effective_annotations": { "type": [ "map", @@ -157226,7 +173447,7 @@ }, "expire_time": { "type": "string", - "description": "For a deleted resource, the time after which it will be permamently deleted.", + "description": "For a deleted resource, the time after which it will be permanently deleted.", "description_kind": "plain", "computed": true }, @@ -157343,7 +173564,7 @@ "string" ], "env": [ - "list", + "set", [ "object", { @@ -157430,7 +173651,38 @@ } ] ], + "empty_dir": [ + "list", + [ + "object", + { + "medium": "string", + "size_limit": "string" + } + ] + ], + "gcs": [ + "list", + [ + "object", + { + "bucket": "string", + "read_only": "bool" + } + ] + ], "name": "string", + "nfs": [ + "list", + [ + "object", + { + "path": "string", + "read_only": "bool", + "server": "string" + } + ] + ], "secret": [ "list", [ @@ -157665,6 +173917,12 @@ "description_kind": "plain", "computed": true }, + "deletion_protection": { + "type": "bool", + "description": "Whether Terraform will be prevented from destroying the service. 
Defaults to true.\nWhen a'terraform destroy' or 'terraform apply' would delete the service,\nthe command will fail if this field is not set to false in Terraform state.\nWhen the field is set to true or unset in Terraform state, a 'terraform apply'\nor 'terraform destroy' that would delete the service will fail.\nWhen the field is set to false, deleting the service is allowed.", + "description_kind": "plain", + "computed": true + }, "description": { "type": "string", "description": "User-provided description of the Service. This field currently has a 512-character limit.", @@ -157697,7 +173955,7 @@ }, "expire_time": { "type": "string", - "description": "For a deleted resource, the time after which it will be permamently deleted.", + "description": "For a deleted resource, the time after which it will be permanently deleted.", "description_kind": "plain", "computed": true }, @@ -157719,6 +173977,12 @@ "description_kind": "plain", "computed": true }, + "invoker_iam_disabled": { + "type": "bool", + "description": "Disables IAM permission check for run.routes.invoke for callers of this service. This feature is available by invitation only. 
For more information, visit https://cloud.google.com/run/docs/securing/managing-access#invoker_check.", + "description_kind": "plain", + "computed": true + }, "labels": { "type": [ "map", @@ -157781,6 +174045,20 @@ "description_kind": "plain", "computed": true }, + "scaling": { + "type": [ + "list", + [ + "object", + { + "min_instance_count": "number" + } + ] + ], + "description": "Scaling settings that apply to the whole service", + "description_kind": "plain", + "computed": true + }, "template": { "type": [ "list", @@ -157809,7 +174087,7 @@ "string" ], "env": [ - "list", + "set", [ "object", { @@ -158016,6 +174294,16 @@ } ] ], + "empty_dir": [ + "list", + [ + "object", + { + "medium": "string", + "size_limit": "string" + } + ] + ], "gcs": [ "list", [ @@ -158175,6 +174463,15 @@ "description": "The main URI in which this Service is serving traffic.", "description_kind": "plain", "computed": true + }, + "urls": { + "type": [ + "list", + "string" + ], + "description": "All URLs serving traffic for this Service.", + "description_kind": "plain", + "computed": true } }, "description_kind": "plain" @@ -159667,6 +175964,16 @@ [ "object", { + "airflow_metadata_retention_config": [ + "list", + [ + "object", + { + "retention_days": "number", + "retention_mode": "string" + } + ] + ], "task_logs_retention_config": [ "list", [ @@ -159689,6 +175996,8 @@ } ] ], + "enable_private_builds_only": "bool", + "enable_private_environment": "bool", "encryption_config": [ "list", [ @@ -159735,6 +176044,8 @@ [ "object", { + "composer_internal_ipv4_cidr_block": "string", + "composer_network_attachment": "string", "disk_size_gb": "number", "enable_ip_masq_agent": "bool", "ip_allocation_policy": [ @@ -159813,6 +176124,15 @@ "map", "string" ], + "cloud_data_lineage_integration": [ + "list", + [ + "object", + { + "enabled": "bool" + } + ] + ], "env_variables": [ "map", "string" @@ -159823,7 +176143,8 @@ "string" ], "python_version": "string", - "scheduler_count": "number" + "scheduler_count": 
"number", + "web_server_plugins_mode": "string" } ] ], @@ -159859,6 +176180,18 @@ [ "object", { + "dag_processor": [ + "list", + [ + "object", + { + "count": "number", + "cpu": "number", + "memory_gb": "number", + "storage_gb": "number" + } + ] + ], "scheduler": [ "list", [ @@ -160028,6 +176361,99 @@ "description_kind": "plain" } }, + "google_composer_user_workloads_config_map": { + "version": 0, + "block": { + "attributes": { + "data": { + "type": [ + "map", + "string" + ], + "description": "The \"data\" field of Kubernetes ConfigMap, organized in key-value pairs.\nFor details see: https://kubernetes.io/docs/concepts/configuration/configmap/", + "description_kind": "plain", + "computed": true + }, + "environment": { + "type": "string", + "description": "Environment where the Kubernetes ConfigMap will be stored and used.", + "description_kind": "plain", + "required": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "name": { + "type": "string", + "description": "Name of the Kubernetes ConfigMap.", + "description_kind": "plain", + "required": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "region": { + "type": "string", + "description": "The location or Compute Engine region for the environment.", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + }, + "google_composer_user_workloads_secret": { + "version": 0, + "block": { + "attributes": { + "data": { + "type": [ + "map", + "string" + ], + "description": "A map of the secret data.", + "description_kind": "plain", + "computed": true + }, + "environment": { + "type": "string", + "description": "Name of the environment.", + "description_kind": "plain", + "required": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "name": { + "type": "string", + "description": "Name of the secret.", + 
"description_kind": "plain", + "required": true + }, + "project": { + "type": "string", + "description": "The ID of the project in which the resource belongs. If it is not provided, the provider project is used.", + "description_kind": "plain", + "optional": true + }, + "region": { + "type": "string", + "description": "The location or Compute Engine region for the environment.", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + }, "google_compute_address": { "version": 0, "block": { @@ -160526,6 +176952,7 @@ [ "object", { + "enabled": "bool", "oauth2_client_id": "string", "oauth2_client_secret": "string", "oauth2_client_secret_sha256": "string" @@ -160542,6 +176969,12 @@ "optional": true, "computed": true }, + "ip_address_selection_policy": { + "type": "string", + "description": "Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). Possible values: [\"IPV4_ONLY\", \"PREFER_IPV6\", \"IPV6_ONLY\"]", + "description_kind": "plain", + "computed": true + }, "load_balancing_scheme": { "type": "string", "description": "Indicates whether the backend service will be used with internal or\nexternal load balancing. A backend service created for one type of\nload balancing cannot be used with the other. For more information, refer to\n[Choosing a load balancer](https://cloud.google.com/load-balancing/docs/backend-service). 
Default value: \"EXTERNAL\" Possible values: [\"EXTERNAL\", \"INTERNAL_SELF_MANAGED\", \"INTERNAL_MANAGED\", \"EXTERNAL_MANAGED\"]", @@ -160582,7 +177015,7 @@ }, "locality_lb_policy": { "type": "string", - "description": "The load balancing algorithm used within the scope of the locality.\nThe possible values are:\n\n* 'ROUND_ROBIN': This is a simple policy in which each healthy backend\n is selected in round robin order.\n\n* 'LEAST_REQUEST': An O(1) algorithm which selects two random healthy\n hosts and picks the host which has fewer active requests.\n\n* 'RING_HASH': The ring/modulo hash load balancer implements consistent\n hashing to backends. The algorithm has the property that the\n addition/removal of a host from a set of N hosts only affects\n 1/N of the requests.\n\n* 'RANDOM': The load balancer selects a random healthy host.\n\n* 'ORIGINAL_DESTINATION': Backend host is selected based on the client\n connection metadata, i.e., connections are opened\n to the same address as the destination address of\n the incoming connection before the connection\n was redirected to the load balancer.\n\n* 'MAGLEV': used as a drop in replacement for the ring hash load balancer.\n Maglev is not as stable as ring hash but has faster table lookup\n build times and host selection times. For more information about\n Maglev, refer to https://ai.google/research/pubs/pub44824\n\n* 'WEIGHTED_MAGLEV': Per-instance weighted Load Balancing via health check\n reported weights. If set, the Backend Service must\n configure a non legacy HTTP-based Health Check, and\n health check replies are expected to contain\n non-standard HTTP response header field\n X-Load-Balancing-Endpoint-Weight to specify the\n per-instance weights. If set, Load Balancing is weight\n based on the per-instance weights reported in the last\n processed health check replies, as long as every\n instance either reported a valid weight or had\n UNAVAILABLE_WEIGHT. 
Otherwise, Load Balancing remains\n equal-weight.\n\nThis field is applicable to either:\n\n* A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2,\n and loadBalancingScheme set to INTERNAL_MANAGED.\n* A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED.\n* A regional backend service with loadBalancingScheme set to EXTERNAL (External Network\n Load Balancing). Only MAGLEV and WEIGHTED_MAGLEV values are possible for External\n Network Load Balancing. The default is MAGLEV.\n\nIf session_affinity is not NONE, and this field is not set to MAGLEV, WEIGHTED_MAGLEV,\nor RING_HASH, session affinity settings will not take effect.\n\nOnly ROUND_ROBIN and RING_HASH are supported when the backend service is referenced\nby a URL map that is bound to target gRPC proxy that has validate_for_proxyless\nfield set to true. Possible values: [\"ROUND_ROBIN\", \"LEAST_REQUEST\", \"RING_HASH\", \"RANDOM\", \"ORIGINAL_DESTINATION\", \"MAGLEV\", \"WEIGHTED_MAGLEV\"]", + "description": "The load balancing algorithm used within the scope of the locality.\nThe possible values are:\n\n* 'ROUND_ROBIN': This is a simple policy in which each healthy backend\n is selected in round robin order.\n\n* 'LEAST_REQUEST': An O(1) algorithm which selects two random healthy\n hosts and picks the host which has fewer active requests.\n\n* 'RING_HASH': The ring/modulo hash load balancer implements consistent\n hashing to backends. 
The algorithm has the property that the\n addition/removal of a host from a set of N hosts only affects\n 1/N of the requests.\n\n* 'RANDOM': The load balancer selects a random healthy host.\n\n* 'ORIGINAL_DESTINATION': Backend host is selected based on the client\n connection metadata, i.e., connections are opened\n to the same address as the destination address of\n the incoming connection before the connection\n was redirected to the load balancer.\n\n* 'MAGLEV': used as a drop in replacement for the ring hash load balancer.\n Maglev is not as stable as ring hash but has faster table lookup\n build times and host selection times. For more information about\n Maglev, refer to https://ai.google/research/pubs/pub44824\n\n* 'WEIGHTED_MAGLEV': Per-instance weighted Load Balancing via health check\n reported weights. Only applicable to loadBalancingScheme\n EXTERNAL. If set, the Backend Service must\n configure a non legacy HTTP-based Health Check, and\n health check replies are expected to contain\n non-standard HTTP response header field\n X-Load-Balancing-Endpoint-Weight to specify the\n per-instance weights. If set, Load Balancing is weight\n based on the per-instance weights reported in the last\n processed health check replies, as long as every\n instance either reported a valid weight or had\n UNAVAILABLE_WEIGHT. Otherwise, Load Balancing remains\n equal-weight.\n\nlocality_lb_policy is applicable to either:\n\n* A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2,\n and loadBalancingScheme set to INTERNAL_MANAGED.\n* A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED.\n* A regional backend service with loadBalancingScheme set to EXTERNAL (External Network\n Load Balancing). Only MAGLEV and WEIGHTED_MAGLEV values are possible for External\n Network Load Balancing. 
The default is MAGLEV.\n\nIf session_affinity is not NONE, and locality_lb_policy is not set to MAGLEV, WEIGHTED_MAGLEV,\nor RING_HASH, session affinity settings will not take effect.\n\nOnly ROUND_ROBIN and RING_HASH are supported when the backend service is referenced\nby a URL map that is bound to target gRPC proxy that has validate_for_proxyless\nfield set to true. Possible values: [\"ROUND_ROBIN\", \"LEAST_REQUEST\", \"RING_HASH\", \"RANDOM\", \"ORIGINAL_DESTINATION\", \"MAGLEV\", \"WEIGHTED_MAGLEV\"]", "description_kind": "plain", "computed": true }, @@ -160645,7 +177078,7 @@ } ] ], - "description": "Settings controlling eviction of unhealthy hosts from the load balancing pool.\nApplicable backend service types can be a global backend service with the\nloadBalancingScheme set to INTERNAL_SELF_MANAGED or EXTERNAL_MANAGED.\n\nFrom version 6.0.0 outlierDetection default terraform values will be removed to match default GCP value.\nDefault values are enforce by GCP without providing them.", + "description": "Settings controlling eviction of unhealthy hosts from the load balancing pool.\nApplicable backend service types can be a global backend service with the\nloadBalancingScheme set to INTERNAL_SELF_MANAGED or EXTERNAL_MANAGED.", "description_kind": "plain", "computed": true }, @@ -160715,7 +177148,32 @@ }, "session_affinity": { "type": "string", - "description": "Type of session affinity to use. The default is NONE. Session affinity is\nnot applicable if the protocol is UDP. Possible values: [\"NONE\", \"CLIENT_IP\", \"CLIENT_IP_PORT_PROTO\", \"CLIENT_IP_PROTO\", \"GENERATED_COOKIE\", \"HEADER_FIELD\", \"HTTP_COOKIE\"]", + "description": "Type of session affinity to use. The default is NONE. Session affinity is\nnot applicable if the protocol is UDP. 
Possible values: [\"NONE\", \"CLIENT_IP\", \"CLIENT_IP_PORT_PROTO\", \"CLIENT_IP_PROTO\", \"GENERATED_COOKIE\", \"HEADER_FIELD\", \"HTTP_COOKIE\", \"STRONG_COOKIE_AFFINITY\"]", + "description_kind": "plain", + "computed": true + }, + "strong_session_affinity_cookie": { + "type": [ + "list", + [ + "object", + { + "name": "string", + "path": "string", + "ttl": [ + "list", + [ + "object", + { + "nanos": "number", + "seconds": "number" + } + ] + ] + } + ] + ], + "description": "Describes the HTTP cookie used for stateful session affinity. This field is applicable and required if the sessionAffinity is set to STRONG_COOKIE_AFFINITY.", "description_kind": "plain", "computed": true }, @@ -160794,7 +177252,7 @@ } ] ], - "description": "A nested object resource", + "description": "A nested object resource.", "description_kind": "plain", "computed": true }, @@ -161017,7 +177475,7 @@ }, "storage_pool": { "type": "string", - "description": "The URL of the storage pool in which the new disk is created.\nFor example:\n* https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/storagePools/{storagePool}\n* /projects/{project}/zones/{zone}/storagePools/{storagePool}", + "description": "The URL or the name of the storage pool in which the new disk is created.\nFor example:\n* https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/storagePools/{storagePool}\n* /projects/{project}/zones/{zone}/storagePools/{storagePool}\n* /zones/{zone}/storagePools/{storagePool}\n* /{storagePool}", "description_kind": "plain", "computed": true }, @@ -161537,6 +177995,12 @@ "description_kind": "plain", "computed": true }, + "forwarding_rule_id": { + "type": "number", + "description": "The unique identifier number for the resource. 
This identifier is defined by the server.", + "description_kind": "plain", + "computed": true + }, "id": { "type": "string", "description_kind": "plain", @@ -161618,6 +178082,12 @@ "description_kind": "plain", "computed": true }, + "network_tier": { + "type": "string", + "description": "This signifies the networking tier used for configuring\nthis load balancer and can only take the following values:\n'PREMIUM', 'STANDARD'.\n\nFor regional ForwardingRule, the valid values are 'PREMIUM' and\n'STANDARD'. For GlobalForwardingRule, the valid value is\n'PREMIUM'.\n\nIf this field is not specified, it is assumed to be 'PREMIUM'.\nIf 'IPAddress' is specified, this value must be equal to the\nnetworkTier of the Address. Possible values: [\"PREMIUM\", \"STANDARD\"]", + "description_kind": "plain", + "computed": true + }, "no_automate_dns_zone": { "type": "bool", "description": "This is used in PSC consumer ForwardingRule to control whether it should try to auto-generate a DNS zone or not. Non-PSC forwarding rules do not use this field.", @@ -161702,7 +178172,7 @@ } }, "google_compute_ha_vpn_gateway": { - "version": 0, + "version": 1, "block": { "attributes": { "description": { @@ -161812,7 +178282,7 @@ } ] ], - "description": "A nested object resource", + "description": "A nested object resource.", "description_kind": "plain", "computed": true }, @@ -161838,7 +178308,7 @@ } ] ], - "description": "A nested object resource", + "description": "A nested object resource.", "description_kind": "plain", "computed": true }, @@ -161858,7 +178328,7 @@ } ] ], - "description": "A nested object resource", + "description": "A nested object resource.", "description_kind": "plain", "computed": true }, @@ -161878,7 +178348,7 @@ } ] ], - "description": "A nested object resource", + "description": "A nested object resource.", "description_kind": "plain", "computed": true }, @@ -161942,7 +178412,7 @@ } ] ], - "description": "A nested object resource", + "description": "A nested object 
resource.", "description_kind": "plain", "computed": true }, @@ -161961,7 +178431,7 @@ } ] ], - "description": "A nested object resource", + "description": "A nested object resource.", "description_kind": "plain", "computed": true }, @@ -162156,7 +178626,10 @@ "object", { "enable_nested_virtualization": "bool", + "enable_uefi_networking": "bool", + "performance_monitoring_unit": "string", "threads_per_core": "number", + "turbo_mode": "string", "visible_core_count": "number" } ] @@ -162217,12 +178690,17 @@ "map", "string" ], + "resource_policies": [ + "list", + "string" + ], "size": "number", "storage_pool": "string", "type": "string" } ] ], + "interface": "string", "kms_key_self_link": "string", "mode": "string", "source": "string" @@ -162260,6 +178738,12 @@ "description_kind": "plain", "computed": true }, + "creation_timestamp": { + "type": "string", + "description": "Creation timestamp in RFC3339 text format.", + "description_kind": "plain", + "computed": true + }, "current_status": { "type": "string", "description": "\n\t\t\t\t\tCurrent status of the instance.\n\t\t\t\t\tThis could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED.\n\t\t\t\t\tFor more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle).", @@ -162280,7 +178764,7 @@ }, "desired_status": { "type": "string", - "description": "Desired status of the instance. Either \"RUNNING\" or \"TERMINATED\".", + "description": "Desired status of the instance. Either \"RUNNING\", \"SUSPENDED\" or \"TERMINATED\".", "description_kind": "plain", "computed": true }, @@ -162332,6 +178816,12 @@ "description_kind": "plain", "computed": true }, + "key_revocation_action_type": { + "type": "string", + "description": "Action to be taken when a customer's encryption key is revoked. 
Supports \"STOP\" and \"NONE\", with \"NONE\" being the default.", + "description_kind": "plain", + "computed": true + }, "label_fingerprint": { "type": "string", "description": "The unique fingerprint of the labels.", @@ -162524,6 +179014,7 @@ "object", { "automatic_restart": "bool", + "availability_domain": "number", "instance_termination_action": "string", "local_ssd_recovery_timeout": [ "list", @@ -162819,6 +179310,12 @@ "description_kind": "plain", "computed": true }, + "instance_group_manager_id": { + "type": "number", + "description": "The unique identifier number for the resource. This identifier is defined by the server.", + "description_kind": "plain", + "computed": true + }, "instance_lifecycle_policy": { "type": [ "list", @@ -162878,6 +179375,21 @@ "description_kind": "plain", "optional": true }, + "standby_policy": { + "type": [ + "list", + [ + "object", + { + "initial_delay_sec": "number", + "mode": "string" + } + ] + ], + "description": "Standby policy for stopped and suspended instances.", + "description_kind": "plain", + "computed": true + }, "stateful_disk": { "type": [ "set", @@ -162989,6 +179501,18 @@ "description_kind": "plain", "computed": true }, + "target_stopped_size": { + "type": "number", + "description": "The target number of stopped instances for this managed instance group.", + "description_kind": "plain", + "computed": true + }, + "target_suspended_size": { + "type": "number", + "description": "The target number of suspended instances for this managed instance group.", + "description_kind": "plain", + "computed": true + }, "update_policy": { "type": [ "list", @@ -163057,6 +179581,73 @@ "description_kind": "plain" } }, + "google_compute_instance_guest_attributes": { + "version": 0, + "block": { + "attributes": { + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "name": { + "type": "string", + "description_kind": "plain", + "required": true + }, + "project": { + "type": "string", 
+ "description_kind": "plain", + "optional": true, + "computed": true + }, + "query_path": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "query_value": { + "type": [ + "list", + [ + "object", + { + "key": "string", + "namespace": "string", + "value": "string" + } + ] + ], + "description_kind": "plain", + "computed": true + }, + "region": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "variable_key": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "variable_value": { + "type": "string", + "description_kind": "plain", + "computed": true + }, + "zone": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "description_kind": "plain" + } + }, "google_compute_instance_iam_policy": { "version": 0, "block": { @@ -163150,7 +179741,10 @@ "object", { "enable_nested_virtualization": "bool", + "enable_uefi_networking": "bool", + "performance_monitoring_unit": "string", "threads_per_core": "number", + "turbo_mode": "string", "visible_core_count": "number" } ] @@ -163180,6 +179774,12 @@ "description_kind": "plain", "computed": true }, + "creation_timestamp": { + "type": "string", + "description": "Creation timestamp in RFC3339 text format.", + "description_kind": "plain", + "computed": true + }, "description": { "type": "string", "description": "A brief description of this resource.", @@ -163214,6 +179814,7 @@ ], "mode": "string", "provisioned_iops": "number", + "provisioned_throughput": "number", "resource_manager_tags": [ "map", "string" @@ -163294,6 +179895,12 @@ "description_kind": "plain", "computed": true }, + "key_revocation_action_type": { + "type": "string", + "description": "Action to be taken when a customer's encryption key is revoked. 
Supports \"STOP\" and \"NONE\", with \"NONE\" being the default.", + "description_kind": "plain", + "computed": true + }, "labels": { "type": [ "map", @@ -163349,7 +179956,7 @@ }, "name_prefix": { "type": "string", - "description": "Creates a unique name beginning with the specified prefix. Conflicts with name.", + "description": "Creates a unique name beginning with the specified prefix. Conflicts with name. Max length is 54 characters. Prefixes with lengths longer than 37 characters will use a shortened UUID that will be more prone to collisions.", "description_kind": "plain", "computed": true }, @@ -163489,6 +180096,7 @@ "object", { "automatic_restart": "bool", + "availability_domain": "number", "instance_termination_action": "string", "local_ssd_recovery_timeout": [ "list", @@ -163751,6 +180359,17 @@ "description_kind": "plain", "required": true }, + "network_id": { + "type": "number", + "description_kind": "plain", + "computed": true + }, + "numeric_id": { + "type": "string", + "description_kind": "plain", + "deprecated": true, + "computed": true + }, "project": { "type": "string", "description_kind": "plain", @@ -164012,7 +180631,7 @@ } ] ], - "description": "A nested object resource", + "description": "A nested object resource.", "description_kind": "plain", "computed": true }, @@ -164327,6 +180946,391 @@ "description_kind": "plain" } }, + "google_compute_region_instance_group_manager": { + "version": 0, + "block": { + "attributes": { + "all_instances_config": { + "type": [ + "list", + [ + "object", + { + "labels": [ + "map", + "string" + ], + "metadata": [ + "map", + "string" + ] + } + ] + ], + "description": "Specifies configuration that overrides the instance template configuration for the group.", + "description_kind": "plain", + "computed": true + }, + "auto_healing_policies": { + "type": [ + "list", + [ + "object", + { + "health_check": "string", + "initial_delay_sec": "number" + } + ] + ], + "description": "The autohealing policies for this managed 
instance group. You can specify only one value.", + "description_kind": "plain", + "computed": true + }, + "base_instance_name": { + "type": "string", + "description": "The base instance name to use for instances in this group. The value must be a valid RFC1035 name. Supported characters are lowercase letters, numbers, and hyphens (-). Instances are named by appending a hyphen and a random four-character string to the base instance name.", + "description_kind": "plain", + "computed": true + }, + "creation_timestamp": { + "type": "string", + "description": "Creation timestamp in RFC3339 text format.", + "description_kind": "plain", + "computed": true + }, + "description": { + "type": "string", + "description": "An optional textual description of the instance group manager.", + "description_kind": "plain", + "computed": true + }, + "distribution_policy_target_shape": { + "type": "string", + "description": "The shape to which the group converges either proactively or on resize events (depending on the value set in updatePolicy.instanceRedistributionType).", + "description_kind": "plain", + "computed": true + }, + "distribution_policy_zones": { + "type": [ + "set", + "string" + ], + "description": "The distribution policy for this managed instance group. You can specify one or more values.", + "description_kind": "plain", + "computed": true + }, + "fingerprint": { + "type": "string", + "description": "The fingerprint of the instance group manager.", + "description_kind": "plain", + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "instance_flexibility_policy": { + "type": [ + "list", + [ + "object", + { + "instance_selections": [ + "set", + [ + "object", + { + "machine_types": [ + "set", + "string" + ], + "name": "string", + "rank": "number" + } + ] + ] + } + ] + ], + "description": "The flexibility policy for this managed instance group. 
Instance flexibility allowing MIG to create VMs from multiple types of machines. Instance flexibility configuration on MIG overrides instance template configuration.", + "description_kind": "plain", + "computed": true + }, + "instance_group": { + "type": "string", + "description": "The full URL of the instance group created by the manager.", + "description_kind": "plain", + "computed": true + }, + "instance_group_manager_id": { + "type": "number", + "description": "The unique identifier number for the resource. This identifier is defined by the server.", + "description_kind": "plain", + "computed": true + }, + "instance_lifecycle_policy": { + "type": [ + "list", + [ + "object", + { + "default_action_on_failure": "string", + "force_update_on_repair": "string" + } + ] + ], + "description": "The instance lifecycle policy for this managed instance group.", + "description_kind": "plain", + "computed": true + }, + "list_managed_instances_results": { + "type": "string", + "description": "Pagination behavior of the listManagedInstances API method for this managed instance group. Valid values are: \"PAGELESS\", \"PAGINATED\". If PAGELESS (default), Pagination is disabled for the group's listManagedInstances API method. maxResults and pageToken query parameters are ignored and all instances are returned in a single response. If PAGINATED, pagination is enabled, maxResults and pageToken query parameters are respected.", + "description_kind": "plain", + "computed": true + }, + "name": { + "type": "string", + "description": "The name of the instance group manager. Must be 1-63 characters long and comply with RFC1035. 
Supported characters include lowercase letters, numbers, and hyphens.", + "description_kind": "plain", + "optional": true + }, + "named_port": { + "type": [ + "set", + [ + "object", + { + "name": "string", + "port": "number" + } + ] + ], + "description": "The named port configuration.", + "description_kind": "plain", + "computed": true + }, + "project": { + "type": "string", + "description": "The ID of the project in which the resource belongs. If it is not provided, the provider project is used.", + "description_kind": "plain", + "optional": true + }, + "region": { + "type": "string", + "description": "The region where the managed instance group resides.", + "description_kind": "plain", + "optional": true + }, + "self_link": { + "type": "string", + "description": "The URL of the created resource.", + "description_kind": "plain", + "optional": true + }, + "standby_policy": { + "type": [ + "list", + [ + "object", + { + "initial_delay_sec": "number", + "mode": "string" + } + ] + ], + "description": "Standby policy for stopped and suspended instances.", + "description_kind": "plain", + "computed": true + }, + "stateful_disk": { + "type": [ + "set", + [ + "object", + { + "delete_rule": "string", + "device_name": "string" + } + ] + ], + "description": "Disks created on the instances that will be preserved on instance delete, update, etc. Structure is documented below. For more information see the official documentation. Proactive cross zone instance redistribution must be disabled before you can update stateful disks on existing instance group managers. This can be controlled via the update_policy.", + "description_kind": "plain", + "computed": true + }, + "stateful_external_ip": { + "type": [ + "list", + [ + "object", + { + "delete_rule": "string", + "interface_name": "string" + } + ] + ], + "description": "External IPs considered stateful by the instance group. 
", + "description_kind": "plain", + "computed": true + }, + "stateful_internal_ip": { + "type": [ + "list", + [ + "object", + { + "delete_rule": "string", + "interface_name": "string" + } + ] + ], + "description": "External IPs considered stateful by the instance group. ", + "description_kind": "plain", + "computed": true + }, + "status": { + "type": [ + "list", + [ + "object", + { + "all_instances_config": [ + "list", + [ + "object", + { + "current_revision": "string", + "effective": "bool" + } + ] + ], + "is_stable": "bool", + "stateful": [ + "list", + [ + "object", + { + "has_stateful_config": "bool", + "per_instance_configs": [ + "list", + [ + "object", + { + "all_effective": "bool" + } + ] + ] + } + ] + ], + "version_target": [ + "list", + [ + "object", + { + "is_reached": "bool" + } + ] + ] + } + ] + ], + "description": "The status of this managed instance group.", + "description_kind": "plain", + "computed": true + }, + "target_pools": { + "type": [ + "set", + "string" + ], + "description": "The full URL of all target pools to which new instances in the group are added. Updating the target pools attribute does not affect existing instances.", + "description_kind": "plain", + "computed": true + }, + "target_size": { + "type": "number", + "description": "The target number of running instances for this managed instance group. This value should always be explicitly set unless this resource is attached to an autoscaler, in which case it should never be set. 
Defaults to 0.", + "description_kind": "plain", + "computed": true + }, + "target_stopped_size": { + "type": "number", + "description": "The target number of stopped instances for this managed instance group.", + "description_kind": "plain", + "computed": true + }, + "target_suspended_size": { + "type": "number", + "description": "The target number of suspended instances for this managed instance group.", + "description_kind": "plain", + "computed": true + }, + "update_policy": { + "type": [ + "list", + [ + "object", + { + "instance_redistribution_type": "string", + "max_surge_fixed": "number", + "max_surge_percent": "number", + "max_unavailable_fixed": "number", + "max_unavailable_percent": "number", + "minimal_action": "string", + "most_disruptive_allowed_action": "string", + "replacement_method": "string", + "type": "string" + } + ] + ], + "description": "The update policy for this managed instance group.", + "description_kind": "plain", + "computed": true + }, + "version": { + "type": [ + "list", + [ + "object", + { + "instance_template": "string", + "name": "string", + "target_size": [ + "list", + [ + "object", + { + "fixed": "number", + "percent": "number" + } + ] + ] + } + ] + ], + "description": "Application versions managed by this instance group. Each version deals with a specific instance template, allowing canary release scenarios.", + "description_kind": "plain", + "computed": true + }, + "wait_for_instances": { + "type": "bool", + "description": "Whether to wait for all instances to be created/updated before returning. Note that if this is set to true and the operation does not succeed, Terraform will continue trying until it times out.", + "description_kind": "plain", + "computed": true + }, + "wait_for_instances_status": { + "type": "string", + "description": "When used with wait_for_instances specifies the status to wait for. When STABLE is specified this resource will wait until the instances are stable before returning. 
When UPDATED is set, it will wait for the version target to be reached and any per instance configs to be effective and all instances configs to be effective as well as all instances to be stable before returning.", + "description_kind": "plain", + "computed": true + } + }, + "description_kind": "plain" + } + }, "google_compute_region_instance_template": { "version": 1, "block": { @@ -164338,7 +181342,10 @@ "object", { "enable_nested_virtualization": "bool", + "enable_uefi_networking": "bool", + "performance_monitoring_unit": "string", "threads_per_core": "number", + "turbo_mode": "string", "visible_core_count": "number" } ] @@ -164368,6 +181375,12 @@ "description_kind": "plain", "computed": true }, + "creation_timestamp": { + "type": "string", + "description": "The time at which the instance was created in RFC 3339 format.", + "description_kind": "plain", + "computed": true + }, "description": { "type": "string", "description": "A brief description of this resource.", @@ -164402,6 +181415,7 @@ ], "mode": "string", "provisioned_iops": "number", + "provisioned_throughput": "number", "resource_manager_tags": [ "map", "string" @@ -164482,6 +181496,12 @@ "description_kind": "plain", "computed": true }, + "key_revocation_action_type": { + "type": "string", + "description": "Action to be taken when a customer's encryption key is revoked. 
Supports \"STOP\" and \"NONE\", with \"NONE\" being the default.", + "description_kind": "plain", + "computed": true + }, "labels": { "type": [ "map", @@ -164677,6 +181697,7 @@ "object", { "automatic_restart": "bool", + "availability_domain": "number", "instance_termination_action": "string", "local_ssd_recovery_timeout": [ "list", @@ -164886,6 +181907,20 @@ "description_kind": "plain", "optional": true }, + "psc_data": { + "type": [ + "list", + [ + "object", + { + "producer_port": "string" + } + ] + ], + "description": "This field is only used for PSC NEGs.", + "description_kind": "plain", + "computed": true + }, "psc_target_service": { "type": "string", "description": "This field is only used for PSC and INTERNET NEGs.\n\nThe target service url used to set up private service connection to\na Google API or a PSC Producer Service Attachment.", @@ -165495,6 +182530,15 @@ "optional": true, "computed": true }, + "initial_nat_ips": { + "type": [ + "set", + "string" + ], + "description": "Self-links of NAT IPs to be used as initial value for creation alongside a RouterNatAddress resource.\nConflicts with natIps and drainNatIps. Only valid if natIpAllocateOption is set to MANUAL_ONLY.", + "description_kind": "plain", + "computed": true + }, "log_config": { "type": [ "list", @@ -165539,7 +182583,7 @@ "set", "string" ], - "description": "Self-links of NAT IPs. Only valid if natIpAllocateOption\nis set to MANUAL_ONLY.", + "description": "Self-links of NAT IPs. 
Only valid if natIpAllocateOption\nis set to MANUAL_ONLY.\nIf this field is used alongside with a count created list of address resources 'google_compute_address.foobar.*.self_link',\nthe access level resource for the address resource must have a 'lifecycle' block with 'create_before_destroy = true' so\nthe number of resources can be increased/decreased without triggering the 'resourceInUseByAnotherResource' error.", "description_kind": "plain", "computed": true }, @@ -165765,7 +182809,34 @@ "object", { "enable": "bool", - "rule_visibility": "string" + "rule_visibility": "string", + "threshold_configs": [ + "list", + [ + "object", + { + "auto_deploy_confidence_threshold": "number", + "auto_deploy_expiration_sec": "number", + "auto_deploy_impacted_baseline_threshold": "number", + "auto_deploy_load_threshold": "number", + "detection_absolute_qps": "number", + "detection_load_threshold": "number", + "detection_relative_to_baseline_qps": "number", + "name": "string", + "traffic_granularity_configs": [ + "list", + [ + "object", + { + "enable_each_unique_value": "bool", + "type": "string", + "value": "string" + } + ] + ] + } + ] + ] } ] ] @@ -165931,6 +183002,67 @@ } ] ], + "preconfigured_waf_config": [ + "list", + [ + "object", + { + "exclusion": [ + "list", + [ + "object", + { + "request_cookie": [ + "list", + [ + "object", + { + "operator": "string", + "value": "string" + } + ] + ], + "request_header": [ + "list", + [ + "object", + { + "operator": "string", + "value": "string" + } + ] + ], + "request_query_param": [ + "list", + [ + "object", + { + "operator": "string", + "value": "string" + } + ] + ], + "request_uri": [ + "list", + [ + "object", + { + "operator": "string", + "value": "string" + } + ] + ], + "target_rule_ids": [ + "set", + "string" + ], + "target_rule_set": "string" + } + ] + ] + } + ] + ], "preview": "bool", "priority": "number", "rate_limit_options": [ @@ -165951,6 +183083,16 @@ ], "conform_action": "string", "enforce_on_key": "string", + 
"enforce_on_key_configs": [ + "list", + [ + "object", + { + "enforce_on_key_name": "string", + "enforce_on_key_type": "string" + } + ] + ], "enforce_on_key_name": "string", "exceed_action": "string", "exceed_redirect_options": [ @@ -166441,6 +183583,11 @@ "description_kind": "plain", "optional": true, "computed": true + }, + "subnetwork_id": { + "type": "number", + "description_kind": "plain", + "computed": true } }, "description_kind": "plain" @@ -166906,6 +184053,15 @@ } ] ], + "parallelstore_csi_driver_config": [ + "list", + [ + "object", + { + "enabled": "bool" + } + ] + ], "ray_operator_config": [ "list", [ @@ -167113,6 +184269,29 @@ "description_kind": "plain", "computed": true }, + "control_plane_endpoints_config": { + "type": [ + "list", + [ + "object", + { + "dns_endpoint_config": [ + "list", + [ + "object", + { + "allow_external_traffic": "bool", + "endpoint": "string" + } + ] + ] + } + ] + ], + "description": "Configuration for all of the cluster's control plane endpoints. Currently supports only DNS endpoint configuration, IP endpoint configuration is available in private_cluster_config.", + "description_kind": "plain", + "computed": true + }, "cost_management_config": { "type": [ "list", @@ -167186,6 +184365,7 @@ [ "object", { + "additive_vpc_scope_dns_domain": "string", "cluster_dns": "string", "cluster_dns_domain": "string", "cluster_dns_scope": "string" @@ -167196,6 +184376,15 @@ "description_kind": "plain", "computed": true }, + "effective_labels": { + "type": [ + "map", + "string" + ], + "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, "enable_autopilot": { "type": "bool", "description": "Enable Autopilot for this cluster.", @@ -167208,6 +184397,12 @@ "description_kind": "plain", "computed": true }, + "enable_fqdn_network_policy": { + "type": "bool", + "description": "Whether FQDN 
Network Policy is enabled on this cluster.", + "description_kind": "plain", + "computed": true + }, "enable_intranode_visibility": { "type": "bool", "description": "Whether Intra-node visibility is enabled for this cluster. This makes same node pod to pod traffic visible for VPC network.", @@ -167273,6 +184468,21 @@ "description_kind": "plain", "computed": true }, + "enterprise_config": { + "type": [ + "list", + [ + "object", + { + "cluster_tier": "string", + "desired_tier": "string" + } + ] + ], + "description": "Defines the config needed to enable/disable GKE Enterprise", + "description_kind": "plain", + "computed": true + }, "fleet": { "type": [ "list", @@ -167500,7 +184710,8 @@ } ] ], - "gcp_public_cidrs_access_enabled": "bool" + "gcp_public_cidrs_access_enabled": "bool", + "private_endpoint_enforcement_enabled": "bool" } ] ], @@ -167546,8 +184757,7 @@ "object", { "enable_metrics": "bool", - "enable_relay": "bool", - "relay_mode": "string" + "enable_relay": "bool" } ] ], @@ -167787,6 +184997,16 @@ "object", { "cgroup_mode": "string", + "hugepages_config": [ + "list", + [ + "object", + { + "hugepage_size_1g": "number", + "hugepage_size_2m": "number" + } + ] + ], "sysctls": [ "map", "string" @@ -167804,6 +185024,7 @@ ] ], "local_ssd_count": "number", + "local_ssd_encryption_mode": "string", "logging_variant": "string", "machine_type": "string", "metadata": [ @@ -167883,6 +185104,10 @@ ] ], "spot": "bool", + "storage_pools": [ + "list", + "string" + ], "tags": [ "list", "string" @@ -168191,6 +185416,16 @@ "object", { "cgroup_mode": "string", + "hugepages_config": [ + "list", + [ + "object", + { + "hugepage_size_1g": "number", + "hugepage_size_2m": "number" + } + ] + ], "sysctls": [ "map", "string" @@ -168208,6 +185443,7 @@ ] ], "local_ssd_count": "number", + "local_ssd_encryption_mode": "string", "logging_variant": "string", "machine_type": "string", "metadata": [ @@ -168287,6 +185523,10 @@ ] ], "spot": "bool", + "storage_pools": [ + "list", + "string" + ], 
"tags": [ "list", "string" @@ -168384,6 +185624,15 @@ [ "object", { + "linux_node_config": [ + "list", + [ + "object", + { + "cgroup_mode": "string" + } + ] + ], "network_tags": [ "list", [ @@ -168465,6 +185714,15 @@ } ] ], + "gcfs_config": [ + "list", + [ + "object", + { + "enabled": "bool" + } + ] + ], "insecure_kubelet_readonly_port_enabled": "string", "logging_variant": "string" } @@ -168589,7 +185847,7 @@ "map", "string" ], - "description": "The GCE resource labels (a map of key/value pairs) to be applied to the cluster.", + "description": "The GCE resource labels (a map of key/value pairs) to be applied to the cluster.\n\n\t\t\t\t**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\n\t\t\t\tPlease refer to the field 'effective_labels' for all of the labels present on the resource.", "description_kind": "plain", "computed": true }, @@ -168617,6 +185875,20 @@ "description_kind": "plain", "computed": true }, + "secret_manager_config": { + "type": [ + "list", + [ + "object", + { + "enabled": "bool" + } + ] + ], + "description": "Configuration for the Secret Manager feature.", + "description_kind": "plain", + "computed": true + }, "security_posture_config": { "type": [ "list", @@ -168664,12 +185936,48 @@ "description_kind": "plain", "computed": true }, + "terraform_labels": { + "type": [ + "map", + "string" + ], + "description": "The combination of labels configured directly on the resource and default labels configured on the provider.", + "description_kind": "plain", + "computed": true + }, "tpu_ipv4_cidr_block": { "type": "string", "description": "The IP address range of the Cloud TPUs in this cluster, in CIDR notation (e.g. 
1.2.3.4/29).", "description_kind": "plain", "computed": true }, + "user_managed_keys_config": { + "type": [ + "list", + [ + "object", + { + "aggregation_ca": "string", + "cluster_ca": "string", + "control_plane_disk_encryption_key": "string", + "etcd_api_ca": "string", + "etcd_peer_ca": "string", + "gkeops_etcd_backup_encryption_key": "string", + "service_account_signing_keys": [ + "set", + "string" + ], + "service_account_verification_keys": [ + "set", + "string" + ] + } + ] + ], + "description": "The custom keys configuration of the cluster.", + "description_kind": "plain", + "computed": true + }, "vertical_pod_autoscaling": { "type": [ "list", @@ -169574,6 +186882,12 @@ "description_kind": "plain", "computed": true }, + "deletion_protection": { + "type": "bool", + "description": "Indicates if the dataproc metastore should be protected against accidental deletions.", + "description_kind": "plain", + "computed": true + }, "effective_labels": { "type": [ "map", @@ -170260,6 +187574,18 @@ "description_kind": "plain", "computed": true }, + "deletion_protection_enabled": { + "type": "bool", + "description": "Indicates whether the instance is protected against deletion.", + "description_kind": "plain", + "computed": true + }, + "deletion_protection_reason": { + "type": "string", + "description": "The reason for enabling deletion protection.", + "description_kind": "plain", + "computed": true + }, "description": { "type": "string", "description": "A description of the instance.", @@ -170370,11 +187696,48 @@ "description_kind": "plain", "computed": true }, + "performance_config": { + "type": [ + "list", + [ + "object", + { + "fixed_iops": [ + "list", + [ + "object", + { + "max_iops": "number" + } + ] + ], + "iops_per_tb": [ + "list", + [ + "object", + { + "max_iops_per_tb": "number" + } + ] + ] + } + ] + ], + "description": "Performance configuration for the instance. 
If not provided,\nthe default performance settings will be used.", + "description_kind": "plain", + "computed": true + }, "project": { "type": "string", "description_kind": "plain", "optional": true }, + "protocol": { + "type": "string", + "description": "Either NFSv3, for using NFS version 3 as file sharing protocol,\nor NFSv4.1, for using NFS version 4.1 as file sharing protocol.\nNFSv4.1 can be used with HIGH_SCALE_SSD, ZONAL, REGIONAL and ENTERPRISE.\nThe default is NFSv3. Default value: \"NFS_V3\" Possible values: [\"NFS_V3\", \"NFS_V4_1\"]", + "description_kind": "plain", + "computed": true + }, "terraform_labels": { "type": [ "map", @@ -170409,6 +187772,11 @@ "description_kind": "plain", "computed": true }, + "deletion_protection": { + "type": "bool", + "description_kind": "plain", + "computed": true + }, "display_name": { "type": "string", "description_kind": "plain", @@ -170722,6 +188090,418 @@ "description_kind": "plain" } }, + "google_gke_hub_feature": { + "version": 0, + "block": { + "attributes": { + "create_time": { + "type": "string", + "description": "Output only. When the Feature resource was created.", + "description_kind": "plain", + "computed": true + }, + "delete_time": { + "type": "string", + "description": "Output only. 
When the Feature resource was deleted.", + "description_kind": "plain", + "computed": true + }, + "effective_labels": { + "type": [ + "map", + "string" + ], + "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, + "fleet_default_member_config": { + "type": [ + "list", + [ + "object", + { + "configmanagement": [ + "list", + [ + "object", + { + "config_sync": [ + "list", + [ + "object", + { + "enabled": "bool", + "git": [ + "list", + [ + "object", + { + "gcp_service_account_email": "string", + "https_proxy": "string", + "policy_dir": "string", + "secret_type": "string", + "sync_branch": "string", + "sync_repo": "string", + "sync_rev": "string", + "sync_wait_secs": "string" + } + ] + ], + "oci": [ + "list", + [ + "object", + { + "gcp_service_account_email": "string", + "policy_dir": "string", + "secret_type": "string", + "sync_repo": "string", + "sync_wait_secs": "string", + "version": "string" + } + ] + ], + "prevent_drift": "bool", + "source_format": "string" + } + ] + ], + "management": "string", + "version": "string" + } + ] + ], + "mesh": [ + "list", + [ + "object", + { + "management": "string" + } + ] + ], + "policycontroller": [ + "list", + [ + "object", + { + "policy_controller_hub_config": [ + "list", + [ + "object", + { + "audit_interval_seconds": "number", + "constraint_violation_limit": "number", + "deployment_configs": [ + "set", + [ + "object", + { + "component": "string", + "container_resources": [ + "list", + [ + "object", + { + "limits": [ + "list", + [ + "object", + { + "cpu": "string", + "memory": "string" + } + ] + ], + "requests": [ + "list", + [ + "object", + { + "cpu": "string", + "memory": "string" + } + ] + ] + } + ] + ], + "pod_affinity": "string", + "pod_toleration": [ + "list", + [ + "object", + { + "effect": "string", + "key": "string", + "operator": "string", + "value": "string" 
+ } + ] + ], + "replica_count": "number" + } + ] + ], + "exemptable_namespaces": [ + "list", + "string" + ], + "install_spec": "string", + "log_denies_enabled": "bool", + "monitoring": [ + "list", + [ + "object", + { + "backends": [ + "list", + "string" + ] + } + ] + ], + "mutation_enabled": "bool", + "policy_content": [ + "list", + [ + "object", + { + "bundles": [ + "set", + [ + "object", + { + "bundle": "string", + "exempted_namespaces": [ + "list", + "string" + ] + } + ] + ], + "template_library": [ + "list", + [ + "object", + { + "installation": "string" + } + ] + ] + } + ] + ], + "referential_rules_enabled": "bool" + } + ] + ], + "version": "string" + } + ] + ] + } + ] + ], + "description": "Optional. Fleet Default Membership Configuration.", + "description_kind": "plain", + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "labels": { + "type": [ + "map", + "string" + ], + "description": "GCP labels for this Feature.\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", + "description_kind": "plain", + "computed": true + }, + "location": { + "type": "string", + "description": "The location for the resource", + "description_kind": "plain", + "required": true + }, + "name": { + "type": "string", + "description": "The full, unique name of this Feature resource", + "description_kind": "plain", + "required": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "resource_state": { + "type": [ + "list", + [ + "object", + { + "has_resources": "bool", + "state": "string" + } + ] + ], + "description": "State of the Feature resource itself.", + "description_kind": "plain", + "computed": true + }, + "spec": { + "type": [ + "list", + [ + "object", + { + "clusterupgrade": [ + "list", + [ + "object", 
+ { + "gke_upgrade_overrides": [ + "list", + [ + "object", + { + "post_conditions": [ + "list", + [ + "object", + { + "soaking": "string" + } + ] + ], + "upgrade": [ + "list", + [ + "object", + { + "name": "string", + "version": "string" + } + ] + ] + } + ] + ], + "post_conditions": [ + "list", + [ + "object", + { + "soaking": "string" + } + ] + ], + "upstream_fleets": [ + "list", + "string" + ] + } + ] + ], + "fleetobservability": [ + "list", + [ + "object", + { + "logging_config": [ + "list", + [ + "object", + { + "default_config": [ + "list", + [ + "object", + { + "mode": "string" + } + ] + ], + "fleet_scope_logs_config": [ + "list", + [ + "object", + { + "mode": "string" + } + ] + ] + } + ] + ] + } + ] + ], + "multiclusteringress": [ + "list", + [ + "object", + { + "config_membership": "string" + } + ] + ] + } + ] + ], + "description": "Optional. Hub-wide Feature configuration. If this Feature does not support any Hub-wide configuration, this field may be unused.", + "description_kind": "plain", + "computed": true + }, + "state": { + "type": [ + "list", + [ + "object", + { + "state": [ + "list", + [ + "object", + { + "code": "string", + "description": "string", + "update_time": "string" + } + ] + ] + } + ] + ], + "description": "Output only. The Hub-wide Feature state", + "description_kind": "plain", + "computed": true + }, + "terraform_labels": { + "type": [ + "map", + "string" + ], + "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", + "description_kind": "plain", + "computed": true + }, + "update_time": { + "type": "string", + "description": "Output only. 
When the Feature resource was last updated.", + "description_kind": "plain", + "computed": true + } + }, + "description_kind": "plain" + } + }, "google_gke_hub_feature_iam_policy": { "version": 0, "block": { @@ -171848,6 +189628,70 @@ "description_kind": "plain" } }, + "google_kms_crypto_key_latest_version": { + "version": 0, + "block": { + "attributes": { + "algorithm": { + "type": "string", + "description_kind": "plain", + "computed": true + }, + "crypto_key": { + "type": "string", + "description_kind": "plain", + "required": true + }, + "filter": { + "type": "string", + "description": "\n\t\t\t\t\tThe filter argument is used to add a filter query parameter that limits which type of cryptoKeyVersion is retrieved as the latest by the data source: ?filter={{filter}}. When no value is provided there is no filtering.\n\n\t\t\t\t\tExample filter values if filtering on state.\n\n\t\t\t\t\t* \"state:ENABLED\" will retrieve the latest cryptoKeyVersion that has the state \"ENABLED\".\n\n\t\t\t\t\t[See the documentation about using filters](https://cloud.google.com/kms/docs/sorting-and-filtering)\n\t\t\t\t", + "description_kind": "plain", + "optional": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "name": { + "type": "string", + "description_kind": "plain", + "computed": true + }, + "protection_level": { + "type": "string", + "description_kind": "plain", + "computed": true + }, + "public_key": { + "type": [ + "list", + [ + "object", + { + "algorithm": "string", + "pem": "string" + } + ] + ], + "description_kind": "plain", + "computed": true + }, + "state": { + "type": "string", + "description_kind": "plain", + "computed": true + }, + "version": { + "type": "number", + "description_kind": "plain", + "computed": true + } + }, + "description_kind": "plain" + } + }, "google_kms_crypto_key_version": { "version": 0, "block": { @@ -171906,6 +189750,75 @@ "description_kind": "plain" } }, + 
"google_kms_crypto_key_versions": { + "version": 0, + "block": { + "attributes": { + "crypto_key": { + "type": "string", + "description_kind": "plain", + "required": true + }, + "filter": { + "type": "string", + "description": "\n\t\t\t\t\tThe filter argument is used to add a filter query parameter that limits which cryptoKeyVersions are retrieved by the data source: ?filter={{filter}}.\n\t\t\t\t\tExample values:\n\t\t\t\t\t\n\t\t\t\t\t* \"name:my-cryptokey-version-\" will retrieve cryptoKeyVersions that contain \"my-key-\" anywhere in their name. Note: names take the form projects/{{project}}/locations/{{location}}/keyRings/{{keyRing}}/cryptoKeys/{{cryptoKey}}/cryptoKeyVersions/{{cryptoKeyVersion}}.\n\t\t\t\t\t* \"name=projects/my-project/locations/global/keyRings/my-key-ring/cryptoKeys/my-key-1/cryptoKeyVersions/1\" will only retrieve a key with that exact name.\n\t\t\t\t\t\n\t\t\t\t\t[See the documentation about using filters](https://cloud.google.com/kms/docs/sorting-and-filtering)\n\t\t\t\t", + "description_kind": "plain", + "optional": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "public_key": { + "type": [ + "list", + [ + "object", + { + "algorithm": "string", + "pem": "string" + } + ] + ], + "description_kind": "plain", + "computed": true + }, + "versions": { + "type": [ + "list", + [ + "object", + { + "algorithm": "string", + "crypto_key": "string", + "id": "string", + "name": "string", + "protection_level": "string", + "public_key": [ + "list", + [ + "object", + { + "algorithm": "string", + "pem": "string" + } + ] + ], + "state": "string", + "version": "number" + } + ] + ], + "description": "A list of all the retrieved cryptoKeyVersions from the provided crypto key", + "description_kind": "plain", + "computed": true + } + }, + "description_kind": "plain" + } + }, "google_kms_crypto_keys": { "version": 0, "block": { @@ -173133,6 +191046,1297 @@ "description_kind": "plain" } }, + 
"google_oracle_database_autonomous_database": { + "version": 0, + "block": { + "attributes": { + "admin_password": { + "type": "string", + "description": "The password for the default ADMIN user.", + "description_kind": "plain", + "computed": true + }, + "autonomous_database_id": { + "type": "string", + "description": "The ID of the Autonomous Database to create. This value is restricted\nto (^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$) and must be a maximum of 63\ncharacters in length. The value must start with a letter and end with\na letter or a number.", + "description_kind": "plain", + "required": true + }, + "cidr": { + "type": "string", + "description": "The subnet CIDR range for the Autonmous Database.", + "description_kind": "plain", + "computed": true + }, + "create_time": { + "type": "string", + "description": "The date and time that the Autonomous Database was created.", + "description_kind": "plain", + "computed": true + }, + "database": { + "type": "string", + "description": "The name of the Autonomous Database. The database name must be unique in\nthe project. The name must begin with a letter and can\ncontain a maximum of 30 alphanumeric characters.", + "description_kind": "plain", + "computed": true + }, + "deletion_protection": { + "type": "bool", + "description": "Whether or not to allow Terraform to destroy the instance. Unless this field is set to false in Terraform state, a terraform destroy or terraform apply that would delete the instance will fail.", + "description_kind": "plain", + "computed": true + }, + "display_name": { + "type": "string", + "description": "The display name for the Autonomous Database. 
The name does not have to\nbe unique within your project.", + "description_kind": "plain", + "computed": true + }, + "effective_labels": { + "type": [ + "map", + "string" + ], + "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, + "entitlement_id": { + "type": "string", + "description": "The ID of the subscription entitlement associated with the Autonomous\nDatabase.", + "description_kind": "plain", + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "labels": { + "type": [ + "map", + "string" + ], + "description": "The labels or tags associated with the Autonomous Database. \n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", + "description_kind": "plain", + "computed": true + }, + "location": { + "type": "string", + "description": "Resource ID segment making up resource 'name'. See documentation for resource type 'oracledatabase.googleapis.com/AutonomousDatabaseBackup'.", + "description_kind": "plain", + "required": true + }, + "name": { + "type": "string", + "description": "Identifier. 
The name of the Autonomous Database resource in the following format:\nprojects/{project}/locations/{region}/autonomousDatabases/{autonomous_database}", + "description_kind": "plain", + "computed": true + }, + "network": { + "type": "string", + "description": "The name of the VPC network used by the Autonomous Database.\nFormat: projects/{project}/global/networks/{network}", + "description_kind": "plain", + "computed": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "properties": { + "type": [ + "list", + [ + "object", + { + "actual_used_data_storage_size_tb": "number", + "allocated_storage_size_tb": "number", + "apex_details": [ + "list", + [ + "object", + { + "apex_version": "string", + "ords_version": "string" + } + ] + ], + "are_primary_allowlisted_ips_used": "bool", + "autonomous_container_database_id": "string", + "available_upgrade_versions": [ + "list", + "string" + ], + "backup_retention_period_days": "number", + "character_set": "string", + "compute_count": "number", + "connection_strings": [ + "list", + [ + "object", + { + "all_connection_strings": [ + "list", + [ + "object", + { + "high": "string", + "low": "string", + "medium": "string" + } + ] + ], + "dedicated": "string", + "high": "string", + "low": "string", + "medium": "string", + "profiles": [ + "list", + [ + "object", + { + "consumer_group": "string", + "display_name": "string", + "host_format": "string", + "is_regional": "bool", + "protocol": "string", + "session_mode": "string", + "syntax_format": "string", + "tls_authentication": "string", + "value": "string" + } + ] + ] + } + ] + ], + "connection_urls": [ + "list", + [ + "object", + { + "apex_uri": "string", + "database_transforms_uri": "string", + "graph_studio_uri": "string", + "machine_learning_notebook_uri": "string", + "machine_learning_user_management_uri": "string", + "mongo_db_uri": "string", + "ords_uri": "string", + "sql_dev_web_uri": "string" + } + ] + ], + "customer_contacts": 
[ + "list", + [ + "object", + { + "email": "string" + } + ] + ], + "data_safe_state": "string", + "data_storage_size_gb": "number", + "data_storage_size_tb": "number", + "database_management_state": "string", + "db_edition": "string", + "db_version": "string", + "db_workload": "string", + "failed_data_recovery_duration": "string", + "is_auto_scaling_enabled": "bool", + "is_local_data_guard_enabled": "bool", + "is_storage_auto_scaling_enabled": "bool", + "license_type": "string", + "lifecycle_details": "string", + "local_adg_auto_failover_max_data_loss_limit": "number", + "local_disaster_recovery_type": "string", + "local_standby_db": [ + "list", + [ + "object", + { + "data_guard_role_changed_time": "string", + "disaster_recovery_role_changed_time": "string", + "lag_time_duration": "string", + "lifecycle_details": "string", + "state": "string" + } + ] + ], + "maintenance_begin_time": "string", + "maintenance_end_time": "string", + "maintenance_schedule_type": "string", + "memory_per_oracle_compute_unit_gbs": "number", + "memory_table_gbs": "number", + "mtls_connection_required": "bool", + "n_character_set": "string", + "next_long_term_backup_time": "string", + "oci_url": "string", + "ocid": "string", + "open_mode": "string", + "operations_insights_state": "string", + "peer_db_ids": [ + "list", + "string" + ], + "permission_level": "string", + "private_endpoint": "string", + "private_endpoint_ip": "string", + "private_endpoint_label": "string", + "refreshable_mode": "string", + "refreshable_state": "string", + "role": "string", + "scheduled_operation_details": [ + "list", + [ + "object", + { + "day_of_week": "string", + "start_time": [ + "list", + [ + "object", + { + "hours": "number", + "minutes": "number", + "nanos": "number", + "seconds": "number" + } + ] + ], + "stop_time": [ + "list", + [ + "object", + { + "hours": "number", + "minutes": "number", + "nanos": "number", + "seconds": "number" + } + ] + ] + } + ] + ], + "sql_web_developer_url": "string", + "state": 
"string", + "supported_clone_regions": [ + "list", + "string" + ], + "total_auto_backup_storage_size_gbs": "number", + "used_data_storage_size_tbs": "number" + } + ] + ], + "description": "The properties of an Autonomous Database.", + "description_kind": "plain", + "computed": true + }, + "terraform_labels": { + "type": [ + "map", + "string" + ], + "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", + "description_kind": "plain", + "computed": true + } + }, + "description_kind": "plain" + } + }, + "google_oracle_database_autonomous_databases": { + "version": 0, + "block": { + "attributes": { + "autonomous_databases": { + "type": [ + "list", + [ + "object", + { + "admin_password": "string", + "autonomous_database_id": "string", + "cidr": "string", + "create_time": "string", + "database": "string", + "deletion_protection": "bool", + "display_name": "string", + "effective_labels": [ + "map", + "string" + ], + "entitlement_id": "string", + "labels": [ + "map", + "string" + ], + "location": "string", + "name": "string", + "network": "string", + "project": "string", + "properties": [ + "list", + [ + "object", + { + "actual_used_data_storage_size_tb": "number", + "allocated_storage_size_tb": "number", + "apex_details": [ + "list", + [ + "object", + { + "apex_version": "string", + "ords_version": "string" + } + ] + ], + "are_primary_allowlisted_ips_used": "bool", + "autonomous_container_database_id": "string", + "available_upgrade_versions": [ + "list", + "string" + ], + "backup_retention_period_days": "number", + "character_set": "string", + "compute_count": "number", + "connection_strings": [ + "list", + [ + "object", + { + "all_connection_strings": [ + "list", + [ + "object", + { + "high": "string", + "low": "string", + "medium": "string" + } + ] + ], + "dedicated": "string", + "high": "string", + "low": "string", + "medium": "string", + "profiles": [ + "list", + [ + "object", + { + 
"consumer_group": "string", + "display_name": "string", + "host_format": "string", + "is_regional": "bool", + "protocol": "string", + "session_mode": "string", + "syntax_format": "string", + "tls_authentication": "string", + "value": "string" + } + ] + ] + } + ] + ], + "connection_urls": [ + "list", + [ + "object", + { + "apex_uri": "string", + "database_transforms_uri": "string", + "graph_studio_uri": "string", + "machine_learning_notebook_uri": "string", + "machine_learning_user_management_uri": "string", + "mongo_db_uri": "string", + "ords_uri": "string", + "sql_dev_web_uri": "string" + } + ] + ], + "customer_contacts": [ + "list", + [ + "object", + { + "email": "string" + } + ] + ], + "data_safe_state": "string", + "data_storage_size_gb": "number", + "data_storage_size_tb": "number", + "database_management_state": "string", + "db_edition": "string", + "db_version": "string", + "db_workload": "string", + "failed_data_recovery_duration": "string", + "is_auto_scaling_enabled": "bool", + "is_local_data_guard_enabled": "bool", + "is_storage_auto_scaling_enabled": "bool", + "license_type": "string", + "lifecycle_details": "string", + "local_adg_auto_failover_max_data_loss_limit": "number", + "local_disaster_recovery_type": "string", + "local_standby_db": [ + "list", + [ + "object", + { + "data_guard_role_changed_time": "string", + "disaster_recovery_role_changed_time": "string", + "lag_time_duration": "string", + "lifecycle_details": "string", + "state": "string" + } + ] + ], + "maintenance_begin_time": "string", + "maintenance_end_time": "string", + "maintenance_schedule_type": "string", + "memory_per_oracle_compute_unit_gbs": "number", + "memory_table_gbs": "number", + "mtls_connection_required": "bool", + "n_character_set": "string", + "next_long_term_backup_time": "string", + "oci_url": "string", + "ocid": "string", + "open_mode": "string", + "operations_insights_state": "string", + "peer_db_ids": [ + "list", + "string" + ], + "permission_level": "string", + 
"private_endpoint": "string", + "private_endpoint_ip": "string", + "private_endpoint_label": "string", + "refreshable_mode": "string", + "refreshable_state": "string", + "role": "string", + "scheduled_operation_details": [ + "list", + [ + "object", + { + "day_of_week": "string", + "start_time": [ + "list", + [ + "object", + { + "hours": "number", + "minutes": "number", + "nanos": "number", + "seconds": "number" + } + ] + ], + "stop_time": [ + "list", + [ + "object", + { + "hours": "number", + "minutes": "number", + "nanos": "number", + "seconds": "number" + } + ] + ] + } + ] + ], + "sql_web_developer_url": "string", + "state": "string", + "supported_clone_regions": [ + "list", + "string" + ], + "total_auto_backup_storage_size_gbs": "number", + "used_data_storage_size_tbs": "number" + } + ] + ], + "terraform_labels": [ + "map", + "string" + ] + } + ] + ], + "description_kind": "plain", + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "location": { + "type": "string", + "description": "location", + "description_kind": "plain", + "required": true + }, + "project": { + "type": "string", + "description": "The ID of the project in which the dataset is located. If it is not provided, the provider project is used.", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + }, + "google_oracle_database_cloud_exadata_infrastructure": { + "version": 0, + "block": { + "attributes": { + "cloud_exadata_infrastructure_id": { + "type": "string", + "description": "The ID of the Exadata Infrastructure to create. This value is restricted\nto (^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$) and must be a maximum of 63\ncharacters in length. 
The value must start with a letter and end with\na letter or a number.", + "description_kind": "plain", + "required": true + }, + "create_time": { + "type": "string", + "description": "The date and time that the Exadata Infrastructure was created.", + "description_kind": "plain", + "computed": true + }, + "deletion_protection": { + "type": "bool", + "description": "Whether or not to allow Terraform to destroy the instance. Unless this field is set to false in Terraform state, a terraform destroy or terraform apply that would delete the instance will fail.", + "description_kind": "plain", + "computed": true + }, + "display_name": { + "type": "string", + "description": "User friendly name for this resource.", + "description_kind": "plain", + "computed": true + }, + "effective_labels": { + "type": [ + "map", + "string" + ], + "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, + "entitlement_id": { + "type": "string", + "description": "Entitlement ID of the private offer against which this infrastructure\nresource is provisioned.", + "description_kind": "plain", + "computed": true + }, + "gcp_oracle_zone": { + "type": "string", + "description": "GCP location where Oracle Exadata is hosted.", + "description_kind": "plain", + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "labels": { + "type": [ + "map", + "string" + ], + "description": "Labels or tags associated with the resource. 
\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", + "description_kind": "plain", + "computed": true + }, + "location": { + "type": "string", + "description": "Resource ID segment making up resource 'name'. See documentation for resource type 'oracledatabase.googleapis.com/DbServer'.", + "description_kind": "plain", + "required": true + }, + "name": { + "type": "string", + "description": "Identifier. The name of the Exadata Infrastructure resource with the following format:\nprojects/{project}/locations/{region}/cloudExadataInfrastructures/{cloud_exadata_infrastructure}", + "description_kind": "plain", + "computed": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "properties": { + "type": [ + "list", + [ + "object", + { + "activated_storage_count": "number", + "additional_storage_count": "number", + "available_storage_size_gb": "number", + "compute_count": "number", + "cpu_count": "number", + "customer_contacts": [ + "list", + [ + "object", + { + "email": "string" + } + ] + ], + "data_storage_size_tb": "number", + "db_node_storage_size_gb": "number", + "db_server_version": "string", + "maintenance_window": [ + "list", + [ + "object", + { + "custom_action_timeout_mins": "number", + "days_of_week": [ + "list", + "string" + ], + "hours_of_day": [ + "list", + "number" + ], + "is_custom_action_timeout_enabled": "bool", + "lead_time_week": "number", + "months": [ + "list", + "string" + ], + "patching_mode": "string", + "preference": "string", + "weeks_of_month": [ + "list", + "number" + ] + } + ] + ], + "max_cpu_count": "number", + "max_data_storage_tb": "number", + "max_db_node_storage_size_gb": "number", + "max_memory_gb": "number", + "memory_size_gb": "number", + "monthly_db_server_version": "string", + "monthly_storage_server_version": "string", + 
"next_maintenance_run_id": "string", + "next_maintenance_run_time": "string", + "next_security_maintenance_run_time": "string", + "oci_url": "string", + "ocid": "string", + "shape": "string", + "state": "string", + "storage_count": "number", + "storage_server_version": "string", + "total_storage_size_gb": "number" + } + ] + ], + "description": "Various properties of Exadata Infrastructure.", + "description_kind": "plain", + "computed": true + }, + "terraform_labels": { + "type": [ + "map", + "string" + ], + "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", + "description_kind": "plain", + "computed": true + } + }, + "description_kind": "plain" + } + }, + "google_oracle_database_cloud_exadata_infrastructures": { + "version": 0, + "block": { + "attributes": { + "cloud_exadata_infrastructures": { + "type": [ + "list", + [ + "object", + { + "cloud_exadata_infrastructure_id": "string", + "create_time": "string", + "deletion_protection": "bool", + "display_name": "string", + "effective_labels": [ + "map", + "string" + ], + "entitlement_id": "string", + "gcp_oracle_zone": "string", + "labels": [ + "map", + "string" + ], + "location": "string", + "name": "string", + "project": "string", + "properties": [ + "list", + [ + "object", + { + "activated_storage_count": "number", + "additional_storage_count": "number", + "available_storage_size_gb": "number", + "compute_count": "number", + "cpu_count": "number", + "customer_contacts": [ + "list", + [ + "object", + { + "email": "string" + } + ] + ], + "data_storage_size_tb": "number", + "db_node_storage_size_gb": "number", + "db_server_version": "string", + "maintenance_window": [ + "list", + [ + "object", + { + "custom_action_timeout_mins": "number", + "days_of_week": [ + "list", + "string" + ], + "hours_of_day": [ + "list", + "number" + ], + "is_custom_action_timeout_enabled": "bool", + "lead_time_week": "number", + "months": [ + "list", + "string" + 
], + "patching_mode": "string", + "preference": "string", + "weeks_of_month": [ + "list", + "number" + ] + } + ] + ], + "max_cpu_count": "number", + "max_data_storage_tb": "number", + "max_db_node_storage_size_gb": "number", + "max_memory_gb": "number", + "memory_size_gb": "number", + "monthly_db_server_version": "string", + "monthly_storage_server_version": "string", + "next_maintenance_run_id": "string", + "next_maintenance_run_time": "string", + "next_security_maintenance_run_time": "string", + "oci_url": "string", + "ocid": "string", + "shape": "string", + "state": "string", + "storage_count": "number", + "storage_server_version": "string", + "total_storage_size_gb": "number" + } + ] + ], + "terraform_labels": [ + "map", + "string" + ] + } + ] + ], + "description_kind": "plain", + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "location": { + "type": "string", + "description": "location", + "description_kind": "plain", + "required": true + }, + "project": { + "type": "string", + "description": "The ID of the project in which the dataset is located. If it is not provided, the provider project is used.", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + }, + "google_oracle_database_cloud_vm_cluster": { + "version": 0, + "block": { + "attributes": { + "backup_subnet_cidr": { + "type": "string", + "description": "CIDR range of the backup subnet.", + "description_kind": "plain", + "computed": true + }, + "cidr": { + "type": "string", + "description": "Network settings. CIDR to use for cluster IP allocation.", + "description_kind": "plain", + "computed": true + }, + "cloud_vm_cluster_id": { + "type": "string", + "description": "The ID of the VM Cluster to create. This value is restricted\nto (^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$) and must be a maximum of 63\ncharacters in length. 
The value must start with a letter and end with\na letter or a number.", + "description_kind": "plain", + "required": true + }, + "create_time": { + "type": "string", + "description": "The date and time that the VM cluster was created.", + "description_kind": "plain", + "computed": true + }, + "deletion_protection": { + "type": "bool", + "description": "Whether Terraform will be prevented from destroying the cluster. Deleting this cluster via terraform destroy or terraform apply will only succeed if this field is false in the Terraform state.", + "description_kind": "plain", + "computed": true + }, + "display_name": { + "type": "string", + "description": "User friendly name for this resource.", + "description_kind": "plain", + "computed": true + }, + "effective_labels": { + "type": [ + "map", + "string" + ], + "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, + "exadata_infrastructure": { + "type": "string", + "description": "The name of the Exadata Infrastructure resource on which VM cluster\nresource is created, in the following format:\nprojects/{project}/locations/{region}/cloudExadataInfrastuctures/{cloud_extradata_infrastructure}", + "description_kind": "plain", + "computed": true + }, + "gcp_oracle_zone": { + "type": "string", + "description": "GCP location where Oracle Exadata is hosted. It is same as GCP Oracle zone\nof Exadata infrastructure.", + "description_kind": "plain", + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "labels": { + "type": [ + "map", + "string" + ], + "description": "Labels or tags associated with the VM Cluster. 
\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", + "description_kind": "plain", + "computed": true + }, + "location": { + "type": "string", + "description": "Resource ID segment making up resource 'name'. See documentation for resource type 'oracledatabase.googleapis.com/DbNode'.", + "description_kind": "plain", + "required": true + }, + "name": { + "type": "string", + "description": "Identifier. The name of the VM Cluster resource with the format:\nprojects/{project}/locations/{region}/cloudVmClusters/{cloud_vm_cluster}", + "description_kind": "plain", + "computed": true + }, + "network": { + "type": "string", + "description": "The name of the VPC network.\nFormat: projects/{project}/global/networks/{network}", + "description_kind": "plain", + "computed": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "properties": { + "type": [ + "list", + [ + "object", + { + "cluster_name": "string", + "compartment_id": "string", + "cpu_core_count": "number", + "data_storage_size_tb": "number", + "db_node_storage_size_gb": "number", + "db_server_ocids": [ + "list", + "string" + ], + "diagnostics_data_collection_options": [ + "list", + [ + "object", + { + "diagnostics_events_enabled": "bool", + "health_monitoring_enabled": "bool", + "incident_logs_enabled": "bool" + } + ] + ], + "disk_redundancy": "string", + "dns_listener_ip": "string", + "domain": "string", + "gi_version": "string", + "hostname": "string", + "hostname_prefix": "string", + "license_type": "string", + "local_backup_enabled": "bool", + "memory_size_gb": "number", + "node_count": "number", + "oci_url": "string", + "ocid": "string", + "ocpu_count": "number", + "scan_dns": "string", + "scan_dns_record_id": "string", + "scan_ip_ids": [ + "list", + "string" + ], + "scan_listener_port_tcp": "number", + 
"scan_listener_port_tcp_ssl": "number", + "shape": "string", + "sparse_diskgroup_enabled": "bool", + "ssh_public_keys": [ + "list", + "string" + ], + "state": "string", + "storage_size_gb": "number", + "system_version": "string", + "time_zone": [ + "list", + [ + "object", + { + "id": "string" + } + ] + ] + } + ] + ], + "description": "Various properties and settings associated with Exadata VM cluster.", + "description_kind": "plain", + "computed": true + }, + "terraform_labels": { + "type": [ + "map", + "string" + ], + "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", + "description_kind": "plain", + "computed": true + } + }, + "description_kind": "plain" + } + }, + "google_oracle_database_cloud_vm_clusters": { + "version": 0, + "block": { + "attributes": { + "cloud_vm_clusters": { + "type": [ + "list", + [ + "object", + { + "backup_subnet_cidr": "string", + "cidr": "string", + "cloud_vm_cluster_id": "string", + "create_time": "string", + "deletion_protection": "bool", + "display_name": "string", + "effective_labels": [ + "map", + "string" + ], + "exadata_infrastructure": "string", + "gcp_oracle_zone": "string", + "labels": [ + "map", + "string" + ], + "location": "string", + "name": "string", + "network": "string", + "project": "string", + "properties": [ + "list", + [ + "object", + { + "cluster_name": "string", + "compartment_id": "string", + "cpu_core_count": "number", + "data_storage_size_tb": "number", + "db_node_storage_size_gb": "number", + "db_server_ocids": [ + "list", + "string" + ], + "diagnostics_data_collection_options": [ + "list", + [ + "object", + { + "diagnostics_events_enabled": "bool", + "health_monitoring_enabled": "bool", + "incident_logs_enabled": "bool" + } + ] + ], + "disk_redundancy": "string", + "dns_listener_ip": "string", + "domain": "string", + "gi_version": "string", + "hostname": "string", + "hostname_prefix": "string", + "license_type": "string", + 
"local_backup_enabled": "bool", + "memory_size_gb": "number", + "node_count": "number", + "oci_url": "string", + "ocid": "string", + "ocpu_count": "number", + "scan_dns": "string", + "scan_dns_record_id": "string", + "scan_ip_ids": [ + "list", + "string" + ], + "scan_listener_port_tcp": "number", + "scan_listener_port_tcp_ssl": "number", + "shape": "string", + "sparse_diskgroup_enabled": "bool", + "ssh_public_keys": [ + "list", + "string" + ], + "state": "string", + "storage_size_gb": "number", + "system_version": "string", + "time_zone": [ + "list", + [ + "object", + { + "id": "string" + } + ] + ] + } + ] + ], + "terraform_labels": [ + "map", + "string" + ] + } + ] + ], + "description_kind": "plain", + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "location": { + "type": "string", + "description": "location", + "description_kind": "plain", + "required": true + }, + "project": { + "type": "string", + "description": "The ID of the project in which the dataset is located. 
If it is not provided, the provider project is used.", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + }, + "google_oracle_database_db_nodes": { + "version": 0, + "block": { + "attributes": { + "cloud_vm_cluster": { + "type": "string", + "description": "vmcluster", + "description_kind": "plain", + "required": true + }, + "db_nodes": { + "type": [ + "list", + [ + "object", + { + "name": "string", + "properties": [ + "list", + [ + "object", + { + "db_node_storage_size_gb": "number", + "db_server_ocid": "string", + "hostname": "string", + "memory_size_gb": "number", + "ocid": "string", + "ocpu_count": "number", + "state": "string", + "total_cpu_core_count": "number" + } + ] + ] + } + ] + ], + "description_kind": "plain", + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "location": { + "type": "string", + "description": "location", + "description_kind": "plain", + "required": true + }, + "project": { + "type": "string", + "description": "The ID of the project in which the dataset is located. 
If it is not provided, the provider project is used.", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + }, + "google_oracle_database_db_servers": { + "version": 0, + "block": { + "attributes": { + "cloud_exadata_infrastructure": { + "type": "string", + "description": "exadata", + "description_kind": "plain", + "required": true + }, + "db_servers": { + "type": [ + "list", + [ + "object", + { + "display_name": "string", + "properties": [ + "list", + [ + "object", + { + "db_node_ids": [ + "list", + "string" + ], + "db_node_storage_size_gb": "number", + "max_db_node_storage_size_gb": "number", + "max_memory_size_gb": "number", + "max_ocpu_count": "number", + "memory_size_gb": "number", + "ocid": "string", + "ocpu_count": "number", + "state": "string", + "vm_count": "number" + } + ] + ] + } + ] + ], + "description_kind": "plain", + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "location": { + "type": "string", + "description": "location", + "description_kind": "plain", + "required": true + }, + "project": { + "type": "string", + "description": "The ID of the project in which the dataset is located. 
If it is not provided, the provider project is used.", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + }, "google_organization": { "version": 0, "block": { @@ -173254,243 +192458,537 @@ "description_kind": "plain" } }, - "google_privateca_certificate_authority": { + "google_privateca_certificate_authority": { + "version": 0, + "block": { + "attributes": { + "access_urls": { + "type": [ + "list", + [ + "object", + { + "ca_certificate_access_url": "string", + "crl_access_urls": [ + "list", + "string" + ] + } + ] + ], + "description": "URLs for accessing content published by this CA, such as the CA certificate and CRLs.", + "description_kind": "plain", + "computed": true + }, + "certificate_authority_id": { + "type": "string", + "description": "The user provided Resource ID for this Certificate Authority.", + "description_kind": "plain", + "optional": true + }, + "config": { + "type": [ + "list", + [ + "object", + { + "subject_config": [ + "list", + [ + "object", + { + "subject": [ + "list", + [ + "object", + { + "common_name": "string", + "country_code": "string", + "locality": "string", + "organization": "string", + "organizational_unit": "string", + "postal_code": "string", + "province": "string", + "street_address": "string" + } + ] + ], + "subject_alt_name": [ + "list", + [ + "object", + { + "dns_names": [ + "list", + "string" + ], + "email_addresses": [ + "list", + "string" + ], + "ip_addresses": [ + "list", + "string" + ], + "uris": [ + "list", + "string" + ] + } + ] + ] + } + ] + ], + "subject_key_id": [ + "list", + [ + "object", + { + "key_id": "string" + } + ] + ], + "x509_config": [ + "list", + [ + "object", + { + "additional_extensions": [ + "list", + [ + "object", + { + "critical": "bool", + "object_id": [ + "list", + [ + "object", + { + "object_id_path": [ + "list", + "number" + ] + } + ] + ], + "value": "string" + } + ] + ], + "aia_ocsp_servers": [ + "list", + "string" + ], + "ca_options": [ + "list", + [ + 
"object", + { + "is_ca": "bool", + "max_issuer_path_length": "number", + "non_ca": "bool", + "zero_max_issuer_path_length": "bool" + } + ] + ], + "key_usage": [ + "list", + [ + "object", + { + "base_key_usage": [ + "list", + [ + "object", + { + "cert_sign": "bool", + "content_commitment": "bool", + "crl_sign": "bool", + "data_encipherment": "bool", + "decipher_only": "bool", + "digital_signature": "bool", + "encipher_only": "bool", + "key_agreement": "bool", + "key_encipherment": "bool" + } + ] + ], + "extended_key_usage": [ + "list", + [ + "object", + { + "client_auth": "bool", + "code_signing": "bool", + "email_protection": "bool", + "ocsp_signing": "bool", + "server_auth": "bool", + "time_stamping": "bool" + } + ] + ], + "unknown_extended_key_usages": [ + "list", + [ + "object", + { + "object_id_path": [ + "list", + "number" + ] + } + ] + ] + } + ] + ], + "name_constraints": [ + "list", + [ + "object", + { + "critical": "bool", + "excluded_dns_names": [ + "list", + "string" + ], + "excluded_email_addresses": [ + "list", + "string" + ], + "excluded_ip_ranges": [ + "list", + "string" + ], + "excluded_uris": [ + "list", + "string" + ], + "permitted_dns_names": [ + "list", + "string" + ], + "permitted_email_addresses": [ + "list", + "string" + ], + "permitted_ip_ranges": [ + "list", + "string" + ], + "permitted_uris": [ + "list", + "string" + ] + } + ] + ], + "policy_ids": [ + "list", + [ + "object", + { + "object_id_path": [ + "list", + "number" + ] + } + ] + ] + } + ] + ] + } + ] + ], + "description": "The config used to create a self-signed X.509 certificate or CSR.", + "description_kind": "plain", + "computed": true + }, + "create_time": { + "type": "string", + "description": "The time at which this CertificateAuthority was created.\n\nA timestamp in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine\nfractional digits. 
Examples: \"2014-10-02T15:01:23Z\" and \"2014-10-02T15:01:23.045123456Z\".", + "description_kind": "plain", + "computed": true + }, + "deletion_protection": { + "type": "bool", + "description": "Whether Terraform will be prevented from destroying the CertificateAuthority.\nWhen the field is set to true or unset in Terraform state, a 'terraform apply'\nor 'terraform destroy' that would delete the CertificateAuthority will fail.\nWhen the field is set to false, deleting the CertificateAuthority is allowed.", + "description_kind": "plain", + "computed": true + }, + "desired_state": { + "type": "string", + "description": "Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA.\nPossible values: ENABLED, DISABLED, STAGED.", + "description_kind": "plain", + "computed": true + }, + "effective_labels": { + "type": [ + "map", + "string" + ], + "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, + "gcs_bucket": { + "type": "string", + "description": "The name of a Cloud Storage bucket where this CertificateAuthority will publish content,\nsuch as the CA certificate and CRLs. This must be a bucket name, without any prefixes\n(such as 'gs://') or suffixes (such as '.googleapis.com'). For example, to use a bucket named\nmy-bucket, you would simply specify 'my-bucket'. If not specified, a managed bucket will be\ncreated.", + "description_kind": "plain", + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "ignore_active_certificates_on_deletion": { + "type": "bool", + "description": "This field allows the CA to be deleted even if the CA has active certs. Active certs include both unrevoked and unexpired certs.\nUse with care. 
Defaults to 'false'.", + "description_kind": "plain", + "computed": true + }, + "key_spec": { + "type": [ + "list", + [ + "object", + { + "algorithm": "string", + "cloud_kms_key_version": "string" + } + ] + ], + "description": "Used when issuing certificates for this CertificateAuthority. If this CertificateAuthority\nis a self-signed CertificateAuthority, this key is also used to sign the self-signed CA\ncertificate. Otherwise, it is used to sign a CSR.", + "description_kind": "plain", + "computed": true + }, + "labels": { + "type": [ + "map", + "string" + ], + "description": "Labels with user-defined metadata.\n\nAn object containing a list of \"key\": value pairs. Example: { \"name\": \"wrench\", \"mass\":\n\"1.3kg\", \"count\": \"3\" }.\n\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", + "description_kind": "plain", + "computed": true + }, + "lifetime": { + "type": "string", + "description": "The desired lifetime of the CA certificate. Used to create the \"notBeforeTime\" and\n\"notAfterTime\" fields inside an X.509 certificate. A duration in seconds with up to nine\nfractional digits, terminated by 's'. Example: \"3.5s\".", + "description_kind": "plain", + "computed": true + }, + "location": { + "type": "string", + "description": "Location of the CertificateAuthority. A full list of valid locations can be found by\nrunning 'gcloud privateca locations list'.", + "description_kind": "plain", + "optional": true + }, + "name": { + "type": "string", + "description": "The resource name for this CertificateAuthority in the format\nprojects/*/locations/*/certificateAuthorities/*.", + "description_kind": "plain", + "computed": true + }, + "pem_ca_certificate": { + "type": "string", + "description": "The signed CA certificate issued from the subordinated CA's CSR. 
This is needed when activating the subordiante CA with a third party issuer.", + "description_kind": "plain", + "computed": true + }, + "pem_ca_certificates": { + "type": [ + "list", + "string" + ], + "description": "This CertificateAuthority's certificate chain, including the current\nCertificateAuthority's certificate. Ordered such that the root issuer is the final\nelement (consistent with RFC 5246). For a self-signed CA, this will only list the current\nCertificateAuthority's certificate.", + "description_kind": "plain", + "computed": true + }, + "pem_csr": { + "type": "string", + "description_kind": "plain", + "computed": true + }, + "pool": { + "type": "string", + "description": "The name of the CaPool this Certificate Authority belongs to.", + "description_kind": "plain", + "optional": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "skip_grace_period": { + "type": "bool", + "description": "If this flag is set, the Certificate Authority will be deleted as soon as\npossible without a 30-day grace period where undeletion would have been\nallowed. If you proceed, there will be no way to recover this CA.\nUse with care. 
Defaults to 'false'.", + "description_kind": "plain", + "computed": true + }, + "state": { + "type": "string", + "description": "The State for this CertificateAuthority.", + "description_kind": "plain", + "computed": true + }, + "subordinate_config": { + "type": [ + "list", + [ + "object", + { + "certificate_authority": "string", + "pem_issuer_chain": [ + "list", + [ + "object", + { + "pem_certificates": [ + "list", + "string" + ] + } + ] + ] + } + ] + ], + "description": "If this is a subordinate CertificateAuthority, this field will be set\nwith the subordinate configuration, which describes its issuers.", + "description_kind": "plain", + "computed": true + }, + "terraform_labels": { + "type": [ + "map", + "string" + ], + "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", + "description_kind": "plain", + "computed": true + }, + "type": { + "type": "string", + "description": "The Type of this CertificateAuthority.\n\n~> **Note:** For 'SUBORDINATE' Certificate Authorities, they need to\nbe activated before they can issue certificates. Default value: \"SELF_SIGNED\" Possible values: [\"SELF_SIGNED\", \"SUBORDINATE\"]", + "description_kind": "plain", + "computed": true + }, + "update_time": { + "type": "string", + "description": "The time at which this CertificateAuthority was updated.\n\nA timestamp in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine\nfractional digits. 
Examples: \"2014-10-02T15:01:23Z\" and \"2014-10-02T15:01:23.045123456Z\".", + "description_kind": "plain", + "computed": true + } + }, + "description_kind": "plain" + } + }, + "google_privateca_certificate_template_iam_policy": { + "version": 0, + "block": { + "attributes": { + "certificate_template": { + "type": "string", + "description_kind": "plain", + "required": true + }, + "etag": { + "type": "string", + "description_kind": "plain", + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "location": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "policy_data": { + "type": "string", + "description_kind": "plain", + "computed": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "description_kind": "plain" + } + }, + "google_privileged_access_manager_entitlement": { "version": 0, "block": { "attributes": { - "access_urls": { + "additional_notification_targets": { "type": [ "list", [ "object", { - "ca_certificate_access_url": "string", - "crl_access_urls": [ - "list", + "admin_email_recipients": [ + "set", + "string" + ], + "requester_email_recipients": [ + "set", "string" ] } ] ], - "description": "URLs for accessing content published by this CA, such as the CA certificate and CRLs.", + "description": "AdditionalNotificationTargets includes email addresses to be notified.", "description_kind": "plain", "computed": true }, - "certificate_authority_id": { - "type": "string", - "description": "The user provided Resource ID for this Certificate Authority.", - "description_kind": "plain", - "optional": true - }, - "config": { + "approval_workflow": { "type": [ "list", [ "object", { - "subject_config": [ + "manual_approvals": [ "list", [ "object", { - "subject": [ + "require_approver_justification": "bool", + "steps": [ "list", [ "object", { - "common_name": "string", 
- "country_code": "string", - "locality": "string", - "organization": "string", - "organizational_unit": "string", - "postal_code": "string", - "province": "string", - "street_address": "string" - } - ] - ], - "subject_alt_name": [ - "list", - [ - "object", - { - "dns_names": [ - "list", - "string" - ], - "email_addresses": [ - "list", - "string" - ], - "ip_addresses": [ - "list", + "approvals_needed": "number", + "approver_email_recipients": [ + "set", "string" ], - "uris": [ - "list", - "string" - ] - } - ] - ] - } - ] - ], - "subject_key_id": [ - "list", - [ - "object", - { - "key_id": "string" - } - ] - ], - "x509_config": [ - "list", - [ - "object", - { - "additional_extensions": [ - "list", - [ - "object", - { - "critical": "bool", - "object_id": [ + "approvers": [ "list", [ "object", { - "object_id_path": [ - "list", - "number" - ] - } - ] - ], - "value": "string" - } - ] - ], - "aia_ocsp_servers": [ - "list", - "string" - ], - "ca_options": [ - "list", - [ - "object", - { - "is_ca": "bool", - "max_issuer_path_length": "number", - "non_ca": "bool", - "zero_max_issuer_path_length": "bool" - } - ] - ], - "key_usage": [ - "list", - [ - "object", - { - "base_key_usage": [ - "list", - [ - "object", - { - "cert_sign": "bool", - "content_commitment": "bool", - "crl_sign": "bool", - "data_encipherment": "bool", - "decipher_only": "bool", - "digital_signature": "bool", - "encipher_only": "bool", - "key_agreement": "bool", - "key_encipherment": "bool" - } - ] - ], - "extended_key_usage": [ - "list", - [ - "object", - { - "client_auth": "bool", - "code_signing": "bool", - "email_protection": "bool", - "ocsp_signing": "bool", - "server_auth": "bool", - "time_stamping": "bool" - } - ] - ], - "unknown_extended_key_usages": [ - "list", - [ - "object", - { - "object_id_path": [ - "list", - "number" + "principals": [ + "set", + "string" ] } ] ] } ] - ], - "name_constraints": [ - "list", - [ - "object", - { - "critical": "bool", - "excluded_dns_names": [ - "list", - "string" 
- ], - "excluded_email_addresses": [ - "list", - "string" - ], - "excluded_ip_ranges": [ - "list", - "string" - ], - "excluded_uris": [ - "list", - "string" - ], - "permitted_dns_names": [ - "list", - "string" - ], - "permitted_email_addresses": [ - "list", - "string" - ], - "permitted_ip_ranges": [ - "list", - "string" - ], - "permitted_uris": [ - "list", - "string" - ] - } - ] - ], - "policy_ids": [ - "list", - [ - "object", - { - "object_id_path": [ - "list", - "number" - ] - } - ] ] } ] @@ -173498,155 +192996,97 @@ } ] ], - "description": "The config used to create a self-signed X.509 certificate or CSR.", + "description": "The approvals needed before access will be granted to a requester.\nNo approvals will be needed if this field is null. Different types of approval workflows that can be used to gate privileged access granting.", "description_kind": "plain", "computed": true }, "create_time": { "type": "string", - "description": "The time at which this CertificateAuthority was created.\n\nA timestamp in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine\nfractional digits. Examples: \"2014-10-02T15:01:23Z\" and \"2014-10-02T15:01:23.045123456Z\".", - "description_kind": "plain", - "computed": true - }, - "deletion_protection": { - "type": "bool", - "description": "Whether Terraform will be prevented from destroying the CertificateAuthority.\nWhen the field is set to true or unset in Terraform state, a 'terraform apply'\nor 'terraform destroy' that would delete the CertificateAuthority will fail.\nWhen the field is set to false, deleting the CertificateAuthority is allowed.", - "description_kind": "plain", - "computed": true - }, - "desired_state": { - "type": "string", - "description": "Desired state of the CertificateAuthority. 
Set this field to 'STAGED' to create a 'STAGED' root CA.", - "description_kind": "plain", - "computed": true - }, - "effective_labels": { - "type": [ - "map", - "string" - ], - "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", - "description_kind": "plain", - "computed": true - }, - "gcs_bucket": { - "type": "string", - "description": "The name of a Cloud Storage bucket where this CertificateAuthority will publish content,\nsuch as the CA certificate and CRLs. This must be a bucket name, without any prefixes\n(such as 'gs://') or suffixes (such as '.googleapis.com'). For example, to use a bucket named\nmy-bucket, you would simply specify 'my-bucket'. If not specified, a managed bucket will be\ncreated.", - "description_kind": "plain", - "computed": true - }, - "id": { - "type": "string", - "description_kind": "plain", - "optional": true, - "computed": true - }, - "ignore_active_certificates_on_deletion": { - "type": "bool", - "description": "This field allows the CA to be deleted even if the CA has active certs. Active certs include both unrevoked and unexpired certs.\nUse with care. Defaults to 'false'.", + "description": "Output only. Create time stamp. A timestamp in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits.\nExamples: \"2014-10-02T15:01:23Z\" and \"2014-10-02T15:01:23.045123456Z\"", "description_kind": "plain", "computed": true }, - "key_spec": { + "eligible_users": { "type": [ "list", [ "object", { - "algorithm": "string", - "cloud_kms_key_version": "string" + "principals": [ + "set", + "string" + ] } ] ], - "description": "Used when issuing certificates for this CertificateAuthority. If this CertificateAuthority\nis a self-signed CertificateAuthority, this key is also used to sign the self-signed CA\ncertificate. 
Otherwise, it is used to sign a CSR.", + "description": "Who can create Grants using Entitlement. This list should contain at most one entry", "description_kind": "plain", "computed": true }, - "labels": { - "type": [ - "map", - "string" - ], - "description": "Labels with user-defined metadata.\n\nAn object containing a list of \"key\": value pairs. Example: { \"name\": \"wrench\", \"mass\":\n\"1.3kg\", \"count\": \"3\" }.\n\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", - "description_kind": "plain", - "computed": true - }, - "lifetime": { - "type": "string", - "description": "The desired lifetime of the CA certificate. Used to create the \"notBeforeTime\" and\n\"notAfterTime\" fields inside an X.509 certificate. A duration in seconds with up to nine\nfractional digits, terminated by 's'. Example: \"3.5s\".", - "description_kind": "plain", - "computed": true - }, - "location": { + "entitlement_id": { "type": "string", - "description": "Location of the CertificateAuthority. A full list of valid locations can be found by\nrunning 'gcloud privateca locations list'.", + "description": "The ID to use for this Entitlement. This will become the last part of the resource name.\nThis value should be 4-63 characters, and valid characters are \"[a-z]\", \"[0-9]\", and \"-\". The first character should be from [a-z].\nThis value should be unique among all other Entitlements under the specified 'parent'.", "description_kind": "plain", "optional": true }, - "name": { - "type": "string", - "description": "The resource name for this CertificateAuthority in the format\nprojects/*/locations/*/certificateAuthorities/*.", - "description_kind": "plain", - "computed": true - }, - "pem_ca_certificate": { + "etag": { "type": "string", - "description": "The signed CA certificate issued from the subordinated CA's CSR. 
This is needed when activating the subordiante CA with a third party issuer.", - "description_kind": "plain", - "computed": true - }, - "pem_ca_certificates": { - "type": [ - "list", - "string" - ], - "description": "This CertificateAuthority's certificate chain, including the current\nCertificateAuthority's certificate. Ordered such that the root issuer is the final\nelement (consistent with RFC 5246). For a self-signed CA, this will only list the current\nCertificateAuthority's certificate.", + "description": "For Resource freshness validation (https://google.aip.dev/154)", "description_kind": "plain", "computed": true }, - "pem_csr": { + "id": { "type": "string", "description_kind": "plain", + "optional": true, "computed": true }, - "pool": { + "location": { "type": "string", - "description": "The name of the CaPool this Certificate Authority belongs to.", + "description": "The region of the Entitlement resource.", "description_kind": "plain", "optional": true }, - "project": { + "max_request_duration": { "type": "string", + "description": "The maximum amount of time for which access would be granted for a request.\nA requester can choose to ask for access for less than this duration but never more.\nFormat: calculate the time in seconds and concatenate it with 's' i.e. 2 hours = \"7200s\", 45 minutes = \"2700s\"", "description_kind": "plain", - "optional": true + "computed": true }, - "skip_grace_period": { - "type": "bool", - "description": "If this flag is set, the Certificate Authority will be deleted as soon as\npossible without a 30-day grace period where undeletion would have been\nallowed. If you proceed, there will be no way to recover this CA.\nUse with care. Defaults to 'false'.", + "name": { + "type": "string", + "description": "Output Only. 
The entitlement's name follows a hierarchical structure, comprising the organization, folder, or project, alongside the region and a unique entitlement ID.\nFormats: organizations/{organization-number}/locations/{region}/entitlements/{entitlement-id}, folders/{folder-number}/locations/{region}/entitlements/{entitlement-id}, and projects/{project-id|project-number}/locations/{region}/entitlements/{entitlement-id}.", "description_kind": "plain", "computed": true }, - "state": { + "parent": { "type": "string", - "description": "The State for this CertificateAuthority.", + "description": "Format: projects/{project-id|project-number} or organizations/{organization-number} or folders/{folder-number}", "description_kind": "plain", - "computed": true + "optional": true }, - "subordinate_config": { + "privileged_access": { "type": [ "list", [ "object", { - "certificate_authority": "string", - "pem_issuer_chain": [ + "gcp_iam_access": [ "list", [ "object", { - "pem_certificates": [ + "resource": "string", + "resource_type": "string", + "role_bindings": [ "list", - "string" + [ + "object", + { + "condition_expression": "string", + "role": "string" + } + ] ] } ] @@ -173654,70 +193094,47 @@ } ] ], - "description": "If this is a subordinate CertificateAuthority, this field will be set\nwith the subordinate configuration, which describes its issuers.", + "description": "Privileged access that this service can be used to gate.", "description_kind": "plain", "computed": true }, - "terraform_labels": { + "requester_justification_config": { "type": [ - "map", - "string" + "list", + [ + "object", + { + "not_mandatory": [ + "list", + [ + "object", + {} + ] + ], + "unstructured": [ + "list", + [ + "object", + {} + ] + ] + } + ] ], - "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", + "description": "Defines the ways in which a requester should provide the justification while requesting for access.", 
"description_kind": "plain", "computed": true }, - "type": { + "state": { "type": "string", - "description": "The Type of this CertificateAuthority.\n\n~> **Note:** For 'SUBORDINATE' Certificate Authorities, they need to\nbe activated before they can issue certificates. Default value: \"SELF_SIGNED\" Possible values: [\"SELF_SIGNED\", \"SUBORDINATE\"]", + "description": "Output only. The current state of the Entitlement.", "description_kind": "plain", "computed": true }, "update_time": { "type": "string", - "description": "The time at which this CertificateAuthority was updated.\n\nA timestamp in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine\nfractional digits. Examples: \"2014-10-02T15:01:23Z\" and \"2014-10-02T15:01:23.045123456Z\".", - "description_kind": "plain", - "computed": true - } - }, - "description_kind": "plain" - } - }, - "google_privateca_certificate_template_iam_policy": { - "version": 0, - "block": { - "attributes": { - "certificate_template": { - "type": "string", - "description_kind": "plain", - "required": true - }, - "etag": { - "type": "string", - "description_kind": "plain", - "computed": true - }, - "id": { - "type": "string", - "description_kind": "plain", - "optional": true, - "computed": true - }, - "location": { - "type": "string", - "description_kind": "plain", - "optional": true, - "computed": true - }, - "policy_data": { - "type": "string", - "description_kind": "plain", - "computed": true - }, - "project": { - "type": "string", + "description": "Output only. Update time stamp. 
A timestamp in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits.\nExamples: \"2014-10-02T15:01:23Z\" and \"2014-10-02T15:01:23.045123456Z\".", "description_kind": "plain", - "optional": true, "computed": true } }, @@ -173800,9 +193217,12 @@ "description_kind": "plain", "optional": true }, - "skip_delete": { - "type": "bool", - "description": "If true, the Terraform resource can be deleted without deleting the Project via the Google API.", + "tags": { + "type": [ + "map", + "string" + ], + "description": "A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored when empty. This field is only set at create time and modifying this field after creation will trigger recreation. To apply tags to an existing resource, see the google_tags_tag_value resource.", "description_kind": "plain", "computed": true }, @@ -174114,6 +193534,7 @@ [ "object", { + "use_topic_schema": "bool", "write_metadata": "bool" } ] @@ -174124,6 +193545,7 @@ "filename_suffix": "string", "max_bytes": "number", "max_duration": "string", + "max_messages": "number", "service_account_email": "string", "state": "string" } @@ -174206,7 +193628,7 @@ }, "message_retention_duration": { "type": "string", - "description": "How long to retain unacknowledged messages in the subscription's\nbacklog, from the moment a message is published. If\nretain_acked_messages is true, then this also configures the retention\nof acknowledged messages, and thus configures how far back in time a\nsubscriptions.seek can be done. Defaults to 7 days. Cannot be more\nthan 7 days ('\"604800s\"') or less than 10 minutes ('\"600s\"').\n\nA duration in seconds with up to nine fractional digits, terminated\nby 's'. 
Example: '\"600.5s\"'.", + "description": "How long to retain unacknowledged messages in the subscription's\nbacklog, from the moment a message is published. If\nretain_acked_messages is true, then this also configures the retention\nof acknowledged messages, and thus configures how far back in time a\nsubscriptions.seek can be done. Defaults to 7 days. Cannot be more\nthan 31 days ('\"2678400s\"') or less than 10 minutes ('\"600s\"').\n\nA duration in seconds with up to nine fractional digits, terminated\nby 's'. Example: '\"600.5s\"'.", "description_kind": "plain", "computed": true }, @@ -174369,6 +193791,49 @@ "stream_arn": "string" } ] + ], + "cloud_storage": [ + "list", + [ + "object", + { + "avro_format": [ + "list", + [ + "object", + {} + ] + ], + "bucket": "string", + "match_glob": "string", + "minimum_object_create_time": "string", + "pubsub_avro_format": [ + "list", + [ + "object", + {} + ] + ], + "text_format": [ + "list", + [ + "object", + { + "delimiter": "string" + } + ] + ] + } + ] + ], + "platform_logs_settings": [ + "list", + [ + "object", + { + "severity": "string" + } + ] ] } ] @@ -174872,6 +194337,442 @@ "description_kind": "plain" } }, + "google_secret_manager_regional_secret": { + "version": 0, + "block": { + "attributes": { + "annotations": { + "type": [ + "map", + "string" + ], + "description": "Custom metadata about the regional secret.\n\nAnnotations are distinct from various forms of labels. Annotations exist to allow\nclient tools to store their own state information without requiring a database.\n\nAnnotation keys must be between 1 and 63 characters long, have a UTF-8 encoding of\nmaximum 128 bytes, begin and end with an alphanumeric character ([a-z0-9A-Z]), and\nmay have dashes (-), underscores (_), dots (.), and alphanumerics in between these\nsymbols.\n\nThe total size of annotation keys and values must be less than 16KiB.\n\nAn object containing a list of \"key\": value pairs. 
Example:\n{ \"name\": \"wrench\", \"mass\": \"1.3kg\", \"count\": \"3\" }.\n\n\n**Note**: This field is non-authoritative, and will only manage the annotations present in your configuration.\nPlease refer to the field 'effective_annotations' for all of the annotations present on the resource.", + "description_kind": "plain", + "computed": true + }, + "create_time": { + "type": "string", + "description": "The time at which the regional secret was created.", + "description_kind": "plain", + "computed": true + }, + "customer_managed_encryption": { + "type": [ + "list", + [ + "object", + { + "kms_key_name": "string" + } + ] + ], + "description": "The customer-managed encryption configuration of the regional secret.", + "description_kind": "plain", + "computed": true + }, + "effective_annotations": { + "type": [ + "map", + "string" + ], + "description": "All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, + "effective_labels": { + "type": [ + "map", + "string" + ], + "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + "description_kind": "plain", + "computed": true + }, + "expire_time": { + "type": "string", + "description": "Timestamp in UTC when the regional secret is scheduled to expire. This is always provided on\noutput, regardless of what was sent on input. A timestamp in RFC3339 UTC \"Zulu\" format, with\nnanosecond resolution and up to nine fractional digits. Examples: \"2014-10-02T15:01:23Z\" and\n\"2014-10-02T15:01:23.045123456Z\". 
Only one of 'expire_time' or 'ttl' can be provided.", + "description_kind": "plain", + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "labels": { + "type": [ + "map", + "string" + ], + "description": "The labels assigned to this regional secret.\n\nLabel keys must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes,\nand must conform to the following PCRE regular expression: [\\p{Ll}\\p{Lo}][\\p{Ll}\\p{Lo}\\p{N}_-]{0,62}\n\nLabel values must be between 0 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes,\nand must conform to the following PCRE regular expression: [\\p{Ll}\\p{Lo}\\p{N}_-]{0,63}\n\nNo more than 64 labels can be assigned to a given resource.\n\nAn object containing a list of \"key\": value pairs. Example:\n{ \"name\": \"wrench\", \"mass\": \"1.3kg\", \"count\": \"3\" }.\n\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.", + "description_kind": "plain", + "computed": true + }, + "location": { + "type": "string", + "description": "The location of the regional secret. eg us-central1", + "description_kind": "plain", + "required": true + }, + "name": { + "type": "string", + "description": "The resource name of the regional secret. Format:\n'projects/{{project}}/locations/{{location}}/secrets/{{secret_id}}'", + "description_kind": "plain", + "computed": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "rotation": { + "type": [ + "list", + [ + "object", + { + "next_rotation_time": "string", + "rotation_period": "string" + } + ] + ], + "description": "The rotation time and period for a regional secret. At 'next_rotation_time', Secret Manager\nwill send a Pub/Sub notification to the topics configured on the Secret. 
'topics' must be\nset to configure rotation.", + "description_kind": "plain", + "computed": true + }, + "secret_id": { + "type": "string", + "description": "This must be unique within the project.", + "description_kind": "plain", + "required": true + }, + "terraform_labels": { + "type": [ + "map", + "string" + ], + "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.", + "description_kind": "plain", + "computed": true + }, + "topics": { + "type": [ + "list", + [ + "object", + { + "name": "string" + } + ] + ], + "description": "A list of up to 10 Pub/Sub topics to which messages are published when control plane\noperations are called on the regional secret or its versions.", + "description_kind": "plain", + "computed": true + }, + "ttl": { + "type": "string", + "description": "The TTL for the regional secret. A duration in seconds with up to nine fractional digits,\nterminated by 's'. Example: \"3.5s\". Only one of 'ttl' or 'expire_time' can be provided.", + "description_kind": "plain", + "computed": true + }, + "version_aliases": { + "type": [ + "map", + "string" + ], + "description": "Mapping from version alias to version name.\n\nA version alias is a string with a maximum length of 63 characters and can contain\nuppercase and lowercase letters, numerals, and the hyphen (-) and underscore ('_')\ncharacters. An alias string must start with a letter and cannot be the string\n'latest' or 'NEW'. No more than 50 aliases can be assigned to a given secret.\n\nAn object containing a list of \"key\": value pairs. 
Example:\n{ \"name\": \"wrench\", \"mass\": \"1.3kg\", \"count\": \"3\" }.", + "description_kind": "plain", + "computed": true + }, + "version_destroy_ttl": { + "type": "string", + "description": "Secret Version TTL after destruction request.\nThis is a part of the delayed delete feature on Secret Version.\nFor secret with versionDestroyTtl>0, version destruction doesn't happen immediately\non calling destroy instead the version goes to a disabled state and\nthe actual destruction happens after this TTL expires. It must be atleast 24h.", + "description_kind": "plain", + "computed": true + } + }, + "description_kind": "plain" + } + }, + "google_secret_manager_regional_secret_iam_policy": { + "version": 0, + "block": { + "attributes": { + "etag": { + "type": "string", + "description_kind": "plain", + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "location": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "policy_data": { + "type": "string", + "description_kind": "plain", + "computed": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "secret_id": { + "type": "string", + "description_kind": "plain", + "required": true + } + }, + "description_kind": "plain" + } + }, + "google_secret_manager_regional_secret_version": { + "version": 0, + "block": { + "attributes": { + "create_time": { + "type": "string", + "description_kind": "plain", + "computed": true + }, + "customer_managed_encryption": { + "type": [ + "list", + [ + "object", + { + "kms_key_version_name": "string" + } + ] + ], + "description_kind": "plain", + "computed": true + }, + "destroy_time": { + "type": "string", + "description_kind": "plain", + "computed": true + }, + "enabled": { + "type": "bool", + "description_kind": "plain", + "computed": true + }, + "id": { + "type": "string", + "description_kind": 
"plain", + "optional": true, + "computed": true + }, + "is_secret_data_base64": { + "type": "bool", + "description_kind": "plain", + "optional": true + }, + "location": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "name": { + "type": "string", + "description_kind": "plain", + "computed": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "secret": { + "type": "string", + "description_kind": "plain", + "required": true + }, + "secret_data": { + "type": "string", + "description_kind": "plain", + "computed": true, + "sensitive": true + }, + "version": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "description_kind": "plain" + } + }, + "google_secret_manager_regional_secret_version_access": { + "version": 0, + "block": { + "attributes": { + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "is_secret_data_base64": { + "type": "bool", + "description_kind": "plain", + "optional": true + }, + "location": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "name": { + "type": "string", + "description_kind": "plain", + "computed": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "secret": { + "type": "string", + "description_kind": "plain", + "required": true + }, + "secret_data": { + "type": "string", + "description_kind": "plain", + "computed": true, + "sensitive": true + }, + "version": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + } + }, + "description_kind": "plain" + } + }, + "google_secret_manager_regional_secrets": { + "version": 0, + "block": { + "attributes": { + "filter": { + "type": "string", + "description": "Filter string, adhering to the rules in List-operation 
filtering (https://cloud.google.com/secret-manager/docs/filtering).\nList only secrets matching the filter. If filter is empty, all regional secrets are listed from the specified location.", + "description_kind": "plain", + "optional": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "location": { + "type": "string", + "description_kind": "plain", + "required": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "secrets": { + "type": [ + "list", + [ + "object", + { + "annotations": [ + "map", + "string" + ], + "create_time": "string", + "customer_managed_encryption": [ + "list", + [ + "object", + { + "kms_key_name": "string" + } + ] + ], + "effective_annotations": [ + "map", + "string" + ], + "effective_labels": [ + "map", + "string" + ], + "expire_time": "string", + "labels": [ + "map", + "string" + ], + "location": "string", + "name": "string", + "project": "string", + "rotation": [ + "list", + [ + "object", + { + "next_rotation_time": "string", + "rotation_period": "string" + } + ] + ], + "secret_id": "string", + "terraform_labels": [ + "map", + "string" + ], + "topics": [ + "list", + [ + "object", + { + "name": "string" + } + ] + ], + "ttl": "string", + "version_aliases": [ + "map", + "string" + ], + "version_destroy_ttl": "string" + } + ] + ], + "description_kind": "plain", + "computed": true + } + }, + "description_kind": "plain" + } + }, "google_secret_manager_secret": { "version": 0, "block": { @@ -175126,6 +195027,11 @@ "optional": true, "computed": true }, + "is_secret_data_base64": { + "type": "bool", + "description_kind": "plain", + "optional": true + }, "name": { "type": "string", "description_kind": "plain", @@ -175168,6 +195074,11 @@ "optional": true, "computed": true }, + "is_secret_data_base64": { + "type": "bool", + "description_kind": "plain", + "optional": true + }, "name": { "type": "string", 
"description_kind": "plain", @@ -175430,6 +195341,11 @@ "description_kind": "plain", "required": true }, + "disabled": { + "type": "bool", + "description_kind": "plain", + "computed": true + }, "display_name": { "type": "string", "description_kind": "plain", @@ -175672,6 +195588,44 @@ "description_kind": "plain" } }, + "google_service_accounts": { + "version": 0, + "block": { + "attributes": { + "accounts": { + "type": [ + "list", + [ + "object", + { + "account_id": "string", + "disabled": "bool", + "display_name": "string", + "email": "string", + "member": "string", + "name": "string", + "unique_id": "string" + } + ] + ], + "description_kind": "plain", + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + }, "google_service_networking_peered_dns_domain": { "version": 0, "block": { @@ -175773,6 +195727,12 @@ "version": 0, "block": { "attributes": { + "create_ignore_already_exists": { + "type": "bool", + "description": "If set to true, skip repository creation if a repository with the same name already exists.", + "description_kind": "plain", + "computed": true + }, "id": { "type": "string", "description_kind": "plain", @@ -175857,6 +195817,94 @@ "description_kind": "plain" } }, + "google_spanner_database": { + "version": 0, + "block": { + "attributes": { + "database_dialect": { + "type": "string", + "description": "The dialect of the Cloud Spanner Database.\nIf it is not provided, \"GOOGLE_STANDARD_SQL\" will be used. Possible values: [\"GOOGLE_STANDARD_SQL\", \"POSTGRESQL\"]", + "description_kind": "plain", + "computed": true + }, + "ddl": { + "type": [ + "list", + "string" + ], + "description": "An optional list of DDL statements to run inside the newly created\ndatabase. Statements can create tables, indexes, etc. 
These statements\nexecute atomically with the creation of the database: if there is an\nerror in any statement, the database is not created.", + "description_kind": "plain", + "computed": true + }, + "deletion_protection": { + "type": "bool", + "description": "Whether Terraform will be prevented from destroying the database. Defaults to true.\nWhen a'terraform destroy' or 'terraform apply' would delete the database,\nthe command will fail if this field is not set to false in Terraform state.\nWhen the field is set to true or unset in Terraform state, a 'terraform apply'\nor 'terraform destroy' that would delete the database will fail.\nWhen the field is set to false, deleting the database is allowed.", + "description_kind": "plain", + "computed": true + }, + "enable_drop_protection": { + "type": "bool", + "description": "Whether drop protection is enabled for this database. Defaults to false.\nDrop protection is different from\nthe \"deletion_protection\" attribute in the following ways:\n(1) \"deletion_protection\" only protects the database from deletions in Terraform.\nwhereas setting “enableDropProtection” to true protects the database from deletions in all interfaces.\n(2) Setting \"enableDropProtection\" to true also prevents the deletion of the parent instance containing the database.\n\"deletion_protection\" attribute does not provide protection against the deletion of the parent instance.", + "description_kind": "plain", + "computed": true + }, + "encryption_config": { + "type": [ + "list", + [ + "object", + { + "kms_key_name": "string", + "kms_key_names": [ + "list", + "string" + ] + } + ] + ], + "description": "Encryption configuration for the database", + "description_kind": "plain", + "computed": true + }, + "id": { + "type": "string", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "instance": { + "type": "string", + "description": "The instance to create the database on.", + "description_kind": "plain", + "required": true 
+ }, + "name": { + "type": "string", + "description": "A unique identifier for the database, which cannot be changed after the\ninstance is created. Values are of the form '[a-z][-_a-z0-9]*[a-z0-9]'.", + "description_kind": "plain", + "required": true + }, + "project": { + "type": "string", + "description_kind": "plain", + "optional": true + }, + "state": { + "type": "string", + "description": "An explanation of the status of the database.", + "description_kind": "plain", + "computed": true + }, + "version_retention_period": { + "type": "string", + "description": "The retention period for the database. The retention period must be between 1 hour\nand 7 days, and can be specified in days, hours, minutes, or seconds. For example,\nthe values 1d, 24h, 1440m, and 86400s are equivalent. Default value is 1h.\nIf this property is used, you must avoid adding new DDL statements to 'ddl' that\nupdate the database's version_retention_period.", + "description_kind": "plain", + "computed": true + } + }, + "description_kind": "plain" + } + }, "google_spanner_database_iam_policy": { "version": 0, "block": { @@ -175907,6 +195955,41 @@ [ "object", { + "asymmetric_autoscaling_options": [ + "list", + [ + "object", + { + "overrides": [ + "list", + [ + "object", + { + "autoscaling_limits": [ + "list", + [ + "object", + { + "max_nodes": "number", + "min_nodes": "number" + } + ] + ] + } + ] + ], + "replica_selection": [ + "list", + [ + "object", + { + "location": "string" + } + ] + ] + } + ] + ], "autoscaling_limits": [ "list", [ @@ -175954,6 +196037,12 @@ "description_kind": "plain", "optional": true }, + "edition": { + "type": "string", + "description": "The edition selected for this instance. Different editions provide different capabilities at different price points. 
Possible values: [\"EDITION_UNSPECIFIED\", \"STANDARD\", \"ENTERPRISE\", \"ENTERPRISE_PLUS\"]", + "description_kind": "plain", + "computed": true + }, "effective_labels": { "type": [ "map", @@ -176262,7 +196351,7 @@ }, "database_version": { "type": "string", - "description": "The MySQL, PostgreSQL or SQL Server (beta) version to use. Supported values include MYSQL_5_6, MYSQL_5_7, MYSQL_8_0, POSTGRES_9_6, POSTGRES_10, POSTGRES_11, POSTGRES_12, POSTGRES_13, POSTGRES_14, POSTGRES_15, SQLSERVER_2017_STANDARD, SQLSERVER_2017_ENTERPRISE, SQLSERVER_2017_EXPRESS, SQLSERVER_2017_WEB. Database Version Policies includes an up-to-date reference of supported versions.", + "description": "The MySQL, PostgreSQL or SQL Server (beta) version to use. Supported values include MYSQL_5_6, MYSQL_5_7, MYSQL_8_0, MYSQL_8_4, POSTGRES_9_6, POSTGRES_10, POSTGRES_11, POSTGRES_12, POSTGRES_13, POSTGRES_14, POSTGRES_15, POSTGRES_16, POSTGRES_17, SQLSERVER_2017_STANDARD, SQLSERVER_2017_ENTERPRISE, SQLSERVER_2017_EXPRESS, SQLSERVER_2017_WEB. 
Database Version Policies includes an up-to-date reference of supported versions.", "description_kind": "plain", "computed": true }, @@ -176371,6 +196460,7 @@ "object", { "ca_certificate": "string", + "cascadable_replica": "bool", "client_certificate": "string", "client_key": "string", "connect_retry_interval": "number", @@ -176388,6 +196478,15 @@ "description_kind": "plain", "computed": true }, + "replica_names": { + "type": [ + "list", + "string" + ], + "description": "The replicas of the instance.", + "description_kind": "plain", + "computed": true + }, "restore_backup_context": { "type": [ "list", @@ -176570,12 +196669,22 @@ "set", "string" ], + "psc_auto_connections": [ + "list", + [ + "object", + { + "consumer_network": "string", + "consumer_service_project_id": "string" + } + ] + ], "psc_enabled": "bool" } ] ], - "require_ssl": "bool", "server_ca_mode": "string", + "server_ca_pool": "string", "ssl_mode": "string" } ] @@ -176751,6 +196860,7 @@ "object", { "ca_certificate": "string", + "cascadable_replica": "bool", "client_certificate": "string", "client_key": "string", "connect_retry_interval": "number", @@ -176764,6 +196874,10 @@ } ] ], + "replica_names": [ + "list", + "string" + ], "restore_backup_context": [ "list", [ @@ -176922,12 +197036,22 @@ "set", "string" ], + "psc_auto_connections": [ + "list", + [ + "object", + { + "consumer_network": "string", + "consumer_service_project_id": "string" + } + ] + ], "psc_enabled": "bool" } ] ], - "require_ssl": "bool", "server_ca_mode": "string", + "server_ca_pool": "string", "ssl_mode": "string" } ] @@ -177116,7 +197240,7 @@ } }, "google_storage_bucket": { - "version": 2, + "version": 3, "block": { "attributes": { "autoclass": { @@ -177218,6 +197342,20 @@ "description_kind": "plain", "computed": true }, + "hierarchical_namespace": { + "type": [ + "list", + [ + "object", + { + "enabled": "bool" + } + ] + ], + "description": "The bucket's HNS configuration, which defines bucket can organize folders in logical file 
system structure.", + "description_kind": "plain", + "computed": true + }, "id": { "type": "string", "description_kind": "plain", @@ -177271,7 +197409,6 @@ "list", "string" ], - "no_age": "bool", "noncurrent_time_before": "string", "num_newer_versions": "number", "send_age_if_zero": "bool", @@ -178528,6 +198665,63 @@ "version": 0, "block": { "attributes": { + "autoscaling_settings": { + "type": [ + "list", + [ + "object", + { + "autoscaling_policies": [ + "set", + [ + "object", + { + "autoscale_policy_id": "string", + "consumed_memory_thresholds": [ + "list", + [ + "object", + { + "scale_in": "number", + "scale_out": "number" + } + ] + ], + "cpu_thresholds": [ + "list", + [ + "object", + { + "scale_in": "number", + "scale_out": "number" + } + ] + ], + "node_type_id": "string", + "scale_out_size": "number", + "storage_thresholds": [ + "list", + [ + "object", + { + "scale_in": "number", + "scale_out": "number" + } + ] + ] + } + ] + ], + "cool_down_period": "string", + "max_cluster_node_count": "number", + "min_cluster_node_count": "number" + } + ] + ], + "description": "Configuration of the autoscaling applied to this cluster", + "description_kind": "plain", + "computed": true + }, "id": { "type": "string", "description_kind": "plain", @@ -179139,6 +199333,58 @@ [ "object", { + "autoscaling_settings": [ + "list", + [ + "object", + { + "autoscaling_policies": [ + "set", + [ + "object", + { + "autoscale_policy_id": "string", + "consumed_memory_thresholds": [ + "list", + [ + "object", + { + "scale_in": "number", + "scale_out": "number" + } + ] + ], + "cpu_thresholds": [ + "list", + [ + "object", + { + "scale_in": "number", + "scale_out": "number" + } + ] + ], + "node_type_id": "string", + "scale_out_size": "number", + "storage_thresholds": [ + "list", + [ + "object", + { + "scale_in": "number", + "scale_out": "number" + } + ] + ] + } + ] + ], + "cool_down_period": "string", + "max_cluster_node_count": "number", + "min_cluster_node_count": "number" + } + ] + ], 
"cluster_id": "string", "node_type_configs": [ "set", @@ -179431,7 +199677,7 @@ }, "max_throughput": { "type": "number", - "description": "Maximum throughput of the connector in Mbps, must be greater than 'min_throughput'. Default is 300. Refers to the expected throughput\nwhen using an e2-micro machine type. Value must be a multiple of 100 from 300 through 1000. Must be higher than the value specified by\nmin_throughput. If both max_throughput and max_instances are provided, max_instances takes precedence over max_throughput. The use of\nmax_throughput is discouraged in favor of max_instances.", + "description": "Maximum throughput of the connector in Mbps, must be greater than 'min_throughput'. Default is 300. Refers to the expected throughput\nwhen using an e2-micro machine type. Value must be a multiple of 100 from 300 through 1000. Must be higher than the value specified by\nmin_throughput. Only one of 'max_throughput' and 'max_instances' can be specified. The use of max_throughput is discouraged in favor of max_instances.", "description_kind": "plain", "computed": true }, @@ -179443,7 +199689,7 @@ }, "min_throughput": { "type": "number", - "description": "Minimum throughput of the connector in Mbps. Default and min is 200. Refers to the expected throughput when using an e2-micro machine type.\nValue must be a multiple of 100 from 200 through 900. Must be lower than the value specified by max_throughput. If both min_throughput and\nmin_instances are provided, min_instances takes precedence over min_throughput. The use of min_throughput is discouraged in favor of min_instances.", + "description": "Minimum throughput of the connector in Mbps. Default and min is 200. Refers to the expected throughput when using an e2-micro machine type.\nValue must be a multiple of 100 from 200 through 900. Must be lower than the value specified by max_throughput.\nOnly one of 'min_throughput' and 'min_instances' can be specified. 
The use of min_throughput is discouraged in favor of min_instances.", "description_kind": "plain", "computed": true }, diff --git a/examples-generated/apigee/v1beta1/envgroupattachment.yaml b/examples-generated/apigee/v1beta1/envgroupattachment.yaml index 128a80964..19e3e62be 100644 --- a/examples-generated/apigee/v1beta1/envgroupattachment.yaml +++ b/examples-generated/apigee/v1beta1/envgroupattachment.yaml @@ -116,6 +116,7 @@ metadata: spec: forProvider: billingAccount: "" + deletionPolicy: DELETE name: my-project orgId: "" projectId: my-project diff --git a/examples-generated/apigee/v1beta1/instanceattachment.yaml b/examples-generated/apigee/v1beta1/instanceattachment.yaml index 2a2e3cfb3..8512918a3 100644 --- a/examples-generated/apigee/v1beta1/instanceattachment.yaml +++ b/examples-generated/apigee/v1beta1/instanceattachment.yaml @@ -115,6 +115,7 @@ metadata: spec: forProvider: billingAccount: "" + deletionPolicy: DELETE name: my-project orgId: "" projectId: my-project diff --git a/examples-generated/apigee/v1beta1/syncauthorization.yaml b/examples-generated/apigee/v1beta1/syncauthorization.yaml index b8506a8f4..2b4791a6c 100644 --- a/examples-generated/apigee/v1beta1/syncauthorization.yaml +++ b/examples-generated/apigee/v1beta1/syncauthorization.yaml @@ -43,6 +43,7 @@ metadata: spec: forProvider: billingAccount: 000000-0000000-0000000-000000 + deletionPolicy: DELETE name: my-project orgId: "123456789" projectId: my-project diff --git a/examples-generated/appengine/v1beta1/firewallrule.yaml b/examples-generated/appengine/v1beta1/firewallrule.yaml index 4dd6bd337..a56fa2eac 100644 --- a/examples-generated/appengine/v1beta1/firewallrule.yaml +++ b/examples-generated/appengine/v1beta1/firewallrule.yaml @@ -45,6 +45,7 @@ metadata: spec: forProvider: billingAccount: 000000-0000000-0000000-000000 + deletionPolicy: DELETE name: tf-test-project orgId: "123456789" projectId: ae-project diff --git a/examples-generated/bigquery/v1beta2/job.yaml 
b/examples-generated/bigquery/v1beta2/job.yaml index ad0ddfeeb..10d43d640 100644 --- a/examples-generated/bigquery/v1beta2/job.yaml +++ b/examples-generated/bigquery/v1beta2/job.yaml @@ -57,4 +57,3 @@ spec: datasetIdSelector: matchLabels: testing.upbound.io/example-name: example - deletionProtection: false diff --git a/examples-generated/bigtable/v1beta1/table.yaml b/examples-generated/bigtable/v1beta1/table.yaml index f3ea9bc41..e07d83aa5 100644 --- a/examples-generated/bigtable/v1beta1/table.yaml +++ b/examples-generated/bigtable/v1beta1/table.yaml @@ -15,6 +15,11 @@ spec: columnFamily: - family: family-first - family: family-second + type: intsum + - family: family-third + type: " {\n\t\t\t\t\t\"aggregateType\": {\n\t\t\t\t\t\t\"max\": {},\n\t\t\t\t\t\t\"inputType\": + {\n\t\t\t\t\t\t\t\"int64Type\": {\n\t\t\t\t\t\t\t\t\"encoding\": {\n\t\t\t\t\t\t\t\t\t\"bigEndianBytes\": + {}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n" instanceNameSelector: matchLabels: testing.upbound.io/example-name: instance diff --git a/examples-generated/bigtable/v1beta2/appprofile.yaml b/examples-generated/bigtable/v1beta2/appprofile.yaml index 8e41f6fd7..d23aa355a 100644 --- a/examples-generated/bigtable/v1beta2/appprofile.yaml +++ b/examples-generated/bigtable/v1beta2/appprofile.yaml @@ -39,4 +39,3 @@ spec: numNodes: 3 storageType: HDD zone: us-central1-c - deletionProtection: "true" diff --git a/examples-generated/compute/v1beta1/firewallpolicyassociation.yaml b/examples-generated/compute/v1beta1/firewallpolicyassociation.yaml index f8710e2c1..a34755841 100644 --- a/examples-generated/compute/v1beta1/firewallpolicyassociation.yaml +++ b/examples-generated/compute/v1beta1/firewallpolicyassociation.yaml @@ -13,7 +13,7 @@ spec: testing.upbound.io/example-name: folder firewallPolicySelector: matchLabels: - testing.upbound.io/example-name: default + testing.upbound.io/example-name: policy name: my-association --- @@ -24,10 +24,27 @@ metadata: annotations: 
meta.upbound.io/example-id: compute/v1beta1/firewallpolicyassociation labels: - testing.upbound.io/example-name: default - name: default + testing.upbound.io/example-name: policy + name: policy spec: forProvider: description: Example Resource - parent: organizations/12345 + parent: organizations/123456789 shortName: my-policy + +--- + +apiVersion: cloudplatform.gcp.upbound.io/v1beta1 +kind: Folder +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta1/firewallpolicyassociation + labels: + testing.upbound.io/example-name: folder + name: folder +spec: + forProvider: + displayName: my-folder + parentSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/compute/v1beta1/instancegroupnamedport.yaml b/examples-generated/compute/v1beta1/instancegroupnamedport.yaml index 9cc90970a..052e16d70 100644 --- a/examples-generated/compute/v1beta1/instancegroupnamedport.yaml +++ b/examples-generated/compute/v1beta1/instancegroupnamedport.yaml @@ -57,7 +57,6 @@ metadata: name: my-cluster spec: forProvider: - deletionProtection: "true" initialNodeCount: 1 ipAllocationPolicy: - clusterIpv4CidrBlock: /19 diff --git a/examples-generated/compute/v1beta1/networkfirewallpolicyassociation.yaml b/examples-generated/compute/v1beta1/networkfirewallpolicyassociation.yaml index 5482185fb..9eb07a00c 100644 --- a/examples-generated/compute/v1beta1/networkfirewallpolicyassociation.yaml +++ b/examples-generated/compute/v1beta1/networkfirewallpolicyassociation.yaml @@ -4,8 +4,8 @@ metadata: annotations: meta.upbound.io/example-id: compute/v1beta1/networkfirewallpolicyassociation labels: - testing.upbound.io/example-name: primary - name: primary + testing.upbound.io/example-name: default + name: default spec: forProvider: attachmentTargetSelector: @@ -13,7 +13,7 @@ spec: testing.upbound.io/example-name: network firewallPolicySelector: matchLabels: - testing.upbound.io/example-name: network_firewall_policy + testing.upbound.io/example-name: 
policy project: my-project-name --- @@ -27,7 +27,8 @@ metadata: testing.upbound.io/example-name: network name: network spec: - forProvider: {} + forProvider: + autoCreateSubnetworks: false --- @@ -37,8 +38,8 @@ metadata: annotations: meta.upbound.io/example-id: compute/v1beta1/networkfirewallpolicyassociation labels: - testing.upbound.io/example-name: network_firewall_policy - name: network-firewall-policy + testing.upbound.io/example-name: policy + name: policy spec: forProvider: description: Sample global network firewall policy diff --git a/examples-generated/compute/v1beta1/regionnetworkfirewallpolicyassociation.yaml b/examples-generated/compute/v1beta1/regionnetworkfirewallpolicyassociation.yaml index cb4f604d5..cf23edfca 100644 --- a/examples-generated/compute/v1beta1/regionnetworkfirewallpolicyassociation.yaml +++ b/examples-generated/compute/v1beta1/regionnetworkfirewallpolicyassociation.yaml @@ -4,16 +4,16 @@ metadata: annotations: meta.upbound.io/example-id: compute/v1beta1/regionnetworkfirewallpolicyassociation labels: - testing.upbound.io/example-name: primary - name: primary + testing.upbound.io/example-name: default + name: default spec: forProvider: attachmentTargetSelector: matchLabels: - testing.upbound.io/example-name: basic_network + testing.upbound.io/example-name: network firewallPolicySelector: matchLabels: - testing.upbound.io/example-name: basic_regional_network_firewall_policy + testing.upbound.io/example-name: policy project: my-project-name region: us-west1 @@ -25,10 +25,11 @@ metadata: annotations: meta.upbound.io/example-id: compute/v1beta1/regionnetworkfirewallpolicyassociation labels: - testing.upbound.io/example-name: basic_network - name: basic-network + testing.upbound.io/example-name: network + name: network spec: - forProvider: {} + forProvider: + autoCreateSubnetworks: false --- @@ -38,8 +39,8 @@ metadata: annotations: meta.upbound.io/example-id: compute/v1beta1/regionnetworkfirewallpolicyassociation labels: - 
testing.upbound.io/example-name: basic_regional_network_firewall_policy - name: basic-regional-network-firewall-policy + testing.upbound.io/example-name: policy + name: policy spec: forProvider: description: Sample global network firewall policy diff --git a/examples-generated/compute/v1beta2/firewallpolicyrule.yaml b/examples-generated/compute/v1beta2/firewallpolicyrule.yaml index 12e50c691..d0a516d15 100644 --- a/examples-generated/compute/v1beta2/firewallpolicyrule.yaml +++ b/examples-generated/compute/v1beta2/firewallpolicyrule.yaml @@ -4,8 +4,8 @@ metadata: annotations: meta.upbound.io/example-id: compute/v1beta2/firewallpolicyrule labels: - testing.upbound.io/example-name: primary - name: primary + testing.upbound.io/example-name: policy_rule + name: policy-rule spec: forProvider: action: allow @@ -66,7 +66,7 @@ metadata: name: folder spec: forProvider: - displayName: policy + displayName: folder parentSelector: matchLabels: testing.upbound.io/example-name: example diff --git a/examples-generated/compute/v1beta2/image.yaml b/examples-generated/compute/v1beta2/image.yaml index cbc3c2a26..6cee6ba2c 100644 --- a/examples-generated/compute/v1beta2/image.yaml +++ b/examples-generated/compute/v1beta2/image.yaml @@ -8,5 +8,23 @@ metadata: name: example spec: forProvider: - rawDisk: - - source: https://storage.googleapis.com/bosh-gce-raw-stemcells/bosh-stemcell-97.98-google-kvm-ubuntu-xenial-go_agent-raw-1557960142.tar.gz + sourceDiskSelector: + matchLabels: + testing.upbound.io/example-name: persistent + +--- + +apiVersion: compute.gcp.upbound.io/v1beta2 +kind: Disk +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/image + labels: + testing.upbound.io/example-name: persistent + name: persistent +spec: + forProvider: + image: ${data.google_compute_image.debian.self_link} + size: 10 + type: pd-ssd + zone: us-central1-a diff --git a/examples-generated/containeraws/v1beta2/nodepool.yaml b/examples-generated/containeraws/v1beta2/nodepool.yaml index 
569d7ba3e..d7a0ce0e8 100644 --- a/examples-generated/containeraws/v1beta2/nodepool.yaml +++ b/examples-generated/containeraws/v1beta2/nodepool.yaml @@ -41,6 +41,11 @@ spec: - effect: PREFER_NO_SCHEDULE key: taint-key value: taint-value + kubeletConfig: + - cpuCfsQuota: true + cpuCfsQuotaPeriod: 100ms + cpuManagerPolicy: none + podPidsLimit: 1024 location: us-west1 management: - autoRepair: true diff --git a/examples-generated/datastore/v1beta1/index.yaml b/examples-generated/datastore/v1beta1/index.yaml deleted file mode 100644 index a5cc848cd..000000000 --- a/examples-generated/datastore/v1beta1/index.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: datastore.gcp.upbound.io/v1beta1 -kind: Index -metadata: - annotations: - meta.upbound.io/example-id: datastore/v1beta1/index - labels: - testing.upbound.io/example-name: default - name: default -spec: - forProvider: - kind: foo - properties: - - direction: ASCENDING - name: property_a - - direction: ASCENDING - name: property_b diff --git a/examples-generated/dialogflowcx/v1beta2/agent.yaml b/examples-generated/dialogflowcx/v1beta2/agent.yaml index fb82ef14f..78f579c87 100644 --- a/examples-generated/dialogflowcx/v1beta2/agent.yaml +++ b/examples-generated/dialogflowcx/v1beta2/agent.yaml @@ -15,6 +15,18 @@ spec: - enabled: true finishDigit: '#' maxDigits: 1 + loggingSettings: + - enableConsentBasedRedaction: true + enableInteractionLogging: true + enableStackdriverLogging: true + speechSettings: + - endpointerSensitivity: 30 + models: + count: "3" + mass: 1.3kg + name: wrench + noSpeechTimeout: 3.500s + useTimeoutBasedEndpointing: true avatarUri: https://cloud.google.com/_static/images/cloud/icons/favicons/onecloud/super_cloud.png defaultLanguageCode: en description: Example description. 
diff --git a/examples-generated/dns/v1beta1/responsepolicy.yaml b/examples-generated/dns/v1beta1/responsepolicy.yaml index b9b8dccbf..24030fdb5 100644 --- a/examples-generated/dns/v1beta1/responsepolicy.yaml +++ b/examples-generated/dns/v1beta1/responsepolicy.yaml @@ -86,7 +86,6 @@ spec: forProvider: defaultSnatStatus: - disabled: true - deletionProtection: "true" initialNodeCount: 1 ipAllocationPolicy: - clusterSecondaryRangeName: pod diff --git a/examples-generated/gke/v1beta2/backupbackupplan.yaml b/examples-generated/gke/v1beta2/backupbackupplan.yaml index c90932afd..e279acfaf 100644 --- a/examples-generated/gke/v1beta2/backupbackupplan.yaml +++ b/examples-generated/gke/v1beta2/backupbackupplan.yaml @@ -32,7 +32,6 @@ spec: addonsConfig: - gkeBackupAgentConfig: - enabled: true - deletionProtection: "true" initialNodeCount: 1 location: us-central1 networkSelector: diff --git a/examples-generated/gkehub/v1beta2/membership.yaml b/examples-generated/gkehub/v1beta2/membership.yaml index f248ee95a..430551ae6 100644 --- a/examples-generated/gkehub/v1beta2/membership.yaml +++ b/examples-generated/gkehub/v1beta2/membership.yaml @@ -27,7 +27,6 @@ metadata: name: primary spec: forProvider: - deletionProtection: false initialNodeCount: 1 location: us-central1-a networkSelector: diff --git a/examples-generated/identityplatform/v1beta1/config.yaml b/examples-generated/identityplatform/v1beta1/config.yaml new file mode 100644 index 000000000..94e143108 --- /dev/null +++ b/examples-generated/identityplatform/v1beta1/config.yaml @@ -0,0 +1,84 @@ +apiVersion: identityplatform.gcp.upbound.io/v1beta1 +kind: Config +metadata: + annotations: + meta.upbound.io/example-id: identityplatform/v1beta1/config + labels: + testing.upbound.io/example-name: default + name: default +spec: + forProvider: + authorizedDomains: + - localhost + - my-project.firebaseapp.com + - my-project.web.app + autodeleteAnonymousUsers: true + blockingFunctions: + - forwardInboundCredentials: + - accessToken: true 
+ idToken: true + refreshToken: true + triggers: + - eventType: beforeSignIn + functionUri: https://us-east1-my-project.cloudfunctions.net/before-sign-in + projectSelector: + matchLabels: + testing.upbound.io/example-name: default + quota: + - signUpQuotaConfig: + - quota: 1000 + quotaDuration: 7200s + startTime: "2014-10-02T15:01:23Z" + signIn: + - allowDuplicateEmails: true + anonymous: + - enabled: true + email: + - enabled: true + passwordRequired: false + phoneNumber: + - enabled: true + testPhoneNumbers: + "+11231231234": "000000" + smsRegionConfig: + - allowlistOnly: + - allowedRegions: + - US + - CA + +--- + +apiVersion: cloudplatform.gcp.upbound.io/v1beta1 +kind: Project +metadata: + annotations: + meta.upbound.io/example-id: identityplatform/v1beta1/config + labels: + testing.upbound.io/example-name: default + name: default +spec: + forProvider: + billingAccount: 000000-0000000-0000000-000000 + deletionPolicy: DELETE + labels: + firebase: enabled + name: my-project + orgId: "123456789" + projectId: my-project + +--- + +apiVersion: cloudplatform.gcp.upbound.io/v1beta1 +kind: ProjectService +metadata: + annotations: + meta.upbound.io/example-id: identityplatform/v1beta1/config + labels: + testing.upbound.io/example-name: identitytoolkit + name: identitytoolkit +spec: + forProvider: + projectSelector: + matchLabels: + testing.upbound.io/example-name: default + service: identitytoolkit.googleapis.com diff --git a/examples-generated/identityplatform/v1beta2/projectdefaultconfig.yaml b/examples-generated/identityplatform/v1beta2/projectdefaultconfig.yaml deleted file mode 100644 index 1f5964032..000000000 --- a/examples-generated/identityplatform/v1beta2/projectdefaultconfig.yaml +++ /dev/null @@ -1,21 +0,0 @@ -apiVersion: identityplatform.gcp.upbound.io/v1beta2 -kind: ProjectDefaultConfig -metadata: - annotations: - meta.upbound.io/example-id: identityplatform/v1beta2/projectdefaultconfig - labels: - testing.upbound.io/example-name: default - name: default 
-spec: - forProvider: - signIn: - - allowDuplicateEmails: true - anonymous: - - enabled: true - email: - - enabled: true - passwordRequired: false - phoneNumber: - - enabled: true - testPhoneNumbers: - "+11231231234": "000000" diff --git a/examples-generated/networkconnectivity/v1beta2/spoke.yaml b/examples-generated/networkconnectivity/v1beta2/spoke.yaml index 8d9c1330f..ad1c663f9 100644 --- a/examples-generated/networkconnectivity/v1beta2/spoke.yaml +++ b/examples-generated/networkconnectivity/v1beta2/spoke.yaml @@ -18,6 +18,9 @@ spec: - excludeExportRanges: - 198.51.100.0/24 - 10.10.0.0/16 + includeExportRanges: + - 198.51.100.0/23 + - 10.0.0.0/8 uriSelector: matchLabels: testing.upbound.io/example-name: network diff --git a/examples-generated/orgpolicy/v1beta1/policy.yaml b/examples-generated/orgpolicy/v1beta1/policy.yaml index a9032b124..b503d9ac6 100644 --- a/examples-generated/orgpolicy/v1beta1/policy.yaml +++ b/examples-generated/orgpolicy/v1beta1/policy.yaml @@ -27,6 +27,7 @@ metadata: name: basic spec: forProvider: + deletionPolicy: DELETE name: id orgId: "123456789" projectId: id diff --git a/examples-generated/privateca/v1beta2/certificate.yaml b/examples-generated/privateca/v1beta2/certificate.yaml index 4efe1254b..31502727c 100644 --- a/examples-generated/privateca/v1beta2/certificate.yaml +++ b/examples-generated/privateca/v1beta2/certificate.yaml @@ -111,7 +111,6 @@ spec: crlSign: true extendedKeyUsage: - serverAuth: true - deletionProtection: false ignoreActiveCertificatesOnDeletion: true keySpec: - algorithm: RSA_PKCS1_4096_SHA256 diff --git a/examples-generated/privateca/v1beta2/certificateauthority.yaml b/examples-generated/privateca/v1beta2/certificateauthority.yaml index 025675d3d..cf1ea4169 100644 --- a/examples-generated/privateca/v1beta2/certificateauthority.yaml +++ b/examples-generated/privateca/v1beta2/certificateauthority.yaml @@ -12,34 +12,19 @@ spec: - subjectConfig: - subject: - commonName: my-certificate-authority - organization: 
HashiCorp - subjectAltName: - - dnsNames: - - hashicorp.com + organization: ACME x509Config: - caOptions: - isCa: true - maxIssuerPathLength: 10 keyUsage: - baseKeyUsage: - certSign: true - contentCommitment: true crlSign: true - dataEncipherment: true - decipherOnly: true - digitalSignature: true - keyAgreement: true - keyEncipherment: false extendedKeyUsage: - - clientAuth: false - codeSigning: true - emailProtection: true - serverAuth: true - timeStamping: true - deletionProtection: "true" + - {} keySpec: - algorithm: RSA_PKCS1_4096_SHA256 - lifetime: 86400s + lifetime: ${10 * 365 * 24 * 3600}s location: us-central1 poolSelector: matchLabels: diff --git a/examples-generated/redis/v1beta1/cluster.yaml b/examples-generated/redis/v1beta1/cluster.yaml index ede766320..ece0ebaa2 100644 --- a/examples-generated/redis/v1beta1/cluster.yaml +++ b/examples-generated/redis/v1beta1/cluster.yaml @@ -9,6 +9,15 @@ metadata: spec: forProvider: authorizationMode: AUTH_MODE_DISABLED + deletionProtectionEnabled: true + maintenancePolicy: + - weeklyMaintenanceWindow: + - day: MONDAY + startTime: + - hours: 1 + minutes: 0 + nanos: 0 + seconds: 0 nodeType: REDIS_SHARED_CORE_NANO pscConfigs: - networkSelector: diff --git a/examples-generated/spanner/v1beta2/database.yaml b/examples-generated/spanner/v1beta2/database.yaml index ec94410cd..929386e17 100644 --- a/examples-generated/spanner/v1beta2/database.yaml +++ b/examples-generated/spanner/v1beta2/database.yaml @@ -11,7 +11,6 @@ spec: ddl: - CREATE TABLE t1 (t1 INT64 NOT NULL,) PRIMARY KEY(t1) - CREATE TABLE t2 (t2 INT64 NOT NULL,) PRIMARY KEY(t2) - deletionProtection: false instanceSelector: matchLabels: testing.upbound.io/example-name: main diff --git a/examples-generated/spanner/v1beta2/instance.yaml b/examples-generated/spanner/v1beta2/instance.yaml index ffc0fd209..558253dd5 100644 --- a/examples-generated/spanner/v1beta2/instance.yaml +++ b/examples-generated/spanner/v1beta2/instance.yaml @@ -11,6 +11,7 @@ spec: config: 
regional-us-central1 defaultBackupScheduleType: AUTOMATIC displayName: Test Spanner Instance + edition: STANDARD labels: foo: bar numNodes: 2 diff --git a/examples-generated/sql/v1beta1/database.yaml b/examples-generated/sql/v1beta1/database.yaml index b21d9327c..60be18f7a 100644 --- a/examples-generated/sql/v1beta1/database.yaml +++ b/examples-generated/sql/v1beta1/database.yaml @@ -25,7 +25,6 @@ metadata: spec: forProvider: databaseVersion: MYSQL_8_0 - deletionProtection: "true" region: us-central1 settings: - tier: db-f1-micro diff --git a/examples-generated/tags/v1beta1/tagbinding.yaml b/examples-generated/tags/v1beta1/tagbinding.yaml index f0f58aba2..a999f5940 100644 --- a/examples-generated/tags/v1beta1/tagbinding.yaml +++ b/examples-generated/tags/v1beta1/tagbinding.yaml @@ -25,6 +25,7 @@ metadata: name: project spec: forProvider: + deletionPolicy: DELETE name: project_id orgId: "123456789" projectId: project_id diff --git a/examples-generated/vpcaccess/v1beta2/connector.yaml b/examples-generated/vpcaccess/v1beta2/connector.yaml index 55b5505f6..89d774d42 100644 --- a/examples-generated/vpcaccess/v1beta2/connector.yaml +++ b/examples-generated/vpcaccess/v1beta2/connector.yaml @@ -9,6 +9,8 @@ metadata: spec: forProvider: ipCidrRange: 10.8.0.0/28 + maxInstances: 3 + minInstances: 2 networkSelector: matchLabels: testing.upbound.io/example-name: example diff --git a/examples/bigquery/v1beta2/connection.yaml b/examples/bigquery/v1beta2/connection.yaml index 4af9312e4..a1e6fb79c 100644 --- a/examples/bigquery/v1beta2/connection.yaml +++ b/examples/bigquery/v1beta2/connection.yaml @@ -63,7 +63,6 @@ metadata: spec: forProvider: databaseVersion: POSTGRES_11 - deletionProtection: false region: us-central1 settings: tier: db-f1-micro diff --git a/examples/bigquery/v1beta2/job.yaml b/examples/bigquery/v1beta2/job.yaml index f5a6e9fc9..cacecb500 100644 --- a/examples/bigquery/v1beta2/job.yaml +++ b/examples/bigquery/v1beta2/job.yaml @@ -66,4 +66,3 @@ spec: 
datasetIdSelector: matchLabels: testing.upbound.io/example-name: bar - deletionProtection: false diff --git a/examples/bigquery/v1beta2/table.yaml b/examples/bigquery/v1beta2/table.yaml index 2e0f9c1e7..c2e3e053f 100644 --- a/examples/bigquery/v1beta2/table.yaml +++ b/examples/bigquery/v1beta2/table.yaml @@ -15,7 +15,6 @@ spec: datasetIdSelector: matchLabels: testing.upbound.io/example-name: default - deletionProtection: false labels: env: default schema: | diff --git a/examples/bigquery/v1beta2/tableiambinding.yaml b/examples/bigquery/v1beta2/tableiambinding.yaml index 01b09cc31..e6e1fe617 100644 --- a/examples/bigquery/v1beta2/tableiambinding.yaml +++ b/examples/bigquery/v1beta2/tableiambinding.yaml @@ -40,7 +40,6 @@ spec: datasetIdSelector: matchLabels: testing.upbound.io/example-name: default - deletionProtection: false labels: env: default schema: | diff --git a/examples/bigquery/v1beta2/tableiammember.yaml b/examples/bigquery/v1beta2/tableiammember.yaml index 6a686a18d..930a1046f 100644 --- a/examples/bigquery/v1beta2/tableiammember.yaml +++ b/examples/bigquery/v1beta2/tableiammember.yaml @@ -39,7 +39,6 @@ spec: datasetIdSelector: matchLabels: testing.upbound.io/example-name: default - deletionProtection: false labels: env: default schema: | diff --git a/examples/bigtable/v1beta1/table.yaml b/examples/bigtable/v1beta1/table.yaml index a7a58aed1..1625a28b7 100644 --- a/examples/bigtable/v1beta1/table.yaml +++ b/examples/bigtable/v1beta1/table.yaml @@ -12,6 +12,24 @@ metadata: name: table spec: forProvider: + columnFamily: + - family: family-first + - family: family-second + type: intsum + - family: family-third + type: | + { + "aggregateType": { + "max": {}, + "inputType": { + "int64Type": { + "encoding": { + "bigEndianBytes": {} + } + } + } + } + } instanceNameSelector: matchLabels: testing.upbound.io/example-name: instance diff --git a/examples/bigtable/v1beta2/appprofile.yaml b/examples/bigtable/v1beta2/appprofile.yaml index b997fd659..49fc0aa06 100644 --- 
a/examples/bigtable/v1beta2/appprofile.yaml +++ b/examples/bigtable/v1beta2/appprofile.yaml @@ -43,4 +43,3 @@ spec: numNodes: 3 storageType: HDD zone: us-central1-c - deletionProtection: false diff --git a/examples/bigtable/v1beta2/garbagecollectionpolicy.yaml b/examples/bigtable/v1beta2/garbagecollectionpolicy.yaml index 664c56c51..ad0e5d323 100644 --- a/examples/bigtable/v1beta2/garbagecollectionpolicy.yaml +++ b/examples/bigtable/v1beta2/garbagecollectionpolicy.yaml @@ -39,7 +39,6 @@ spec: numNodes: 3 storageType: HDD zone: us-central1-b - deletionProtection: false --- diff --git a/examples/bigtable/v1beta2/instance.yaml b/examples/bigtable/v1beta2/instance.yaml index b81a1ca61..d19d85882 100644 --- a/examples/bigtable/v1beta2/instance.yaml +++ b/examples/bigtable/v1beta2/instance.yaml @@ -17,6 +17,5 @@ spec: numNodes: 1 storageType: HDD zone: us-central1-b - deletionProtection: false labels: my-label: prod-label diff --git a/examples/bigtable/v1beta2/instanceiambinding.yaml b/examples/bigtable/v1beta2/instanceiambinding.yaml index ec4972979..838734854 100644 --- a/examples/bigtable/v1beta2/instanceiambinding.yaml +++ b/examples/bigtable/v1beta2/instanceiambinding.yaml @@ -34,6 +34,5 @@ spec: numNodes: 1 storageType: HDD zone: us-central1-b - deletionProtection: false labels: my-label: prod-label diff --git a/examples/bigtable/v1beta2/instanceiammember.yaml b/examples/bigtable/v1beta2/instanceiammember.yaml index e374eb01b..91914c424 100644 --- a/examples/bigtable/v1beta2/instanceiammember.yaml +++ b/examples/bigtable/v1beta2/instanceiammember.yaml @@ -33,6 +33,5 @@ spec: numNodes: 1 storageType: HDD zone: us-central1-b - deletionProtection: false labels: my-label: prod-label diff --git a/examples/bigtable/v1beta2/tableiambinding.yaml b/examples/bigtable/v1beta2/tableiambinding.yaml index ab13eaf64..f452c3925 100644 --- a/examples/bigtable/v1beta2/tableiambinding.yaml +++ b/examples/bigtable/v1beta2/tableiambinding.yaml @@ -55,4 +55,3 @@ spec: numNodes: 3 
storageType: HDD zone: us-central1-b - deletionProtection: false diff --git a/examples/bigtable/v1beta2/tableiammember.yaml b/examples/bigtable/v1beta2/tableiammember.yaml index ae835070a..344dcf36e 100644 --- a/examples/bigtable/v1beta2/tableiammember.yaml +++ b/examples/bigtable/v1beta2/tableiammember.yaml @@ -54,4 +54,3 @@ spec: numNodes: 3 storageType: HDD zone: us-central1-b - deletionProtection: false diff --git a/examples/compute/v1beta1/networkfirewallpolicyassociation.yaml b/examples/compute/v1beta1/networkfirewallpolicyassociation.yaml index 2d08536c8..dbae67ef1 100644 --- a/examples/compute/v1beta1/networkfirewallpolicyassociation.yaml +++ b/examples/compute/v1beta1/networkfirewallpolicyassociation.yaml @@ -30,8 +30,8 @@ metadata: testing.upbound.io/example-name: network name: network spec: - forProvider: {} - + forProvider: + autoCreateSubnetworks: false --- apiVersion: compute.gcp.upbound.io/v1beta1 diff --git a/examples/compute/v1beta1/regionnetworkfirewallpolicyassociation.yaml b/examples/compute/v1beta1/regionnetworkfirewallpolicyassociation.yaml index 30988438f..24549a958 100644 --- a/examples/compute/v1beta1/regionnetworkfirewallpolicyassociation.yaml +++ b/examples/compute/v1beta1/regionnetworkfirewallpolicyassociation.yaml @@ -31,7 +31,8 @@ metadata: testing.upbound.io/example-name: basic_network name: basic-network spec: - forProvider: {} + forProvider: + autoCreateSubnetworks: false --- diff --git a/examples/container/v1beta2/cluster.yaml b/examples/container/v1beta2/cluster.yaml index a9bd3980f..0d14a4886 100644 --- a/examples/container/v1beta2/cluster.yaml +++ b/examples/container/v1beta2/cluster.yaml @@ -12,7 +12,6 @@ metadata: name: cluster spec: forProvider: - deletionProtection: false enableAutopilot: true enableIntranodeVisibility: true ipAllocationPolicy: {} diff --git a/examples/container/v1beta2/nodepool.yaml b/examples/container/v1beta2/nodepool.yaml index 37613763d..7775598b6 100644 --- a/examples/container/v1beta2/nodepool.yaml 
+++ b/examples/container/v1beta2/nodepool.yaml @@ -38,7 +38,6 @@ metadata: name: nodepool spec: forProvider: - deletionProtection: false initialNodeCount: 1 location: us-central1-a removeDefaultNodePool: true diff --git a/examples/containeraws/v1beta2/nodepool.yaml b/examples/containeraws/v1beta2/nodepool.yaml index 0df958ca0..a4652206a 100644 --- a/examples/containeraws/v1beta2/nodepool.yaml +++ b/examples/containeraws/v1beta2/nodepool.yaml @@ -37,6 +37,11 @@ spec: volumeType: GP3 tags: tag-one: value-one + kubeletConfig: + - cpuCfsQuota: true + cpuCfsQuotaPeriod: 100ms + cpuManagerPolicy: none + podPidsLimit: 1024 location: us-west1 maxPodsConstraint: maxPodsPerNode: 110 diff --git a/examples/datastore/v1beta1/index.yaml b/examples/datastore/v1beta1/index.yaml deleted file mode 100644 index f012b8e7a..000000000 --- a/examples/datastore/v1beta1/index.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# SPDX-FileCopyrightText: 2024 The Crossplane Authors -# -# SPDX-License-Identifier: CC0-1.0 - -apiVersion: datastore.gcp.upbound.io/v1beta1 -kind: Index -metadata: - annotations: - meta.upbound.io/example-id: datastore/v1beta1/index - upjet.upbound.io/manual-intervention: "Manual intervention due to resource created successfully but Uptest returned error: Index already exist." 
- labels: - testing.upbound.io/example-name: index - name: index -spec: - forProvider: - kind: Index - properties: - - direction: "DESCENDING" - name: "property_example_a" - - direction: "DESCENDING" - name: "property_example_b" diff --git a/examples/dns/v1beta1/responsepolicy.yaml b/examples/dns/v1beta1/responsepolicy.yaml index 253591fe0..f6071b00c 100644 --- a/examples/dns/v1beta1/responsepolicy.yaml +++ b/examples/dns/v1beta1/responsepolicy.yaml @@ -80,7 +80,6 @@ metadata: name: cluster-1 spec: forProvider: - deletionProtection: false defaultSnatStatus: - disabled: true initialNodeCount: 1 diff --git a/examples/gke/v1beta2/backupbackupplan.yaml b/examples/gke/v1beta2/backupbackupplan.yaml index 8a92219fd..2f3160bc8 100644 --- a/examples/gke/v1beta2/backupbackupplan.yaml +++ b/examples/gke/v1beta2/backupbackupplan.yaml @@ -36,7 +36,6 @@ spec: addonsConfig: gkeBackupAgentConfig: enabled: true - deletionProtection: false initialNodeCount: 1 location: us-central1 removeDefaultNodePool: false diff --git a/examples/gkehub/v1beta2/membership.yaml b/examples/gkehub/v1beta2/membership.yaml index 109a5f7c2..924bdcd51 100644 --- a/examples/gkehub/v1beta2/membership.yaml +++ b/examples/gkehub/v1beta2/membership.yaml @@ -31,7 +31,6 @@ metadata: name: membership spec: forProvider: - deletionProtection: false initialNodeCount: 2 location: us-central1-a nodeConfig: diff --git a/examples-generated/identityplatform/v1beta1/projectdefaultconfig.yaml b/examples/identityplatform/v1beta1/config.yaml similarity index 67% rename from examples-generated/identityplatform/v1beta1/projectdefaultconfig.yaml rename to examples/identityplatform/v1beta1/config.yaml index 5e2cb04e0..55009e854 100644 --- a/examples-generated/identityplatform/v1beta1/projectdefaultconfig.yaml +++ b/examples/identityplatform/v1beta1/config.yaml @@ -1,8 +1,8 @@ apiVersion: identityplatform.gcp.upbound.io/v1beta1 -kind: ProjectDefaultConfig +kind: Config metadata: annotations: - meta.upbound.io/example-id: 
identityplatform/v1beta1/projectdefaultconfig + meta.upbound.io/example-id: identityplatform/v1beta1/config labels: testing.upbound.io/example-name: default name: default @@ -11,11 +11,11 @@ spec: signIn: - allowDuplicateEmails: true anonymous: - - enabled: true + enabled: true email: - - enabled: true + enabled: true passwordRequired: false phoneNumber: - - enabled: true + enabled: true testPhoneNumbers: "+11231231234": "000000" diff --git a/examples/identityplatform/v1beta1/projectdefaultconfig.yaml b/examples/identityplatform/v1beta1/projectdefaultconfig.yaml deleted file mode 100644 index 80f4e26f8..000000000 --- a/examples/identityplatform/v1beta1/projectdefaultconfig.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# SPDX-FileCopyrightText: 2024 The Crossplane Authors -# -# SPDX-License-Identifier: CC0-1.0 - -apiVersion: identityplatform.gcp.upbound.io/v1beta1 -kind: ProjectDefaultConfig -metadata: - annotations: - meta.upbound.io/example-id: identityplatform/v1beta1/projectdefaultconfig - labels: - testing.upbound.io/example-name: default - name: default -spec: - forProvider: - signIn: - - allowDuplicateEmails: true - anonymous: - - enabled: true - email: - - enabled: true - passwordRequired: false - phoneNumber: - - enabled: true - testPhoneNumbers: - "+11231231234": "0000000" diff --git a/examples/identityplatform/v1beta2/projectdefaultconfig.yaml b/examples/identityplatform/v1beta2/projectdefaultconfig.yaml deleted file mode 100644 index 91b372155..000000000 --- a/examples/identityplatform/v1beta2/projectdefaultconfig.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# SPDX-FileCopyrightText: 2024 The Crossplane Authors -# -# SPDX-License-Identifier: Apache-2.0 - -apiVersion: identityplatform.gcp.upbound.io/v1beta2 -kind: ProjectDefaultConfig -metadata: - annotations: - meta.upbound.io/example-id: identityplatform/v1beta2/projectdefaultconfig - labels: - testing.upbound.io/example-name: default - name: default -spec: - forProvider: - signIn: - allowDuplicateEmails: true - 
anonymous: - enabled: true - email: - enabled: true - passwordRequired: false - phoneNumber: - enabled: true - testPhoneNumbers: - "+11231231234": "0000000" diff --git a/examples/privateca/v1beta2/certificate.yaml b/examples/privateca/v1beta2/certificate.yaml index 808857a8f..592facd0e 100644 --- a/examples/privateca/v1beta2/certificate.yaml +++ b/examples/privateca/v1beta2/certificate.yaml @@ -102,7 +102,6 @@ spec: crlSign: true extendedKeyUsage: serverAuth: true - deletionProtection: false ignoreActiveCertificatesOnDeletion: false keySpec: algorithm: RSA_PKCS1_4096_SHA256 diff --git a/examples/privateca/v1beta2/certificateauthority.yaml b/examples/privateca/v1beta2/certificateauthority.yaml index 55c895236..59222de24 100644 --- a/examples/privateca/v1beta2/certificateauthority.yaml +++ b/examples/privateca/v1beta2/certificateauthority.yaml @@ -42,7 +42,6 @@ spec: emailProtection: true serverAuth: true timeStamping: true - deletionProtection: false keySpec: algorithm: RSA_PKCS1_4096_SHA256 lifetime: 86400s diff --git a/examples/redis/v1beta1/cluster.yaml b/examples/redis/v1beta1/cluster.yaml index 94d1a936e..e57d2d7bb 100644 --- a/examples/redis/v1beta1/cluster.yaml +++ b/examples/redis/v1beta1/cluster.yaml @@ -10,6 +10,15 @@ metadata: spec: forProvider: authorizationMode: AUTH_MODE_DISABLED + deletionProtectionEnabled: false + maintenancePolicy: + - weeklyMaintenanceWindow: + - day: MONDAY + startTime: + - hours: 1 + minutes: 0 + nanos: 0 + seconds: 0 nodeType: REDIS_SHARED_CORE_NANO pscConfigs: - networkSelector: diff --git a/examples/spanner/v1beta2/database.yaml b/examples/spanner/v1beta2/database.yaml index ccf46bf05..6c46025e8 100644 --- a/examples/spanner/v1beta2/database.yaml +++ b/examples/spanner/v1beta2/database.yaml @@ -15,7 +15,6 @@ spec: ddl: - CREATE TABLE t1 (t1 INT64 NOT NULL,) PRIMARY KEY(t1) - CREATE TABLE t2 (t2 INT64 NOT NULL,) PRIMARY KEY(t2) - deletionProtection: false instanceSelector: matchLabels: testing.upbound.io/example-name: main diff 
--git a/examples/spanner/v1beta2/databaseiammember.yaml b/examples/spanner/v1beta2/databaseiammember.yaml index 4a6418b05..69078a5e6 100644 --- a/examples/spanner/v1beta2/databaseiammember.yaml +++ b/examples/spanner/v1beta2/databaseiammember.yaml @@ -34,7 +34,6 @@ spec: ddl: - CREATE TABLE t1 (t1 INT64 NOT NULL,) PRIMARY KEY(t1) - CREATE TABLE t2 (t2 INT64 NOT NULL,) PRIMARY KEY(t2) - deletionProtection: false instanceSelector: matchLabels: testing.upbound.io/example-name: main diff --git a/examples/spanner/v1beta2/instance.yaml b/examples/spanner/v1beta2/instance.yaml index 9cf770111..b87bb9f01 100644 --- a/examples/spanner/v1beta2/instance.yaml +++ b/examples/spanner/v1beta2/instance.yaml @@ -14,6 +14,7 @@ spec: forProvider: config: regional-us-central1 displayName: Test Spanner Instance + edition: STANDARD labels: foo: bar numNodes: 2 diff --git a/examples/sql/v1beta2/instance.yaml b/examples/sql/v1beta2/instance.yaml index d547ccf24..4449cb17b 100644 --- a/examples/sql/v1beta2/instance.yaml +++ b/examples/sql/v1beta2/instance.yaml @@ -13,7 +13,6 @@ metadata: spec: forProvider: databaseVersion: MYSQL_5_7 - deletionProtection: false region: us-central1 settings: diskSize: 20 diff --git a/examples/sql/v1beta2/user.yaml b/examples/sql/v1beta2/user.yaml index 7914534c4..748e5f004 100644 --- a/examples/sql/v1beta2/user.yaml +++ b/examples/sql/v1beta2/user.yaml @@ -51,7 +51,6 @@ metadata: spec: forProvider: databaseVersion: MYSQL_5_7 - deletionProtection: false region: us-central1 settings: diskSize: 20 diff --git a/examples/vpcaccess/v1beta2/connector.yaml b/examples/vpcaccess/v1beta2/connector.yaml index 55b5505f6..89d774d42 100644 --- a/examples/vpcaccess/v1beta2/connector.yaml +++ b/examples/vpcaccess/v1beta2/connector.yaml @@ -9,6 +9,8 @@ metadata: spec: forProvider: ipCidrRange: 10.8.0.0/28 + maxInstances: 3 + minInstances: 2 networkSelector: matchLabels: testing.upbound.io/example-name: example diff --git a/go.mod b/go.mod index 509162db6..3b99a62e4 100644 --- 
a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/crossplane/upjet v1.4.1-0.20250108142216-db86f70a1651 github.com/hashicorp/terraform-json v0.22.1 github.com/hashicorp/terraform-plugin-sdk/v2 v2.33.0 - github.com/hashicorp/terraform-provider-google v1.20.1-0.20241111170140-c875b30c3ae6 + github.com/hashicorp/terraform-provider-google v1.20.1-0.20250113183301-1a5ead85e7d2 // v6.16.0 github.com/pkg/errors v0.9.1 gopkg.in/alecthomas/kingpin.v2 v2.2.6 k8s.io/apimachinery v0.30.0 @@ -24,16 +24,16 @@ require ( require ( bitbucket.org/creachadair/stringset v0.0.8 // indirect - cel.dev/expr v0.15.0 // indirect - cloud.google.com/go v0.115.0 // indirect - cloud.google.com/go/auth v0.8.0 // indirect - cloud.google.com/go/auth/oauth2adapt v0.2.3 // indirect - cloud.google.com/go/bigtable v1.29.0 // indirect - cloud.google.com/go/compute/metadata v0.5.0 // indirect - cloud.google.com/go/iam v1.1.12 // indirect - cloud.google.com/go/longrunning v0.5.11 // indirect - cloud.google.com/go/monitoring v1.20.3 // indirect - github.com/GoogleCloudPlatform/declarative-resource-client-library v1.71.0 // indirect + cel.dev/expr v0.16.0 // indirect + cloud.google.com/go v0.116.0 // indirect + cloud.google.com/go/auth v0.13.0 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.6 // indirect + cloud.google.com/go/bigtable v1.33.0 // indirect + cloud.google.com/go/compute/metadata v0.6.0 // indirect + cloud.google.com/go/iam v1.2.2 // indirect + cloud.google.com/go/longrunning v0.6.2 // indirect + cloud.google.com/go/monitoring v1.21.2 // indirect + github.com/GoogleCloudPlatform/declarative-resource-client-library v1.76.0 // indirect github.com/ProtonMail/go-crypto v1.1.0-alpha.2 // indirect github.com/agext/levenshtein v1.2.3 // indirect github.com/alecthomas/kingpin/v2 v2.4.0 // indirect @@ -49,12 +49,12 @@ require ( github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cloudflare/circl v1.3.7 // 
indirect - github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b // indirect + github.com/cncf/xds/go v0.0.0-20240822171458-6449f94b4d59 // indirect github.com/dave/jennifer v1.4.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect - github.com/envoyproxy/go-control-plane v0.12.0 // indirect - github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect + github.com/envoyproxy/go-control-plane v0.13.0 // indirect + github.com/envoyproxy/protoc-gen-validate v1.1.0 // indirect github.com/evanphx/json-patch v5.9.0+incompatible // indirect github.com/evanphx/json-patch/v5 v5.9.0 // indirect github.com/fatih/camelcase v1.0.0 // indirect @@ -81,8 +81,8 @@ require ( github.com/google/gofuzz v1.2.0 // indirect github.com/google/s2a-go v0.1.8 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect - github.com/googleapis/gax-go/v2 v2.13.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect + github.com/googleapis/gax-go/v2 v2.14.0 // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect github.com/hashicorp/errwrap v1.0.0 // indirect github.com/hashicorp/go-checkpoint v0.5.0 // indirect @@ -90,16 +90,16 @@ require ( github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 // indirect github.com/hashicorp/go-hclog v1.6.3 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/hashicorp/go-plugin v1.6.0 // indirect + github.com/hashicorp/go-plugin v1.6.2 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/hashicorp/go-version v1.6.0 // indirect github.com/hashicorp/hc-install v0.6.4 // indirect github.com/hashicorp/hcl/v2 v2.20.1 // indirect github.com/hashicorp/logutils v1.0.0 // indirect github.com/hashicorp/terraform-exec v0.21.0 // indirect - github.com/hashicorp/terraform-plugin-framework v1.7.0 // indirect + 
github.com/hashicorp/terraform-plugin-framework v1.13.0 // indirect github.com/hashicorp/terraform-plugin-framework-validators v0.9.0 // indirect - github.com/hashicorp/terraform-plugin-go v0.23.0 // indirect + github.com/hashicorp/terraform-plugin-go v0.25.0 // indirect github.com/hashicorp/terraform-plugin-log v0.9.0 // indirect github.com/hashicorp/terraform-plugin-testing v1.5.1 // indirect github.com/hashicorp/terraform-registry-address v0.2.3 // indirect @@ -127,8 +127,9 @@ require ( github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/muvaf/typewriter v0.0.0-20210910160850-80e49fe1eb32 // indirect github.com/oklog/run v1.0.0 // indirect + github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/prometheus/client_golang v1.19.1 // indirect - github.com/prometheus/client_model v0.5.0 // indirect + github.com/prometheus/client_model v0.6.0 // indirect github.com/prometheus/common v0.48.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect @@ -144,13 +145,13 @@ require ( github.com/zclconf/go-cty v1.14.4 // indirect github.com/zclconf/go-cty-yaml v1.0.3 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect - go.opentelemetry.io/otel v1.24.0 // indirect - go.opentelemetry.io/otel/metric v1.24.0 // indirect - go.opentelemetry.io/otel/sdk v1.24.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.24.0 // indirect - go.opentelemetry.io/otel/trace v1.24.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect + go.opentelemetry.io/otel v1.29.0 // indirect + go.opentelemetry.io/otel/metric v1.29.0 // indirect + 
go.opentelemetry.io/otel/sdk v1.29.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.29.0 // indirect + go.opentelemetry.io/otel/trace v1.29.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect go4.org/netipx v0.0.0-20231129151722-fdeea329fbba // indirect @@ -158,21 +159,21 @@ require ( golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f // indirect golang.org/x/mod v0.17.0 // indirect golang.org/x/net v0.38.0 // indirect - golang.org/x/oauth2 v0.22.0 // indirect + golang.org/x/oauth2 v0.24.0 // indirect golang.org/x/sync v0.12.0 // indirect golang.org/x/sys v0.31.0 // indirect golang.org/x/term v0.30.0 // indirect golang.org/x/text v0.23.0 // indirect - golang.org/x/time v0.6.0 // indirect + golang.org/x/time v0.8.0 // indirect golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/api v0.191.0 // indirect + google.golang.org/api v0.214.0 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto v0.0.0-20240730163845-b1a4ccb954bf // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf // indirect - google.golang.org/grpc v1.64.1 // indirect - google.golang.org/protobuf v1.34.2 // indirect + google.golang.org/genproto v0.0.0-20241104194629-dd2ea8efbc28 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 // indirect + google.golang.org/grpc v1.67.1 // indirect + google.golang.org/protobuf v1.35.2 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index 073458b89..e2e18433a 100644 --- a/go.sum +++ b/go.sum @@ -1,29 +1,29 @@ bitbucket.org/creachadair/stringset v0.0.8 
h1:gQqe4vs8XWgMyijfyKE6K8o4TcyGGrRXe0JvHgx5H+M= bitbucket.org/creachadair/stringset v0.0.8/go.mod h1:AgthVMyMxC/6FK1KBJ2ALdqkZObGN8hOetgpwXyMn34= -cel.dev/expr v0.15.0 h1:O1jzfJCQBfL5BFoYktaxwIhuttaQPsVWerH9/EEKx0w= -cel.dev/expr v0.15.0/go.mod h1:TRSuuV7DlVCE/uwv5QbAiW/v8l5O8C4eEPHeu7gf7Sg= +cel.dev/expr v0.16.0 h1:yloc84fytn4zmJX2GU3TkXGsaieaV7dQ057Qs4sIG2Y= +cel.dev/expr v0.16.0/go.mod h1:TRSuuV7DlVCE/uwv5QbAiW/v8l5O8C4eEPHeu7gf7Sg= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.115.0 h1:CnFSK6Xo3lDYRoBKEcAtia6VSC837/ZkJuRduSFnr14= -cloud.google.com/go v0.115.0/go.mod h1:8jIM5vVgoAEoiVxQ/O4BFTfHqulPZgs/ufEzMcFMdWU= -cloud.google.com/go/auth v0.8.0 h1:y8jUJLl/Fg+qNBWxP/Hox2ezJvjkrPb952PC1p0G6A4= -cloud.google.com/go/auth v0.8.0/go.mod h1:qGVp/Y3kDRSDZ5gFD/XPUfYQ9xW1iI7q8RIRoCyBbJc= -cloud.google.com/go/auth/oauth2adapt v0.2.3 h1:MlxF+Pd3OmSudg/b1yZ5lJwoXCEaeedAguodky1PcKI= -cloud.google.com/go/auth/oauth2adapt v0.2.3/go.mod h1:tMQXOfZzFuNuUxOypHlQEXgdfX5cuhwU+ffUuXRJE8I= -cloud.google.com/go/bigtable v1.29.0 h1:2CnFjKPwjpZMZdTi2RpppvxzD80zKzDYrLYEQw/NnAs= -cloud.google.com/go/bigtable v1.29.0/go.mod h1:5p909nNdWaNUcWs6KGZO8mI5HUovstlmrIi7+eA5PTQ= -cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= -cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= -cloud.google.com/go/iam v1.1.12 h1:JixGLimRrNGcxvJEQ8+clfLxPlbeZA6MuRJ+qJNQ5Xw= -cloud.google.com/go/iam v1.1.12/go.mod h1:9LDX8J7dN5YRyzVHxwQzrQs9opFFqn0Mxs9nAeB+Hhg= -cloud.google.com/go/longrunning v0.5.11 h1:Havn1kGjz3whCfoD8dxMLP73Ph5w+ODyZB9RUsDxtGk= -cloud.google.com/go/longrunning v0.5.11/go.mod h1:rDn7//lmlfWV1Dx6IB4RatCPenTwwmqXuiP0/RgoEO4= -cloud.google.com/go/monitoring v1.20.3 h1:v/7MXFxYrhXLEZ9sSfwXdlTLLB/xrU7xTyYjY5acynQ= -cloud.google.com/go/monitoring v1.20.3/go.mod h1:GPIVIdNznIdGqEjtRKQWTLcUeRnPjZW85szouimiczU= +cloud.google.com/go v0.116.0 
h1:B3fRrSDkLRt5qSHWe40ERJvhvnQwdZiHu0bJOpldweE= +cloud.google.com/go v0.116.0/go.mod h1:cEPSRWPzZEswwdr9BxE6ChEn01dWlTaF05LiC2Xs70U= +cloud.google.com/go/auth v0.13.0 h1:8Fu8TZy167JkW8Tj3q7dIkr2v4cndv41ouecJx0PAHs= +cloud.google.com/go/auth v0.13.0/go.mod h1:COOjD9gwfKNKz+IIduatIhYJQIc0mG3H102r/EMxX6Q= +cloud.google.com/go/auth/oauth2adapt v0.2.6 h1:V6a6XDu2lTwPZWOawrAa9HUK+DB2zfJyTuciBG5hFkU= +cloud.google.com/go/auth/oauth2adapt v0.2.6/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8= +cloud.google.com/go/bigtable v1.33.0 h1:2BDaWLRAwXO14DJL/u8crbV2oUbMZkIa2eGq8Yao1bk= +cloud.google.com/go/bigtable v1.33.0/go.mod h1:HtpnH4g25VT1pejHRtInlFPnN5sjTxbQlsYBjh9t5l0= +cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= +cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= +cloud.google.com/go/iam v1.2.2 h1:ozUSofHUGf/F4tCNy/mu9tHLTaxZFLOUiKzjcgWHGIA= +cloud.google.com/go/iam v1.2.2/go.mod h1:0Ys8ccaZHdI1dEUilwzqng/6ps2YB6vRsjIe00/+6JY= +cloud.google.com/go/longrunning v0.6.2 h1:xjDfh1pQcWPEvnfjZmwjKQEcHnpz6lHjfy7Fo0MK+hc= +cloud.google.com/go/longrunning v0.6.2/go.mod h1:k/vIs83RN4bE3YCswdXC5PFfWVILjm3hpEUlSko4PiI= +cloud.google.com/go/monitoring v1.21.2 h1:FChwVtClH19E7pJ+e0xUhJPGksctZNVOk2UhMmblmdU= +cloud.google.com/go/monitoring v1.21.2/go.mod h1:hS3pXvaG8KgWTSz+dAdyzPrGUYmi2Q+WFX8g2hqVEZU= dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/GoogleCloudPlatform/declarative-resource-client-library v1.71.0 h1:vRKCLiR3faPmXAoqSdwXLv28/kygggzaKXzgdm6GXhg= -github.com/GoogleCloudPlatform/declarative-resource-client-library v1.71.0/go.mod h1:pL2Qt5HT+x6xrTd806oMiM3awW6kNIXB/iiuClz6m6k= +github.com/GoogleCloudPlatform/declarative-resource-client-library v1.76.0 
h1:VH/j8GmTsvPds/NkGfo4OYr9C7R8ysikaqq4rcDUT0s= +github.com/GoogleCloudPlatform/declarative-resource-client-library v1.76.0/go.mod h1:pL2Qt5HT+x6xrTd806oMiM3awW6kNIXB/iiuClz6m6k= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/ProtonMail/go-crypto v1.1.0-alpha.2 h1:bkyFVUP+ROOARdgCiJzNQo2V2kiB97LyUpzH9P6Hrlg= @@ -67,8 +67,8 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b h1:ga8SEFjZ60pxLcmhnThWgvH2wg8376yUJmPhEH4H3kw= -github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cncf/xds/go v0.0.0-20240822171458-6449f94b4d59 h1:fLZ97KE86ELjEYJCEUVzmbhfzDxHHGwBrDVMd4XL6Bs= +github.com/cncf/xds/go v0.0.0-20240822171458-6449f94b4d59/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creachadair/staticfile v0.1.2/go.mod h1:a3qySzCIXEprDGxk6tSxSI+dBBdLzqeBOMhZ+o2d3pM= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -94,11 +94,11 @@ github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FM github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= 
-github.com/envoyproxy/go-control-plane v0.12.0 h1:4X+VP1GHd1Mhj6IB5mMeGbLCleqxjletLK6K0rbxyZI= -github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0= +github.com/envoyproxy/go-control-plane v0.13.0 h1:HzkeUz1Knt+3bK+8LG1bxOO/jzWZmdxpwC51i202les= +github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A= -github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= +github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM= +github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls= github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= @@ -174,8 +174,8 @@ github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiu github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/addlicense v0.0.0-20210428195630-6d92264d7170/go.mod h1:EMjYTRimagHs1FwlIqKyX3wAM0u3rA+McvlIIWmSamA= -github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= -github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/gnostic-models v0.6.8 
h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -201,10 +201,10 @@ github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= -github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s= -github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A= +github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw= +github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= +github.com/googleapis/gax-go/v2 v2.14.0 h1:f+jMrjBPl+DL9nI4IQzLUxMq7XrAqFYB7hBPqMNIe8o= +github.com/googleapis/gax-go/v2 v2.14.0/go.mod h1:lhBCnjdLrWRaPvLWhmc8IS24m9mr07qSYnHncrgo+zk= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= @@ -220,8 +220,8 @@ github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB1 github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= 
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-plugin v1.6.0 h1:wgd4KxHJTVGGqWBq4QPB1i5BZNEx9BR8+OFmHDmTk8A= -github.com/hashicorp/go-plugin v1.6.0/go.mod h1:lBS5MtSSBZk0SHc66KACcjjlU6WzEVP/8pwz68aMkCI= +github.com/hashicorp/go-plugin v1.6.2 h1:zdGAEd0V1lCaU0u+MxWQhtSDQmahpkwOun8U8EiRVog= +github.com/hashicorp/go-plugin v1.6.2/go.mod h1:CkgLQ5CZqNmdL9U9JzM532t8ZiYQ35+pj3b1FD37R0Q= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= @@ -238,22 +238,22 @@ github.com/hashicorp/terraform-exec v0.21.0 h1:uNkLAe95ey5Uux6KJdua6+cv8asgILFVW github.com/hashicorp/terraform-exec v0.21.0/go.mod h1:1PPeMYou+KDUSSeRE9szMZ/oHf4fYUmB923Wzbq1ICg= github.com/hashicorp/terraform-json v0.22.1 h1:xft84GZR0QzjPVWs4lRUwvTcPnegqlyS7orfb5Ltvec= github.com/hashicorp/terraform-json v0.22.1/go.mod h1:JbWSQCLFSXFFhg42T7l9iJwdGXBYV8fmmD6o/ML4p3A= -github.com/hashicorp/terraform-plugin-framework v1.7.0 h1:wOULbVmfONnJo9iq7/q+iBOBJul5vRovaYJIu2cY/Pw= -github.com/hashicorp/terraform-plugin-framework v1.7.0/go.mod h1:jY9Id+3KbZ17OMpulgnWLSfwxNVYSoYBQFTgsx044CI= +github.com/hashicorp/terraform-plugin-framework v1.13.0 h1:8OTG4+oZUfKgnfTdPTJwZ532Bh2BobF4H+yBiYJ/scw= +github.com/hashicorp/terraform-plugin-framework v1.13.0/go.mod h1:j64rwMGpgM3NYXTKuxrCnyubQb/4VKldEKlcG8cvmjU= github.com/hashicorp/terraform-plugin-framework-validators v0.9.0 h1:LYz4bXh3t7bTEydXOmPDPupRRnA480B/9+jV8yZvxBA= github.com/hashicorp/terraform-plugin-framework-validators v0.9.0/go.mod h1:+BVERsnfdlhYR2YkXMBtPnmn9UsL19U3qUtSZ+Y/5MY= -github.com/hashicorp/terraform-plugin-go v0.23.0 h1:AALVuU1gD1kPb48aPQUjug9Ir/125t+AAurhqphJ2Co= -github.com/hashicorp/terraform-plugin-go v0.23.0/go.mod h1:1E3Cr9h2vMlahWMbsSEcNrOCxovCZhOOIXjFHbjc/lQ= 
+github.com/hashicorp/terraform-plugin-go v0.25.0 h1:oi13cx7xXA6QciMcpcFi/rwA974rdTxjqEhXJjbAyks= +github.com/hashicorp/terraform-plugin-go v0.25.0/go.mod h1:+SYagMYadJP86Kvn+TGeV+ofr/R3g4/If0O5sO96MVw= github.com/hashicorp/terraform-plugin-log v0.9.0 h1:i7hOA+vdAItN1/7UrfBqBwvYPQ9TFvymaRGZED3FCV0= github.com/hashicorp/terraform-plugin-log v0.9.0/go.mod h1:rKL8egZQ/eXSyDqzLUuwUYLVdlYeamldAHSxjUFADow= -github.com/hashicorp/terraform-plugin-mux v0.15.0 h1:+/+lDx0WUsIOpkAmdwBIoFU8UP9o2eZASoOnLsWbKME= -github.com/hashicorp/terraform-plugin-mux v0.15.0/go.mod h1:9ezplb1Dyq394zQ+ldB0nvy/qbNAz3mMoHHseMTMaKo= +github.com/hashicorp/terraform-plugin-mux v0.17.0 h1:/J3vv3Ps2ISkbLPiZOLspFcIZ0v5ycUXCEQScudGCCw= +github.com/hashicorp/terraform-plugin-mux v0.17.0/go.mod h1:yWuM9U1Jg8DryNfvCp+lH70WcYv6D8aooQxxxIzFDsE= github.com/hashicorp/terraform-plugin-sdk/v2 v2.33.0 h1:qHprzXy/As0rxedphECBEQAh3R4yp6pKksKHcqZx5G8= github.com/hashicorp/terraform-plugin-sdk/v2 v2.33.0/go.mod h1:H+8tjs9TjV2w57QFVSMBQacf8k/E1XwLXGCARgViC6A= github.com/hashicorp/terraform-plugin-testing v1.5.1 h1:T4aQh9JAhmWo4+t1A7x+rnxAJHCDIYW9kXyo4sVO92c= github.com/hashicorp/terraform-plugin-testing v1.5.1/go.mod h1:dg8clO6K59rZ8w9EshBmDp1CxTIPu3yA4iaDpX1h5u0= -github.com/hashicorp/terraform-provider-google v1.20.1-0.20241111170140-c875b30c3ae6 h1:O7G/mWdsQCJJDc2wWmSfEbjsGf0VtxzOVGKZZYj68m4= -github.com/hashicorp/terraform-provider-google v1.20.1-0.20241111170140-c875b30c3ae6/go.mod h1:Iqt6uYg1iDXW2rOMsZQiFpkt2YnD88HfaTEohOJrla4= +github.com/hashicorp/terraform-provider-google v1.20.1-0.20250113183301-1a5ead85e7d2 h1:MZECoSHbnT3kSPf8+3Vympc13B+BEZGjxJqhrm4pyhY= +github.com/hashicorp/terraform-provider-google v1.20.1-0.20250113183301-1a5ead85e7d2/go.mod h1:4KAmex0caG547rMN6yDmyRdN7LHlSA1P1ONW8NAN/k0= github.com/hashicorp/terraform-registry-address v0.2.3 h1:2TAiKJ1A3MAkZlH1YI/aTVcLZRu7JseiXNRHbOAyoTI= github.com/hashicorp/terraform-registry-address v0.2.3/go.mod h1:lFHA76T8jfQteVfT7caREqguFrW3c4MFSPhZB7HHgUM= 
github.com/hashicorp/terraform-svchost v0.1.1 h1:EZZimZ1GxdqFRinZ1tpJwVxxt49xc/S52uzrw4x0jKQ= @@ -344,13 +344,15 @@ github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFz github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= -github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos= +github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8= github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE= github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= @@ -418,20 +420,20 @@ github.com/zclconf/go-cty-yaml v1.0.3 h1:og/eOQ7lvA/WWhHGFETVWNduJM7Rjsv2RRpx1sd github.com/zclconf/go-cty-yaml v1.0.3/go.mod 
h1:9YLUH4g7lOhVWqUbctnVlZ5KLpg7JAprQNgxSZ1Gyxs= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= -go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= -go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= -go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= -go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= -go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw= -go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg= -go.opentelemetry.io/otel/sdk/metric v1.24.0 h1:yyMQrPzF+k88/DbH7o4FMAs80puqd+9osbiBrJrz/w8= -go.opentelemetry.io/otel/sdk/metric v1.24.0/go.mod h1:I6Y5FjH6rvEnTTAYQz3Mmv2kl6Ek5IIrmwTLqMrrOE0= -go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= -go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 
h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= +go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= +go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= +go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= +go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= +go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo= +go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok= +go.opentelemetry.io/otel/sdk/metric v1.29.0 h1:K2CfmJohnRgvZ9UAj2/FhIf/okdWcNdBwe1m8xFXiSY= +go.opentelemetry.io/otel/sdk/metric v1.29.0/go.mod h1:6zZLdCl2fkauYoZIOn/soQIDSWFmNSRcICarHfuhNJQ= +go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= +go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= @@ -481,8 +483,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= -golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE= +golang.org/x/oauth2 v0.24.0/go.mod 
h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -528,8 +530,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= -golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= -golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= +golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -549,8 +551,8 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/api v0.191.0 h1:cJcF09Z+4HAB2t5qTQM1ZtfL/PemsLFkcFG67qq2afk= -google.golang.org/api v0.191.0/go.mod h1:tD5dsFGxFza0hnQveGfVk9QQYKcfp+VzgRqyXFxE0+E= +google.golang.org/api v0.214.0 h1:h2Gkq07OYi6kusGOaT/9rnNljuXmqPnaig7WGPmKbwA= +google.golang.org/api 
v0.214.0/go.mod h1:bYPpLG8AyeMWwDU6NXoB00xC0DFkikVvd5MfwoxjLqE= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= @@ -560,20 +562,20 @@ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoA google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20240730163845-b1a4ccb954bf h1:OqdXDEakZCVtDiZTjcxfwbHPCT11ycCEsTKesBVKvyY= -google.golang.org/genproto v0.0.0-20240730163845-b1a4ccb954bf/go.mod h1:mCr1K1c8kX+1iSBREvU3Juo11CB+QOEWxbRS01wWl5M= -google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f h1:b1Ln/PG8orm0SsBbHZWke8dDp2lrCD4jSmfglFpTZbk= -google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f/go.mod h1:AHT0dDg3SoMOgZGnZk29b5xTbPHMoEC8qthmBLJCpys= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf h1:liao9UHurZLtiEwBgT9LMOnKYsHze6eA6w1KQCMVN2Q= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= +google.golang.org/genproto v0.0.0-20241104194629-dd2ea8efbc28 h1:KJjNNclfpIkVqrZlTWcgOOaVQ00LdBnoEaRfkUx760s= +google.golang.org/genproto v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:mt9/MofW7AWQ+Gy179ChOnvmJatV8YHUmrcedo9CIFI= +google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 h1:M0KvPgPmDZHPlbRbaNU1APr28TvwvvdUPlSv7PUvy8g= +google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28/go.mod 
h1:dguCy7UOdZhTvLzDyt15+rOrawrpM4q7DD9dQ1P11P4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 h1:8ZmaLZE4XWrtU3MyClkYqqtl6Oegr3235h7jxsDyqCY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.64.1 h1:LKtvyfbX3UGVPFcGqJ9ItpVWW6oN/2XqTxfAnwRRXiA= -google.golang.org/grpc v1.64.1/go.mod h1:hiQF4LFZelK2WKaP6W0L92zGHtiQdZxk8CrSdvyjeP0= +google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= +google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -585,8 +587,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf 
v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= +google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/internal/controller/datastore/index/zz_controller.go b/internal/controller/identityplatform/config/zz_controller.go similarity index 80% rename from internal/controller/datastore/index/zz_controller.go rename to internal/controller/identityplatform/config/zz_controller.go index dc6518d81..a0c4a8c95 100755 --- a/internal/controller/datastore/index/zz_controller.go +++ b/internal/controller/identityplatform/config/zz_controller.go @@ -4,7 +4,7 @@ // Code generated by upjet. DO NOT EDIT. -package index +package config import ( "time" @@ -21,27 +21,28 @@ import ( "github.com/pkg/errors" ctrl "sigs.k8s.io/controller-runtime" - v1beta1 "github.com/upbound/provider-gcp/apis/datastore/v1beta1" + v1beta1 "github.com/upbound/provider-gcp/apis/identityplatform/v1beta1" features "github.com/upbound/provider-gcp/internal/features" ) -// Setup adds a controller that reconciles Index managed resources. +// Setup adds a controller that reconciles Config managed resources. 
func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { - name := managed.ControllerName(v1beta1.Index_GroupVersionKind.String()) + name := managed.ControllerName(v1beta1.Config_GroupVersionKind.String()) var initializers managed.InitializerChain + initializers = append(initializers, managed.NewNameAsExternalName(mgr.GetClient())) cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} if o.SecretStoreConfigGVK != nil { cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) } - eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1beta1.Index_GroupVersionKind))) - ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1beta1.Index_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1beta1.Config_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1beta1.Config_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) opts := []managed.ReconcilerOption{ managed.WithExternalConnecter( - tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["google_datastore_index"], + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["google_identity_platform_config"], tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), - tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1beta1.Index_GroupVersionKind, mgr, o.PollInterval)), + 
tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1beta1.Config_GroupVersionKind, mgr, o.PollInterval)), tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), managed.WithLogger(o.Logger.WithValues("controller", name)), managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), @@ -61,31 +62,31 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) } - // register webhooks for the kind v1beta1.Index + // register webhooks for the kind v1beta1.Config // if they're enabled. if o.StartWebhooks { if err := ctrl.NewWebhookManagedBy(mgr). - For(&v1beta1.Index{}). + For(&v1beta1.Config{}). Complete(); err != nil { - return errors.Wrap(err, "cannot register webhook for the kind v1beta1.Index") + return errors.Wrap(err, "cannot register webhook for the kind v1beta1.Config") } } if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { stateMetricsRecorder := statemetrics.NewMRStateRecorder( - mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1beta1.IndexList{}, o.MetricOptions.PollStateMetricInterval, + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1beta1.ConfigList{}, o.MetricOptions.PollStateMetricInterval, ) if err := mgr.Add(stateMetricsRecorder); err != nil { - return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1beta1.IndexList") + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1beta1.ConfigList") } } - r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1beta1.Index_GroupVersionKind), opts...) + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1beta1.Config_GroupVersionKind), opts...) return ctrl.NewControllerManagedBy(mgr). Named(name). WithOptions(o.ForControllerRuntime()). WithEventFilter(xpresource.DesiredStateChanged()). - Watches(&v1beta1.Index{}, eventHandler). 
+ Watches(&v1beta1.Config{}, eventHandler). Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) } diff --git a/internal/controller/identityplatform/projectdefaultconfig/zz_controller.go b/internal/controller/identityplatform/projectdefaultconfig/zz_controller.go deleted file mode 100755 index 70b605fda..000000000 --- a/internal/controller/identityplatform/projectdefaultconfig/zz_controller.go +++ /dev/null @@ -1,91 +0,0 @@ -// SPDX-FileCopyrightText: 2024 The Crossplane Authors -// -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by upjet. DO NOT EDIT. - -package projectdefaultconfig - -import ( - "time" - - "github.com/crossplane/crossplane-runtime/pkg/connection" - "github.com/crossplane/crossplane-runtime/pkg/event" - "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" - "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" - xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" - "github.com/crossplane/crossplane-runtime/pkg/statemetrics" - tjcontroller "github.com/crossplane/upjet/pkg/controller" - "github.com/crossplane/upjet/pkg/controller/handler" - "github.com/crossplane/upjet/pkg/metrics" - "github.com/pkg/errors" - ctrl "sigs.k8s.io/controller-runtime" - - v1beta1 "github.com/upbound/provider-gcp/apis/identityplatform/v1beta1" - features "github.com/upbound/provider-gcp/internal/features" -) - -// Setup adds a controller that reconciles ProjectDefaultConfig managed resources. 
-func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { - name := managed.ControllerName(v1beta1.ProjectDefaultConfig_GroupVersionKind.String()) - var initializers managed.InitializerChain - cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} - if o.SecretStoreConfigGVK != nil { - cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) - } - eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1beta1.ProjectDefaultConfig_GroupVersionKind))) - ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1beta1.ProjectDefaultConfig_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) - opts := []managed.ReconcilerOption{ - managed.WithExternalConnecter( - tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["google_identity_platform_project_default_config"], - tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), - tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), - tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), - tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1beta1.ProjectDefaultConfig_GroupVersionKind, mgr, o.PollInterval)), - tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), - managed.WithLogger(o.Logger.WithValues("controller", name)), - managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), - managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), - managed.WithTimeout(3 * time.Minute), - managed.WithInitializers(initializers), - managed.WithConnectionPublishers(cps...), - 
managed.WithPollInterval(o.PollInterval), - } - if o.PollJitter != 0 { - opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) - } - if o.Features.Enabled(features.EnableBetaManagementPolicies) { - opts = append(opts, managed.WithManagementPolicies()) - } - if o.MetricOptions != nil { - opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) - } - - // register webhooks for the kind v1beta1.ProjectDefaultConfig - // if they're enabled. - if o.StartWebhooks { - if err := ctrl.NewWebhookManagedBy(mgr). - For(&v1beta1.ProjectDefaultConfig{}). - Complete(); err != nil { - return errors.Wrap(err, "cannot register webhook for the kind v1beta1.ProjectDefaultConfig") - } - } - - if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { - stateMetricsRecorder := statemetrics.NewMRStateRecorder( - mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1beta1.ProjectDefaultConfigList{}, o.MetricOptions.PollStateMetricInterval, - ) - if err := mgr.Add(stateMetricsRecorder); err != nil { - return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1beta1.ProjectDefaultConfigList") - } - } - - r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1beta1.ProjectDefaultConfig_GroupVersionKind), opts...) - - return ctrl.NewControllerManagedBy(mgr). - Named(name). - WithOptions(o.ForControllerRuntime()). - WithEventFilter(xpresource.DesiredStateChanged()). - Watches(&v1beta1.ProjectDefaultConfig{}, eventHandler). 
- Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) -} diff --git a/internal/controller/zz_datastore_setup.go b/internal/controller/zz_datastore_setup.go deleted file mode 100755 index eff70b642..000000000 --- a/internal/controller/zz_datastore_setup.go +++ /dev/null @@ -1,26 +0,0 @@ -// SPDX-FileCopyrightText: 2024 The Crossplane Authors -// -// SPDX-License-Identifier: Apache-2.0 - -package controller - -import ( - ctrl "sigs.k8s.io/controller-runtime" - - "github.com/crossplane/upjet/pkg/controller" - - index "github.com/upbound/provider-gcp/internal/controller/datastore/index" -) - -// Setup_datastore creates all controllers with the supplied logger and adds them to -// the supplied manager. -func Setup_datastore(mgr ctrl.Manager, o controller.Options) error { - for _, setup := range []func(ctrl.Manager, controller.Options) error{ - index.Setup, - } { - if err := setup(mgr, o); err != nil { - return err - } - } - return nil -} diff --git a/internal/controller/zz_identityplatform_setup.go b/internal/controller/zz_identityplatform_setup.go index 755fc6e20..fefdf34ba 100755 --- a/internal/controller/zz_identityplatform_setup.go +++ b/internal/controller/zz_identityplatform_setup.go @@ -9,10 +9,10 @@ import ( "github.com/crossplane/upjet/pkg/controller" + config "github.com/upbound/provider-gcp/internal/controller/identityplatform/config" defaultsupportedidpconfig "github.com/upbound/provider-gcp/internal/controller/identityplatform/defaultsupportedidpconfig" inboundsamlconfig "github.com/upbound/provider-gcp/internal/controller/identityplatform/inboundsamlconfig" oauthidpconfig "github.com/upbound/provider-gcp/internal/controller/identityplatform/oauthidpconfig" - projectdefaultconfig "github.com/upbound/provider-gcp/internal/controller/identityplatform/projectdefaultconfig" tenant "github.com/upbound/provider-gcp/internal/controller/identityplatform/tenant" tenantdefaultsupportedidpconfig 
"github.com/upbound/provider-gcp/internal/controller/identityplatform/tenantdefaultsupportedidpconfig" tenantinboundsamlconfig "github.com/upbound/provider-gcp/internal/controller/identityplatform/tenantinboundsamlconfig" @@ -23,10 +23,10 @@ import ( // the supplied manager. func Setup_identityplatform(mgr ctrl.Manager, o controller.Options) error { for _, setup := range []func(ctrl.Manager, controller.Options) error{ + config.Setup, defaultsupportedidpconfig.Setup, inboundsamlconfig.Setup, oauthidpconfig.Setup, - projectdefaultconfig.Setup, tenant.Setup, tenantdefaultsupportedidpconfig.Setup, tenantinboundsamlconfig.Setup, diff --git a/internal/controller/zz_monolith_setup.go b/internal/controller/zz_monolith_setup.go index faceff81e..c98500a52 100755 --- a/internal/controller/zz_monolith_setup.go +++ b/internal/controller/zz_monolith_setup.go @@ -232,7 +232,6 @@ import ( jobdataproc "github.com/upbound/provider-gcp/internal/controller/dataproc/job" metastoreservice "github.com/upbound/provider-gcp/internal/controller/dataproc/metastoreservice" workflowtemplate "github.com/upbound/provider-gcp/internal/controller/dataproc/workflowtemplate" - index "github.com/upbound/provider-gcp/internal/controller/datastore/index" connectionprofile "github.com/upbound/provider-gcp/internal/controller/datastream/connectionprofile" privateconnection "github.com/upbound/provider-gcp/internal/controller/datastream/privateconnection" agent "github.com/upbound/provider-gcp/internal/controller/dialogflowcx/agent" @@ -274,10 +273,10 @@ import ( webiammember "github.com/upbound/provider-gcp/internal/controller/iap/webiammember" webtypeappengineiammember "github.com/upbound/provider-gcp/internal/controller/iap/webtypeappengineiammember" webtypecomputeiammember "github.com/upbound/provider-gcp/internal/controller/iap/webtypecomputeiammember" + config "github.com/upbound/provider-gcp/internal/controller/identityplatform/config" defaultsupportedidpconfig 
"github.com/upbound/provider-gcp/internal/controller/identityplatform/defaultsupportedidpconfig" inboundsamlconfig "github.com/upbound/provider-gcp/internal/controller/identityplatform/inboundsamlconfig" oauthidpconfig "github.com/upbound/provider-gcp/internal/controller/identityplatform/oauthidpconfig" - projectdefaultconfig "github.com/upbound/provider-gcp/internal/controller/identityplatform/projectdefaultconfig" tenant "github.com/upbound/provider-gcp/internal/controller/identityplatform/tenant" tenantdefaultsupportedidpconfig "github.com/upbound/provider-gcp/internal/controller/identityplatform/tenantdefaultsupportedidpconfig" tenantinboundsamlconfig "github.com/upbound/provider-gcp/internal/controller/identityplatform/tenantinboundsamlconfig" @@ -606,7 +605,6 @@ func Setup_monolith(mgr ctrl.Manager, o controller.Options) error { jobdataproc.Setup, metastoreservice.Setup, workflowtemplate.Setup, - index.Setup, connectionprofile.Setup, privateconnection.Setup, agent.Setup, @@ -648,10 +646,10 @@ func Setup_monolith(mgr ctrl.Manager, o controller.Options) error { webiammember.Setup, webtypeappengineiammember.Setup, webtypecomputeiammember.Setup, + config.Setup, defaultsupportedidpconfig.Setup, inboundsamlconfig.Setup, oauthidpconfig.Setup, - projectdefaultconfig.Setup, tenant.Setup, tenantdefaultsupportedidpconfig.Setup, tenantinboundsamlconfig.Setup, diff --git a/package/crds/accesscontextmanager.gcp.upbound.io_accesslevels.yaml b/package/crds/accesscontextmanager.gcp.upbound.io_accesslevels.yaml index 59324798e..b252cebc7 100644 --- a/package/crds/accesscontextmanager.gcp.upbound.io_accesslevels.yaml +++ b/package/crds/accesscontextmanager.gcp.upbound.io_accesslevels.yaml @@ -1216,8 +1216,8 @@ spec: permission to be granted to caller. type: string vpcIpSubnetworks: - description: CIDR block IP subnetwork specification. - Must be IPv4. + description: A list of CIDR block IP subnetwork + specification. Must be IPv4. 
items: type: string type: array @@ -1442,8 +1442,8 @@ spec: permission to be granted to caller. type: string vpcIpSubnetworks: - description: CIDR block IP subnetwork specification. - Must be IPv4. + description: A list of CIDR block IP subnetwork + specification. Must be IPv4. items: type: string type: array @@ -1840,8 +1840,8 @@ spec: permission to be granted to caller. type: string vpcIpSubnetworks: - description: CIDR block IP subnetwork specification. - Must be IPv4. + description: A list of CIDR block IP subnetwork + specification. Must be IPv4. items: type: string type: array diff --git a/package/crds/accesscontextmanager.gcp.upbound.io_serviceperimeters.yaml b/package/crds/accesscontextmanager.gcp.upbound.io_serviceperimeters.yaml index cf91e0d8c..e007fe9ce 100644 --- a/package/crds/accesscontextmanager.gcp.upbound.io_serviceperimeters.yaml +++ b/package/crds/accesscontextmanager.gcp.upbound.io_serviceperimeters.yaml @@ -3035,9 +3035,11 @@ spec: properties: identities: description: |- - A list of identities that are allowed access through this ingress policy. - Should be in the format of email address. The email address should represent - individual user or service account only. + Identities can be an individual user, service account, Google group, + or third-party identity. For third-party identity, only single identities + are supported and other identity types are not supported.The v1 identities + that have the prefix user, group and serviceAccount in + https://cloud.google.com/iam/docs/principal-identifiers#v1 are supported. items: type: string type: array @@ -3155,9 +3157,11 @@ spec: properties: identities: description: |- - A list of identities that are allowed access through this ingress policy. - Should be in the format of email address. The email address should represent - individual user or service account only. + Identities can be an individual user, service account, Google group, + or third-party identity. 
For third-party identity, only single identities + are supported and other identity types are not supported.The v1 identities + that have the prefix user, group and serviceAccount in + https://cloud.google.com/iam/docs/principal-identifiers#v1 are supported. items: type: string type: array @@ -3414,9 +3418,11 @@ spec: properties: identities: description: |- - A list of identities that are allowed access through this ingress policy. - Should be in the format of email address. The email address should represent - individual user or service account only. + Identities can be an individual user, service account, Google group, + or third-party identity. For third-party identity, only single identities + are supported and other identity types are not supported.The v1 identities + that have the prefix user, group and serviceAccount in + https://cloud.google.com/iam/docs/principal-identifiers#v1 are supported. items: type: string type: array @@ -3534,9 +3540,11 @@ spec: properties: identities: description: |- - A list of identities that are allowed access through this ingress policy. - Should be in the format of email address. The email address should represent - individual user or service account only. + Identities can be an individual user, service account, Google group, + or third-party identity. For third-party identity, only single identities + are supported and other identity types are not supported.The v1 identities + that have the prefix user, group and serviceAccount in + https://cloud.google.com/iam/docs/principal-identifiers#v1 are supported. items: type: string type: array @@ -3934,9 +3942,11 @@ spec: properties: identities: description: |- - A list of identities that are allowed access through this ingress policy. - Should be in the format of email address. The email address should represent - individual user or service account only. + Identities can be an individual user, service account, Google group, + or third-party identity. 
For third-party identity, only single identities + are supported and other identity types are not supported.The v1 identities + that have the prefix user, group and serviceAccount in + https://cloud.google.com/iam/docs/principal-identifiers#v1 are supported. items: type: string type: array @@ -4054,9 +4064,11 @@ spec: properties: identities: description: |- - A list of identities that are allowed access through this ingress policy. - Should be in the format of email address. The email address should represent - individual user or service account only. + Identities can be an individual user, service account, Google group, + or third-party identity. For third-party identity, only single identities + are supported and other identity types are not supported.The v1 identities + that have the prefix user, group and serviceAccount in + https://cloud.google.com/iam/docs/principal-identifiers#v1 are supported. items: type: string type: array @@ -4313,9 +4325,11 @@ spec: properties: identities: description: |- - A list of identities that are allowed access through this ingress policy. - Should be in the format of email address. The email address should represent - individual user or service account only. + Identities can be an individual user, service account, Google group, + or third-party identity. For third-party identity, only single identities + are supported and other identity types are not supported.The v1 identities + that have the prefix user, group and serviceAccount in + https://cloud.google.com/iam/docs/principal-identifiers#v1 are supported. items: type: string type: array @@ -4433,9 +4447,11 @@ spec: properties: identities: description: |- - A list of identities that are allowed access through this ingress policy. - Should be in the format of email address. The email address should represent - individual user or service account only. + Identities can be an individual user, service account, Google group, + or third-party identity. 
For third-party identity, only single identities + are supported and other identity types are not supported.The v1 identities + that have the prefix user, group and serviceAccount in + https://cloud.google.com/iam/docs/principal-identifiers#v1 are supported. items: type: string type: array @@ -4932,9 +4948,11 @@ spec: properties: identities: description: |- - A list of identities that are allowed access through this ingress policy. - Should be in the format of email address. The email address should represent - individual user or service account only. + Identities can be an individual user, service account, Google group, + or third-party identity. For third-party identity, only single identities + are supported and other identity types are not supported.The v1 identities + that have the prefix user, group and serviceAccount in + https://cloud.google.com/iam/docs/principal-identifiers#v1 are supported. items: type: string type: array @@ -5052,9 +5070,11 @@ spec: properties: identities: description: |- - A list of identities that are allowed access through this ingress policy. - Should be in the format of email address. The email address should represent - individual user or service account only. + Identities can be an individual user, service account, Google group, + or third-party identity. For third-party identity, only single identities + are supported and other identity types are not supported.The v1 identities + that have the prefix user, group and serviceAccount in + https://cloud.google.com/iam/docs/principal-identifiers#v1 are supported. items: type: string type: array @@ -5232,9 +5252,11 @@ spec: properties: identities: description: |- - A list of identities that are allowed access through this ingress policy. - Should be in the format of email address. The email address should represent - individual user or service account only. + Identities can be an individual user, service account, Google group, + or third-party identity. 
For third-party identity, only single identities + are supported and other identity types are not supported.The v1 identities + that have the prefix user, group and serviceAccount in + https://cloud.google.com/iam/docs/principal-identifiers#v1 are supported. items: type: string type: array @@ -5352,9 +5374,11 @@ spec: properties: identities: description: |- - A list of identities that are allowed access through this ingress policy. - Should be in the format of email address. The email address should represent - individual user or service account only. + Identities can be an individual user, service account, Google group, + or third-party identity. For third-party identity, only single identities + are supported and other identity types are not supported.The v1 identities + that have the prefix user, group and serviceAccount in + https://cloud.google.com/iam/docs/principal-identifiers#v1 are supported. items: type: string type: array diff --git a/package/crds/activedirectory.gcp.upbound.io_domains.yaml b/package/crds/activedirectory.gcp.upbound.io_domains.yaml index 6cfb61909..77bdd0d37 100644 --- a/package/crds/activedirectory.gcp.upbound.io_domains.yaml +++ b/package/crds/activedirectory.gcp.upbound.io_domains.yaml @@ -374,6 +374,11 @@ spec: type: string type: array x-kubernetes-list-type: set + deletionProtection: + description: |- + Defaults to true. + When the field is set to false, deleting the domain is allowed. + type: boolean domainName: description: |- The fully qualified domain name. e.g. mydomain.myorganization.com, with the restrictions diff --git a/package/crds/alloydb.gcp.upbound.io_clusters.yaml b/package/crds/alloydb.gcp.upbound.io_clusters.yaml index 5e9c9fd28..5e06d5ef7 100644 --- a/package/crds/alloydb.gcp.upbound.io_clusters.yaml +++ b/package/crds/alloydb.gcp.upbound.io_clusters.yaml @@ -2431,6 +2431,7 @@ spec: Policy to determine if the cluster should be deleted forcefully. 
Deleting a cluster forcefully, deletes the cluster and all its associated instances within the cluster. Deleting a Secondary cluster with a secondary instance REQUIRES setting deletion_policy = "FORCE" otherwise an error is returned. This is needed as there is no support to delete just the secondary instance, and the only way to delete secondary instance is to delete the associated secondary cluster forcefully which also deletes the secondary instance. + Possible values: DEFAULT, FORCE type: string displayName: description: User-settable and human-readable display name for @@ -2533,11 +2534,6 @@ spec: type: object type: array type: object - network: - description: |- - The relative resource name of the VPC network on which the instance can be accessed. It is specified in the following form: - "projects/{projectNumber}/global/networks/{network_id}". - type: string networkConfig: description: |- Metadata related to network configuration. @@ -2630,80 +2626,6 @@ spec: type: object type: object type: object - networkRef: - description: Reference to a Network in compute to populate network. - properties: - name: - description: Name of the referenced object. - type: string - policy: - description: Policies for referencing. - properties: - resolution: - default: Required - description: |- - Resolution specifies whether resolution of this reference is required. - The default is 'Required', which means the reconcile will fail if the - reference cannot be resolved. 'Optional' means this reference will be - a no-op if it cannot be resolved. - enum: - - Required - - Optional - type: string - resolve: - description: |- - Resolve specifies when this reference should be resolved. The default - is 'IfNotPresent', which will attempt to resolve the reference only when - the corresponding field is not present. Use 'Always' to resolve the - reference on every reconcile. 
- enum: - - Always - - IfNotPresent - type: string - type: object - required: - - name - type: object - networkSelector: - description: Selector for a Network in compute to populate network. - properties: - matchControllerRef: - description: |- - MatchControllerRef ensures an object with the same controller reference - as the selecting object is selected. - type: boolean - matchLabels: - additionalProperties: - type: string - description: MatchLabels ensures an object with matching labels - is selected. - type: object - policy: - description: Policies for selection. - properties: - resolution: - default: Required - description: |- - Resolution specifies whether resolution of this reference is required. - The default is 'Required', which means the reconcile will fail if the - reference cannot be resolved. 'Optional' means this reference will be - a no-op if it cannot be resolved. - enum: - - Required - - Optional - type: string - resolve: - description: |- - Resolve specifies when this reference should be resolved. The default - is 'IfNotPresent', which will attempt to resolve the reference only when - the corresponding field is not present. Use 'Always' to resolve the - reference on every reconcile. - enum: - - Always - - IfNotPresent - type: string - type: object - type: object project: description: |- The ID of the project in which the resource belongs. @@ -2982,6 +2904,11 @@ spec: type: object type: object type: object + subscriptionType: + description: |- + The subscrition type of cluster. + Possible values are: TRIAL, STANDARD. + type: string required: - location type: object @@ -3141,6 +3068,7 @@ spec: Policy to determine if the cluster should be deleted forcefully. Deleting a cluster forcefully, deletes the cluster and all its associated instances within the cluster. Deleting a Secondary cluster with a secondary instance REQUIRES setting deletion_policy = "FORCE" otherwise an error is returned. 
This is needed as there is no support to delete just the secondary instance, and the only way to delete secondary instance is to delete the associated secondary cluster forcefully which also deletes the secondary instance. + Possible values: DEFAULT, FORCE type: string displayName: description: User-settable and human-readable display name for @@ -3242,11 +3170,6 @@ spec: type: object type: array type: object - network: - description: |- - The relative resource name of the VPC network on which the instance can be accessed. It is specified in the following form: - "projects/{projectNumber}/global/networks/{network_id}". - type: string networkConfig: description: |- Metadata related to network configuration. @@ -3339,80 +3262,6 @@ spec: type: object type: object type: object - networkRef: - description: Reference to a Network in compute to populate network. - properties: - name: - description: Name of the referenced object. - type: string - policy: - description: Policies for referencing. - properties: - resolution: - default: Required - description: |- - Resolution specifies whether resolution of this reference is required. - The default is 'Required', which means the reconcile will fail if the - reference cannot be resolved. 'Optional' means this reference will be - a no-op if it cannot be resolved. - enum: - - Required - - Optional - type: string - resolve: - description: |- - Resolve specifies when this reference should be resolved. The default - is 'IfNotPresent', which will attempt to resolve the reference only when - the corresponding field is not present. Use 'Always' to resolve the - reference on every reconcile. - enum: - - Always - - IfNotPresent - type: string - type: object - required: - - name - type: object - networkSelector: - description: Selector for a Network in compute to populate network. 
- properties: - matchControllerRef: - description: |- - MatchControllerRef ensures an object with the same controller reference - as the selecting object is selected. - type: boolean - matchLabels: - additionalProperties: - type: string - description: MatchLabels ensures an object with matching labels - is selected. - type: object - policy: - description: Policies for selection. - properties: - resolution: - default: Required - description: |- - Resolution specifies whether resolution of this reference is required. - The default is 'Required', which means the reconcile will fail if the - reference cannot be resolved. 'Optional' means this reference will be - a no-op if it cannot be resolved. - enum: - - Required - - Optional - type: string - resolve: - description: |- - Resolve specifies when this reference should be resolved. The default - is 'IfNotPresent', which will attempt to resolve the reference only when - the corresponding field is not present. Use 'Always' to resolve the - reference on every reconcile. - enum: - - Always - - IfNotPresent - type: string - type: object - type: object project: description: |- The ID of the project in which the resource belongs. @@ -3691,6 +3540,11 @@ spec: type: object type: object type: object + subscriptionType: + description: |- + The subscrition type of cluster. + Possible values are: TRIAL, STANDARD. + type: string type: object managementPolicies: default: @@ -4069,6 +3923,7 @@ spec: Policy to determine if the cluster should be deleted forcefully. Deleting a cluster forcefully, deletes the cluster and all its associated instances within the cluster. Deleting a Secondary cluster with a secondary instance REQUIRES setting deletion_policy = "FORCE" otherwise an error is returned. This is needed as there is no support to delete just the secondary instance, and the only way to delete secondary instance is to delete the associated secondary cluster forcefully which also deletes the secondary instance. 
+ Possible values: DEFAULT, FORCE type: string displayName: description: User-settable and human-readable display name for @@ -4208,11 +4063,6 @@ spec: name: description: The name of the cluster resource. type: string - network: - description: |- - The relative resource name of the VPC network on which the instance can be accessed. It is specified in the following form: - "projects/{projectNumber}/global/networks/{network_id}". - type: string networkConfig: description: |- Metadata related to network configuration. @@ -4288,6 +4138,11 @@ spec: state: description: Output only. The current serving state of the cluster. type: string + subscriptionType: + description: |- + The subscrition type of cluster. + Possible values are: TRIAL, STANDARD. + type: string terraformLabels: additionalProperties: type: string @@ -4296,6 +4151,27 @@ spec: and default labels configured on the provider. type: object x-kubernetes-map-type: granular + trialMetadata: + description: |- + Contains information and all metadata related to TRIAL clusters. + Structure is documented below. + items: + properties: + endTime: + description: End time of the trial cluster. + type: string + graceEndTime: + description: Grace end time of the trial cluster. + type: string + startTime: + description: Start time of the trial cluster. + type: string + upgradeTime: + description: Upgrade time of the trial cluster to standard + cluster. + type: string + type: object + type: array uid: description: The system-generated UID of the resource. type: string diff --git a/package/crds/alloydb.gcp.upbound.io_instances.yaml b/package/crds/alloydb.gcp.upbound.io_instances.yaml index 6629c2d68..0dea08000 100644 --- a/package/crds/alloydb.gcp.upbound.io_instances.yaml +++ b/package/crds/alloydb.gcp.upbound.io_instances.yaml @@ -1467,6 +1467,9 @@ spec: type: string type: object type: array + enableOutboundPublicIp: + description: Enabling outbound public ip for the instance. 
+ type: boolean enablePublicIp: description: |- Enabling public ip for the instance. If a user wishes to disable this, @@ -1717,6 +1720,9 @@ spec: type: string type: object type: array + enableOutboundPublicIp: + description: Enabling outbound public ip for the instance. + type: boolean enablePublicIp: description: |- Enabling public ip for the instance. If a user wishes to disable this, @@ -2080,6 +2086,9 @@ spec: type: string type: object type: array + enableOutboundPublicIp: + description: Enabling outbound public ip for the instance. + type: boolean enablePublicIp: description: |- Enabling public ip for the instance. If a user wishes to disable this, @@ -2087,6 +2096,14 @@ spec: the same instance. type: boolean type: object + outboundPublicIpAddresses: + description: |- + The outbound public IP addresses for the instance. This is available ONLY when + networkConfig.enableOutboundPublicIp is set to true. These IP addresses are used + for outbound connections. + items: + type: string + type: array pscInstanceConfig: description: |- Configuration for Private Service Connect (PSC) for the instance. diff --git a/package/crds/apigee.gcp.upbound.io_nataddresses.yaml b/package/crds/apigee.gcp.upbound.io_nataddresses.yaml index 0d028a4d3..2ba8eb9ef 100644 --- a/package/crds/apigee.gcp.upbound.io_nataddresses.yaml +++ b/package/crds/apigee.gcp.upbound.io_nataddresses.yaml @@ -73,6 +73,10 @@ spec: type: string forProvider: properties: + activate: + description: Flag that specifies whether the reserved NAT address + should be activate. + type: boolean instanceId: description: |- The Apigee instance associated with the Apigee environment, @@ -165,6 +169,11 @@ spec: required on creation, but we do not desire to update them after creation, for example because of an external controller is managing them, like an autoscaler. + properties: + activate: + description: Flag that specifies whether the reserved NAT address + should be activate. 
+ type: boolean type: object managementPolicies: default: @@ -338,6 +347,10 @@ spec: properties: atProvider: properties: + activate: + description: Flag that specifies whether the reserved NAT address + should be activate. + type: boolean id: description: an identifier for the resource with format {{instance_id}}/natAddresses/{{name}} type: string diff --git a/package/crds/artifact.gcp.upbound.io_registryrepositories.yaml b/package/crds/artifact.gcp.upbound.io_registryrepositories.yaml index f89eb9f9d..c3558566f 100644 --- a/package/crds/artifact.gcp.upbound.io_registryrepositories.yaml +++ b/package/crds/artifact.gcp.upbound.io_registryrepositories.yaml @@ -1967,8 +1967,13 @@ spec: type: object x-kubernetes-map-type: granular location: - description: The name of the location this repository is located - in. + description: |- + The name of the repository's location. In addition to specific regions, + special values for multi-region locations are asia, europe, and us. + See here, + or use the + google_artifact_registry_locations + data source for possible values. type: string mavenConfig: description: |- @@ -2026,6 +2031,91 @@ spec: type: string type: object type: object + commonRepository: + description: |- + Specific settings for an Artifact Registory remote repository. + Structure is documented below. + properties: + uri: + description: Specific uri to the registry, e.g. "https://registry-1.docker.io" + type: string + uriRef: + description: Reference to a RegistryRepository in artifact + to populate uri. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + uriSelector: + description: Selector for a RegistryRepository in artifact + to populate uri. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object description: description: The description of the remote source. type: string @@ -2041,7 +2131,7 @@ spec: properties: customRepository: description: |- - Settings for a remote repository with a custom uri. + [Deprecated, please use commonRepository instead] Settings for a remote repository with a custom uri. Structure is documented below. 
properties: uri: @@ -2061,7 +2151,7 @@ spec: properties: customRepository: description: |- - Settings for a remote repository with a custom uri. + [Deprecated, please use commonRepository instead] Settings for a remote repository with a custom uri. Structure is documented below. properties: uri: @@ -2081,7 +2171,7 @@ spec: properties: customRepository: description: |- - Settings for a remote repository with a custom uri. + [Deprecated, please use commonRepository instead] Settings for a remote repository with a custom uri. Structure is documented below. properties: uri: @@ -2101,7 +2191,7 @@ spec: properties: customRepository: description: |- - Settings for a remote repository with a custom uri. + [Deprecated, please use commonRepository instead] Settings for a remote repository with a custom uri. Structure is documented below. properties: uri: @@ -2336,6 +2426,17 @@ spec: type: object type: array type: object + vulnerabilityScanningConfig: + description: |- + Configuration for vulnerability scanning of artifacts stored in this repository. + Structure is documented below. + properties: + enablementConfig: + description: |- + This configures whether vulnerability scanning is automatically performed for artifacts pushed to this repository. + Possible values are: INHERITED, DISABLED. + type: string + type: object type: object initProvider: description: |- @@ -2522,6 +2623,91 @@ spec: type: string type: object type: object + commonRepository: + description: |- + Specific settings for an Artifact Registory remote repository. + Structure is documented below. + properties: + uri: + description: Specific uri to the registry, e.g. "https://registry-1.docker.io" + type: string + uriRef: + description: Reference to a RegistryRepository in artifact + to populate uri. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + uriSelector: + description: Selector for a RegistryRepository in artifact + to populate uri. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object description: description: The description of the remote source. type: string @@ -2537,7 +2723,7 @@ spec: properties: customRepository: description: |- - Settings for a remote repository with a custom uri. + [Deprecated, please use commonRepository instead] Settings for a remote repository with a custom uri. Structure is documented below. properties: uri: @@ -2557,7 +2743,7 @@ spec: properties: customRepository: description: |- - Settings for a remote repository with a custom uri. + [Deprecated, please use commonRepository instead] Settings for a remote repository with a custom uri. Structure is documented below. properties: uri: @@ -2577,7 +2763,7 @@ spec: properties: customRepository: description: |- - Settings for a remote repository with a custom uri. + [Deprecated, please use commonRepository instead] Settings for a remote repository with a custom uri. Structure is documented below. properties: uri: @@ -2597,7 +2783,7 @@ spec: properties: customRepository: description: |- - Settings for a remote repository with a custom uri. + [Deprecated, please use commonRepository instead] Settings for a remote repository with a custom uri. Structure is documented below. properties: uri: @@ -2832,6 +3018,17 @@ spec: type: object type: array type: object + vulnerabilityScanningConfig: + description: |- + Configuration for vulnerability scanning of artifacts stored in this repository. + Structure is documented below. + properties: + enablementConfig: + description: |- + This configures whether vulnerability scanning is automatically performed for artifacts pushed to this repository. + Possible values are: INHERITED, DISABLED. + type: string + type: object type: object managementPolicies: default: @@ -3139,8 +3336,13 @@ spec: type: object x-kubernetes-map-type: granular location: - description: The name of the location this repository is located - in. 
+ description: |- + The name of the repository's location. In addition to specific regions, + special values for multi-region locations are asia, europe, and us. + See here, + or use the + google_artifact_registry_locations + data source for possible values. type: string mavenConfig: description: |- @@ -3203,6 +3405,15 @@ spec: type: string type: object type: object + commonRepository: + description: |- + Specific settings for an Artifact Registory remote repository. + Structure is documented below. + properties: + uri: + description: Specific uri to the registry, e.g. "https://registry-1.docker.io" + type: string + type: object description: description: The description of the remote source. type: string @@ -3218,7 +3429,7 @@ spec: properties: customRepository: description: |- - Settings for a remote repository with a custom uri. + [Deprecated, please use commonRepository instead] Settings for a remote repository with a custom uri. Structure is documented below. properties: uri: @@ -3238,7 +3449,7 @@ spec: properties: customRepository: description: |- - Settings for a remote repository with a custom uri. + [Deprecated, please use commonRepository instead] Settings for a remote repository with a custom uri. Structure is documented below. properties: uri: @@ -3258,7 +3469,7 @@ spec: properties: customRepository: description: |- - Settings for a remote repository with a custom uri. + [Deprecated, please use commonRepository instead] Settings for a remote repository with a custom uri. Structure is documented below. properties: uri: @@ -3278,7 +3489,7 @@ spec: properties: customRepository: description: |- - Settings for a remote repository with a custom uri. + [Deprecated, please use commonRepository instead] Settings for a remote repository with a custom uri. Structure is documented below. 
properties: uri: @@ -3372,6 +3583,27 @@ spec: type: object type: array type: object + vulnerabilityScanningConfig: + description: |- + Configuration for vulnerability scanning of artifacts stored in this repository. + Structure is documented below. + properties: + enablementConfig: + description: |- + This configures whether vulnerability scanning is automatically performed for artifacts pushed to this repository. + Possible values are: INHERITED, DISABLED. + type: string + enablementState: + description: |- + (Output) + This field returns whether scanning is active for this repository. + type: string + enablementStateReason: + description: |- + (Output) + This provides an explanation for the state of scanning on this repository. + type: string + type: object type: object conditions: description: Conditions of the resource. diff --git a/package/crds/bigquery.gcp.upbound.io_analyticshubdataexchanges.yaml b/package/crds/bigquery.gcp.upbound.io_analyticshubdataexchanges.yaml index a1c847d13..846a4c018 100644 --- a/package/crds/bigquery.gcp.upbound.io_analyticshubdataexchanges.yaml +++ b/package/crds/bigquery.gcp.upbound.io_analyticshubdataexchanges.yaml @@ -106,6 +106,21 @@ spec: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. type: string + sharingEnvironmentConfig: + description: |- + Configurable data sharing environment option for a data exchange. + This field is required for data clean room exchanges. + Structure is documented below. + properties: + dcrExchangeConfig: + description: Data Clean Room (DCR), used for privacy-safe + and secured data sharing. + type: object + defaultExchangeConfig: + description: Default Analytics Hub data exchange, used for + secured data sharing. + type: object + type: object type: object initProvider: description: |- @@ -152,6 +167,21 @@ spec: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. 
type: string + sharingEnvironmentConfig: + description: |- + Configurable data sharing environment option for a data exchange. + This field is required for data clean room exchanges. + Structure is documented below. + properties: + dcrExchangeConfig: + description: Data Clean Room (DCR), used for privacy-safe + and secured data sharing. + type: object + defaultExchangeConfig: + description: Default Analytics Hub data exchange, used for + secured data sharing. + type: object + type: object type: object managementPolicies: default: @@ -382,6 +412,21 @@ spec: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. type: string + sharingEnvironmentConfig: + description: |- + Configurable data sharing environment option for a data exchange. + This field is required for data clean room exchanges. + Structure is documented below. + properties: + dcrExchangeConfig: + description: Data Clean Room (DCR), used for privacy-safe + and secured data sharing. + type: object + defaultExchangeConfig: + description: Default Analytics Hub data exchange, used for + secured data sharing. + type: object + type: object type: object conditions: description: Conditions of the resource. diff --git a/package/crds/bigquery.gcp.upbound.io_analyticshublistings.yaml b/package/crds/bigquery.gcp.upbound.io_analyticshublistings.yaml index 8e63f68a3..eab2581bf 100644 --- a/package/crds/bigquery.gcp.upbound.io_analyticshublistings.yaml +++ b/package/crds/bigquery.gcp.upbound.io_analyticshublistings.yaml @@ -996,6 +996,94 @@ spec: type: string type: object type: object + selectedResources: + description: |- + Resource in this dataset that is selectively shared. This field is required for data clean room exchanges. + Structure is documented below. 
+ items: + properties: + table: + description: 'Format: For table: projects/{projectId}/datasets/{datasetId}/tables/{tableId} + Example:"projects/test_project/datasets/test_dataset/tables/test_table"' + type: string + tableRef: + description: Reference to a Table in bigquery to populate + table. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + tableSelector: + description: Selector for a Table in bigquery to populate + table. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array type: object categories: description: Categories of the listing. Up to two categories are @@ -1118,7 +1206,8 @@ spec: description: The name of the location this data exchange listing. type: string primaryContact: - description: Email or URL of the listing publisher. + description: Email or URL of the primary point of contact of the + listing. type: string project: description: |- @@ -1255,6 +1344,94 @@ spec: type: string type: object type: object + selectedResources: + description: |- + Resource in this dataset that is selectively shared. This field is required for data clean room exchanges. + Structure is documented below. + items: + properties: + table: + description: 'Format: For table: projects/{projectId}/datasets/{datasetId}/tables/{tableId} + Example:"projects/test_project/datasets/test_dataset/tables/test_table"' + type: string + tableRef: + description: Reference to a Table in bigquery to populate + table. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + tableSelector: + description: Selector for a Table in bigquery to populate + table. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array type: object categories: description: Categories of the listing. Up to two categories are @@ -1293,7 +1470,8 @@ spec: description: Base64 encoded image representing the listing. type: string primaryContact: - description: Email or URL of the listing publisher. + description: Email or URL of the primary point of contact of the + listing. type: string project: description: |- @@ -1520,6 +1698,18 @@ spec: description: Resource name of the dataset source for this listing. e.g. 
projects/myproject/datasets/123 type: string + selectedResources: + description: |- + Resource in this dataset that is selectively shared. This field is required for data clean room exchanges. + Structure is documented below. + items: + properties: + table: + description: 'Format: For table: projects/{projectId}/datasets/{datasetId}/tables/{tableId} + Example:"projects/test_project/datasets/test_dataset/tables/test_table"' + type: string + type: object + type: array type: object categories: description: Categories of the listing. Up to two categories are @@ -1572,7 +1762,8 @@ spec: description: The resource name of the listing. e.g. "projects/myproject/locations/US/dataExchanges/123/listings/456" type: string primaryContact: - description: Email or URL of the listing publisher. + description: Email or URL of the primary point of contact of the + listing. type: string project: description: |- @@ -1603,6 +1794,11 @@ spec: enabled: description: If true, enable restricted export. type: boolean + restrictDirectTableAccess: + description: |- + (Output) + If true, restrict direct table access(read api/tabledata.list) on linked table. + type: boolean restrictQueryResult: description: If true, restrict export of query result derived from restricted linked dataset table. diff --git a/package/crds/bigquery.gcp.upbound.io_datasetaccesses.yaml b/package/crds/bigquery.gcp.upbound.io_datasetaccesses.yaml index 33aac8a86..2a2ede898 100644 --- a/package/crds/bigquery.gcp.upbound.io_datasetaccesses.yaml +++ b/package/crds/bigquery.gcp.upbound.io_datasetaccesses.yaml @@ -1960,6 +1960,32 @@ spec: type: string forProvider: properties: + condition: + description: |- + Condition for the binding. If CEL expression in this field is true, this + access binding will be considered. + Structure is documented below. + properties: + description: + description: |- + Description of the expression. This is a longer text which describes the expression, + e.g. when hovered over it in a UI. 
+ type: string + expression: + description: Textual representation of an expression in Common + Expression Language syntax. + type: string + location: + description: |- + String indicating the location of the expression for error reporting, e.g. a file + name and a position in the file. + type: string + title: + description: |- + Title for the expression, i.e. a short string describing its purpose. + This can be used e.g. in UIs which allow to enter the expression. + type: string + type: object dataset: description: |- Grants all resources of particular types in a particular dataset read access to the current dataset. @@ -2692,6 +2718,32 @@ spec: for example because of an external controller is managing them, like an autoscaler. properties: + condition: + description: |- + Condition for the binding. If CEL expression in this field is true, this + access binding will be considered. + Structure is documented below. + properties: + description: + description: |- + Description of the expression. This is a longer text which describes the expression, + e.g. when hovered over it in a UI. + type: string + expression: + description: Textual representation of an expression in Common + Expression Language syntax. + type: string + location: + description: |- + String indicating the location of the expression for error reporting, e.g. a file + name and a position in the file. + type: string + title: + description: |- + Title for the expression, i.e. a short string describing its purpose. + This can be used e.g. in UIs which allow to enter the expression. + type: string + type: object dataset: description: |- Grants all resources of particular types in a particular dataset read access to the current dataset. @@ -3585,6 +3637,32 @@ spec: properties: apiUpdatedMember: type: boolean + condition: + description: |- + Condition for the binding. If CEL expression in this field is true, this + access binding will be considered. + Structure is documented below. 
+ properties: + description: + description: |- + Description of the expression. This is a longer text which describes the expression, + e.g. when hovered over it in a UI. + type: string + expression: + description: Textual representation of an expression in Common + Expression Language syntax. + type: string + location: + description: |- + String indicating the location of the expression for error reporting, e.g. a file + name and a position in the file. + type: string + title: + description: |- + Title for the expression, i.e. a short string describing its purpose. + This can be used e.g. in UIs which allow to enter the expression. + type: string + type: object dataset: description: |- Grants all resources of particular types in a particular dataset read access to the current dataset. diff --git a/package/crds/bigquery.gcp.upbound.io_datasets.yaml b/package/crds/bigquery.gcp.upbound.io_datasets.yaml index c54eb8fa2..7eaa93fe0 100644 --- a/package/crds/bigquery.gcp.upbound.io_datasets.yaml +++ b/package/crds/bigquery.gcp.upbound.io_datasets.yaml @@ -2019,6 +2019,36 @@ spec: Structure is documented below. items: properties: + condition: + description: |- + Condition for the binding. If CEL expression in this field is true, this + access binding will be considered. + Structure is documented below. + properties: + description: + description: A user-friendly description of the dataset + type: string + expression: + description: Textual representation of an expression + in Common Expression Language syntax. + type: string + location: + description: |- + The geographic location where the dataset should reside. + See official docs. + There are two types of locations, regional or multi-regional. A regional + location is a specific geographic place, such as Tokyo, and a multi-regional + location is a large geographic area, such as the United States, that + contains at least two geographic places. + The default value is multi-regional location US. 
+ Changing this forces a new resource to be created. + type: string + title: + description: |- + Title for the expression, i.e. a short string describing its purpose. + This can be used e.g. in UIs which allow to enter the expression. + type: string + type: object dataset: description: |- Grants all resources of particular types in a particular dataset read access to the current dataset. @@ -2729,6 +2759,36 @@ spec: Structure is documented below. items: properties: + condition: + description: |- + Condition for the binding. If CEL expression in this field is true, this + access binding will be considered. + Structure is documented below. + properties: + description: + description: A user-friendly description of the dataset + type: string + expression: + description: Textual representation of an expression + in Common Expression Language syntax. + type: string + location: + description: |- + The geographic location where the dataset should reside. + See official docs. + There are two types of locations, regional or multi-regional. A regional + location is a specific geographic place, such as Tokyo, and a multi-regional + location is a large geographic area, such as the United States, that + contains at least two geographic places. + The default value is multi-regional location US. + Changing this forces a new resource to be created. + type: string + title: + description: |- + Title for the expression, i.e. a short string describing its purpose. + This can be used e.g. in UIs which allow to enter the expression. + type: string + type: object dataset: description: |- Grants all resources of particular types in a particular dataset read access to the current dataset. @@ -3593,6 +3653,36 @@ spec: Structure is documented below. items: properties: + condition: + description: |- + Condition for the binding. If CEL expression in this field is true, this + access binding will be considered. + Structure is documented below. 
+ properties: + description: + description: A user-friendly description of the dataset + type: string + expression: + description: Textual representation of an expression + in Common Expression Language syntax. + type: string + location: + description: |- + The geographic location where the dataset should reside. + See official docs. + There are two types of locations, regional or multi-regional. A regional + location is a specific geographic place, such as Tokyo, and a multi-regional + location is a large geographic area, such as the United States, that + contains at least two geographic places. + The default value is multi-regional location US. + Changing this forces a new resource to be created. + type: string + title: + description: |- + Title for the expression, i.e. a short string describing its purpose. + This can be used e.g. in UIs which allow to enter the expression. + type: string + type: object dataset: description: |- Grants all resources of particular types in a particular dataset read access to the current dataset. diff --git a/package/crds/bigquery.gcp.upbound.io_datatransferconfigs.yaml b/package/crds/bigquery.gcp.upbound.io_datatransferconfigs.yaml index adcf7bd0b..a3ad889e4 100644 --- a/package/crds/bigquery.gcp.upbound.io_datatransferconfigs.yaml +++ b/package/crds/bigquery.gcp.upbound.io_datatransferconfigs.yaml @@ -1064,6 +1064,90 @@ spec: transfer run failures. type: boolean type: object + encryptionConfiguration: + description: |- + Represents the encryption configuration for a transfer. + Structure is documented below. + properties: + kmsKeyName: + description: The name of the KMS key used for encrypting BigQuery + data. + type: string + kmsKeyNameRef: + description: Reference to a CryptoKey in kms to populate kmsKeyName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyNameSelector: + description: Selector for a CryptoKey in kms to populate kmsKeyName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object location: description: |- The geographic location where the transfer config should reside. @@ -1288,6 +1372,90 @@ spec: transfer run failures. type: boolean type: object + encryptionConfiguration: + description: |- + Represents the encryption configuration for a transfer. + Structure is documented below. + properties: + kmsKeyName: + description: The name of the KMS key used for encrypting BigQuery + data. + type: string + kmsKeyNameRef: + description: Reference to a CryptoKey in kms to populate kmsKeyName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyNameSelector: + description: Selector for a CryptoKey in kms to populate kmsKeyName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object location: description: |- The geographic location where the transfer config should reside. @@ -1612,6 +1780,16 @@ spec: transfer run failures. type: boolean type: object + encryptionConfiguration: + description: |- + Represents the encryption configuration for a transfer. + Structure is documented below. + properties: + kmsKeyName: + description: The name of the KMS key used for encrypting BigQuery + data. + type: string + type: object id: description: an identifier for the resource with format {{name}} type: string diff --git a/package/crds/bigquery.gcp.upbound.io_jobs.yaml b/package/crds/bigquery.gcp.upbound.io_jobs.yaml index 1a8087608..a2e2dda1b 100644 --- a/package/crds/bigquery.gcp.upbound.io_jobs.yaml +++ b/package/crds/bigquery.gcp.upbound.io_jobs.yaml @@ -3764,82 +3764,6 @@ spec: Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. type: string - kmsKeyNameRef: - description: Reference to a CryptoKey in kms to populate - kmsKeyName. - properties: - name: - description: Name of the referenced object. - type: string - policy: - description: Policies for referencing. 
- properties: - resolution: - default: Required - description: |- - Resolution specifies whether resolution of this reference is required. - The default is 'Required', which means the reconcile will fail if the - reference cannot be resolved. 'Optional' means this reference will be - a no-op if it cannot be resolved. - enum: - - Required - - Optional - type: string - resolve: - description: |- - Resolve specifies when this reference should be resolved. The default - is 'IfNotPresent', which will attempt to resolve the reference only when - the corresponding field is not present. Use 'Always' to resolve the - reference on every reconcile. - enum: - - Always - - IfNotPresent - type: string - type: object - required: - - name - type: object - kmsKeyNameSelector: - description: Selector for a CryptoKey in kms to populate - kmsKeyName. - properties: - matchControllerRef: - description: |- - MatchControllerRef ensures an object with the same controller reference - as the selecting object is selected. - type: boolean - matchLabels: - additionalProperties: - type: string - description: MatchLabels ensures an object with matching - labels is selected. - type: object - policy: - description: Policies for selection. - properties: - resolution: - default: Required - description: |- - Resolution specifies whether resolution of this reference is required. - The default is 'Required', which means the reconcile will fail if the - reference cannot be resolved. 'Optional' means this reference will be - a no-op if it cannot be resolved. - enum: - - Required - - Optional - type: string - resolve: - description: |- - Resolve specifies when this reference should be resolved. The default - is 'IfNotPresent', which will attempt to resolve the reference only when - the corresponding field is not present. Use 'Always' to resolve the - reference on every reconcile. 
- enum: - - Always - - IfNotPresent - type: string - type: object - type: object type: object destinationTable: description: |- @@ -5072,82 +4996,6 @@ spec: Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. type: string - kmsKeyNameRef: - description: Reference to a CryptoKey in kms to populate - kmsKeyName. - properties: - name: - description: Name of the referenced object. - type: string - policy: - description: Policies for referencing. - properties: - resolution: - default: Required - description: |- - Resolution specifies whether resolution of this reference is required. - The default is 'Required', which means the reconcile will fail if the - reference cannot be resolved. 'Optional' means this reference will be - a no-op if it cannot be resolved. - enum: - - Required - - Optional - type: string - resolve: - description: |- - Resolve specifies when this reference should be resolved. The default - is 'IfNotPresent', which will attempt to resolve the reference only when - the corresponding field is not present. Use 'Always' to resolve the - reference on every reconcile. - enum: - - Always - - IfNotPresent - type: string - type: object - required: - - name - type: object - kmsKeyNameSelector: - description: Selector for a CryptoKey in kms to populate - kmsKeyName. - properties: - matchControllerRef: - description: |- - MatchControllerRef ensures an object with the same controller reference - as the selecting object is selected. - type: boolean - matchLabels: - additionalProperties: - type: string - description: MatchLabels ensures an object with matching - labels is selected. - type: object - policy: - description: Policies for selection. - properties: - resolution: - default: Required - description: |- - Resolution specifies whether resolution of this reference is required. 
- The default is 'Required', which means the reconcile will fail if the - reference cannot be resolved. 'Optional' means this reference will be - a no-op if it cannot be resolved. - enum: - - Required - - Optional - type: string - resolve: - description: |- - Resolve specifies when this reference should be resolved. The default - is 'IfNotPresent', which will attempt to resolve the reference only when - the corresponding field is not present. Use 'Always' to resolve the - reference on every reconcile. - enum: - - Always - - IfNotPresent - type: string - type: object - type: object type: object destinationTable: description: |- diff --git a/package/crds/bigquery.gcp.upbound.io_reservations.yaml b/package/crds/bigquery.gcp.upbound.io_reservations.yaml index a848bb32d..b190f5836 100644 --- a/package/crds/bigquery.gcp.upbound.io_reservations.yaml +++ b/package/crds/bigquery.gcp.upbound.io_reservations.yaml @@ -555,11 +555,6 @@ spec: The geographic location where the transfer config should reside. Examples: US, EU, asia-northeast1. The default value is US. type: string - multiRegionAuxiliary: - description: |- - Applicable only for reservations located within one of the BigQuery multi-regions (US or EU). - If set to true, this reservation is placed in the organization's secondary region which is designated for disaster recovery purposes. If false, this reservation is placed in the organization's default region. - type: boolean project: description: |- The ID of the project in which the resource belongs. @@ -610,11 +605,6 @@ spec: the same admin project. If true, a query using this reservation will execute with the slot capacity specified above at most. type: boolean - multiRegionAuxiliary: - description: |- - Applicable only for reservations located within one of the BigQuery multi-regions (US or EU). - If set to true, this reservation is placed in the organization's secondary region which is designated for disaster recovery purposes. 
If false, this reservation is placed in the organization's default region. - type: boolean slotCapacity: description: |- Minimum slots available to this reservation. A slot is a unit of computational power in BigQuery, and serves as the @@ -837,11 +827,6 @@ spec: The geographic location where the transfer config should reside. Examples: US, EU, asia-northeast1. The default value is US. type: string - multiRegionAuxiliary: - description: |- - Applicable only for reservations located within one of the BigQuery multi-regions (US or EU). - If set to true, this reservation is placed in the organization's secondary region which is designated for disaster recovery purposes. If false, this reservation is placed in the organization's default region. - type: boolean project: description: |- The ID of the project in which the resource belongs. diff --git a/package/crds/bigquery.gcp.upbound.io_tables.yaml b/package/crds/bigquery.gcp.upbound.io_tables.yaml index a1c79e0b3..a54980d8e 100644 --- a/package/crds/bigquery.gcp.upbound.io_tables.yaml +++ b/package/crds/bigquery.gcp.upbound.io_tables.yaml @@ -2441,12 +2441,30 @@ spec: type: string forProvider: properties: - allowResourceTagsOnDeletion: - description: |- - If set to true, it allows table - deletion when there are still resource tags attached. The default value is - false. - type: boolean + biglakeConfiguration: + description: Specifies the configuration of a BigLake managed + table. Structure is documented below + properties: + connectionId: + description: |- + The connection specifying the credentials to be used to + read and write to external storage, such as Cloud Storage. The connection_id can + have the form ".." or + projects//locations//connections/". + type: string + fileFormat: + description: The file format the table data is stored in. + type: string + storageUri: + description: |- + The fully qualified location prefix of the external folder where table data + is stored. 
The '*' wildcard character is not allowed. The URI should be in the format "gs://bucket/path_to_table/" + type: string + tableFormat: + description: The table format the metadata only snapshots + are stored in. + type: string + type: object clustering: description: |- Specifies column names to use for data clustering. @@ -2534,10 +2552,6 @@ spec: type: string type: object type: object - deletionProtection: - description: When the field is set to false, deleting the table - is allowed.. - type: boolean description: description: The field description. type: string @@ -2572,7 +2586,7 @@ spec: properties: autodetect: description: |- - - Let BigQuery try to autodetect the schema + Let BigQuery try to autodetect the schema and format of the table. type: boolean avroOptions: @@ -2927,7 +2941,7 @@ spec: type: object maxStaleness: description: |- - : The maximum staleness of data that could be + The maximum staleness of data that could be returned when the table (or stale MV) is queried. Staleness encoded as a string encoding of SQL IntervalValue type. @@ -2997,36 +3011,38 @@ spec: properties: columnReferences: description: |- - : The pair of the foreign key column and primary key column. + The pair of the foreign key column and primary key column. Structure is documented below. properties: referencedColumn: description: |- - : The column in the primary key that are + The column in the primary key that are referenced by the referencingColumn type: string referencingColumn: - description: ': The column that composes the foreign - key.' + description: The column that composes the foreign + key. type: string type: object name: - description: ': Set only if the foreign key constraint - is named.' + description: |- + ) + Name of the SerDe. + The maximum length is 256 characters. type: string referencedTable: description: |- - : The table that holds the primary key + The table that holds the primary key and is referenced by this foreign key. Structure is documented below. 
properties: datasetId: - description: ': The ID of the dataset containing - this table.' + description: The ID of the dataset containing this + table. type: string projectId: - description: ': The ID of the project containing - this table.' + description: The ID of the project containing this + table. type: string tableId: description: |- @@ -3046,8 +3062,8 @@ spec: Structure is documented below. properties: columns: - description: ': The columns that are composed of the - primary key constraint.' + description: The columns that are composed of the primary + key constraint. items: type: string type: array @@ -3133,12 +3149,30 @@ spec: for example because of an external controller is managing them, like an autoscaler. properties: - allowResourceTagsOnDeletion: - description: |- - If set to true, it allows table - deletion when there are still resource tags attached. The default value is - false. - type: boolean + biglakeConfiguration: + description: Specifies the configuration of a BigLake managed + table. Structure is documented below + properties: + connectionId: + description: |- + The connection specifying the credentials to be used to + read and write to external storage, such as Cloud Storage. The connection_id can + have the form ".." or + projects//locations//connections/". + type: string + fileFormat: + description: The file format the table data is stored in. + type: string + storageUri: + description: |- + The fully qualified location prefix of the external folder where table data + is stored. The '*' wildcard character is not allowed. The URI should be in the format "gs://bucket/path_to_table/" + type: string + tableFormat: + description: The table format the metadata only snapshots + are stored in. + type: string + type: object clustering: description: |- Specifies column names to use for data clustering. 
@@ -3147,10 +3181,6 @@ spec: items: type: string type: array - deletionProtection: - description: When the field is set to false, deleting the table - is allowed.. - type: boolean description: description: The field description. type: string @@ -3185,7 +3215,7 @@ spec: properties: autodetect: description: |- - - Let BigQuery try to autodetect the schema + Let BigQuery try to autodetect the schema and format of the table. type: boolean avroOptions: @@ -3540,7 +3570,7 @@ spec: type: object maxStaleness: description: |- - : The maximum staleness of data that could be + The maximum staleness of data that could be returned when the table (or stale MV) is queried. Staleness encoded as a string encoding of SQL IntervalValue type. @@ -3605,32 +3635,34 @@ spec: properties: columnReferences: description: |- - : The pair of the foreign key column and primary key column. + The pair of the foreign key column and primary key column. Structure is documented below. properties: referencedColumn: description: |- - : The column in the primary key that are + The column in the primary key that are referenced by the referencingColumn type: string referencingColumn: - description: ': The column that composes the foreign - key.' + description: The column that composes the foreign + key. type: string type: object name: - description: ': Set only if the foreign key constraint - is named.' + description: |- + ) + Name of the SerDe. + The maximum length is 256 characters. type: string referencedTable: description: |- - : The table that holds the primary key + The table that holds the primary key and is referenced by this foreign key. Structure is documented below. properties: projectId: - description: ': The ID of the project containing - this table.' + description: The ID of the project containing this + table. type: string tableId: description: |- @@ -3648,8 +3680,8 @@ spec: Structure is documented below. 
properties: columns: - description: ': The columns that are composed of the - primary key constraint.' + description: The columns that are composed of the primary + key constraint. items: type: string type: array @@ -3894,12 +3926,30 @@ spec: properties: atProvider: properties: - allowResourceTagsOnDeletion: - description: |- - If set to true, it allows table - deletion when there are still resource tags attached. The default value is - false. - type: boolean + biglakeConfiguration: + description: Specifies the configuration of a BigLake managed + table. Structure is documented below + properties: + connectionId: + description: |- + The connection specifying the credentials to be used to + read and write to external storage, such as Cloud Storage. The connection_id can + have the form ".." or + projects//locations//connections/". + type: string + fileFormat: + description: The file format the table data is stored in. + type: string + storageUri: + description: |- + The fully qualified location prefix of the external folder where table data + is stored. The '*' wildcard character is not allowed. The URI should be in the format "gs://bucket/path_to_table/" + type: string + tableFormat: + description: The table format the metadata only snapshots + are stored in. + type: string + type: object clustering: description: |- Specifies column names to use for data clustering. @@ -3967,7 +4017,7 @@ spec: properties: autodetect: description: |- - - Let BigQuery try to autodetect the schema + Let BigQuery try to autodetect the schema and format of the table. type: boolean avroOptions: @@ -4291,7 +4341,7 @@ spec: description: A descriptive name for the table. 
type: string id: - description: an identifier for the resource with format projects/{{project}}/datasets/{{dataset}}/tables/{{name}} + description: An identifier for the resource with format projects/{{project}}/datasets/{{dataset}}/tables/{{name}} type: string labels: additionalProperties: @@ -4333,7 +4383,7 @@ spec: type: object maxStaleness: description: |- - : The maximum staleness of data that could be + The maximum staleness of data that could be returned when the table (or stale MV) is queried. Staleness encoded as a string encoding of SQL IntervalValue type. @@ -4418,36 +4468,38 @@ spec: properties: columnReferences: description: |- - : The pair of the foreign key column and primary key column. + The pair of the foreign key column and primary key column. Structure is documented below. properties: referencedColumn: description: |- - : The column in the primary key that are + The column in the primary key that are referenced by the referencingColumn type: string referencingColumn: - description: ': The column that composes the foreign - key.' + description: The column that composes the foreign + key. type: string type: object name: - description: ': Set only if the foreign key constraint - is named.' + description: |- + ) + Name of the SerDe. + The maximum length is 256 characters. type: string referencedTable: description: |- - : The table that holds the primary key + The table that holds the primary key and is referenced by this foreign key. Structure is documented below. properties: datasetId: - description: ': The ID of the dataset containing - this table.' + description: The ID of the dataset containing this + table. type: string projectId: - description: ': The ID of the project containing - this table.' + description: The ID of the project containing this + table. type: string tableId: description: |- @@ -4465,8 +4517,8 @@ spec: Structure is documented below. 
properties: columns: - description: ': The columns that are composed of the - primary key constraint.' + description: The columns that are composed of the primary + key constraint. items: type: string type: array diff --git a/package/crds/bigtable.gcp.upbound.io_appprofiles.yaml b/package/crds/bigtable.gcp.upbound.io_appprofiles.yaml index 983cfc986..6e77ad907 100644 --- a/package/crds/bigtable.gcp.upbound.io_appprofiles.yaml +++ b/package/crds/bigtable.gcp.upbound.io_appprofiles.yaml @@ -778,6 +778,8 @@ spec: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. type: string + rowAffinity: + type: boolean singleClusterRouting: description: |- Use a single-cluster routing policy. @@ -852,6 +854,8 @@ spec: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. type: string + rowAffinity: + type: boolean singleClusterRouting: description: |- Use a single-cluster routing policy. @@ -1096,6 +1100,8 @@ spec: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. type: string + rowAffinity: + type: boolean singleClusterRouting: description: |- Use a single-cluster routing policy. diff --git a/package/crds/bigtable.gcp.upbound.io_instances.yaml b/package/crds/bigtable.gcp.upbound.io_instances.yaml index 10f13ce98..92cfbf992 100644 --- a/package/crds/bigtable.gcp.upbound.io_instances.yaml +++ b/package/crds/bigtable.gcp.upbound.io_instances.yaml @@ -754,10 +754,6 @@ spec: type: string type: object type: array - deletionProtection: - description: When the field is set to false, deleting the instance - is allowed. - type: boolean displayName: description: The human-readable display name of the Bigtable instance. Defaults to the instance name. @@ -865,10 +861,6 @@ spec: type: string type: object type: array - deletionProtection: - description: When the field is set to false, deleting the instance - is allowed. 
- type: boolean displayName: description: The human-readable display name of the Bigtable instance. Defaults to the instance name. diff --git a/package/crds/bigtable.gcp.upbound.io_tables.yaml b/package/crds/bigtable.gcp.upbound.io_tables.yaml index a3fe8b950..76b2c18d8 100644 --- a/package/crds/bigtable.gcp.upbound.io_tables.yaml +++ b/package/crds/bigtable.gcp.upbound.io_tables.yaml @@ -96,15 +96,11 @@ spec: family: description: The name of the column family. type: string + type: + description: The type of the column family. + type: string type: object type: array - deletionProtection: - description: A field to make the table protected against data - loss i.e. when set to PROTECTED, deleting the table, the column - families in the table, and the instance containing the table - would be prohibited. If not provided, deletion protection will - be set to UNPROTECTED. - type: string instanceName: description: The name of the Bigtable instance. type: string @@ -229,15 +225,11 @@ spec: family: description: The name of the column family. type: string + type: + description: The type of the column family. + type: string type: object type: array - deletionProtection: - description: A field to make the table protected against data - loss i.e. when set to PROTECTED, deleting the table, the column - families in the table, and the instance containing the table - would be prohibited. If not provided, deletion protection will - be set to UNPROTECTED. - type: string project: description: |- The ID of the project in which the resource belongs. If it @@ -444,6 +436,9 @@ spec: family: description: The name of the column family. type: string + type: + description: The type of the column family. 
+ type: string type: object type: array deletionProtection: diff --git a/package/crds/certificatemanager.gcp.upbound.io_certificates.yaml b/package/crds/certificatemanager.gcp.upbound.io_certificates.yaml index f3d946fb5..c87b4ee73 100644 --- a/package/crds/certificatemanager.gcp.upbound.io_certificates.yaml +++ b/package/crds/certificatemanager.gcp.upbound.io_certificates.yaml @@ -1981,6 +1981,12 @@ spec: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. type: string + sanDnsnames: + description: The list of Subject Alternative Names of dnsName + type defined in the certificate (see RFC 5280 4.2.1.6) + items: + type: string + type: array scope: description: |- The scope of the certificate. diff --git a/package/crds/cloudbuild.gcp.upbound.io_workerpools.yaml b/package/crds/cloudbuild.gcp.upbound.io_workerpools.yaml index 3f7825d4d..3de401ec1 100644 --- a/package/crds/cloudbuild.gcp.upbound.io_workerpools.yaml +++ b/package/crds/cloudbuild.gcp.upbound.io_workerpools.yaml @@ -855,6 +855,13 @@ spec: type: object type: object type: object + privateServiceConnect: + properties: + networkAttachment: + type: string + routeAllTraffic: + type: boolean + type: object project: description: The project for the resource type: string @@ -864,14 +871,12 @@ spec: properties: diskSizeGb: description: Size of the disk attached to the worker, in GB. - See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). - Specify a value of up to 1000. If 0 is specified, Cloud - Build will use a standard disk size. + See diskSizeGb. Specify a value of up to 1000. If 0 is specified, + Cloud Build will use a standard disk size. type: number machineType: description: Machine type of a worker, such as n1-standard-1. - See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). - If left blank, Cloud Build will use n1-standard-1. + See machineType. 
If left blank, Cloud Build will use n1-standard-1. type: string noExternalIp: description: If true, workers are created without any public @@ -1002,6 +1007,13 @@ spec: type: object type: object type: object + privateServiceConnect: + properties: + networkAttachment: + type: string + routeAllTraffic: + type: boolean + type: object project: description: The project for the resource type: string @@ -1011,14 +1023,12 @@ spec: properties: diskSizeGb: description: Size of the disk attached to the worker, in GB. - See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). - Specify a value of up to 1000. If 0 is specified, Cloud - Build will use a standard disk size. + See diskSizeGb. Specify a value of up to 1000. If 0 is specified, + Cloud Build will use a standard disk size. type: number machineType: description: Machine type of a worker, such as n1-standard-1. - See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). - If left blank, Cloud Build will use n1-standard-1. + See machineType. If left blank, Cloud Build will use n1-standard-1. type: string noExternalIp: description: If true, workers are created without any public @@ -1249,6 +1259,13 @@ spec: peered VPC. If unspecified, a value of /24 will be used. type: string type: object + privateServiceConnect: + properties: + networkAttachment: + type: string + routeAllTraffic: + type: boolean + type: object project: description: The project for the resource type: string @@ -1269,14 +1286,12 @@ spec: properties: diskSizeGb: description: Size of the disk attached to the worker, in GB. - See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). - Specify a value of up to 1000. If 0 is specified, Cloud - Build will use a standard disk size. + See diskSizeGb. Specify a value of up to 1000. If 0 is specified, + Cloud Build will use a standard disk size. 
type: number machineType: description: Machine type of a worker, such as n1-standard-1. - See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). - If left blank, Cloud Build will use n1-standard-1. + See machineType. If left blank, Cloud Build will use n1-standard-1. type: string noExternalIp: description: If true, workers are created without any public diff --git a/package/crds/cloudfunctions2.gcp.upbound.io_functions.yaml b/package/crds/cloudfunctions2.gcp.upbound.io_functions.yaml index d7f3cda5e..16025cf82 100644 --- a/package/crds/cloudfunctions2.gcp.upbound.io_functions.yaml +++ b/package/crds/cloudfunctions2.gcp.upbound.io_functions.yaml @@ -3400,7 +3400,7 @@ spec: NOT match the revision regex. type: boolean projectId: - description: Project identifier (preferrably project + description: Project identifier (preferably project number but can also be the project ID) of the project that contains the secret. If not set, it will be populated with the function's project assuming that @@ -4033,7 +4033,7 @@ spec: description: Name of the environment variable. type: string projectId: - description: Project identifier (preferrably project + description: Project identifier (preferably project number but can also be the project ID) of the project that contains the secret. If not set, it will be populated with the function's project assuming that the secret @@ -4142,7 +4142,7 @@ spec: any other secrets. Recommended mount path: /etc/secrets' type: string projectId: - description: Project identifier (preferrably project + description: Project identifier (preferably project number but can also be the project ID) of the project that contains the secret. If not set, it will be populated with the function's project assuming that the secret @@ -4588,7 +4588,7 @@ spec: NOT match the revision regex. 
type: boolean projectId: - description: Project identifier (preferrably project + description: Project identifier (preferably project number but can also be the project ID) of the project that contains the secret. If not set, it will be populated with the function's project assuming that @@ -5218,7 +5218,7 @@ spec: description: Name of the environment variable. type: string projectId: - description: Project identifier (preferrably project + description: Project identifier (preferably project number but can also be the project ID) of the project that contains the secret. If not set, it will be populated with the function's project assuming that the secret @@ -5327,7 +5327,7 @@ spec: any other secrets. Recommended mount path: /etc/secrets' type: string projectId: - description: Project identifier (preferrably project + description: Project identifier (preferably project number but can also be the project ID) of the project that contains the secret. If not set, it will be populated with the function's project assuming that the secret @@ -5790,7 +5790,7 @@ spec: NOT match the revision regex. type: boolean projectId: - description: Project identifier (preferrably project + description: Project identifier (preferably project number but can also be the project ID) of the project that contains the secret. If not set, it will be populated with the function's project assuming that @@ -5991,7 +5991,7 @@ spec: description: Name of the environment variable. type: string projectId: - description: Project identifier (preferrably project + description: Project identifier (preferably project number but can also be the project ID) of the project that contains the secret. If not set, it will be populated with the function's project assuming that the secret @@ -6024,7 +6024,7 @@ spec: any other secrets. 
Recommended mount path: /etc/secrets' type: string projectId: - description: Project identifier (preferrably project + description: Project identifier (preferably project number but can also be the project ID) of the project that contains the secret. If not set, it will be populated with the function's project assuming that the secret diff --git a/package/crds/cloudplatform.gcp.upbound.io_folders.yaml b/package/crds/cloudplatform.gcp.upbound.io_folders.yaml index 4824d4c89..4f455149b 100644 --- a/package/crds/cloudplatform.gcp.upbound.io_folders.yaml +++ b/package/crds/cloudplatform.gcp.upbound.io_folders.yaml @@ -159,6 +159,19 @@ spec: type: string type: object type: object + tags: + additionalProperties: + type: string + description: A map of resource manager tags. Resource manager + tag keys and values have the same definition as resource manager + tags. Keys must be in the format tagKeys/{tag_key_id}, and values + are in the format tagValues/456. The field is ignored when empty. + The field is immutable and causes resource replacement when mutated. + This field is only set at create time and modifying this field + after creation will trigger recreation. To apply tags to an + existing resource, see the google_tags_tag_value resource. + type: object + x-kubernetes-map-type: granular type: object initProvider: description: |- @@ -259,6 +272,19 @@ spec: type: string type: object type: object + tags: + additionalProperties: + type: string + description: A map of resource manager tags. Resource manager + tag keys and values have the same definition as resource manager + tags. Keys must be in the format tagKeys/{tag_key_id}, and values + are in the format tagValues/456. The field is ignored when empty. + The field is immutable and causes resource replacement when mutated. + This field is only set at create time and modifying this field + after creation will trigger recreation. To apply tags to an + existing resource, see the google_tags_tag_value resource. 
+ type: object + x-kubernetes-map-type: granular type: object managementPolicies: default: @@ -442,6 +468,8 @@ spec: Timestamp when the Folder was created. Assigned by the server. A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds. Example: "2014-10-02T15:01:23.045123456Z". type: string + deletionProtection: + type: boolean displayName: description: |- The folder’s display name. @@ -464,6 +492,19 @@ spec: The resource name of the parent Folder or Organization. Must be of the form folders/{folder_id} or organizations/{org_id}. type: string + tags: + additionalProperties: + type: string + description: A map of resource manager tags. Resource manager + tag keys and values have the same definition as resource manager + tags. Keys must be in the format tagKeys/{tag_key_id}, and values + are in the format tagValues/456. The field is ignored when empty. + The field is immutable and causes resource replacement when mutated. + This field is only set at create time and modifying this field + after creation will trigger recreation. To apply tags to an + existing resource, see the google_tags_tag_value resource. + type: object + x-kubernetes-map-type: granular type: object conditions: description: Conditions of the resource. diff --git a/package/crds/cloudplatform.gcp.upbound.io_projects.yaml b/package/crds/cloudplatform.gcp.upbound.io_projects.yaml index a48db0ef0..0dd3af36f 100644 --- a/package/crds/cloudplatform.gcp.upbound.io_projects.yaml +++ b/package/crds/cloudplatform.gcp.upbound.io_projects.yaml @@ -91,7 +91,7 @@ spec: deletionPolicy: description: |- The deletion policy for the Project. Setting ABANDON allows the resource - to be abandoned rather than deleted, i.e. Possible values are: "PREVENT", "ABANDON", "DELETE". Default value is DELETE. + to be abandoned rather than deleted, i.e. Possible values are: "PREVENT", "ABANDON", "DELETE". Default value is PREVENT. 
type: string folderId: description: |- @@ -203,13 +203,20 @@ spec: description: The project ID. Changing this forces a new project to be created. type: string - skipDelete: - description: |- - skip_delete is deprecated and will be - removed in 6.0.0. Please use deletion_policy instead. A skip_delete value of false - can be changed to a deletion_policy value of DELETE and a skip_delete value of true - to a deletion_policy value of ABANDON for equivalent behavior. - type: boolean + tags: + additionalProperties: + type: string + description: A map of resource manager tags. Resource manager + tag keys and values have the same definition as resource manager + tags. Keys must be in the format tagKeys/{tag_key_id}, and values + are in the format tagValues/456. The field is ignored when empty. + The field is immutable and causes resource replacement when + mutated. This field is only set at create time and modifying + this field after creation will trigger recreation. To apply + tags to an existing resource, see the google_tags_tag_value + resource. + type: object + x-kubernetes-map-type: granular type: object initProvider: description: |- @@ -242,7 +249,7 @@ spec: deletionPolicy: description: |- The deletion policy for the Project. Setting ABANDON allows the resource - to be abandoned rather than deleted, i.e. Possible values are: "PREVENT", "ABANDON", "DELETE". Default value is DELETE. + to be abandoned rather than deleted, i.e. Possible values are: "PREVENT", "ABANDON", "DELETE". Default value is PREVENT. type: string folderId: description: |- @@ -354,13 +361,20 @@ spec: description: The project ID. Changing this forces a new project to be created. type: string - skipDelete: - description: |- - skip_delete is deprecated and will be - removed in 6.0.0. Please use deletion_policy instead. A skip_delete value of false - can be changed to a deletion_policy value of DELETE and a skip_delete value of true - to a deletion_policy value of ABANDON for equivalent behavior. 
- type: boolean + tags: + additionalProperties: + type: string + description: A map of resource manager tags. Resource manager + tag keys and values have the same definition as resource manager + tags. Keys must be in the format tagKeys/{tag_key_id}, and values + are in the format tagValues/456. The field is ignored when empty. + The field is immutable and causes resource replacement when + mutated. This field is only set at create time and modifying + this field after creation will trigger recreation. To apply + tags to an existing resource, see the google_tags_tag_value + resource. + type: object + x-kubernetes-map-type: granular type: object managementPolicies: default: @@ -561,7 +575,7 @@ spec: deletionPolicy: description: |- The deletion policy for the Project. Setting ABANDON allows the resource - to be abandoned rather than deleted, i.e. Possible values are: "PREVENT", "ABANDON", "DELETE". Default value is DELETE. + to be abandoned rather than deleted, i.e. Possible values are: "PREVENT", "ABANDON", "DELETE". Default value is PREVENT. type: string effectiveLabels: additionalProperties: @@ -608,13 +622,20 @@ spec: description: The project ID. Changing this forces a new project to be created. type: string - skipDelete: - description: |- - skip_delete is deprecated and will be - removed in 6.0.0. Please use deletion_policy instead. A skip_delete value of false - can be changed to a deletion_policy value of DELETE and a skip_delete value of true - to a deletion_policy value of ABANDON for equivalent behavior. - type: boolean + tags: + additionalProperties: + type: string + description: A map of resource manager tags. Resource manager + tag keys and values have the same definition as resource manager + tags. Keys must be in the format tagKeys/{tag_key_id}, and values + are in the format tagValues/456. The field is ignored when empty. + The field is immutable and causes resource replacement when + mutated. 
This field is only set at create time and modifying + this field after creation will trigger recreation. To apply + tags to an existing resource, see the google_tags_tag_value + resource. + type: object + x-kubernetes-map-type: granular terraformLabels: additionalProperties: type: string diff --git a/package/crds/cloudrun.gcp.upbound.io_services.yaml b/package/crds/cloudrun.gcp.upbound.io_services.yaml index fc4e3b917..bb4814922 100644 --- a/package/crds/cloudrun.gcp.upbound.io_services.yaml +++ b/package/crds/cloudrun.gcp.upbound.io_services.yaml @@ -3543,9 +3543,74 @@ spec: Structure is documented below. items: properties: + csi: + description: |- + A filesystem specified by the Container Storage Interface (CSI). + Structure is documented below. + properties: + driver: + description: 'Unique name representing the type + of file system to be created. Cloud Run supports + the following values:' + type: string + readOnly: + description: If true, mount the NFS volume as + read only in all mounts. Defaults to false. + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: 'Driver-specific attributes. The + following options are supported for available + drivers:' + type: object + x-kubernetes-map-type: granular + type: object + emptyDir: + description: |- + Ephemeral storage which can be backed by real disks (HD, SSD), network storage or memory (i.e. tmpfs). For now only in memory (tmpfs) is supported. It is ephemeral in the sense that when the sandbox is taken down, the data is destroyed with it (it does not persist across sandbox runs). + Structure is documented below. + properties: + medium: + description: The medium on which the data is + stored. The default is "" which means to use + the node's default medium. Must be an empty + string (default) or Memory. + type: string + sizeLimit: + description: 'Limit on the storage usable by + this EmptyDir volume. The size limit is also + applicable for memory medium. 
The maximum + usage on memory medium EmptyDir would be the + minimum value between the SizeLimit specified + here and the sum of memory limits of all containers + in a pod. This field''s values are of the + ''Quantity'' k8s type: https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/quantity/. + The default is nil which means that the limit + is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir.' + type: string + type: object name: description: Volume's name. type: string + nfs: + description: |- + A filesystem backed by a Network File System share. This filesystem requires the + run.googleapis.com/execution-environment annotation to be unset or set to "gen2" + Structure is documented below. + properties: + path: + description: Path exported by the NFS server + type: string + readOnly: + description: If true, mount the NFS volume as + read only in all mounts. Defaults to false. + type: boolean + server: + description: IP address or hostname of the NFS + server + type: string + type: object secret: description: |- The secret's value will be presented as the content of a file whose @@ -4359,9 +4424,74 @@ spec: Structure is documented below. items: properties: + csi: + description: |- + A filesystem specified by the Container Storage Interface (CSI). + Structure is documented below. + properties: + driver: + description: 'Unique name representing the type + of file system to be created. Cloud Run supports + the following values:' + type: string + readOnly: + description: If true, mount the NFS volume as + read only in all mounts. Defaults to false. + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: 'Driver-specific attributes. The + following options are supported for available + drivers:' + type: object + x-kubernetes-map-type: granular + type: object + emptyDir: + description: |- + Ephemeral storage which can be backed by real disks (HD, SSD), network storage or memory (i.e. 
tmpfs). For now only in memory (tmpfs) is supported. It is ephemeral in the sense that when the sandbox is taken down, the data is destroyed with it (it does not persist across sandbox runs). + Structure is documented below. + properties: + medium: + description: The medium on which the data is + stored. The default is "" which means to use + the node's default medium. Must be an empty + string (default) or Memory. + type: string + sizeLimit: + description: 'Limit on the storage usable by + this EmptyDir volume. The size limit is also + applicable for memory medium. The maximum + usage on memory medium EmptyDir would be the + minimum value between the SizeLimit specified + here and the sum of memory limits of all containers + in a pod. This field''s values are of the + ''Quantity'' k8s type: https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/quantity/. + The default is nil which means that the limit + is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir.' + type: string + type: object name: description: Volume's name. type: string + nfs: + description: |- + A filesystem backed by a Network File System share. This filesystem requires the + run.googleapis.com/execution-environment annotation to be unset or set to "gen2" + Structure is documented below. + properties: + path: + description: Path exported by the NFS server + type: string + readOnly: + description: If true, mount the NFS volume as + read only in all mounts. Defaults to false. + type: boolean + server: + description: IP address or hostname of the NFS + server + type: string + type: object secret: description: |- The secret's value will be presented as the content of a file whose @@ -5363,9 +5493,74 @@ spec: Structure is documented below. items: properties: + csi: + description: |- + A filesystem specified by the Container Storage Interface (CSI). + Structure is documented below. 
+ properties: + driver: + description: 'Unique name representing the type + of file system to be created. Cloud Run supports + the following values:' + type: string + readOnly: + description: If true, mount the NFS volume as + read only in all mounts. Defaults to false. + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: 'Driver-specific attributes. The + following options are supported for available + drivers:' + type: object + x-kubernetes-map-type: granular + type: object + emptyDir: + description: |- + Ephemeral storage which can be backed by real disks (HD, SSD), network storage or memory (i.e. tmpfs). For now only in memory (tmpfs) is supported. It is ephemeral in the sense that when the sandbox is taken down, the data is destroyed with it (it does not persist across sandbox runs). + Structure is documented below. + properties: + medium: + description: The medium on which the data is + stored. The default is "" which means to use + the node's default medium. Must be an empty + string (default) or Memory. + type: string + sizeLimit: + description: 'Limit on the storage usable by + this EmptyDir volume. The size limit is also + applicable for memory medium. The maximum + usage on memory medium EmptyDir would be the + minimum value between the SizeLimit specified + here and the sum of memory limits of all containers + in a pod. This field''s values are of the + ''Quantity'' k8s type: https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/quantity/. + The default is nil which means that the limit + is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir.' + type: string + type: object name: description: Volume's name. type: string + nfs: + description: |- + A filesystem backed by a Network File System share. This filesystem requires the + run.googleapis.com/execution-environment annotation to be unset or set to "gen2" + Structure is documented below. 
+ properties: + path: + description: Path exported by the NFS server + type: string + readOnly: + description: If true, mount the NFS volume as + read only in all mounts. Defaults to false. + type: boolean + server: + description: IP address or hostname of the NFS + server + type: string + type: object secret: description: |- The secret's value will be presented as the content of a file whose diff --git a/package/crds/cloudrun.gcp.upbound.io_v2jobs.yaml b/package/crds/cloudrun.gcp.upbound.io_v2jobs.yaml index 1018aa2e3..76ad52e39 100644 --- a/package/crds/cloudrun.gcp.upbound.io_v2jobs.yaml +++ b/package/crds/cloudrun.gcp.upbound.io_v2jobs.yaml @@ -2783,9 +2783,66 @@ spec: type: object type: object type: object + emptyDir: + description: |- + Ephemeral storage used as a shared volume. + Structure is documented below. + properties: + medium: + description: |- + The different types of medium supported for EmptyDir. + Default value is MEMORY. + Possible values are: MEMORY. + type: string + sizeLimit: + description: 'Limit on the storage usable by + this EmptyDir volume. The size limit is also + applicable for memory medium. The maximum + usage on memory medium EmptyDir would be the + minimum value between the SizeLimit specified + here and the sum of memory limits of all containers + in a pod. This field''s values are of the + ''Quantity'' k8s type: https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/quantity/. + The default is nil which means that the limit + is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir.' + type: string + type: object + gcs: + description: |- + Cloud Storage bucket mounted as a volume using GCSFuse. + Structure is documented below. + properties: + bucket: + description: Name of the cloud storage bucket + to back the volume. The resource service account + must have permission to access the bucket. 
+ type: string + readOnly: + description: If true, mount this volume as read-only + in all mounts. + type: boolean + type: object name: description: Volume's name. type: string + nfs: + description: |- + NFS share mounted as a volume. + Structure is documented below. + properties: + path: + description: Path that is exported by the NFS + server. + type: string + readOnly: + description: If true, mount this volume as read-only + in all mounts. + type: boolean + server: + description: Hostname or IP address of the NFS + server. + type: string + type: object secret: description: |- Secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret @@ -3414,9 +3471,66 @@ spec: type: object type: object type: object + emptyDir: + description: |- + Ephemeral storage used as a shared volume. + Structure is documented below. + properties: + medium: + description: |- + The different types of medium supported for EmptyDir. + Default value is MEMORY. + Possible values are: MEMORY. + type: string + sizeLimit: + description: 'Limit on the storage usable by + this EmptyDir volume. The size limit is also + applicable for memory medium. The maximum + usage on memory medium EmptyDir would be the + minimum value between the SizeLimit specified + here and the sum of memory limits of all containers + in a pod. This field''s values are of the + ''Quantity'' k8s type: https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/quantity/. + The default is nil which means that the limit + is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir.' + type: string + type: object + gcs: + description: |- + Cloud Storage bucket mounted as a volume using GCSFuse. + Structure is documented below. + properties: + bucket: + description: Name of the cloud storage bucket + to back the volume. The resource service account + must have permission to access the bucket. 
+ type: string + readOnly: + description: If true, mount this volume as read-only + in all mounts. + type: boolean + type: object name: description: Volume's name. type: string + nfs: + description: |- + NFS share mounted as a volume. + Structure is documented below. + properties: + path: + description: Path that is exported by the NFS + server. + type: string + readOnly: + description: If true, mount this volume as read-only + in all mounts. + type: boolean + server: + description: Hostname or IP address of the NFS + server. + type: string + type: object secret: description: |- Secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret @@ -3852,6 +3966,11 @@ spec: deleteTime: description: The deletion time. type: string + deletionProtection: + description: |- + Defaults to true. + When the field is set to false, deleting the job is allowed. + type: boolean effectiveAnnotations: additionalProperties: type: string @@ -3872,7 +3991,7 @@ spec: type: number expireTime: description: For a deleted resource, the time after which it will - be permamently deleted. + be permanently deleted. type: string generation: description: A number that monotonically increases every time @@ -4180,9 +4299,66 @@ spec: type: string type: array type: object + emptyDir: + description: |- + Ephemeral storage used as a shared volume. + Structure is documented below. + properties: + medium: + description: |- + The different types of medium supported for EmptyDir. + Default value is MEMORY. + Possible values are: MEMORY. + type: string + sizeLimit: + description: 'Limit on the storage usable by + this EmptyDir volume. The size limit is also + applicable for memory medium. The maximum + usage on memory medium EmptyDir would be the + minimum value between the SizeLimit specified + here and the sum of memory limits of all containers + in a pod. 
This field''s values are of the + ''Quantity'' k8s type: https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/quantity/. + The default is nil which means that the limit + is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir.' + type: string + type: object + gcs: + description: |- + Cloud Storage bucket mounted as a volume using GCSFuse. + Structure is documented below. + properties: + bucket: + description: Name of the cloud storage bucket + to back the volume. The resource service account + must have permission to access the bucket. + type: string + readOnly: + description: If true, mount this volume as read-only + in all mounts. + type: boolean + type: object name: description: Volume's name. type: string + nfs: + description: |- + NFS share mounted as a volume. + Structure is documented below. + properties: + path: + description: Path that is exported by the NFS + server. + type: string + readOnly: + description: If true, mount this volume as read-only + in all mounts. + type: boolean + server: + description: Hostname or IP address of the NFS + server. + type: string + type: object secret: description: |- Secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret diff --git a/package/crds/cloudrun.gcp.upbound.io_v2services.yaml b/package/crds/cloudrun.gcp.upbound.io_v2services.yaml index 6b7966d3a..0ffbf3a43 100644 --- a/package/crds/cloudrun.gcp.upbound.io_v2services.yaml +++ b/package/crds/cloudrun.gcp.upbound.io_v2services.yaml @@ -3378,6 +3378,11 @@ spec: Provides the ingress settings for this Service. On output, returns the currently observed ingress settings, or INGRESS_TRAFFIC_UNSPECIFIED if no revision is active. Possible values are: INGRESS_TRAFFIC_ALL, INGRESS_TRAFFIC_INTERNAL_ONLY, INGRESS_TRAFFIC_INTERNAL_LOAD_BALANCER. 
type: string + invokerIamDisabled: + description: Disables IAM permission check for run.routes.invoke + for callers of this service. This feature is available by invitation + only. For more information, visit https://cloud.google.com/run/docs/securing/managing-access#invoker_check. + type: boolean labels: additionalProperties: type: string @@ -3403,6 +3408,17 @@ spec: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. type: string + scaling: + description: |- + Scaling settings for this Revision. + Structure is documented below. + properties: + minInstanceCount: + description: Minimum number of serving instances that this + resource should have. Defaults to 0. Must not be greater + than maximum instance count. + type: number + type: object template: description: |- The template used to create revisions for this Service. @@ -3693,13 +3709,13 @@ spec: limits: additionalProperties: type: string - description: 'Only memory and CPU are supported. - Use key cpu for CPU limit and memory for memory - limit. Note: The only supported values for CPU - are ''1'', ''2'', ''4'', and ''8''. Setting 4 - CPU requires at least 2Gi of memory. The values - of the map is string form of the ''quantity'' - k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go' + description: 'Only memory, CPU, and nvidia.com/gpu + are supported. Use key cpu for CPU limit, memory + for memory limit, nvidia.com/gpu for gpu limit. + Note: The only supported values for CPU are ''1'', + ''2'', ''4'', and ''8''. Setting 4 CPU requires + at least 2Gi of memory. The values of the map + is string form of the ''quantity'' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go' type: object x-kubernetes-map-type: granular startupCpuBoost: @@ -3863,12 +3879,14 @@ spec: Structure is documented below. 
properties: maxInstanceCount: - description: Maximum number of serving instances that - this resource should have. + description: |- + Maximum number of serving instances that this resource should have. Must not be less than minimum instance count. If absent, Cloud Run will calculate + a default value based on the project's available container instances quota in the region and specified instance size. type: number minInstanceCount: description: Minimum number of serving instances that - this resource should have. + this resource should have. Defaults to 0. Must not be + greater than maximum instance count. type: number type: object serviceAccount: @@ -3988,9 +4006,32 @@ spec: type: object type: object type: object + emptyDir: + description: |- + Ephemeral storage used as a shared volume. + Structure is documented below. + properties: + medium: + description: |- + The different types of medium supported for EmptyDir. + Default value is MEMORY. + Possible values are: MEMORY. + type: string + sizeLimit: + description: 'Limit on the storage usable by this + EmptyDir volume. The size limit is also applicable + for memory medium. The maximum usage on memory + medium EmptyDir would be the minimum value between + the SizeLimit specified here and the sum of memory + limits of all containers in a pod. This field''s + values are of the ''Quantity'' k8s type: https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/quantity/. + The default is nil which means that the limit + is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir.' + type: string + type: object gcs: description: |- - Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment and requires launch-stage to be set to ALPHA or BETA. + Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment. Structure is documented below. 
properties: bucket: @@ -4350,6 +4391,11 @@ spec: Provides the ingress settings for this Service. On output, returns the currently observed ingress settings, or INGRESS_TRAFFIC_UNSPECIFIED if no revision is active. Possible values are: INGRESS_TRAFFIC_ALL, INGRESS_TRAFFIC_INTERNAL_ONLY, INGRESS_TRAFFIC_INTERNAL_LOAD_BALANCER. type: string + invokerIamDisabled: + description: Disables IAM permission check for run.routes.invoke + for callers of this service. This feature is available by invitation + only. For more information, visit https://cloud.google.com/run/docs/securing/managing-access#invoker_check. + type: boolean labels: additionalProperties: type: string @@ -4372,6 +4418,17 @@ spec: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. type: string + scaling: + description: |- + Scaling settings for this Revision. + Structure is documented below. + properties: + minInstanceCount: + description: Minimum number of serving instances that this + resource should have. Defaults to 0. Must not be greater + than maximum instance count. + type: number + type: object template: description: |- The template used to create revisions for this Service. @@ -4662,13 +4719,13 @@ spec: limits: additionalProperties: type: string - description: 'Only memory and CPU are supported. - Use key cpu for CPU limit and memory for memory - limit. Note: The only supported values for CPU - are ''1'', ''2'', ''4'', and ''8''. Setting 4 - CPU requires at least 2Gi of memory. The values - of the map is string form of the ''quantity'' - k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go' + description: 'Only memory, CPU, and nvidia.com/gpu + are supported. Use key cpu for CPU limit, memory + for memory limit, nvidia.com/gpu for gpu limit. + Note: The only supported values for CPU are ''1'', + ''2'', ''4'', and ''8''. Setting 4 CPU requires + at least 2Gi of memory. 
The values of the map + is string form of the ''quantity'' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go' type: object x-kubernetes-map-type: granular startupCpuBoost: @@ -4832,12 +4889,14 @@ spec: Structure is documented below. properties: maxInstanceCount: - description: Maximum number of serving instances that - this resource should have. + description: |- + Maximum number of serving instances that this resource should have. Must not be less than minimum instance count. If absent, Cloud Run will calculate + a default value based on the project's available container instances quota in the region and specified instance size. type: number minInstanceCount: description: Minimum number of serving instances that - this resource should have. + this resource should have. Defaults to 0. Must not be + greater than maximum instance count. type: number type: object serviceAccount: @@ -4957,9 +5016,32 @@ spec: type: object type: object type: object + emptyDir: + description: |- + Ephemeral storage used as a shared volume. + Structure is documented below. + properties: + medium: + description: |- + The different types of medium supported for EmptyDir. + Default value is MEMORY. + Possible values are: MEMORY. + type: string + sizeLimit: + description: 'Limit on the storage usable by this + EmptyDir volume. The size limit is also applicable + for memory medium. The maximum usage on memory + medium EmptyDir would be the minimum value between + the SizeLimit specified here and the sum of memory + limits of all containers in a pod. This field''s + values are of the ''Quantity'' k8s type: https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/quantity/. + The default is nil which means that the limit + is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir.' 
+ type: string + type: object gcs: description: |- - Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment and requires launch-stage to be set to ALPHA or BETA. + Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment. Structure is documented below. properties: bucket: @@ -5530,6 +5612,11 @@ spec: deleteTime: description: The deletion time. type: string + deletionProtection: + description: |- + Defaults to true. + When the field is set to false, deleting the service is allowed. + type: boolean description: description: User-provided description of the Service. This field currently has a 512-character limit. @@ -5551,7 +5638,7 @@ spec: type: string expireTime: description: For a deleted resource, the time after which it will - be permamently deleted. + be permanently deleted. type: string generation: description: A number that monotonically increases every time @@ -5567,6 +5654,11 @@ spec: Provides the ingress settings for this Service. On output, returns the currently observed ingress settings, or INGRESS_TRAFFIC_UNSPECIFIED if no revision is active. Possible values are: INGRESS_TRAFFIC_ALL, INGRESS_TRAFFIC_INTERNAL_ONLY, INGRESS_TRAFFIC_INTERNAL_LOAD_BALANCER. type: string + invokerIamDisabled: + description: Disables IAM permission check for run.routes.invoke + for callers of this service. This feature is available by invitation + only. For more information, visit https://cloud.google.com/run/docs/securing/managing-access#invoker_check. + type: boolean labels: additionalProperties: type: string @@ -5619,6 +5711,17 @@ spec: If reconciliation succeeded, the following fields will match: traffic and trafficStatuses, observedGeneration and generation, latestReadyRevision and latestCreatedRevision. 
If reconciliation failed, trafficStatuses, observedGeneration, and latestReadyRevision will have the state of the last serving revision, or empty for newly created Services. Additional information on the failure can be found in terminalCondition and conditions. type: boolean + scaling: + description: |- + Scaling settings for this Revision. + Structure is documented below. + properties: + minInstanceCount: + description: Minimum number of serving instances that this + resource should have. Defaults to 0. Must not be greater + than maximum instance count. + type: number + type: object template: description: |- The template used to create revisions for this Service. @@ -5832,13 +5935,13 @@ spec: limits: additionalProperties: type: string - description: 'Only memory and CPU are supported. - Use key cpu for CPU limit and memory for memory - limit. Note: The only supported values for CPU - are ''1'', ''2'', ''4'', and ''8''. Setting 4 - CPU requires at least 2Gi of memory. The values - of the map is string form of the ''quantity'' - k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go' + description: 'Only memory, CPU, and nvidia.com/gpu + are supported. Use key cpu for CPU limit, memory + for memory limit, nvidia.com/gpu for gpu limit. + Note: The only supported values for CPU are ''1'', + ''2'', ''4'', and ''8''. Setting 4 CPU requires + at least 2Gi of memory. The values of the map + is string form of the ''quantity'' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go' type: object x-kubernetes-map-type: granular startupCpuBoost: @@ -6002,12 +6105,14 @@ spec: Structure is documented below. properties: maxInstanceCount: - description: Maximum number of serving instances that - this resource should have. + description: |- + Maximum number of serving instances that this resource should have. 
Must not be less than minimum instance count. If absent, Cloud Run will calculate + a default value based on the project's available container instances quota in the region and specified instance size. type: number minInstanceCount: description: Minimum number of serving instances that - this resource should have. + this resource should have. Defaults to 0. Must not be + greater than maximum instance count. type: number type: object serviceAccount: @@ -6048,9 +6153,32 @@ spec: type: array x-kubernetes-list-type: set type: object + emptyDir: + description: |- + Ephemeral storage used as a shared volume. + Structure is documented below. + properties: + medium: + description: |- + The different types of medium supported for EmptyDir. + Default value is MEMORY. + Possible values are: MEMORY. + type: string + sizeLimit: + description: 'Limit on the storage usable by this + EmptyDir volume. The size limit is also applicable + for memory medium. The maximum usage on memory + medium EmptyDir would be the minimum value between + the SizeLimit specified here and the sum of memory + limits of all containers in a pod. This field''s + values are of the ''Quantity'' k8s type: https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/quantity/. + The default is nil which means that the limit + is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir.' + type: string + type: object gcs: description: |- - Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment and requires launch-stage to be set to ALPHA or BETA. + Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment. Structure is documented below. properties: bucket: @@ -6290,6 +6418,11 @@ spec: uri: description: The main URI in which this Service is serving traffic. type: string + urls: + description: All URLs serving traffic for this Service. 
+ items: + type: string + type: array type: object conditions: description: Conditions of the resource. diff --git a/package/crds/cloudtasks.gcp.upbound.io_queues.yaml b/package/crds/cloudtasks.gcp.upbound.io_queues.yaml index 76c822215..4003f1044 100644 --- a/package/crds/cloudtasks.gcp.upbound.io_queues.yaml +++ b/package/crds/cloudtasks.gcp.upbound.io_queues.yaml @@ -903,6 +903,293 @@ spec: By default, the task is sent to the version which is the default version when the task is attempted. type: string type: object + httpTarget: + description: |- + Modifies HTTP target for HTTP tasks. + Structure is documented below. + properties: + headerOverrides: + description: |- + HTTP target headers. + This map contains the header field names and values. + Headers will be set when running the CreateTask and/or BufferTask. + These headers represent a subset of the headers that will be configured for the task's HTTP request. + Some HTTP request headers will be ignored or replaced. + Headers which can have multiple values (according to RFC2616) can be specified using comma-separated values. + The size of the headers must be less than 80KB. Queue-level headers to override headers of all the tasks in the queue. + Structure is documented below. + items: + properties: + header: + description: |- + Header embodying a key and a value. + Structure is documented below. + properties: + key: + description: The Key of the header. + type: string + value: + description: The Value of the header. + type: string + type: object + type: object + type: array + httpMethod: + description: |- + The HTTP method to use for the request. + When specified, it overrides HttpRequest for the task. + Note that if the value is set to GET the body of the task will be ignored at execution time. + Possible values are: HTTP_METHOD_UNSPECIFIED, POST, GET, HEAD, PUT, DELETE, PATCH, OPTIONS. 
+ type: string + oauthToken: + description: |- + If specified, an OAuth token is generated and attached as the Authorization header in the HTTP request. + This type of authorization should generally be used only when calling Google APIs hosted on *.googleapis.com. + Note that both the service account email and the scope MUST be specified when using the queue-level authorization override. + Structure is documented below. + properties: + scope: + description: |- + OAuth scope to be used for generating OAuth access token. + If not specified, "https://www.googleapis.com/auth/cloud-platform" will be used. + type: string + serviceAccountEmail: + description: |- + Service account email to be used for generating OIDC token. + The service account must be within the same project as the queue. + The caller must have iam.serviceAccounts.actAs permission for the service account. + type: string + serviceAccountEmailRef: + description: Reference to a ServiceAccount in cloudplatform + to populate serviceAccountEmail. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceAccountEmailSelector: + description: Selector for a ServiceAccount in cloudplatform + to populate serviceAccountEmail. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + oidcToken: + description: |- + If specified, an OIDC token is generated and attached as an Authorization header in the HTTP request. + This type of authorization can be used for many scenarios, including calling Cloud Run, or endpoints where you intend to validate the token yourself. + Note that both the service account email and the audience MUST be specified when using the queue-level authorization override. + Structure is documented below. + properties: + audience: + description: Audience to be used when generating OIDC + token. If not specified, the URI specified in target + will be used. 
+ type: string + serviceAccountEmail: + description: |- + Service account email to be used for generating OIDC token. + The service account must be within the same project as the queue. + The caller must have iam.serviceAccounts.actAs permission for the service account. + type: string + serviceAccountEmailRef: + description: Reference to a ServiceAccount in cloudplatform + to populate serviceAccountEmail. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceAccountEmailSelector: + description: Selector for a ServiceAccount in cloudplatform + to populate serviceAccountEmail. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + uriOverride: + description: |- + URI override. + When specified, overrides the execution URI for all the tasks in the queue. + Structure is documented below. + properties: + host: + description: |- + Host override. + When specified, replaces the host part of the task URL. + For example, if the task URL is "https://www.google.com", and host value + is set to "example.net", the overridden URI will be changed to "https://example.net". + Host value cannot be an empty string (INVALID_ARGUMENT). + type: string + pathOverride: + description: |- + URI path. + When specified, replaces the existing path of the task URL. + Setting the path value to an empty string clears the URI path segment. + Structure is documented below. + properties: + path: + description: The URI path (e.g., /users/1234). Default + is an empty string. + type: string + type: object + port: + description: |- + Port override. + When specified, replaces the port part of the task URI. + For instance, for a URI http://www.google.com/foo and port=123, the overridden URI becomes http://www.google.com:123/foo. + Note that the port value must be a positive integer. + Setting the port to 0 (Zero) clears the URI port. + type: string + queryOverride: + description: |- + URI query. + When specified, replaces the query part of the task URI. Setting the query value to an empty string clears the URI query segment. 
+ Structure is documented below. + properties: + queryParams: + description: The query parameters (e.g., qparam1=123&qparam2=456). + Default is an empty string. + type: string + type: object + scheme: + description: |- + Scheme override. + When specified, the task URI scheme is replaced by the provided value (HTTP or HTTPS). + Possible values are: HTTP, HTTPS. + type: string + uriOverrideEnforceMode: + description: |- + URI Override Enforce Mode + When specified, determines the Target UriOverride mode. If not specified, it defaults to ALWAYS. + Possible values are: ALWAYS, IF_NOT_EXISTS. + type: string + type: object + type: object location: description: The location of the queue type: string @@ -1099,6 +1386,293 @@ spec: By default, the task is sent to the version which is the default version when the task is attempted. type: string type: object + httpTarget: + description: |- + Modifies HTTP target for HTTP tasks. + Structure is documented below. + properties: + headerOverrides: + description: |- + HTTP target headers. + This map contains the header field names and values. + Headers will be set when running the CreateTask and/or BufferTask. + These headers represent a subset of the headers that will be configured for the task's HTTP request. + Some HTTP request headers will be ignored or replaced. + Headers which can have multiple values (according to RFC2616) can be specified using comma-separated values. + The size of the headers must be less than 80KB. Queue-level headers to override headers of all the tasks in the queue. + Structure is documented below. + items: + properties: + header: + description: |- + Header embodying a key and a value. + Structure is documented below. + properties: + key: + description: The Key of the header. + type: string + value: + description: The Value of the header. + type: string + type: object + type: object + type: array + httpMethod: + description: |- + The HTTP method to use for the request. 
+ When specified, it overrides HttpRequest for the task. + Note that if the value is set to GET the body of the task will be ignored at execution time. + Possible values are: HTTP_METHOD_UNSPECIFIED, POST, GET, HEAD, PUT, DELETE, PATCH, OPTIONS. + type: string + oauthToken: + description: |- + If specified, an OAuth token is generated and attached as the Authorization header in the HTTP request. + This type of authorization should generally be used only when calling Google APIs hosted on *.googleapis.com. + Note that both the service account email and the scope MUST be specified when using the queue-level authorization override. + Structure is documented below. + properties: + scope: + description: |- + OAuth scope to be used for generating OAuth access token. + If not specified, "https://www.googleapis.com/auth/cloud-platform" will be used. + type: string + serviceAccountEmail: + description: |- + Service account email to be used for generating OIDC token. + The service account must be within the same project as the queue. + The caller must have iam.serviceAccounts.actAs permission for the service account. + type: string + serviceAccountEmailRef: + description: Reference to a ServiceAccount in cloudplatform + to populate serviceAccountEmail. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceAccountEmailSelector: + description: Selector for a ServiceAccount in cloudplatform + to populate serviceAccountEmail. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + oidcToken: + description: |- + If specified, an OIDC token is generated and attached as an Authorization header in the HTTP request. + This type of authorization can be used for many scenarios, including calling Cloud Run, or endpoints where you intend to validate the token yourself. + Note that both the service account email and the audience MUST be specified when using the queue-level authorization override. + Structure is documented below. + properties: + audience: + description: Audience to be used when generating OIDC + token. 
If not specified, the URI specified in target + will be used. + type: string + serviceAccountEmail: + description: |- + Service account email to be used for generating OIDC token. + The service account must be within the same project as the queue. + The caller must have iam.serviceAccounts.actAs permission for the service account. + type: string + serviceAccountEmailRef: + description: Reference to a ServiceAccount in cloudplatform + to populate serviceAccountEmail. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceAccountEmailSelector: + description: Selector for a ServiceAccount in cloudplatform + to populate serviceAccountEmail. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + uriOverride: + description: |- + URI override. + When specified, overrides the execution URI for all the tasks in the queue. + Structure is documented below. + properties: + host: + description: |- + Host override. + When specified, replaces the host part of the task URL. + For example, if the task URL is "https://www.google.com", and host value + is set to "example.net", the overridden URI will be changed to "https://example.net". + Host value cannot be an empty string (INVALID_ARGUMENT). + type: string + pathOverride: + description: |- + URI path. + When specified, replaces the existing path of the task URL. + Setting the path value to an empty string clears the URI path segment. + Structure is documented below. + properties: + path: + description: The URI path (e.g., /users/1234). Default + is an empty string. + type: string + type: object + port: + description: |- + Port override. + When specified, replaces the port part of the task URI. + For instance, for a URI http://www.google.com/foo and port=123, the overridden URI becomes http://www.google.com:123/foo. + Note that the port value must be a positive integer. + Setting the port to 0 (Zero) clears the URI port. + type: string + queryOverride: + description: |- + URI query. 
+ When specified, replaces the query part of the task URI. Setting the query value to an empty string clears the URI query segment. + Structure is documented below. + properties: + queryParams: + description: The query parameters (e.g., qparam1=123&qparam2=456). + Default is an empty string. + type: string + type: object + scheme: + description: |- + Scheme override. + When specified, the task URI scheme is replaced by the provided value (HTTP or HTTPS). + Possible values are: HTTP, HTTPS. + type: string + uriOverrideEnforceMode: + description: |- + URI Override Enforce Mode + When specified, determines the Target UriOverride mode. If not specified, it defaults to ALWAYS. + Possible values are: ALWAYS, IF_NOT_EXISTS. + type: string + type: object + type: object project: description: |- The ID of the project in which the resource belongs. @@ -1454,6 +2028,141 @@ spec: By default, the task is sent to the version which is the default version when the task is attempted. type: string type: object + httpTarget: + description: |- + Modifies HTTP target for HTTP tasks. + Structure is documented below. + properties: + headerOverrides: + description: |- + HTTP target headers. + This map contains the header field names and values. + Headers will be set when running the CreateTask and/or BufferTask. + These headers represent a subset of the headers that will be configured for the task's HTTP request. + Some HTTP request headers will be ignored or replaced. + Headers which can have multiple values (according to RFC2616) can be specified using comma-separated values. + The size of the headers must be less than 80KB. Queue-level headers to override headers of all the tasks in the queue. + Structure is documented below. + items: + properties: + header: + description: |- + Header embodying a key and a value. + Structure is documented below. + properties: + key: + description: The Key of the header. + type: string + value: + description: The Value of the header. 
+ type: string + type: object + type: object + type: array + httpMethod: + description: |- + The HTTP method to use for the request. + When specified, it overrides HttpRequest for the task. + Note that if the value is set to GET the body of the task will be ignored at execution time. + Possible values are: HTTP_METHOD_UNSPECIFIED, POST, GET, HEAD, PUT, DELETE, PATCH, OPTIONS. + type: string + oauthToken: + description: |- + If specified, an OAuth token is generated and attached as the Authorization header in the HTTP request. + This type of authorization should generally be used only when calling Google APIs hosted on *.googleapis.com. + Note that both the service account email and the scope MUST be specified when using the queue-level authorization override. + Structure is documented below. + properties: + scope: + description: |- + OAuth scope to be used for generating OAuth access token. + If not specified, "https://www.googleapis.com/auth/cloud-platform" will be used. + type: string + serviceAccountEmail: + description: |- + Service account email to be used for generating OIDC token. + The service account must be within the same project as the queue. + The caller must have iam.serviceAccounts.actAs permission for the service account. + type: string + type: object + oidcToken: + description: |- + If specified, an OIDC token is generated and attached as an Authorization header in the HTTP request. + This type of authorization can be used for many scenarios, including calling Cloud Run, or endpoints where you intend to validate the token yourself. + Note that both the service account email and the audience MUST be specified when using the queue-level authorization override. + Structure is documented below. + properties: + audience: + description: Audience to be used when generating OIDC + token. If not specified, the URI specified in target + will be used. 
+ type: string + serviceAccountEmail: + description: |- + Service account email to be used for generating OIDC token. + The service account must be within the same project as the queue. + The caller must have iam.serviceAccounts.actAs permission for the service account. + type: string + type: object + uriOverride: + description: |- + URI override. + When specified, overrides the execution URI for all the tasks in the queue. + Structure is documented below. + properties: + host: + description: |- + Host override. + When specified, replaces the host part of the task URL. + For example, if the task URL is "https://www.google.com", and host value + is set to "example.net", the overridden URI will be changed to "https://example.net". + Host value cannot be an empty string (INVALID_ARGUMENT). + type: string + pathOverride: + description: |- + URI path. + When specified, replaces the existing path of the task URL. + Setting the path value to an empty string clears the URI path segment. + Structure is documented below. + properties: + path: + description: The URI path (e.g., /users/1234). Default + is an empty string. + type: string + type: object + port: + description: |- + Port override. + When specified, replaces the port part of the task URI. + For instance, for a URI http://www.google.com/foo and port=123, the overridden URI becomes http://www.google.com:123/foo. + Note that the port value must be a positive integer. + Setting the port to 0 (Zero) clears the URI port. + type: string + queryOverride: + description: |- + URI query. + When specified, replaces the query part of the task URI. Setting the query value to an empty string clears the URI query segment. + Structure is documented below. + properties: + queryParams: + description: The query parameters (e.g., qparam1=123&qparam2=456). + Default is an empty string. + type: string + type: object + scheme: + description: |- + Scheme override. 
+ When specified, the task URI scheme is replaced by the provided value (HTTP or HTTPS). + Possible values are: HTTP, HTTPS. + type: string + uriOverrideEnforceMode: + description: |- + URI Override Enforce Mode + When specified, determines the Target UriOverride mode. If not specified, it defaults to ALWAYS. + Possible values are: ALWAYS, IF_NOT_EXISTS. + type: string + type: object + type: object id: description: an identifier for the resource with format projects/{{project}}/locations/{{location}}/queues/{{name}} type: string diff --git a/package/crds/composer.gcp.upbound.io_environments.yaml b/package/crds/composer.gcp.upbound.io_environments.yaml index 46012da78..d52ce8e71 100644 --- a/package/crds/composer.gcp.upbound.io_environments.yaml +++ b/package/crds/composer.gcp.upbound.io_environments.yaml @@ -2687,9 +2687,20 @@ spec: properties: dataRetentionConfig: description: |- - Configuration setting for Airflow database retention mechanism. Structure is + Configuration setting for airflow data rentention mechanism. Structure is documented below. properties: + airflowMetadataRetentionConfig: + description: Configuration parameters for this environment Structure + is documented below. + items: + properties: + retentionDays: + type: number + retentionMode: + type: string + type: object + type: array taskLogsRetentionConfig: description: |- The configuration setting for Task Logs. Structure is @@ -2725,6 +2736,15 @@ spec: belong to the enclosing environment's project and region. type: string type: object + enablePrivateBuildsOnly: + description: |- + If true, builds performed during operations that install Python packages have only private connectivity to Google services. + If false, the builds also have access to the internet. + type: boolean + enablePrivateEnvironment: + description: If true, a private Composer environment will + be created. 
+ type: boolean encryptionConfig: description: |- The encryption options for the Cloud Composer environment and its @@ -2798,6 +2818,17 @@ spec: description: The configuration used for the Kubernetes Engine cluster. Structure is documented below. properties: + composerInternalIpv4CidrBlock: + description: |- + /20 IPv4 cidr range that will be used by Composer internal components. + Cannot be updated. + type: string + composerNetworkAttachment: + description: |- + PSC (Private Service Connect) Network entry point. Customers can pre-create the Network Attachment + and point Cloud Composer environment to use. It is possible to share network attachment among many environments, + provided enough IP addresses are available. + type: string diskSizeGb: description: |- The disk size in GB used for node VMs. Minimum size is 20GB. @@ -3236,6 +3267,17 @@ spec: separated by a hyphen, for example "core-dags_are_paused_at_creation". type: object x-kubernetes-map-type: granular + cloudDataLineageIntegration: + description: |- + The configuration for Cloud Data Lineage integration. Structure is + documented below. + properties: + enabled: + description: When enabled, Cloud Composer periodically + saves snapshots of your environment to a Cloud Storage + bucket. + type: boolean + type: object envVariables: additionalProperties: type: string @@ -3269,6 +3311,10 @@ spec: schedulerCount: description: The number of schedulers for Airflow. type: number + webServerPluginsMode: + description: Web server plugins configuration. Can be + either 'ENABLED' or 'DISABLED'. Defaults to 'ENABLED'. + type: string type: object webServerConfig: description: The configuration settings for the Airflow web @@ -3310,6 +3356,25 @@ spec: The Kubernetes workloads configuration for GKE cluster associated with the Cloud Composer environment. properties: + dagProcessor: + description: Configuration for resources used by DAG processor. + properties: + count: + description: The number of Airflow triggerers. 
+ type: number + cpu: + description: The number of CPUs for a single Airflow + worker. + type: number + memoryGb: + description: The amount of memory (GB) for a single + Airflow worker. + type: number + storageGb: + description: The amount of storage (GB) for a single + Airflow worker. + type: number + type: object scheduler: description: Configuration for resources used by Airflow schedulers. @@ -3523,9 +3588,20 @@ spec: properties: dataRetentionConfig: description: |- - Configuration setting for Airflow database retention mechanism. Structure is + Configuration setting for airflow data rentention mechanism. Structure is documented below. properties: + airflowMetadataRetentionConfig: + description: Configuration parameters for this environment Structure + is documented below. + items: + properties: + retentionDays: + type: number + retentionMode: + type: string + type: object + type: array taskLogsRetentionConfig: description: |- The configuration setting for Task Logs. Structure is @@ -3561,6 +3637,15 @@ spec: belong to the enclosing environment's project and region. type: string type: object + enablePrivateBuildsOnly: + description: |- + If true, builds performed during operations that install Python packages have only private connectivity to Google services. + If false, the builds also have access to the internet. + type: boolean + enablePrivateEnvironment: + description: If true, a private Composer environment will + be created. + type: boolean encryptionConfig: description: |- The encryption options for the Cloud Composer environment and its @@ -3634,6 +3719,17 @@ spec: description: The configuration used for the Kubernetes Engine cluster. Structure is documented below. properties: + composerInternalIpv4CidrBlock: + description: |- + /20 IPv4 cidr range that will be used by Composer internal components. + Cannot be updated. + type: string + composerNetworkAttachment: + description: |- + PSC (Private Service Connect) Network entry point. 
Customers can pre-create the Network Attachment + and point Cloud Composer environment to use. It is possible to share network attachment among many environments, + provided enough IP addresses are available. + type: string diskSizeGb: description: |- The disk size in GB used for node VMs. Minimum size is 20GB. @@ -4072,6 +4168,17 @@ spec: separated by a hyphen, for example "core-dags_are_paused_at_creation". type: object x-kubernetes-map-type: granular + cloudDataLineageIntegration: + description: |- + The configuration for Cloud Data Lineage integration. Structure is + documented below. + properties: + enabled: + description: When enabled, Cloud Composer periodically + saves snapshots of your environment to a Cloud Storage + bucket. + type: boolean + type: object envVariables: additionalProperties: type: string @@ -4105,6 +4212,10 @@ spec: schedulerCount: description: The number of schedulers for Airflow. type: number + webServerPluginsMode: + description: Web server plugins configuration. Can be + either 'ENABLED' or 'DISABLED'. Defaults to 'ENABLED'. + type: string type: object webServerConfig: description: The configuration settings for the Airflow web @@ -4146,6 +4257,25 @@ spec: The Kubernetes workloads configuration for GKE cluster associated with the Cloud Composer environment. properties: + dagProcessor: + description: Configuration for resources used by DAG processor. + properties: + count: + description: The number of Airflow triggerers. + type: number + cpu: + description: The number of CPUs for a single Airflow + worker. + type: number + memoryGb: + description: The amount of memory (GB) for a single + Airflow worker. + type: number + storageGb: + description: The amount of storage (GB) for a single + Airflow worker. + type: number + type: object scheduler: description: Configuration for resources used by Airflow schedulers. 
@@ -4526,9 +4656,20 @@ spec: type: string dataRetentionConfig: description: |- - Configuration setting for Airflow database retention mechanism. Structure is + Configuration setting for airflow data rentention mechanism. Structure is documented below. properties: + airflowMetadataRetentionConfig: + description: Configuration parameters for this environment Structure + is documented below. + items: + properties: + retentionDays: + type: number + retentionMode: + type: string + type: object + type: array taskLogsRetentionConfig: description: |- The configuration setting for Task Logs. Structure is @@ -4564,6 +4705,15 @@ spec: belong to the enclosing environment's project and region. type: string type: object + enablePrivateBuildsOnly: + description: |- + If true, builds performed during operations that install Python packages have only private connectivity to Google services. + If false, the builds also have access to the internet. + type: boolean + enablePrivateEnvironment: + description: If true, a private Composer environment will + be created. + type: boolean encryptionConfig: description: |- The encryption options for the Cloud Composer environment and its @@ -4641,6 +4791,17 @@ spec: description: The configuration used for the Kubernetes Engine cluster. Structure is documented below. properties: + composerInternalIpv4CidrBlock: + description: |- + /20 IPv4 cidr range that will be used by Composer internal components. + Cannot be updated. + type: string + composerNetworkAttachment: + description: |- + PSC (Private Service Connect) Network entry point. Customers can pre-create the Network Attachment + and point Cloud Composer environment to use. It is possible to share network attachment among many environments, + provided enough IP addresses are available. + type: string diskSizeGb: description: |- The disk size in GB used for node VMs. Minimum size is 20GB. @@ -4851,6 +5012,17 @@ spec: separated by a hyphen, for example "core-dags_are_paused_at_creation". 
type: object x-kubernetes-map-type: granular + cloudDataLineageIntegration: + description: |- + The configuration for Cloud Data Lineage integration. Structure is + documented below. + properties: + enabled: + description: When enabled, Cloud Composer periodically + saves snapshots of your environment to a Cloud Storage + bucket. + type: boolean + type: object envVariables: additionalProperties: type: string @@ -4884,6 +5056,10 @@ spec: schedulerCount: description: The number of schedulers for Airflow. type: number + webServerPluginsMode: + description: Web server plugins configuration. Can be + either 'ENABLED' or 'DISABLED'. Defaults to 'ENABLED'. + type: string type: object webServerConfig: description: The configuration settings for the Airflow web @@ -4925,6 +5101,25 @@ spec: The Kubernetes workloads configuration for GKE cluster associated with the Cloud Composer environment. properties: + dagProcessor: + description: Configuration for resources used by DAG processor. + properties: + count: + description: The number of Airflow triggerers. + type: number + cpu: + description: The number of CPUs for a single Airflow + worker. + type: number + memoryGb: + description: The amount of memory (GB) for a single + Airflow worker. + type: number + storageGb: + description: The amount of storage (GB) for a single + Airflow worker. + type: number + type: object scheduler: description: Configuration for resources used by Airflow schedulers. diff --git a/package/crds/compute.gcp.upbound.io_attacheddisks.yaml b/package/crds/compute.gcp.upbound.io_attacheddisks.yaml index fdf1d3d35..b683e947f 100644 --- a/package/crds/compute.gcp.upbound.io_attacheddisks.yaml +++ b/package/crds/compute.gcp.upbound.io_attacheddisks.yaml @@ -239,6 +239,9 @@ spec: type: string type: object type: object + interface: + description: The disk interface used for attaching this disk. 
+ type: string mode: description: |- The mode in which to attach this disk, either READ_WRITE or @@ -435,6 +438,9 @@ spec: type: string type: object type: object + interface: + description: The disk interface used for attaching this disk. + type: string mode: description: |- The mode in which to attach this disk, either READ_WRITE or @@ -645,6 +651,9 @@ spec: self link. If only the name is used then zone and project must be defined as properties on the resource or provider. type: string + interface: + description: The disk interface used for attaching this disk. + type: string mode: description: |- The mode in which to attach this disk, either READ_WRITE or diff --git a/package/crds/compute.gcp.upbound.io_autoscalers.yaml b/package/crds/compute.gcp.upbound.io_autoscalers.yaml index 21512a1fe..5fc46977b 100644 --- a/package/crds/compute.gcp.upbound.io_autoscalers.yaml +++ b/package/crds/compute.gcp.upbound.io_autoscalers.yaml @@ -1179,7 +1179,7 @@ spec: properties: maxScaledInReplicas: description: |- - A nested object resource + A nested object resource. Structure is documented below. properties: fixed: @@ -1442,7 +1442,7 @@ spec: properties: maxScaledInReplicas: description: |- - A nested object resource + A nested object resource. Structure is documented below. properties: fixed: @@ -1864,7 +1864,7 @@ spec: properties: maxScaledInReplicas: description: |- - A nested object resource + A nested object resource. Structure is documented below. properties: fixed: diff --git a/package/crds/compute.gcp.upbound.io_backendservices.yaml b/package/crds/compute.gcp.upbound.io_backendservices.yaml index 68edfc343..1c27d4f4c 100644 --- a/package/crds/compute.gcp.upbound.io_backendservices.yaml +++ b/package/crds/compute.gcp.upbound.io_backendservices.yaml @@ -2926,7 +2926,6 @@ spec: and CONNECTION (for TCP/SSL). See the Backend Services Overview for an explanation of load balancing modes. - From version 6.0.0 default value will be UTILIZATION to match default GCP value. 
Default value is UTILIZATION. Possible values are: UTILIZATION, RATE, CONNECTION. type: string @@ -3205,8 +3204,8 @@ spec: type: number ttl: description: |- - The TTL (in seconds) for which to cache responses with the corresponding status code. The maximum allowed value is 1800s - (30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. + Lifetime of the cookie. + Structure is documented below. type: number type: object type: array @@ -3293,23 +3292,15 @@ spec: Structure is documented below. properties: name: - description: |- - The name of a locality load balancer policy to be used. The value - should be one of the predefined ones as supported by localityLbPolicy, - although at the moment only ROUND_ROBIN is supported. - This field should only be populated when the customPolicy field is not - used. - Note that specifying the same policy more than once for a backend is - not a valid configuration and will be rejected. - The possible values are: + description: Name of the cookie. type: string path: description: Path to set for the cookie. type: string ttl: description: |- - The TTL (in seconds) for which to cache responses with the corresponding status code. The maximum allowed value is 1800s - (30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. + Lifetime of the cookie. + Structure is documented below. properties: nanos: description: |- @@ -3462,6 +3453,10 @@ spec: Settings for enabling Cloud Identity Aware Proxy Structure is documented below. properties: + enabled: + description: Whether the serving infrastructure will authenticate + and authorize all incoming requests. 
+ type: boolean oauth2ClientId: description: OAuth2 Client ID for IAP type: string @@ -3485,6 +3480,11 @@ spec: - namespace type: object type: object + ipAddressSelectionPolicy: + description: |- + Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). + Possible values are: IPV4_ONLY, PREFER_IPV6, IPV6_ONLY. + type: string loadBalancingScheme: description: |- Indicates whether the backend service will be used with internal or @@ -3517,15 +3517,7 @@ spec: by a locally installed custom policy implementation. type: string name: - description: |- - The name of a locality load balancer policy to be used. The value - should be one of the predefined ones as supported by localityLbPolicy, - although at the moment only ROUND_ROBIN is supported. - This field should only be populated when the customPolicy field is not - used. - Note that specifying the same policy more than once for a backend is - not a valid configuration and will be rejected. - The possible values are: + description: Name of the cookie. type: string type: object policy: @@ -3534,15 +3526,7 @@ spec: Structure is documented below. properties: name: - description: |- - The name of a locality load balancer policy to be used. The value - should be one of the predefined ones as supported by localityLbPolicy, - although at the moment only ROUND_ROBIN is supported. - This field should only be populated when the customPolicy field is not - used. - Note that specifying the same policy more than once for a backend is - not a valid configuration and will be rejected. - The possible values are: + description: Name of the cookie. type: string type: object type: object @@ -3575,8 +3559,6 @@ spec: Settings controlling eviction of unhealthy hosts from the load balancing pool. Applicable backend service types can be a global backend service with the loadBalancingScheme set to INTERNAL_SELF_MANAGED or EXTERNAL_MANAGED. - From version 6.0. 
- Default values are enforce by GCP without providing them. Structure is documented below. properties: baseEjectionTime: @@ -3775,8 +3757,38 @@ spec: description: |- Type of session affinity to use. The default is NONE. Session affinity is not applicable if the protocol is UDP. - Possible values are: NONE, CLIENT_IP, CLIENT_IP_PORT_PROTO, CLIENT_IP_PROTO, GENERATED_COOKIE, HEADER_FIELD, HTTP_COOKIE. + Possible values are: NONE, CLIENT_IP, CLIENT_IP_PORT_PROTO, CLIENT_IP_PROTO, GENERATED_COOKIE, HEADER_FIELD, HTTP_COOKIE, STRONG_COOKIE_AFFINITY. type: string + strongSessionAffinityCookie: + description: |- + Describes the HTTP cookie used for stateful session affinity. This field is applicable and required if the sessionAffinity is set to STRONG_COOKIE_AFFINITY. + Structure is documented below. + properties: + name: + description: Name of the cookie. + type: string + path: + description: Path to set for the cookie. + type: string + ttl: + description: |- + Lifetime of the cookie. + Structure is documented below. + properties: + nanos: + description: |- + Span of time that's a fraction of a second at nanosecond + resolution. Durations less than one second are represented + with a 0 seconds field and a positive nanos field. Must + be from 0 to 999,999,999 inclusive. + type: number + seconds: + description: |- + Span of time at a resolution of a second. + Must be from 0 to 315,576,000,000 inclusive. + type: number + type: object + type: object timeoutSec: description: |- The backend service timeout has a different meaning depending on the type of load balancer. @@ -3820,7 +3832,6 @@ spec: and CONNECTION (for TCP/SSL). See the Backend Services Overview for an explanation of load balancing modes. - From version 6.0.0 default value will be UTILIZATION to match default GCP value. Default value is UTILIZATION. Possible values are: UTILIZATION, RATE, CONNECTION. 
type: string @@ -4099,8 +4110,8 @@ spec: type: number ttl: description: |- - The TTL (in seconds) for which to cache responses with the corresponding status code. The maximum allowed value is 1800s - (30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. + Lifetime of the cookie. + Structure is documented below. type: number type: object type: array @@ -4187,23 +4198,15 @@ spec: Structure is documented below. properties: name: - description: |- - The name of a locality load balancer policy to be used. The value - should be one of the predefined ones as supported by localityLbPolicy, - although at the moment only ROUND_ROBIN is supported. - This field should only be populated when the customPolicy field is not - used. - Note that specifying the same policy more than once for a backend is - not a valid configuration and will be rejected. - The possible values are: + description: Name of the cookie. type: string path: description: Path to set for the cookie. type: string ttl: description: |- - The TTL (in seconds) for which to cache responses with the corresponding status code. The maximum allowed value is 1800s - (30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. + Lifetime of the cookie. + Structure is documented below. properties: nanos: description: |- @@ -4356,6 +4359,10 @@ spec: Settings for enabling Cloud Identity Aware Proxy Structure is documented below. properties: + enabled: + description: Whether the serving infrastructure will authenticate + and authorize all incoming requests. + type: boolean oauth2ClientId: description: OAuth2 Client ID for IAP type: string @@ -4378,9 +4385,12 @@ spec: - name - namespace type: object - required: - - oauth2ClientSecretSecretRef type: object + ipAddressSelectionPolicy: + description: |- + Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). 
+ Possible values are: IPV4_ONLY, PREFER_IPV6, IPV6_ONLY. + type: string loadBalancingScheme: description: |- Indicates whether the backend service will be used with internal or @@ -4413,15 +4423,7 @@ spec: by a locally installed custom policy implementation. type: string name: - description: |- - The name of a locality load balancer policy to be used. The value - should be one of the predefined ones as supported by localityLbPolicy, - although at the moment only ROUND_ROBIN is supported. - This field should only be populated when the customPolicy field is not - used. - Note that specifying the same policy more than once for a backend is - not a valid configuration and will be rejected. - The possible values are: + description: Name of the cookie. type: string type: object policy: @@ -4430,15 +4432,7 @@ spec: Structure is documented below. properties: name: - description: |- - The name of a locality load balancer policy to be used. The value - should be one of the predefined ones as supported by localityLbPolicy, - although at the moment only ROUND_ROBIN is supported. - This field should only be populated when the customPolicy field is not - used. - Note that specifying the same policy more than once for a backend is - not a valid configuration and will be rejected. - The possible values are: + description: Name of the cookie. type: string type: object type: object @@ -4471,8 +4465,6 @@ spec: Settings controlling eviction of unhealthy hosts from the load balancing pool. Applicable backend service types can be a global backend service with the loadBalancingScheme set to INTERNAL_SELF_MANAGED or EXTERNAL_MANAGED. - From version 6.0. - Default values are enforce by GCP without providing them. Structure is documented below. properties: baseEjectionTime: @@ -4671,8 +4663,38 @@ spec: description: |- Type of session affinity to use. The default is NONE. Session affinity is not applicable if the protocol is UDP. 
- Possible values are: NONE, CLIENT_IP, CLIENT_IP_PORT_PROTO, CLIENT_IP_PROTO, GENERATED_COOKIE, HEADER_FIELD, HTTP_COOKIE. + Possible values are: NONE, CLIENT_IP, CLIENT_IP_PORT_PROTO, CLIENT_IP_PROTO, GENERATED_COOKIE, HEADER_FIELD, HTTP_COOKIE, STRONG_COOKIE_AFFINITY. type: string + strongSessionAffinityCookie: + description: |- + Describes the HTTP cookie used for stateful session affinity. This field is applicable and required if the sessionAffinity is set to STRONG_COOKIE_AFFINITY. + Structure is documented below. + properties: + name: + description: Name of the cookie. + type: string + path: + description: Path to set for the cookie. + type: string + ttl: + description: |- + Lifetime of the cookie. + Structure is documented below. + properties: + nanos: + description: |- + Span of time that's a fraction of a second at nanosecond + resolution. Durations less than one second are represented + with a 0 seconds field and a positive nanos field. Must + be from 0 to 999,999,999 inclusive. + type: number + seconds: + description: |- + Span of time at a resolution of a second. + Must be from 0 to 315,576,000,000 inclusive. + type: number + type: object + type: object timeoutSec: description: |- The backend service timeout has a different meaning depending on the type of load balancer. @@ -4875,7 +4897,6 @@ spec: and CONNECTION (for TCP/SSL). See the Backend Services Overview for an explanation of load balancing modes. - From version 6.0.0 default value will be UTILIZATION to match default GCP value. Default value is UTILIZATION. Possible values are: UTILIZATION, RATE, CONNECTION. type: string @@ -5078,8 +5099,8 @@ spec: type: number ttl: description: |- - The TTL (in seconds) for which to cache responses with the corresponding status code. The maximum allowed value is 1800s - (30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. + Lifetime of the cookie. + Structure is documented below. 
type: number type: object type: array @@ -5166,23 +5187,15 @@ spec: Structure is documented below. properties: name: - description: |- - The name of a locality load balancer policy to be used. The value - should be one of the predefined ones as supported by localityLbPolicy, - although at the moment only ROUND_ROBIN is supported. - This field should only be populated when the customPolicy field is not - used. - Note that specifying the same policy more than once for a backend is - not a valid configuration and will be rejected. - The possible values are: + description: Name of the cookie. type: string path: description: Path to set for the cookie. type: string ttl: description: |- - The TTL (in seconds) for which to cache responses with the corresponding status code. The maximum allowed value is 1800s - (30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. + Lifetime of the cookie. + Structure is documented below. properties: nanos: description: |- @@ -5268,6 +5281,10 @@ spec: Settings for enabling Cloud Identity Aware Proxy Structure is documented below. properties: + enabled: + description: Whether the serving infrastructure will authenticate + and authorize all incoming requests. + type: boolean oauth2ClientId: description: OAuth2 Client ID for IAP type: string @@ -5275,6 +5292,11 @@ spec: id: description: an identifier for the resource with format projects/{{project}}/global/backendServices/{{name}} type: string + ipAddressSelectionPolicy: + description: |- + Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). + Possible values are: IPV4_ONLY, PREFER_IPV6, IPV6_ONLY. + type: string loadBalancingScheme: description: |- Indicates whether the backend service will be used with internal or @@ -5307,15 +5329,7 @@ spec: by a locally installed custom policy implementation. 
type: string name: - description: |- - The name of a locality load balancer policy to be used. The value - should be one of the predefined ones as supported by localityLbPolicy, - although at the moment only ROUND_ROBIN is supported. - This field should only be populated when the customPolicy field is not - used. - Note that specifying the same policy more than once for a backend is - not a valid configuration and will be rejected. - The possible values are: + description: Name of the cookie. type: string type: object policy: @@ -5324,15 +5338,7 @@ spec: Structure is documented below. properties: name: - description: |- - The name of a locality load balancer policy to be used. The value - should be one of the predefined ones as supported by localityLbPolicy, - although at the moment only ROUND_ROBIN is supported. - This field should only be populated when the customPolicy field is not - used. - Note that specifying the same policy more than once for a backend is - not a valid configuration and will be rejected. - The possible values are: + description: Name of the cookie. type: string type: object type: object @@ -5365,8 +5371,6 @@ spec: Settings controlling eviction of unhealthy hosts from the load balancing pool. Applicable backend service types can be a global backend service with the loadBalancingScheme set to INTERNAL_SELF_MANAGED or EXTERNAL_MANAGED. - From version 6.0. - Default values are enforce by GCP without providing them. Structure is documented below. properties: baseEjectionTime: @@ -5548,8 +5552,38 @@ spec: description: |- Type of session affinity to use. The default is NONE. Session affinity is not applicable if the protocol is UDP. - Possible values are: NONE, CLIENT_IP, CLIENT_IP_PORT_PROTO, CLIENT_IP_PROTO, GENERATED_COOKIE, HEADER_FIELD, HTTP_COOKIE. + Possible values are: NONE, CLIENT_IP, CLIENT_IP_PORT_PROTO, CLIENT_IP_PROTO, GENERATED_COOKIE, HEADER_FIELD, HTTP_COOKIE, STRONG_COOKIE_AFFINITY. 
type: string + strongSessionAffinityCookie: + description: |- + Describes the HTTP cookie used for stateful session affinity. This field is applicable and required if the sessionAffinity is set to STRONG_COOKIE_AFFINITY. + Structure is documented below. + properties: + name: + description: Name of the cookie. + type: string + path: + description: Path to set for the cookie. + type: string + ttl: + description: |- + Lifetime of the cookie. + Structure is documented below. + properties: + nanos: + description: |- + Span of time that's a fraction of a second at nanosecond + resolution. Durations less than one second are represented + with a 0 seconds field and a positive nanos field. Must + be from 0 to 999,999,999 inclusive. + type: number + seconds: + description: |- + Span of time at a resolution of a second. + Must be from 0 to 315,576,000,000 inclusive. + type: number + type: object + type: object timeoutSec: description: |- The backend service timeout has a different meaning depending on the type of load balancer. diff --git a/package/crds/compute.gcp.upbound.io_disks.yaml b/package/crds/compute.gcp.upbound.io_disks.yaml index b020d73dd..51f941198 100644 --- a/package/crds/compute.gcp.upbound.io_disks.yaml +++ b/package/crds/compute.gcp.upbound.io_disks.yaml @@ -1335,7 +1335,7 @@ spec: type: string asyncPrimaryDisk: description: |- - A nested object resource + A nested object resource. Structure is documented below. properties: disk: @@ -1634,7 +1634,7 @@ spec: type: object storagePool: description: |- - The URL of the storage pool in which the new disk is created. + The URL or the name of the storage pool in which the new disk is created. For example: type: string type: @@ -1668,7 +1668,7 @@ spec: type: string asyncPrimaryDisk: description: |- - A nested object resource + A nested object resource. Structure is documented below. 
properties: disk: @@ -1967,7 +1967,7 @@ spec: type: object storagePool: description: |- - The URL of the storage pool in which the new disk is created. + The URL or the name of the storage pool in which the new disk is created. For example: type: string type: @@ -2155,7 +2155,7 @@ spec: type: string asyncPrimaryDisk: description: |- - A nested object resource + A nested object resource. Structure is documented below. properties: disk: @@ -2410,7 +2410,7 @@ spec: type: string storagePool: description: |- - The URL of the storage pool in which the new disk is created. + The URL or the name of the storage pool in which the new disk is created. For example: type: string terraformLabels: diff --git a/package/crds/compute.gcp.upbound.io_externalvpngateways.yaml b/package/crds/compute.gcp.upbound.io_externalvpngateways.yaml index b7987514c..562e6e69a 100644 --- a/package/crds/compute.gcp.upbound.io_externalvpngateways.yaml +++ b/package/crds/compute.gcp.upbound.io_externalvpngateways.yaml @@ -94,6 +94,15 @@ spec: your on-premise gateway or another Cloud provider's VPN gateway, it cannot be an IP address from Google Compute Engine. type: string + ipv6Address: + description: |- + IPv6 address of the interface in the external VPN gateway. This IPv6 + address can be either from your on-premise gateway or another Cloud + provider's VPN gateway, it cannot be an IP address from Google Compute + Engine. Must specify an IPv6 address (not IPV4-mapped) using any format + described in RFC 4291 (e.g. 2001:db8:0:0:2d9:51:0:0). The output format + is RFC 5952 format (e.g. 2001:db8::2d9:51:0:0). + type: string type: object type: array labels: @@ -150,6 +159,15 @@ spec: your on-premise gateway or another Cloud provider's VPN gateway, it cannot be an IP address from Google Compute Engine. type: string + ipv6Address: + description: |- + IPv6 address of the interface in the external VPN gateway. 
This IPv6 + address can be either from your on-premise gateway or another Cloud + provider's VPN gateway, it cannot be an IP address from Google Compute + Engine. Must specify an IPv6 address (not IPV4-mapped) using any format + described in RFC 4291 (e.g. 2001:db8:0:0:2d9:51:0:0). The output format + is RFC 5952 format (e.g. 2001:db8::2d9:51:0:0). + type: string type: object type: array labels: @@ -373,6 +391,15 @@ spec: your on-premise gateway or another Cloud provider's VPN gateway, it cannot be an IP address from Google Compute Engine. type: string + ipv6Address: + description: |- + IPv6 address of the interface in the external VPN gateway. This IPv6 + address can be either from your on-premise gateway or another Cloud + provider's VPN gateway, it cannot be an IP address from Google Compute + Engine. Must specify an IPv6 address (not IPV4-mapped) using any format + described in RFC 4291 (e.g. 2001:db8:0:0:2d9:51:0:0). The output format + is RFC 5952 format (e.g. 2001:db8::2d9:51:0:0). + type: string type: object type: array labelFingerprint: diff --git a/package/crds/compute.gcp.upbound.io_firewallpolicyassociations.yaml b/package/crds/compute.gcp.upbound.io_firewallpolicyassociations.yaml index 5b11a4dfc..d226b8f62 100644 --- a/package/crds/compute.gcp.upbound.io_firewallpolicyassociations.yaml +++ b/package/crds/compute.gcp.upbound.io_firewallpolicyassociations.yaml @@ -35,7 +35,8 @@ spec: schema: openAPIV3Schema: description: FirewallPolicyAssociation is the Schema for the FirewallPolicyAssociations - API. Applies a hierarchical firewall policy to a target resource + API. Allows associating hierarchical firewall policies with the target where + they are applied. properties: apiVersion: description: |- @@ -154,7 +155,7 @@ spec: type: object type: object firewallPolicy: - description: The firewall policy ID of the association. + description: The firewall policy of the resource. 
type: string firewallPolicyRef: description: Reference to a FirewallPolicy in compute to populate @@ -329,7 +330,7 @@ spec: type: object type: object firewallPolicy: - description: The firewall policy ID of the association. + description: The firewall policy of the resource. type: string firewallPolicyRef: description: Reference to a FirewallPolicy in compute to populate @@ -593,7 +594,7 @@ spec: description: The target that the firewall policy is attached to. type: string firewallPolicy: - description: The firewall policy ID of the association. + description: The firewall policy of the resource. type: string id: description: an identifier for the resource with format locations/global/firewallPolicies/{{firewall_policy}}/associations/{{name}} diff --git a/package/crds/compute.gcp.upbound.io_firewallpolicyrules.yaml b/package/crds/compute.gcp.upbound.io_firewallpolicyrules.yaml index c9f49a759..e7549cc6c 100644 --- a/package/crds/compute.gcp.upbound.io_firewallpolicyrules.yaml +++ b/package/crds/compute.gcp.upbound.io_firewallpolicyrules.yaml @@ -987,7 +987,9 @@ spec: schema: openAPIV3Schema: description: FirewallPolicyRule is the Schema for the FirewallPolicyRules - API. The Compute FirewallPolicyRule resource + API. Represents a rule that describes one or more match conditions along + with the action to be taken when traffic matches this condition (allow or + deny). properties: apiVersion: description: |- @@ -1034,20 +1036,22 @@ spec: description: An optional description for this resource. type: string direction: - description: 'The direction in which this rule applies. Possible - values: INGRESS, EGRESS' + description: |- + The direction in which this rule applies. + Possible values are: INGRESS, EGRESS. type: string disabled: - description: Denotes whether the firewall policy rule is disabled. - When set to true, the firewall policy rule is not enforced and - traffic behaves as if it did not exist. 
If this is unspecified, - the firewall policy rule will be enabled. + description: |- + Denotes whether the firewall policy rule is disabled. + When set to true, the firewall policy rule is not enforced and traffic behaves as if it did not exist. + If this is unspecified, the firewall policy rule will be enabled. type: boolean enableLogging: - description: 'Denotes whether to enable logging for a particular - rule. If logging is enabled, logs will be exported to the configured - export destination in Stackdriver. Logs may be exported to BigQuery - or Pub/Sub. Note: you cannot enable logging on "goto_next" rules.' + description: |- + Denotes whether to enable logging for a particular rule. + If logging is enabled, logs will be exported to the configured export destination in Stackdriver. + Logs may be exported to BigQuery or Pub/Sub. + Note: you cannot enable logging on "goto_next" rules. type: boolean firewallPolicy: description: The firewall policy of the resource. @@ -1129,15 +1133,14 @@ spec: type: object type: object match: - description: A match condition that incoming traffic is evaluated - against. If it evaluates to true, the corresponding 'action' - is enforced. + description: |- + A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced. + Structure is documented below. properties: destAddressGroups: description: Address groups which should be matched against the traffic destination. Maximum number of destination address - groups is 10. Destination address groups is only supported - in Egress rules. + groups is 10. items: type: string type: array @@ -1221,42 +1224,43 @@ spec: type: object type: object destFqdns: - description: Domain names that will be used to match against - the resolved domain name of destination of traffic. Can - only be specified if DIRECTION is egress. + description: Fully Qualified Domain Name (FQDN) which should + be matched against traffic destination. 
Maximum number of + destination fqdn allowed is 100. items: type: string type: array destIpRanges: description: CIDR IP address range. Maximum number of destination - CIDR IP ranges allowed is 256. + CIDR IP ranges allowed is 5000. items: type: string type: array destRegionCodes: - description: The Unicode country codes whose IP addresses - will be used to match against the source of traffic. Can - only be specified if DIRECTION is egress. + description: Region codes whose IP addresses will be used + to match for destination of traffic. Should be specified + as 2 letter country code defined as per ISO 3166 alpha-2 + country codes. ex."US" Maximum number of dest region codes + allowed is 5000. items: type: string type: array destThreatIntelligences: - description: Name of the Google Cloud Threat Intelligence - list. + description: Names of Network Threat Intelligence lists. The + IPs in these lists will be matched against traffic destination. items: type: string type: array layer4Configs: - description: Pairs of IP protocols and ports that the rule - should match. + description: |- + Pairs of IP protocols and ports that the rule should match. + Structure is documented below. items: properties: ipProtocol: - description: The IP protocol to which this rule applies. - The protocol type is required when creating a firewall - rule. This value can either be one of the following - well known protocol strings (tcp, udp, icmp, esp, - ah, ipip, sctp), or the IP protocol number. + description: |- + The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. + This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number. type: string ports: description: An optional list of ports to which this @@ -1272,55 +1276,56 @@ spec: srcAddressGroups: description: Address groups which should be matched against the traffic source. 
Maximum number of source address groups - is 10. Source address groups is only supported in Ingress - rules. + is 10. items: type: string type: array srcFqdns: - description: Domain names that will be used to match against - the resolved domain name of source of traffic. Can only - be specified if DIRECTION is ingress. + description: Fully Qualified Domain Name (FQDN) which should + be matched against traffic source. Maximum number of source + fqdn allowed is 100. items: type: string type: array srcIpRanges: description: CIDR IP address range. Maximum number of source - CIDR IP ranges allowed is 256. + CIDR IP ranges allowed is 5000. items: type: string type: array srcRegionCodes: - description: The Unicode country codes whose IP addresses - will be used to match against the source of traffic. Can - only be specified if DIRECTION is ingress. + description: Region codes whose IP addresses will be used + to match for source of traffic. Should be specified as 2 + letter country code defined as per ISO 3166 alpha-2 country + codes. ex."US" Maximum number of source region codes allowed + is 5000. items: type: string type: array srcThreatIntelligences: - description: Name of the Google Cloud Threat Intelligence - list. + description: Names of Network Threat Intelligence lists. The + IPs in these lists will be matched against traffic source. items: type: string type: array type: object priority: - description: An integer indicating the priority of a rule in the - list. The priority must be a positive value between 0 and 2147483647. - Rules are evaluated from highest to lowest priority where 0 - is the highest priority and 2147483647 is the lowest prority. + description: |- + An integer indicating the priority of a rule in the list. + The priority must be a positive value between 0 and 2147483647. + Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest prority. 
type: number securityProfileGroup: - description: 'A fully-qualified URL of a SecurityProfileGroup - resource. Example: https://networksecurity.googleapis.com/v1/organizations/{organizationId}/locations/global/securityProfileGroups/my-security-profile-group. - It must be specified if action = ''apply_security_profile_group'' - and cannot be specified for other actions.' + description: |- + A fully-qualified URL of a SecurityProfile resource instance. + Example: https://networksecurity.googleapis.com/v1/projects/{project}/locations/{location}/securityProfileGroups/my-security-profile-group + Must be specified if action = 'apply_security_profile_group' and cannot be specified for other actions. type: string targetResources: - description: A list of network resource URLs to which this rule - applies. This field allows you to control which network's VMs - get this rule. If this field is left blank, all VMs within the - organization will receive the rule. + description: |- + A list of network resource URLs to which this rule applies. + This field allows you to control which network's VMs get this rule. + If this field is left blank, all VMs within the organization will receive the rule. items: type: string type: array @@ -1331,9 +1336,9 @@ spec: type: string type: array tlsInspect: - description: Boolean flag indicating if the traffic should be - TLS decrypted. It can be set only if action = 'apply_security_profile_group' - and cannot be set for other actions. + description: |- + Boolean flag indicating if the traffic should be TLS decrypted. + Can be set only if action = 'apply_security_profile_group' and cannot be set for other actions. type: boolean type: object initProvider: @@ -1358,20 +1363,22 @@ spec: description: An optional description for this resource. type: string direction: - description: 'The direction in which this rule applies. Possible - values: INGRESS, EGRESS' + description: |- + The direction in which this rule applies. 
+ Possible values are: INGRESS, EGRESS. type: string disabled: - description: Denotes whether the firewall policy rule is disabled. - When set to true, the firewall policy rule is not enforced and - traffic behaves as if it did not exist. If this is unspecified, - the firewall policy rule will be enabled. + description: |- + Denotes whether the firewall policy rule is disabled. + When set to true, the firewall policy rule is not enforced and traffic behaves as if it did not exist. + If this is unspecified, the firewall policy rule will be enabled. type: boolean enableLogging: - description: 'Denotes whether to enable logging for a particular - rule. If logging is enabled, logs will be exported to the configured - export destination in Stackdriver. Logs may be exported to BigQuery - or Pub/Sub. Note: you cannot enable logging on "goto_next" rules.' + description: |- + Denotes whether to enable logging for a particular rule. + If logging is enabled, logs will be exported to the configured export destination in Stackdriver. + Logs may be exported to BigQuery or Pub/Sub. + Note: you cannot enable logging on "goto_next" rules. type: boolean firewallPolicy: description: The firewall policy of the resource. @@ -1453,15 +1460,14 @@ spec: type: object type: object match: - description: A match condition that incoming traffic is evaluated - against. If it evaluates to true, the corresponding 'action' - is enforced. + description: |- + A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced. + Structure is documented below. properties: destAddressGroups: description: Address groups which should be matched against the traffic destination. Maximum number of destination address - groups is 10. Destination address groups is only supported - in Egress rules. + groups is 10. 
items: type: string type: array @@ -1545,42 +1551,43 @@ spec: type: object type: object destFqdns: - description: Domain names that will be used to match against - the resolved domain name of destination of traffic. Can - only be specified if DIRECTION is egress. + description: Fully Qualified Domain Name (FQDN) which should + be matched against traffic destination. Maximum number of + destination fqdn allowed is 100. items: type: string type: array destIpRanges: description: CIDR IP address range. Maximum number of destination - CIDR IP ranges allowed is 256. + CIDR IP ranges allowed is 5000. items: type: string type: array destRegionCodes: - description: The Unicode country codes whose IP addresses - will be used to match against the source of traffic. Can - only be specified if DIRECTION is egress. + description: Region codes whose IP addresses will be used + to match for destination of traffic. Should be specified + as 2 letter country code defined as per ISO 3166 alpha-2 + country codes. ex."US" Maximum number of dest region codes + allowed is 5000. items: type: string type: array destThreatIntelligences: - description: Name of the Google Cloud Threat Intelligence - list. + description: Names of Network Threat Intelligence lists. The + IPs in these lists will be matched against traffic destination. items: type: string type: array layer4Configs: - description: Pairs of IP protocols and ports that the rule - should match. + description: |- + Pairs of IP protocols and ports that the rule should match. + Structure is documented below. items: properties: ipProtocol: - description: The IP protocol to which this rule applies. - The protocol type is required when creating a firewall - rule. This value can either be one of the following - well known protocol strings (tcp, udp, icmp, esp, - ah, ipip, sctp), or the IP protocol number. + description: |- + The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. 
+ This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number. type: string ports: description: An optional list of ports to which this @@ -1596,55 +1603,56 @@ spec: srcAddressGroups: description: Address groups which should be matched against the traffic source. Maximum number of source address groups - is 10. Source address groups is only supported in Ingress - rules. + is 10. items: type: string type: array srcFqdns: - description: Domain names that will be used to match against - the resolved domain name of source of traffic. Can only - be specified if DIRECTION is ingress. + description: Fully Qualified Domain Name (FQDN) which should + be matched against traffic source. Maximum number of source + fqdn allowed is 100. items: type: string type: array srcIpRanges: description: CIDR IP address range. Maximum number of source - CIDR IP ranges allowed is 256. + CIDR IP ranges allowed is 5000. items: type: string type: array srcRegionCodes: - description: The Unicode country codes whose IP addresses - will be used to match against the source of traffic. Can - only be specified if DIRECTION is ingress. + description: Region codes whose IP addresses will be used + to match for source of traffic. Should be specified as 2 + letter country code defined as per ISO 3166 alpha-2 country + codes. ex."US" Maximum number of source region codes allowed + is 5000. items: type: string type: array srcThreatIntelligences: - description: Name of the Google Cloud Threat Intelligence - list. + description: Names of Network Threat Intelligence lists. The + IPs in these lists will be matched against traffic source. items: type: string type: array type: object priority: - description: An integer indicating the priority of a rule in the - list. The priority must be a positive value between 0 and 2147483647. 
- Rules are evaluated from highest to lowest priority where 0 - is the highest priority and 2147483647 is the lowest prority. + description: |- + An integer indicating the priority of a rule in the list. + The priority must be a positive value between 0 and 2147483647. + Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest prority. type: number securityProfileGroup: - description: 'A fully-qualified URL of a SecurityProfileGroup - resource. Example: https://networksecurity.googleapis.com/v1/organizations/{organizationId}/locations/global/securityProfileGroups/my-security-profile-group. - It must be specified if action = ''apply_security_profile_group'' - and cannot be specified for other actions.' + description: |- + A fully-qualified URL of a SecurityProfile resource instance. + Example: https://networksecurity.googleapis.com/v1/projects/{project}/locations/{location}/securityProfileGroups/my-security-profile-group + Must be specified if action = 'apply_security_profile_group' and cannot be specified for other actions. type: string targetResources: - description: A list of network resource URLs to which this rule - applies. This field allows you to control which network's VMs - get this rule. If this field is left blank, all VMs within the - organization will receive the rule. + description: |- + A list of network resource URLs to which this rule applies. + This field allows you to control which network's VMs get this rule. + If this field is left blank, all VMs within the organization will receive the rule. items: type: string type: array @@ -1655,9 +1663,9 @@ spec: type: string type: array tlsInspect: - description: Boolean flag indicating if the traffic should be - TLS decrypted. It can be set only if action = 'apply_security_profile_group' - and cannot be set for other actions. + description: |- + Boolean flag indicating if the traffic should be TLS decrypted. 
+ Can be set only if action = 'apply_security_profile_group' and cannot be set for other actions. type: boolean type: object managementPolicies: @@ -1854,24 +1862,29 @@ spec: triggers the rule. Valid actions are "allow", "deny", "goto_next" and "apply_security_profile_group". type: string + creationTimestamp: + description: Creation timestamp in RFC3339 text format. + type: string description: description: An optional description for this resource. type: string direction: - description: 'The direction in which this rule applies. Possible - values: INGRESS, EGRESS' + description: |- + The direction in which this rule applies. + Possible values are: INGRESS, EGRESS. type: string disabled: - description: Denotes whether the firewall policy rule is disabled. - When set to true, the firewall policy rule is not enforced and - traffic behaves as if it did not exist. If this is unspecified, - the firewall policy rule will be enabled. + description: |- + Denotes whether the firewall policy rule is disabled. + When set to true, the firewall policy rule is not enforced and traffic behaves as if it did not exist. + If this is unspecified, the firewall policy rule will be enabled. type: boolean enableLogging: - description: 'Denotes whether to enable logging for a particular - rule. If logging is enabled, logs will be exported to the configured - export destination in Stackdriver. Logs may be exported to BigQuery - or Pub/Sub. Note: you cannot enable logging on "goto_next" rules.' + description: |- + Denotes whether to enable logging for a particular rule. + If logging is enabled, logs will be exported to the configured export destination in Stackdriver. + Logs may be exported to BigQuery or Pub/Sub. + Note: you cannot enable logging on "goto_next" rules. type: boolean firewallPolicy: description: The firewall policy of the resource. 
@@ -1884,55 +1897,55 @@ spec: for firewall policy rules type: string match: - description: A match condition that incoming traffic is evaluated - against. If it evaluates to true, the corresponding 'action' - is enforced. + description: |- + A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced. + Structure is documented below. properties: destAddressGroups: description: Address groups which should be matched against the traffic destination. Maximum number of destination address - groups is 10. Destination address groups is only supported - in Egress rules. + groups is 10. items: type: string type: array destFqdns: - description: Domain names that will be used to match against - the resolved domain name of destination of traffic. Can - only be specified if DIRECTION is egress. + description: Fully Qualified Domain Name (FQDN) which should + be matched against traffic destination. Maximum number of + destination fqdn allowed is 100. items: type: string type: array destIpRanges: description: CIDR IP address range. Maximum number of destination - CIDR IP ranges allowed is 256. + CIDR IP ranges allowed is 5000. items: type: string type: array destRegionCodes: - description: The Unicode country codes whose IP addresses - will be used to match against the source of traffic. Can - only be specified if DIRECTION is egress. + description: Region codes whose IP addresses will be used + to match for destination of traffic. Should be specified + as 2 letter country code defined as per ISO 3166 alpha-2 + country codes. ex."US" Maximum number of dest region codes + allowed is 5000. items: type: string type: array destThreatIntelligences: - description: Name of the Google Cloud Threat Intelligence - list. + description: Names of Network Threat Intelligence lists. The + IPs in these lists will be matched against traffic destination. 
items: type: string type: array layer4Configs: - description: Pairs of IP protocols and ports that the rule - should match. + description: |- + Pairs of IP protocols and ports that the rule should match. + Structure is documented below. items: properties: ipProtocol: - description: The IP protocol to which this rule applies. - The protocol type is required when creating a firewall - rule. This value can either be one of the following - well known protocol strings (tcp, udp, icmp, esp, - ah, ipip, sctp), or the IP protocol number. + description: |- + The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. + This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number. type: string ports: description: An optional list of ports to which this @@ -1948,59 +1961,60 @@ spec: srcAddressGroups: description: Address groups which should be matched against the traffic source. Maximum number of source address groups - is 10. Source address groups is only supported in Ingress - rules. + is 10. items: type: string type: array srcFqdns: - description: Domain names that will be used to match against - the resolved domain name of source of traffic. Can only - be specified if DIRECTION is ingress. + description: Fully Qualified Domain Name (FQDN) which should + be matched against traffic source. Maximum number of source + fqdn allowed is 100. items: type: string type: array srcIpRanges: description: CIDR IP address range. Maximum number of source - CIDR IP ranges allowed is 256. + CIDR IP ranges allowed is 5000. items: type: string type: array srcRegionCodes: - description: The Unicode country codes whose IP addresses - will be used to match against the source of traffic. Can - only be specified if DIRECTION is ingress. + description: Region codes whose IP addresses will be used + to match for source of traffic. 
Should be specified as 2 + letter country code defined as per ISO 3166 alpha-2 country + codes. ex."US" Maximum number of source region codes allowed + is 5000. items: type: string type: array srcThreatIntelligences: - description: Name of the Google Cloud Threat Intelligence - list. + description: Names of Network Threat Intelligence lists. The + IPs in these lists will be matched against traffic source. items: type: string type: array type: object priority: - description: An integer indicating the priority of a rule in the - list. The priority must be a positive value between 0 and 2147483647. - Rules are evaluated from highest to lowest priority where 0 - is the highest priority and 2147483647 is the lowest prority. + description: |- + An integer indicating the priority of a rule in the list. + The priority must be a positive value between 0 and 2147483647. + Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest prority. type: number ruleTupleCount: description: Calculation of the complexity of a single firewall policy rule. type: number securityProfileGroup: - description: 'A fully-qualified URL of a SecurityProfileGroup - resource. Example: https://networksecurity.googleapis.com/v1/organizations/{organizationId}/locations/global/securityProfileGroups/my-security-profile-group. - It must be specified if action = ''apply_security_profile_group'' - and cannot be specified for other actions.' + description: |- + A fully-qualified URL of a SecurityProfile resource instance. + Example: https://networksecurity.googleapis.com/v1/projects/{project}/locations/{location}/securityProfileGroups/my-security-profile-group + Must be specified if action = 'apply_security_profile_group' and cannot be specified for other actions. type: string targetResources: - description: A list of network resource URLs to which this rule - applies. This field allows you to control which network's VMs - get this rule. 
If this field is left blank, all VMs within the - organization will receive the rule. + description: |- + A list of network resource URLs to which this rule applies. + This field allows you to control which network's VMs get this rule. + If this field is left blank, all VMs within the organization will receive the rule. items: type: string type: array @@ -2011,9 +2025,9 @@ spec: type: string type: array tlsInspect: - description: Boolean flag indicating if the traffic should be - TLS decrypted. It can be set only if action = 'apply_security_profile_group' - and cannot be set for other actions. + description: |- + Boolean flag indicating if the traffic should be TLS decrypted. + Can be set only if action = 'apply_security_profile_group' and cannot be set for other actions. type: boolean type: object conditions: diff --git a/package/crds/compute.gcp.upbound.io_firewalls.yaml b/package/crds/compute.gcp.upbound.io_firewalls.yaml index 1b335bc11..6aeab2ff0 100644 --- a/package/crds/compute.gcp.upbound.io_firewalls.yaml +++ b/package/crds/compute.gcp.upbound.io_firewalls.yaml @@ -1121,7 +1121,7 @@ spec: is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. - Example inputs include: ["22"], ["80","443"], and + Example inputs include: [22], [80, 443], and ["12345-12349"]. items: type: string @@ -1148,7 +1148,7 @@ spec: is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. - Example inputs include: ["22"], ["80","443"], and + Example inputs include: [22], [80, 443], and ["12345-12349"]. items: type: string @@ -1398,7 +1398,7 @@ spec: is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. 
- Example inputs include: ["22"], ["80","443"], and + Example inputs include: [22], [80, 443], and ["12345-12349"]. items: type: string @@ -1425,7 +1425,7 @@ spec: is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. - Example inputs include: ["22"], ["80","443"], and + Example inputs include: [22], [80, 443], and ["12345-12349"]. items: type: string @@ -1834,7 +1834,7 @@ spec: is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. - Example inputs include: ["22"], ["80","443"], and + Example inputs include: [22], [80, 443], and ["12345-12349"]. items: type: string @@ -1864,7 +1864,7 @@ spec: is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. - Example inputs include: ["22"], ["80","443"], and + Example inputs include: [22], [80, 443], and ["12345-12349"]. items: type: string diff --git a/package/crds/compute.gcp.upbound.io_globalforwardingrules.yaml b/package/crds/compute.gcp.upbound.io_globalforwardingrules.yaml index e922ad736..ea3aa489d 100644 --- a/package/crds/compute.gcp.upbound.io_globalforwardingrules.yaml +++ b/package/crds/compute.gcp.upbound.io_globalforwardingrules.yaml @@ -1968,6 +1968,19 @@ spec: type: string type: object type: object + networkTier: + description: |- + This signifies the networking tier used for configuring + this load balancer and can only take the following values: + PREMIUM, STANDARD. + For regional ForwardingRule, the valid values are PREMIUM and + STANDARD. For GlobalForwardingRule, the valid value is + PREMIUM. + If this field is not specified, it is assumed to be PREMIUM. + If IPAddress is specified, this value must be equal to the + networkTier of the Address. + Possible values are: PREMIUM, STANDARD. 
+ type: string noAutomateDnsZone: description: This is used in PSC consumer ForwardingRule to control whether it should try to auto-generate a DNS zone or not. Non-PSC @@ -2533,6 +2546,19 @@ spec: type: string type: object type: object + networkTier: + description: |- + This signifies the networking tier used for configuring + this load balancer and can only take the following values: + PREMIUM, STANDARD. + For regional ForwardingRule, the valid values are PREMIUM and + STANDARD. For GlobalForwardingRule, the valid value is + PREMIUM. + If this field is not specified, it is assumed to be PREMIUM. + If IPAddress is specified, this value must be equal to the + networkTier of the Address. + Possible values are: PREMIUM, STANDARD. + type: string noAutomateDnsZone: description: This is used in PSC consumer ForwardingRule to control whether it should try to auto-generate a DNS zone or not. Non-PSC @@ -3011,6 +3037,10 @@ spec: description: for all of the labels present on the resource. type: object x-kubernetes-map-type: granular + forwardingRuleId: + description: The unique identifier number for the resource. This + identifier is defined by the server. + type: number id: description: an identifier for the resource with format projects/{{project}}/global/forwardingRules/{{name}} type: string @@ -3130,6 +3160,19 @@ spec: For Private Service Connect forwarding rules that forward traffic to Google APIs, a network must be provided. type: string + networkTier: + description: |- + This signifies the networking tier used for configuring + this load balancer and can only take the following values: + PREMIUM, STANDARD. + For regional ForwardingRule, the valid values are PREMIUM and + STANDARD. For GlobalForwardingRule, the valid value is + PREMIUM. + If this field is not specified, it is assumed to be PREMIUM. + If IPAddress is specified, this value must be equal to the + networkTier of the Address. + Possible values are: PREMIUM, STANDARD. 
+ type: string noAutomateDnsZone: description: This is used in PSC consumer ForwardingRule to control whether it should try to auto-generate a DNS zone or not. Non-PSC diff --git a/package/crds/compute.gcp.upbound.io_healthchecks.yaml b/package/crds/compute.gcp.upbound.io_healthchecks.yaml index 9cb0e3690..2ae151394 100644 --- a/package/crds/compute.gcp.upbound.io_healthchecks.yaml +++ b/package/crds/compute.gcp.upbound.io_healthchecks.yaml @@ -1335,7 +1335,7 @@ spec: type: string grpcHealthCheck: description: |- - A nested object resource + A nested object resource. Structure is documented below. properties: grpcServiceName: @@ -1367,7 +1367,7 @@ spec: type: number http2HealthCheck: description: |- - A nested object resource + A nested object resource. Structure is documented below. properties: host: @@ -1412,7 +1412,7 @@ spec: type: object httpHealthCheck: description: |- - A nested object resource + A nested object resource. Structure is documented below. properties: host: @@ -1457,7 +1457,7 @@ spec: type: object httpsHealthCheck: description: |- - A nested object resource + A nested object resource. Structure is documented below. properties: host: @@ -1529,7 +1529,7 @@ spec: type: array sslHealthCheck: description: |- - A nested object resource + A nested object resource. Structure is documented below. properties: port: @@ -1570,7 +1570,7 @@ spec: type: object tcpHealthCheck: description: |- - A nested object resource + A nested object resource. Structure is documented below. properties: port: @@ -1646,7 +1646,7 @@ spec: type: string grpcHealthCheck: description: |- - A nested object resource + A nested object resource. Structure is documented below. properties: grpcServiceName: @@ -1678,7 +1678,7 @@ spec: type: number http2HealthCheck: description: |- - A nested object resource + A nested object resource. Structure is documented below. 
properties: host: @@ -1723,7 +1723,7 @@ spec: type: object httpHealthCheck: description: |- - A nested object resource + A nested object resource. Structure is documented below. properties: host: @@ -1768,7 +1768,7 @@ spec: type: object httpsHealthCheck: description: |- - A nested object resource + A nested object resource. Structure is documented below. properties: host: @@ -1840,7 +1840,7 @@ spec: type: array sslHealthCheck: description: |- - A nested object resource + A nested object resource. Structure is documented below. properties: port: @@ -1881,7 +1881,7 @@ spec: type: object tcpHealthCheck: description: |- - A nested object resource + A nested object resource. Structure is documented below. properties: port: @@ -2119,7 +2119,7 @@ spec: type: string grpcHealthCheck: description: |- - A nested object resource + A nested object resource. Structure is documented below. properties: grpcServiceName: @@ -2151,7 +2151,7 @@ spec: type: number http2HealthCheck: description: |- - A nested object resource + A nested object resource. Structure is documented below. properties: host: @@ -2196,7 +2196,7 @@ spec: type: object httpHealthCheck: description: |- - A nested object resource + A nested object resource. Structure is documented below. properties: host: @@ -2241,7 +2241,7 @@ spec: type: object httpsHealthCheck: description: |- - A nested object resource + A nested object resource. Structure is documented below. properties: host: @@ -2319,7 +2319,7 @@ spec: type: array sslHealthCheck: description: |- - A nested object resource + A nested object resource. Structure is documented below. properties: port: @@ -2360,7 +2360,7 @@ spec: type: object tcpHealthCheck: description: |- - A nested object resource + A nested object resource. Structure is documented below. 
properties: port: diff --git a/package/crds/compute.gcp.upbound.io_images.yaml b/package/crds/compute.gcp.upbound.io_images.yaml index 845d24806..c1f4c21ae 100644 --- a/package/crds/compute.gcp.upbound.io_images.yaml +++ b/package/crds/compute.gcp.upbound.io_images.yaml @@ -806,7 +806,7 @@ spec: type: description: |- The type of supported feature. Read Enabling guest operating system features to see a list of available options. - Possible values are: MULTI_IP_SUBNET, SECURE_BOOT, SEV_CAPABLE, UEFI_COMPATIBLE, VIRTIO_SCSI_MULTIQUEUE, WINDOWS, GVNIC, SEV_LIVE_MIGRATABLE, SEV_SNP_CAPABLE, SUSPEND_RESUME_COMPATIBLE, TDX_CAPABLE, SEV_LIVE_MIGRATABLE_V2. + Possible values are: MULTI_IP_SUBNET, SECURE_BOOT, SEV_CAPABLE, UEFI_COMPATIBLE, VIRTIO_SCSI_MULTIQUEUE, WINDOWS, GVNIC, IDPF, SEV_LIVE_MIGRATABLE, SEV_SNP_CAPABLE, SUSPEND_RESUME_COMPATIBLE, TDX_CAPABLE, SEV_LIVE_MIGRATABLE_V2. type: string type: object type: array @@ -881,6 +881,80 @@ spec: You must provide either this property or the rawDisk.source property but not both to create an image. type: string + sourceDiskRef: + description: Reference to a Disk in compute to populate sourceDisk. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + sourceDiskSelector: + description: Selector for a Disk in compute to populate sourceDisk. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object sourceImage: description: |- URL of the source image used to create this image. In order to create an image, you must provide the full or partial @@ -940,7 +1014,7 @@ spec: type: description: |- The type of supported feature. Read Enabling guest operating system features to see a list of available options. - Possible values are: MULTI_IP_SUBNET, SECURE_BOOT, SEV_CAPABLE, UEFI_COMPATIBLE, VIRTIO_SCSI_MULTIQUEUE, WINDOWS, GVNIC, SEV_LIVE_MIGRATABLE, SEV_SNP_CAPABLE, SUSPEND_RESUME_COMPATIBLE, TDX_CAPABLE, SEV_LIVE_MIGRATABLE_V2. 
+ Possible values are: MULTI_IP_SUBNET, SECURE_BOOT, SEV_CAPABLE, UEFI_COMPATIBLE, VIRTIO_SCSI_MULTIQUEUE, WINDOWS, GVNIC, IDPF, SEV_LIVE_MIGRATABLE, SEV_SNP_CAPABLE, SUSPEND_RESUME_COMPATIBLE, TDX_CAPABLE, SEV_LIVE_MIGRATABLE_V2. type: string type: object type: array @@ -1015,6 +1089,80 @@ spec: You must provide either this property or the rawDisk.source property but not both to create an image. type: string + sourceDiskRef: + description: Reference to a Disk in compute to populate sourceDisk. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + sourceDiskSelector: + description: Selector for a Disk in compute to populate sourceDisk. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object sourceImage: description: |- URL of the source image used to create this image. In order to create an image, you must provide the full or partial @@ -1246,7 +1394,7 @@ spec: type: description: |- The type of supported feature. Read Enabling guest operating system features to see a list of available options. - Possible values are: MULTI_IP_SUBNET, SECURE_BOOT, SEV_CAPABLE, UEFI_COMPATIBLE, VIRTIO_SCSI_MULTIQUEUE, WINDOWS, GVNIC, SEV_LIVE_MIGRATABLE, SEV_SNP_CAPABLE, SUSPEND_RESUME_COMPATIBLE, TDX_CAPABLE, SEV_LIVE_MIGRATABLE_V2. + Possible values are: MULTI_IP_SUBNET, SECURE_BOOT, SEV_CAPABLE, UEFI_COMPATIBLE, VIRTIO_SCSI_MULTIQUEUE, WINDOWS, GVNIC, IDPF, SEV_LIVE_MIGRATABLE, SEV_SNP_CAPABLE, SUSPEND_RESUME_COMPATIBLE, TDX_CAPABLE, SEV_LIVE_MIGRATABLE_V2. 
type: string type: object type: array diff --git a/package/crds/compute.gcp.upbound.io_instancefromtemplates.yaml b/package/crds/compute.gcp.upbound.io_instancefromtemplates.yaml index 46c9601a2..7c831622f 100644 --- a/package/crds/compute.gcp.upbound.io_instancefromtemplates.yaml +++ b/package/crds/compute.gcp.upbound.io_instancefromtemplates.yaml @@ -1976,8 +1976,14 @@ spec: properties: enableNestedVirtualization: type: boolean + enableUefiNetworking: + type: boolean + performanceMonitoringUnit: + type: string threadsPerCore: type: number + turboMode: + type: string visibleCoreCount: type: number type: object @@ -1992,10 +1998,24 @@ spec: A unique name for the resource, required by GCE. Changing this forces a new resource to be created. type: string - diskEncryptionKeyRaw: - type: string - diskEncryptionKeySha256: - type: string + diskEncryptionKeyRawSecretRef: + description: A SecretKeySelector is a reference to a secret + key in an arbitrary namespace. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object kmsKeySelfLink: type: string mode: @@ -2051,6 +2071,10 @@ spec: type: string type: object x-kubernetes-map-type: granular + resourcePolicies: + items: + type: string + type: array size: type: number storagePool: @@ -2058,6 +2082,8 @@ spec: type: type: string type: object + interface: + type: string kmsKeySelfLink: type: string mode: @@ -2074,8 +2100,6 @@ spec: enableConfidentialCompute: type: boolean type: object - deletionProtection: - type: boolean description: type: string desiredStatus: @@ -2096,6 +2120,8 @@ spec: A unique name for the resource, required by GCE. Changing this forces a new resource to be created. 
type: string + keyRevocationActionType: + type: string labels: additionalProperties: type: string @@ -2375,6 +2401,8 @@ spec: properties: automaticRestart: type: boolean + availabilityDomain: + type: number instanceTerminationAction: type: string localSsdRecoveryTimeout: @@ -2562,8 +2590,14 @@ spec: properties: enableNestedVirtualization: type: boolean + enableUefiNetworking: + type: boolean + performanceMonitoringUnit: + type: string threadsPerCore: type: number + turboMode: + type: string visibleCoreCount: type: number type: object @@ -2578,10 +2612,24 @@ spec: A unique name for the resource, required by GCE. Changing this forces a new resource to be created. type: string - diskEncryptionKeyRaw: - type: string - diskEncryptionKeySha256: - type: string + diskEncryptionKeyRawSecretRef: + description: A SecretKeySelector is a reference to a secret + key in an arbitrary namespace. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object kmsKeySelfLink: type: string mode: @@ -2637,6 +2685,10 @@ spec: type: string type: object x-kubernetes-map-type: granular + resourcePolicies: + items: + type: string + type: array size: type: number storagePool: @@ -2644,6 +2696,8 @@ spec: type: type: string type: object + interface: + type: string kmsKeySelfLink: type: string mode: @@ -2660,8 +2714,6 @@ spec: enableConfidentialCompute: type: boolean type: object - deletionProtection: - type: boolean description: type: string desiredStatus: @@ -2682,6 +2734,8 @@ spec: A unique name for the resource, required by GCE. Changing this forces a new resource to be created. 
type: string + keyRevocationActionType: + type: string labels: additionalProperties: type: string @@ -2961,6 +3015,8 @@ spec: properties: automaticRestart: type: boolean + availabilityDomain: + type: number instanceTerminationAction: type: string localSsdRecoveryTimeout: @@ -3313,8 +3369,14 @@ spec: properties: enableNestedVirtualization: type: boolean + enableUefiNetworking: + type: boolean + performanceMonitoringUnit: + type: string threadsPerCore: type: number + turboMode: + type: string visibleCoreCount: type: number type: object @@ -3329,8 +3391,6 @@ spec: A unique name for the resource, required by GCE. Changing this forces a new resource to be created. type: string - diskEncryptionKeyRaw: - type: string diskEncryptionKeySha256: type: string kmsKeySelfLink: @@ -3372,6 +3432,10 @@ spec: type: string type: object x-kubernetes-map-type: granular + resourcePolicies: + items: + type: string + type: array size: type: number storagePool: @@ -3379,6 +3443,8 @@ spec: type: type: string type: object + interface: + type: string kmsKeySelfLink: type: string mode: @@ -3397,6 +3463,8 @@ spec: type: object cpuPlatform: type: string + creationTimestamp: + type: string currentStatus: type: string deletionProtection: @@ -3430,6 +3498,8 @@ spec: type: string instanceId: type: string + keyRevocationActionType: + type: string labelFingerprint: type: string labels: @@ -3568,6 +3638,8 @@ spec: properties: automaticRestart: type: boolean + availabilityDomain: + type: number instanceTerminationAction: type: string localSsdRecoveryTimeout: diff --git a/package/crds/compute.gcp.upbound.io_instancegroupmanagers.yaml b/package/crds/compute.gcp.upbound.io_instancegroupmanagers.yaml index 3f3201da3..602ec5240 100644 --- a/package/crds/compute.gcp.upbound.io_instancegroupmanagers.yaml +++ b/package/crds/compute.gcp.upbound.io_instancegroupmanagers.yaml @@ -2015,6 +2015,30 @@ spec: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. 
type: string + standbyPolicy: + description: The standby policy for stopped and suspended instances. + Structure is documented below. For more information, see the + official documentation. + properties: + initialDelaySec: + description: '- Specifies the number of seconds that the MIG + should wait to suspend or stop a VM after that VM was created. + The initial delay gives the initialization script the time + to prepare your VM for a quick scale out. The value of initial + delay must be between 0 and 3600 seconds. The default value + is 0.' + type: number + mode: + description: '- Defines how a MIG resumes or starts VMs from + a standby pool when the group scales out. Valid options + are: MANUAL, SCALE_OUT_POOL. If MANUAL(default), you have + full control over which VMs are stopped and suspended in + the MIG. If SCALE_OUT_POOL, the MIG uses the VMs from the + standby pools to accelerate the scale out by resuming or + starting them and then automatically replenishes the standby + pool with new VMs to maintain the target sizes.' + type: string + type: object statefulDisk: description: Disks created on the instances that will be preserved on instance delete, update, etc. Structure is documented below. @@ -2173,6 +2197,14 @@ spec: when using one. If a value is required, such as to specify a creation-time target size for the MIG, lifecycle. Defaults to 0. type: number + targetStoppedSize: + description: The target number of stopped instances for this managed + instance group. + type: number + targetSuspendedSize: + description: The target number of suspended instances for this + managed instance group. + type: number updatePolicy: description: The update policy for this managed instance group. Structure is documented below. For more information, see the @@ -2551,6 +2583,30 @@ spec: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. 
type: string + standbyPolicy: + description: The standby policy for stopped and suspended instances. + Structure is documented below. For more information, see the + official documentation. + properties: + initialDelaySec: + description: '- Specifies the number of seconds that the MIG + should wait to suspend or stop a VM after that VM was created. + The initial delay gives the initialization script the time + to prepare your VM for a quick scale out. The value of initial + delay must be between 0 and 3600 seconds. The default value + is 0.' + type: number + mode: + description: '- Defines how a MIG resumes or starts VMs from + a standby pool when the group scales out. Valid options + are: MANUAL, SCALE_OUT_POOL. If MANUAL(default), you have + full control over which VMs are stopped and suspended in + the MIG. If SCALE_OUT_POOL, the MIG uses the VMs from the + standby pools to accelerate the scale out by resuming or + starting them and then automatically replenishes the standby + pool with new VMs to maintain the target sizes.' + type: string + type: object statefulDisk: description: Disks created on the instances that will be preserved on instance delete, update, etc. Structure is documented below. @@ -2709,6 +2765,14 @@ spec: when using one. If a value is required, such as to specify a creation-time target size for the MIG, lifecycle. Defaults to 0. type: number + targetStoppedSize: + description: The target number of stopped instances for this managed + instance group. + type: number + targetSuspendedSize: + description: The target number of suspended instances for this + managed instance group. + type: number updatePolicy: description: The update policy for this managed instance group. Structure is documented below. For more information, see the @@ -3140,6 +3204,9 @@ spec: description: The full URL of the instance group created by the manager. 
type: string + instanceGroupManagerId: + description: an identifier for the resource with format projects/{{project}}/zones/{{zone}}/instanceGroupManagers/{{name}} + type: number instanceLifecyclePolicy: properties: defaultActionOnFailure: @@ -3191,6 +3258,30 @@ spec: selfLink: description: The URL of the created resource. type: string + standbyPolicy: + description: The standby policy for stopped and suspended instances. + Structure is documented below. For more information, see the + official documentation. + properties: + initialDelaySec: + description: '- Specifies the number of seconds that the MIG + should wait to suspend or stop a VM after that VM was created. + The initial delay gives the initialization script the time + to prepare your VM for a quick scale out. The value of initial + delay must be between 0 and 3600 seconds. The default value + is 0.' + type: number + mode: + description: '- Defines how a MIG resumes or starts VMs from + a standby pool when the group scales out. Valid options + are: MANUAL, SCALE_OUT_POOL. If MANUAL(default), you have + full control over which VMs are stopped and suspended in + the MIG. If SCALE_OUT_POOL, the MIG uses the VMs from the + standby pools to accelerate the scale out by resuming or + starting them and then automatically replenishes the standby + pool with new VMs to maintain the target sizes.' + type: string + type: object statefulDisk: description: Disks created on the instances that will be preserved on instance delete, update, etc. Structure is documented below. @@ -3342,6 +3433,14 @@ spec: when using one. If a value is required, such as to specify a creation-time target size for the MIG, lifecycle. Defaults to 0. type: number + targetStoppedSize: + description: The target number of stopped instances for this managed + instance group. + type: number + targetSuspendedSize: + description: The target number of suspended instances for this + managed instance group. 
+ type: number updatePolicy: description: The update policy for this managed instance group. Structure is documented below. For more information, see the diff --git a/package/crds/compute.gcp.upbound.io_instances.yaml b/package/crds/compute.gcp.upbound.io_instances.yaml index 667fa0fc3..7d086d693 100644 --- a/package/crds/compute.gcp.upbound.io_instances.yaml +++ b/package/crds/compute.gcp.upbound.io_instances.yaml @@ -3243,10 +3243,24 @@ spec: description: Defines whether the instance should have nested virtualization enabled. Defaults to false. type: boolean + enableUefiNetworking: + description: Whether to enable UEFI networking for instance + creation. + type: boolean + performanceMonitoringUnit: + description: The PMU is a hardware component within the CPU + core that monitors how the processor runs code. Valid values + for the level of PMU are STANDARD, ENHANCED, and ARCHITECTURAL. + type: string threadsPerCore: - description: he number of threads per physical core. To disable + description: The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. type: number + turboMode: + description: Turbo frequency mode to use for the instance. + Supported modes are currently either ALL_CORE_MAX or unset + (default). + type: string visibleCoreCount: description: The number of physical cores to expose to an instance. visible cores info (VC). @@ -3480,6 +3494,14 @@ spec: a specific tag. This value is not returned by the API. type: object x-kubernetes-map-type: granular + resourcePolicies: + description: '- A list of self_links of resource policies + to attach to the instance. Modifying this list will + cause the instance to recreate. Currently a max of 1 + resource policy is supported.' + items: + type: string + type: array size: description: |- The size of the image in gigabytes. 
If not specified, it @@ -3487,7 +3509,7 @@ spec: type: number storagePool: description: |- - The URL of the storage pool in which the new disk is created. + The URL or the name of the storage pool in which the new disk is created. For example: type: string type: @@ -3495,6 +3517,10 @@ spec: can consume resources. type: string type: object + interface: + description: The disk interface to use for attaching this + disk; either SCSI or NVME. + type: string kmsKeySelfLink: description: |- The self_link of the encryption key that is @@ -3532,7 +3558,7 @@ spec: Otherwise, on_host_maintenance has to be set to TERMINATE or this will fail to create the VM. If SEV_SNP, currently min_cpu_platform has to be set to "AMD Milan" or this will - fail to create the VM. TDX is only available in beta.' + fail to create the VM.' type: string enableConfidentialCompute: description: Defines whether the instance should have confidential @@ -3542,18 +3568,13 @@ spec: TERMINATE or this will fail to create the VM. type: boolean type: object - deletionProtection: - description: |- - Enable deletion protection on this instance. Defaults to false. - Note: you must disable deletion protection before removing the resource (e.g. - type: boolean description: description: A brief description of this resource. type: string desiredStatus: description: |- Desired status of the instance. Either - "RUNNING" or "TERMINATED". + "RUNNING", "SUSPENDED" or "TERMINATED". type: string enableDisplay: description: |- @@ -3564,11 +3585,10 @@ spec: description: |- List of the type and count of accelerator cards attached to the instance. Structure documented below. Note: GPU accelerators can only be used with on_host_maintenance option set to TERMINATE. - Note: This field uses attr-as-block mode to avoid - breaking users during the 0.12 upgrade. To explicitly send a list - of zero objects you must use the following syntax: - example=[] - For more details about this behavior, see this section. 
+ Note: As of 6.0.0, argument syntax + is no longer supported for this field in favor of block syntax. + To dynamically set a list of guest accelerators, use dynamic blocks. + To set an empty list, use a single guest_accelerator block with count = 0. items: properties: count: @@ -3587,6 +3607,10 @@ spec: Valid format is a series of labels 1-63 characters long matching the regular expression [a-z]([-a-z0-9]*[a-z0-9]), concatenated with periods. The entire hostname must not exceed 253 characters. Changing this forces a new resource to be created. type: string + keyRevocationActionType: + description: Action to be taken when a customer's encryption key + is revoked. Supports STOP and NONE, with NONE being the default. + type: string labels: additionalProperties: type: string @@ -3639,7 +3663,7 @@ spec: instance can be accessed via the Internet. Omit to ensure that the instance is not accessible from the Internet.g. via tunnel or because it is running on another cloud instance on that network). - This block can be repeated multiple times. Structure documented below. + This block can be specified once per network_interface. Structure documented below. items: properties: natIp: @@ -3810,7 +3834,9 @@ spec: type: object nicType: description: 'The type of vNIC to be used on this interface. - Possible values: GVNIC, VIRTIO_NET.' + Possible values: GVNIC, VIRTIO_NET, IDPF. In the beta + provider the additional values of MRDMA and IRDMA are + supported.' type: string queueCount: description: The networking queue count that's specified @@ -3836,7 +3862,7 @@ spec: subnetworkProject: description: |- The project in which the subnetwork belongs. - If the subnetwork is a self_link, this field is ignored in favor of the project + If the subnetwork is a self_link, this field is set to the project defined in the subnetwork self_link. If the subnetwork is a name and this field is not provided, the provider project is used. 
type: string @@ -3996,6 +4022,12 @@ spec: restarted if it was terminated by Compute Engine (not a user). Defaults to true. type: boolean + availabilityDomain: + description: Specifies the availability domain to place the + instance in. The value must be a number between 1 and the + number of availability domains specified in the spread placement + policy attached to the instance. + type: number instanceTerminationAction: description: Describe the type of termination action for VM. Can be STOP or DELETE. Read more on here @@ -4281,10 +4313,24 @@ spec: description: Defines whether the instance should have nested virtualization enabled. Defaults to false. type: boolean + enableUefiNetworking: + description: Whether to enable UEFI networking for instance + creation. + type: boolean + performanceMonitoringUnit: + description: The PMU is a hardware component within the CPU + core that monitors how the processor runs code. Valid values + for the level of PMU are STANDARD, ENHANCED, and ARCHITECTURAL. + type: string threadsPerCore: - description: he number of threads per physical core. To disable + description: The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. type: number + turboMode: + description: Turbo frequency mode to use for the instance. + Supported modes are currently either ALL_CORE_MAX or unset + (default). + type: string visibleCoreCount: description: The number of physical cores to expose to an instance. visible cores info (VC). @@ -4518,6 +4564,14 @@ spec: a specific tag. This value is not returned by the API. type: object x-kubernetes-map-type: granular + resourcePolicies: + description: '- A list of self_links of resource policies + to attach to the instance. Modifying this list will + cause the instance to recreate. Currently a max of 1 + resource policy is supported.' + items: + type: string + type: array size: description: |- The size of the image in gigabytes. 
If not specified, it @@ -4525,7 +4579,7 @@ spec: type: number storagePool: description: |- - The URL of the storage pool in which the new disk is created. + The URL or the name of the storage pool in which the new disk is created. For example: type: string type: @@ -4533,6 +4587,10 @@ spec: can consume resources. type: string type: object + interface: + description: The disk interface to use for attaching this + disk; either SCSI or NVME. + type: string kmsKeySelfLink: description: |- The self_link of the encryption key that is @@ -4570,7 +4628,7 @@ spec: Otherwise, on_host_maintenance has to be set to TERMINATE or this will fail to create the VM. If SEV_SNP, currently min_cpu_platform has to be set to "AMD Milan" or this will - fail to create the VM. TDX is only available in beta.' + fail to create the VM.' type: string enableConfidentialCompute: description: Defines whether the instance should have confidential @@ -4580,18 +4638,13 @@ spec: TERMINATE or this will fail to create the VM. type: boolean type: object - deletionProtection: - description: |- - Enable deletion protection on this instance. Defaults to false. - Note: you must disable deletion protection before removing the resource (e.g. - type: boolean description: description: A brief description of this resource. type: string desiredStatus: description: |- Desired status of the instance. Either - "RUNNING" or "TERMINATED". + "RUNNING", "SUSPENDED" or "TERMINATED". type: string enableDisplay: description: |- @@ -4602,11 +4655,10 @@ spec: description: |- List of the type and count of accelerator cards attached to the instance. Structure documented below. Note: GPU accelerators can only be used with on_host_maintenance option set to TERMINATE. - Note: This field uses attr-as-block mode to avoid - breaking users during the 0.12 upgrade. To explicitly send a list - of zero objects you must use the following syntax: - example=[] - For more details about this behavior, see this section. 
+ Note: As of 6.0.0, argument syntax + is no longer supported for this field in favor of block syntax. + To dynamically set a list of guest accelerators, use dynamic blocks. + To set an empty list, use a single guest_accelerator block with count = 0. items: properties: count: @@ -4625,6 +4677,10 @@ spec: Valid format is a series of labels 1-63 characters long matching the regular expression [a-z]([-a-z0-9]*[a-z0-9]), concatenated with periods. The entire hostname must not exceed 253 characters. Changing this forces a new resource to be created. type: string + keyRevocationActionType: + description: Action to be taken when a customer's encryption key + is revoked. Supports STOP and NONE, with NONE being the default. + type: string labels: additionalProperties: type: string @@ -4677,7 +4733,7 @@ spec: instance can be accessed via the Internet. Omit to ensure that the instance is not accessible from the Internet.g. via tunnel or because it is running on another cloud instance on that network). - This block can be repeated multiple times. Structure documented below. + This block can be specified once per network_interface. Structure documented below. items: properties: natIp: @@ -4848,7 +4904,9 @@ spec: type: object nicType: description: 'The type of vNIC to be used on this interface. - Possible values: GVNIC, VIRTIO_NET.' + Possible values: GVNIC, VIRTIO_NET, IDPF. In the beta + provider the additional values of MRDMA and IRDMA are + supported.' type: string queueCount: description: The networking queue count that's specified @@ -4874,7 +4932,7 @@ spec: subnetworkProject: description: |- The project in which the subnetwork belongs. - If the subnetwork is a self_link, this field is ignored in favor of the project + If the subnetwork is a self_link, this field is set to the project defined in the subnetwork self_link. If the subnetwork is a name and this field is not provided, the provider project is used. 
type: string @@ -5034,6 +5092,12 @@ spec: restarted if it was terminated by Compute Engine (not a user). Defaults to true. type: boolean + availabilityDomain: + description: Specifies the availability domain to place the + instance in. The value must be a number between 1 and the + number of availability domains specified in the spread placement + policy attached to the instance. + type: number instanceTerminationAction: description: Describe the type of termination action for VM. Can be STOP or DELETE. Read more on here @@ -5485,10 +5549,24 @@ spec: description: Defines whether the instance should have nested virtualization enabled. Defaults to false. type: boolean + enableUefiNetworking: + description: Whether to enable UEFI networking for instance + creation. + type: boolean + performanceMonitoringUnit: + description: The PMU is a hardware component within the CPU + core that monitors how the processor runs code. Valid values + for the level of PMU are STANDARD, ENHANCED, and ARCHITECTURAL. + type: string threadsPerCore: - description: he number of threads per physical core. To disable + description: The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. type: number + turboMode: + description: Turbo frequency mode to use for the instance. + Supported modes are currently either ALL_CORE_MAX or unset + (default). + type: string visibleCoreCount: description: The number of physical cores to expose to an instance. visible cores info (VC). @@ -5615,6 +5693,14 @@ spec: a specific tag. This value is not returned by the API. type: object x-kubernetes-map-type: granular + resourcePolicies: + description: '- A list of self_links of resource policies + to attach to the instance. Modifying this list will + cause the instance to recreate. Currently a max of 1 + resource policy is supported.' + items: + type: string + type: array size: description: |- The size of the image in gigabytes. 
If not specified, it @@ -5622,7 +5708,7 @@ spec: type: number storagePool: description: |- - The URL of the storage pool in which the new disk is created. + The URL or the name of the storage pool in which the new disk is created. For example: type: string type: @@ -5630,6 +5716,10 @@ spec: can consume resources. type: string type: object + interface: + description: The disk interface to use for attaching this + disk; either SCSI or NVME. + type: string kmsKeySelfLink: description: |- The self_link of the encryption key that is @@ -5667,7 +5757,7 @@ spec: Otherwise, on_host_maintenance has to be set to TERMINATE or this will fail to create the VM. If SEV_SNP, currently min_cpu_platform has to be set to "AMD Milan" or this will - fail to create the VM. TDX is only available in beta.' + fail to create the VM.' type: string enableConfidentialCompute: description: Defines whether the instance should have confidential @@ -5680,12 +5770,15 @@ spec: cpuPlatform: description: The CPU platform used by this instance. type: string + creationTimestamp: + description: Creation timestamp in RFC3339 text format. + type: string currentStatus: description: 'The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see Instance - life cycle.`,' + life cycle.' type: string deletionProtection: description: |- @@ -5698,7 +5791,7 @@ spec: desiredStatus: description: |- Desired status of the instance. Either - "RUNNING" or "TERMINATED". + "RUNNING", "SUSPENDED" or "TERMINATED". type: string effectiveLabels: additionalProperties: @@ -5714,11 +5807,10 @@ spec: description: |- List of the type and count of accelerator cards attached to the instance. Structure documented below. Note: GPU accelerators can only be used with on_host_maintenance option set to TERMINATE. 
- Note: This field uses attr-as-block mode to avoid - breaking users during the 0.12 upgrade. To explicitly send a list - of zero objects you must use the following syntax: - example=[] - For more details about this behavior, see this section. + Note: As of 6.0.0, argument syntax + is no longer supported for this field in favor of block syntax. + To dynamically set a list of guest accelerators, use dynamic blocks. + To set an empty list, use a single guest_accelerator block with count = 0. items: properties: count: @@ -5743,6 +5835,10 @@ spec: instanceId: description: The server-assigned unique identifier of this instance. type: string + keyRevocationActionType: + description: Action to be taken when a customer's encryption key + is revoked. Supports STOP and NONE, with NONE being the default. + type: string labelFingerprint: description: The unique fingerprint of the labels. type: string @@ -5801,7 +5897,7 @@ spec: instance can be accessed via the Internet. Omit to ensure that the instance is not accessible from the Internet.g. via tunnel or because it is running on another cloud instance on that network). - This block can be repeated multiple times. Structure documented below. + This block can be specified once per network_interface. Structure documented below. items: properties: natIp: @@ -5906,7 +6002,9 @@ spec: type: string nicType: description: 'The type of vNIC to be used on this interface. - Possible values: GVNIC, VIRTIO_NET.' + Possible values: GVNIC, VIRTIO_NET, IDPF. In the beta + provider the additional values of MRDMA and IRDMA are + supported.' type: string queueCount: description: The networking queue count that's specified @@ -5932,7 +6030,7 @@ spec: subnetworkProject: description: |- The project in which the subnetwork belongs. - If the subnetwork is a self_link, this field is ignored in favor of the project + If the subnetwork is a self_link, this field is set to the project defined in the subnetwork self_link. 
If the subnetwork is a name and this field is not provided, the provider project is used. type: string @@ -6016,6 +6114,12 @@ spec: restarted if it was terminated by Compute Engine (not a user). Defaults to true. type: boolean + availabilityDomain: + description: Specifies the availability domain to place the + instance in. The value must be a number between 1 and the + number of availability domains specified in the spread placement + policy attached to the instance. + type: number instanceTerminationAction: description: Describe the type of termination action for VM. Can be STOP or DELETE. Read more on here diff --git a/package/crds/compute.gcp.upbound.io_instancetemplates.yaml b/package/crds/compute.gcp.upbound.io_instancetemplates.yaml index 7ab66d6e1..62155ac59 100644 --- a/package/crds/compute.gcp.upbound.io_instancetemplates.yaml +++ b/package/crds/compute.gcp.upbound.io_instancetemplates.yaml @@ -3085,10 +3085,24 @@ spec: description: Defines whether the instance should have nested virtualization enabled. Defaults to false. type: boolean + enableUefiNetworking: + description: Whether to enable UEFI networking for instance + creation. + type: boolean + performanceMonitoringUnit: + description: The PMU is a hardware component within the CPU + core that monitors how the processor runs code. Valid values + for the level of PMU are STANDARD, ENHANCED, and ARCHITECTURAL. + type: string threadsPerCore: description: The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. type: number + turboMode: + description: Turbo frequency mode to use for the instance. + Supported modes are currently either ALL_CORE_MAX or unset + (default). + type: string visibleCoreCount: description: The number of physical cores to expose to an instance. visible cores info (VC). @@ -3112,7 +3126,7 @@ spec: Otherwise, on_host_maintenance has to be set to TERMINATE or this will fail to create the VM. 
If SEV_SNP, currently min_cpu_platform has to be set to "AMD Milan" or this will - fail to create the VM. TDX is only available in beta.' + fail to create the VM.' type: string enableConfidentialCompute: description: Defines whether the instance should have confidential @@ -3200,6 +3214,8 @@ spec: Values must be between 10,000 and 120,000. For more details, see the Extreme persistent disk documentation. type: number + provisionedThroughput: + type: number resourceManagerTags: additionalProperties: type: string @@ -3454,6 +3470,10 @@ spec: A brief description to use for instances created from this template. type: string + keyRevocationActionType: + description: Action to be taken when a customer's encryption key + is revoked. Supports STOP and NONE, with NONE being the default. + type: string labels: additionalProperties: type: string @@ -3490,7 +3510,9 @@ spec: namePrefix: description: |- Creates a unique name beginning with the specified - prefix. Conflicts with name. + prefix. Conflicts with name. Max length is 54 characters. + Prefixes with lengths longer than 37 characters will use a shortened + UUID that will be more prone to collisions. type: string networkInterface: description: |- @@ -3503,7 +3525,7 @@ spec: description: |- Access configurations, i.e. IPs via which this instance can be accessed via the Internet.g. via tunnel or because it is running on another cloud instance - on that network). This block can be repeated multiple times. Structure documented below. + on that network). This block can be specified once per network_interface. Structure documented below. items: properties: natIp: @@ -3648,7 +3670,8 @@ spec: type: object nicType: description: 'The type of vNIC to be used on this interface. - Possible values: GVNIC, VIRTIO_NET.' + Possible values: GVNIC, VIRTIO_NET. In the beta provider + the additional values of MRDMA and IRDMA are supported.' 
type: string queueCount: description: The networking queue count that's specified @@ -3829,6 +3852,12 @@ spec: automatically restarted if it is terminated by Compute Engine (not terminated by a user). This defaults to true. type: boolean + availabilityDomain: + description: Specifies the availability domain to place the + instance in. The value must be a number between 1 and the + number of availability domains specified in the spread placement + policy attached to the instance. + type: number instanceTerminationAction: description: Describe the type of termination action for SPOT VM. Can be STOP or DELETE. Read more on here @@ -4082,10 +4111,24 @@ spec: description: Defines whether the instance should have nested virtualization enabled. Defaults to false. type: boolean + enableUefiNetworking: + description: Whether to enable UEFI networking for instance + creation. + type: boolean + performanceMonitoringUnit: + description: The PMU is a hardware component within the CPU + core that monitors how the processor runs code. Valid values + for the level of PMU are STANDARD, ENHANCED, and ARCHITECTURAL. + type: string threadsPerCore: description: The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. type: number + turboMode: + description: Turbo frequency mode to use for the instance. + Supported modes are currently either ALL_CORE_MAX or unset + (default). + type: string visibleCoreCount: description: The number of physical cores to expose to an instance. visible cores info (VC). @@ -4109,7 +4152,7 @@ spec: Otherwise, on_host_maintenance has to be set to TERMINATE or this will fail to create the VM. If SEV_SNP, currently min_cpu_platform has to be set to "AMD Milan" or this will - fail to create the VM. TDX is only available in beta.' + fail to create the VM.' 
type: string enableConfidentialCompute: description: Defines whether the instance should have confidential @@ -4197,6 +4240,8 @@ spec: Values must be between 10,000 and 120,000. For more details, see the Extreme persistent disk documentation. type: number + provisionedThroughput: + type: number resourceManagerTags: additionalProperties: type: string @@ -4451,6 +4496,10 @@ spec: A brief description to use for instances created from this template. type: string + keyRevocationActionType: + description: Action to be taken when a customer's encryption key + is revoked. Supports STOP and NONE, with NONE being the default. + type: string labels: additionalProperties: type: string @@ -4487,7 +4536,9 @@ spec: namePrefix: description: |- Creates a unique name beginning with the specified - prefix. Conflicts with name. + prefix. Conflicts with name. Max length is 54 characters. + Prefixes with lengths longer than 37 characters will use a shortened + UUID that will be more prone to collisions. type: string networkInterface: description: |- @@ -4500,7 +4551,7 @@ spec: description: |- Access configurations, i.e. IPs via which this instance can be accessed via the Internet.g. via tunnel or because it is running on another cloud instance - on that network). This block can be repeated multiple times. Structure documented below. + on that network). This block can be specified once per network_interface. Structure documented below. items: properties: natIp: @@ -4645,7 +4696,8 @@ spec: type: object nicType: description: 'The type of vNIC to be used on this interface. - Possible values: GVNIC, VIRTIO_NET.' + Possible values: GVNIC, VIRTIO_NET. In the beta provider + the additional values of MRDMA and IRDMA are supported.' type: string queueCount: description: The networking queue count that's specified @@ -4826,6 +4878,12 @@ spec: automatically restarted if it is terminated by Compute Engine (not terminated by a user). This defaults to true. 
type: boolean + availabilityDomain: + description: Specifies the availability domain to place the + instance in. The value must be a number between 1 and the + number of availability domains specified in the spread placement + policy attached to the instance. + type: number instanceTerminationAction: description: Describe the type of termination action for SPOT VM. Can be STOP or DELETE. Read more on here @@ -5247,10 +5305,24 @@ spec: description: Defines whether the instance should have nested virtualization enabled. Defaults to false. type: boolean + enableUefiNetworking: + description: Whether to enable UEFI networking for instance + creation. + type: boolean + performanceMonitoringUnit: + description: The PMU is a hardware component within the CPU + core that monitors how the processor runs code. Valid values + for the level of PMU are STANDARD, ENHANCED, and ARCHITECTURAL. + type: string threadsPerCore: description: The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. type: number + turboMode: + description: Turbo frequency mode to use for the instance. + Supported modes are currently either ALL_CORE_MAX or unset + (default). + type: string visibleCoreCount: description: The number of physical cores to expose to an instance. visible cores info (VC). @@ -5274,7 +5346,7 @@ spec: Otherwise, on_host_maintenance has to be set to TERMINATE or this will fail to create the VM. If SEV_SNP, currently min_cpu_platform has to be set to "AMD Milan" or this will - fail to create the VM. TDX is only available in beta.' + fail to create the VM.' type: string enableConfidentialCompute: description: Defines whether the instance should have confidential @@ -5284,6 +5356,9 @@ spec: TERMINATE or this will fail to create the VM. type: boolean type: object + creationTimestamp: + description: Creation timestamp in RFC3339 text format. + type: string description: description: A brief description of this resource. 
type: string @@ -5362,6 +5437,8 @@ spec: Values must be between 10,000 and 120,000. For more details, see the Extreme persistent disk documentation. type: number + provisionedThroughput: + type: number resourceManagerTags: additionalProperties: type: string @@ -5469,6 +5546,10 @@ spec: A brief description to use for instances created from this template. type: string + keyRevocationActionType: + description: Action to be taken when a customer's encryption key + is revoked. Supports STOP and NONE, with NONE being the default. + type: string labels: additionalProperties: type: string @@ -5508,7 +5589,9 @@ spec: namePrefix: description: |- Creates a unique name beginning with the specified - prefix. Conflicts with name. + prefix. Conflicts with name. Max length is 54 characters. + Prefixes with lengths longer than 37 characters will use a shortened + UUID that will be more prone to collisions. type: string networkInterface: description: |- @@ -5521,7 +5604,7 @@ spec: description: |- Access configurations, i.e. IPs via which this instance can be accessed via the Internet.g. via tunnel or because it is running on another cloud instance - on that network). This block can be repeated multiple times. Structure documented below. + on that network). This block can be specified once per network_interface. Structure documented below. items: properties: natIp: @@ -5608,7 +5691,8 @@ spec: type: string nicType: description: 'The type of vNIC to be used on this interface. - Possible values: GVNIC, VIRTIO_NET.' + Possible values: GVNIC, VIRTIO_NET. In the beta provider + the additional values of MRDMA and IRDMA are supported.' type: string queueCount: description: The networking queue count that's specified @@ -5713,6 +5797,12 @@ spec: automatically restarted if it is terminated by Compute Engine (not terminated by a user). This defaults to true. type: boolean + availabilityDomain: + description: Specifies the availability domain to place the + instance in. 
The value must be a number between 1 and the + number of availability domains specified in the spread placement + policy attached to the instance. + type: number instanceTerminationAction: description: Describe the type of termination action for SPOT VM. Can be STOP or DELETE. Read more on here diff --git a/package/crds/compute.gcp.upbound.io_managedsslcertificates.yaml b/package/crds/compute.gcp.upbound.io_managedsslcertificates.yaml index fea61d205..9fd3c33ad 100644 --- a/package/crds/compute.gcp.upbound.io_managedsslcertificates.yaml +++ b/package/crds/compute.gcp.upbound.io_managedsslcertificates.yaml @@ -498,9 +498,6 @@ spec: type: string forProvider: properties: - certificateId: - description: The unique identifier for the resource. - type: number description: description: An optional description of this resource. type: string @@ -544,9 +541,6 @@ spec: for example because of an external controller is managing them, like an autoscaler. properties: - certificateId: - description: The unique identifier for the resource. - type: number description: description: An optional description of this resource. type: string diff --git a/package/crds/compute.gcp.upbound.io_networkfirewallpolicyassociations.yaml b/package/crds/compute.gcp.upbound.io_networkfirewallpolicyassociations.yaml index 9e5448c55..5c7ee2f8e 100644 --- a/package/crds/compute.gcp.upbound.io_networkfirewallpolicyassociations.yaml +++ b/package/crds/compute.gcp.upbound.io_networkfirewallpolicyassociations.yaml @@ -152,7 +152,7 @@ spec: type: object type: object firewallPolicy: - description: The firewall policy ID of the association. + description: The firewall policy of the resource. type: string firewallPolicyRef: description: Reference to a NetworkFirewallPolicy in compute to @@ -231,7 +231,9 @@ spec: type: object type: object project: - description: The project for the resource + description: |- + The ID of the project in which the resource belongs. 
+ If it is not provided, the provider project is used. type: string type: object initProvider: @@ -325,7 +327,9 @@ spec: type: object type: object project: - description: The project for the resource + description: |- + The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. type: string type: object managementPolicies: @@ -505,13 +509,15 @@ spec: description: The target that the firewall policy is attached to. type: string firewallPolicy: - description: The firewall policy ID of the association. + description: The firewall policy of the resource. type: string id: description: an identifier for the resource with format projects/{{project}}/global/firewallPolicies/{{firewall_policy}}/associations/{{name}} type: string project: - description: The project for the resource + description: |- + The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. type: string shortName: description: The short name of the firewall policy of the association. diff --git a/package/crds/compute.gcp.upbound.io_networkfirewallpolicyrules.yaml b/package/crds/compute.gcp.upbound.io_networkfirewallpolicyrules.yaml index efc173ab5..2cb31ccf5 100644 --- a/package/crds/compute.gcp.upbound.io_networkfirewallpolicyrules.yaml +++ b/package/crds/compute.gcp.upbound.io_networkfirewallpolicyrules.yaml @@ -35,7 +35,9 @@ spec: schema: openAPIV3Schema: description: NetworkFirewallPolicyRule is the Schema for the NetworkFirewallPolicyRules - API. The Compute NetworkFirewallPolicyRule resource + API. Represents a rule that describes one or more match conditions along + with the action to be taken when traffic matches this condition (allow or + deny). properties: apiVersion: description: |- @@ -83,20 +85,22 @@ spec: description: An optional description for this resource. type: string direction: - description: 'The direction in which this rule applies. 
Possible - values: INGRESS, EGRESS' + description: |- + The direction in which this rule applies. + Possible values are: INGRESS, EGRESS. type: string disabled: - description: Denotes whether the firewall policy rule is disabled. - When set to true, the firewall policy rule is not enforced and - traffic behaves as if it did not exist. If this is unspecified, - the firewall policy rule will be enabled. + description: |- + Denotes whether the firewall policy rule is disabled. + When set to true, the firewall policy rule is not enforced and traffic behaves as if it did not exist. + If this is unspecified, the firewall policy rule will be enabled. type: boolean enableLogging: - description: 'Denotes whether to enable logging for a particular - rule. If logging is enabled, logs will be exported to the configured - export destination in Stackdriver. Logs may be exported to BigQuery - or Pub/Sub. Note: you cannot enable logging on "goto_next" rules.' + description: |- + Denotes whether to enable logging for a particular rule. + If logging is enabled, logs will be exported to the configured export destination in Stackdriver. + Logs may be exported to BigQuery or Pub/Sub. + Note: you cannot enable logging on "goto_next" rules. type: boolean firewallPolicy: description: The firewall policy of the resource. @@ -178,22 +182,21 @@ spec: type: object type: object match: - description: A match condition that incoming traffic is evaluated - against. If it evaluates to true, the corresponding 'action' - is enforced. + description: |- + A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced. + Structure is documented below. properties: destAddressGroups: description: Address groups which should be matched against the traffic destination. Maximum number of destination address - groups is 10. Destination address groups is only supported - in Egress rules. + groups is 10. 
items: type: string type: array destFqdns: - description: Domain names that will be used to match against - the resolved domain name of destination of traffic. Can - only be specified if DIRECTION is egress. + description: Fully Qualified Domain Name (FQDN) which should + be matched against traffic destination. Maximum number of + destination fqdn allowed is 100. items: type: string type: array @@ -204,37 +207,35 @@ spec: type: string type: array destRegionCodes: - description: The Unicode country codes whose IP addresses - will be used to match against the source of traffic. Can - only be specified if DIRECTION is egress. + description: Region codes whose IP addresses will be used + to match for destination of traffic. Should be specified + as 2 letter country code defined as per ISO 3166 alpha-2 + country codes. ex."US" Maximum number of dest region codes + allowed is 5000. items: type: string type: array destThreatIntelligences: - description: Name of the Google Cloud Threat Intelligence - list. + description: Names of Network Threat Intelligence lists. The + IPs in these lists will be matched against traffic destination. items: type: string type: array layer4Configs: - description: Pairs of IP protocols and ports that the rule - should match. + description: |- + Pairs of IP protocols and ports that the rule should match. + Structure is documented below. items: properties: ipProtocol: - description: The IP protocol to which this rule applies. - The protocol type is required when creating a firewall - rule. This value can either be one of the following - well known protocol strings (tcp, udp, icmp, esp, - ah, ipip, sctp), or the IP protocol number. + description: |- + The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. + This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number. 
type: string ports: - description: 'An optional list of ports to which this - rule applies. This field is only applicable for UDP - or TCP protocol. Each entry must be either an integer - or a range. If not specified, this rule applies to - connections through any port. Example inputs include: - “.' + description: |- + An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. + Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. items: type: string type: array @@ -243,8 +244,7 @@ spec: srcAddressGroups: description: Address groups which should be matched against the traffic source. Maximum number of source address groups - is 10. Source address groups is only supported in Ingress - rules. + is 10. items: type: string type: array @@ -328,9 +328,9 @@ spec: type: object type: object srcFqdns: - description: Domain names that will be used to match against - the resolved domain name of source of traffic. Can only - be specified if DIRECTION is ingress. + description: Fully Qualified Domain Name (FQDN) which should + be matched against traffic source. Maximum number of source + fqdn allowed is 100. items: type: string type: array @@ -341,23 +341,23 @@ spec: type: string type: array srcRegionCodes: - description: The Unicode country codes whose IP addresses - will be used to match against the source of traffic. Can - only be specified if DIRECTION is ingress. + description: Region codes whose IP addresses will be used + to match for source of traffic. Should be specified as 2 + letter country code defined as per ISO 3166 alpha-2 country + codes. ex."US" Maximum number of source region codes allowed + is 5000. items: type: string type: array srcSecureTags: - description: List of secure tag values, which should be matched - at the source of the traffic. 
For INGRESS rule, if all the - srcSecureTag are INEFFECTIVE, and there is no srcIpRange, - this rule will be ignored. Maximum number of source tag - values allowed is 256. + description: |- + List of secure tag values, which should be matched at the source of the traffic. For INGRESS rule, if all the srcSecureTag are INEFFECTIVE, and there is no srcIpRange, this rule will be ignored. Maximum number of source tag values allowed is 256. + Structure is documented below. items: properties: name: description: Name of the secure tag, created with TagManager's - TagValue API. @pattern tagValues/[0-9]+ + TagValue API. type: string nameRef: description: Reference to a TagValue in tags to populate @@ -438,47 +438,44 @@ spec: type: object type: array srcThreatIntelligences: - description: Name of the Google Cloud Threat Intelligence - list. + description: Names of Network Threat Intelligence lists. The + IPs in these lists will be matched against traffic source. items: type: string type: array type: object priority: - description: An integer indicating the priority of a rule in the - list. The priority must be a positive value between 0 and 2147483647. - Rules are evaluated from highest to lowest priority where 0 - is the highest priority and 2147483647 is the lowest prority. + description: |- + An integer indicating the priority of a rule in the list. + The priority must be a positive value between 0 and 2147483647. + Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest prority. type: number project: - description: The project for the resource + description: |- + The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. type: string ruleName: description: An optional name for the rule. This field is not a unique identifier and can be updated. type: string securityProfileGroup: - description: 'A fully-qualified URL of a SecurityProfileGroup - resource. 
Example: https://networksecurity.googleapis.com/v1/organizations/{organizationId}/locations/global/securityProfileGroups/my-security-profile-group. - It must be specified if action = ''apply_security_profile_group'' - and cannot be specified for other actions.' + description: |- + A fully-qualified URL of a SecurityProfile resource instance. + Example: https://networksecurity.googleapis.com/v1/projects/{project}/locations/{location}/securityProfileGroups/my-security-profile-group + Must be specified if action = 'apply_security_profile_group' and cannot be specified for other actions. type: string targetSecureTags: - description: A list of secure tags that controls which instances - the firewall rule applies to. If targetSecureTag are specified, - then the firewall rule applies only to instances in the VPC - network that have one of those EFFECTIVE secure tags, if all - the target_secure_tag are in INEFFECTIVE state, then this rule - will be ignored. targetSecureTag may not be set at the same - time as targetServiceAccounts. If neither targetServiceAccounts - nor targetSecureTag are specified, the firewall rule applies - to all instances on the specified network. Maximum number of - target label tags allowed is 256. + description: |- + A list of secure tags that controls which instances the firewall rule applies to. + If targetSecureTag are specified, then the firewall rule applies only to instances in the VPC network that have one of those EFFECTIVE secure tags, if all the targetSecureTag are in INEFFECTIVE state, then this rule will be ignored. + targetSecureTag may not be set at the same time as targetServiceAccounts. If neither targetServiceAccounts nor targetSecureTag are specified, the firewall rule applies to all instances on the specified network. Maximum number of target label tags allowed is 256. + Structure is documented below. items: properties: name: description: Name of the secure tag, created with TagManager's - TagValue API. 
@pattern tagValues/[0-9]+ + TagValue API. type: string type: object type: array @@ -489,9 +486,9 @@ spec: type: string type: array tlsInspect: - description: Boolean flag indicating if the traffic should be - TLS decrypted. It can be set only if action = 'apply_security_profile_group' - and cannot be set for other actions. + description: |- + Boolean flag indicating if the traffic should be TLS decrypted. + Can be set only if action = 'apply_security_profile_group' and cannot be set for other actions. type: boolean required: - priority @@ -518,38 +515,39 @@ spec: description: An optional description for this resource. type: string direction: - description: 'The direction in which this rule applies. Possible - values: INGRESS, EGRESS' + description: |- + The direction in which this rule applies. + Possible values are: INGRESS, EGRESS. type: string disabled: - description: Denotes whether the firewall policy rule is disabled. - When set to true, the firewall policy rule is not enforced and - traffic behaves as if it did not exist. If this is unspecified, - the firewall policy rule will be enabled. + description: |- + Denotes whether the firewall policy rule is disabled. + When set to true, the firewall policy rule is not enforced and traffic behaves as if it did not exist. + If this is unspecified, the firewall policy rule will be enabled. type: boolean enableLogging: - description: 'Denotes whether to enable logging for a particular - rule. If logging is enabled, logs will be exported to the configured - export destination in Stackdriver. Logs may be exported to BigQuery - or Pub/Sub. Note: you cannot enable logging on "goto_next" rules.' + description: |- + Denotes whether to enable logging for a particular rule. + If logging is enabled, logs will be exported to the configured export destination in Stackdriver. + Logs may be exported to BigQuery or Pub/Sub. + Note: you cannot enable logging on "goto_next" rules. 
type: boolean match: - description: A match condition that incoming traffic is evaluated - against. If it evaluates to true, the corresponding 'action' - is enforced. + description: |- + A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced. + Structure is documented below. properties: destAddressGroups: description: Address groups which should be matched against the traffic destination. Maximum number of destination address - groups is 10. Destination address groups is only supported - in Egress rules. + groups is 10. items: type: string type: array destFqdns: - description: Domain names that will be used to match against - the resolved domain name of destination of traffic. Can - only be specified if DIRECTION is egress. + description: Fully Qualified Domain Name (FQDN) which should + be matched against traffic destination. Maximum number of + destination fqdn allowed is 100. items: type: string type: array @@ -560,37 +558,35 @@ spec: type: string type: array destRegionCodes: - description: The Unicode country codes whose IP addresses - will be used to match against the source of traffic. Can - only be specified if DIRECTION is egress. + description: Region codes whose IP addresses will be used + to match for destination of traffic. Should be specified + as 2 letter country code defined as per ISO 3166 alpha-2 + country codes. ex."US" Maximum number of dest region codes + allowed is 5000. items: type: string type: array destThreatIntelligences: - description: Name of the Google Cloud Threat Intelligence - list. + description: Names of Network Threat Intelligence lists. The + IPs in these lists will be matched against traffic destination. items: type: string type: array layer4Configs: - description: Pairs of IP protocols and ports that the rule - should match. + description: |- + Pairs of IP protocols and ports that the rule should match. + Structure is documented below. 
items: properties: ipProtocol: - description: The IP protocol to which this rule applies. - The protocol type is required when creating a firewall - rule. This value can either be one of the following - well known protocol strings (tcp, udp, icmp, esp, - ah, ipip, sctp), or the IP protocol number. + description: |- + The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. + This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number. type: string ports: - description: 'An optional list of ports to which this - rule applies. This field is only applicable for UDP - or TCP protocol. Each entry must be either an integer - or a range. If not specified, this rule applies to - connections through any port. Example inputs include: - “.' + description: |- + An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. + Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. items: type: string type: array @@ -599,8 +595,7 @@ spec: srcAddressGroups: description: Address groups which should be matched against the traffic source. Maximum number of source address groups - is 10. Source address groups is only supported in Ingress - rules. + is 10. items: type: string type: array @@ -684,9 +679,9 @@ spec: type: object type: object srcFqdns: - description: Domain names that will be used to match against - the resolved domain name of source of traffic. Can only - be specified if DIRECTION is ingress. + description: Fully Qualified Domain Name (FQDN) which should + be matched against traffic source. Maximum number of source + fqdn allowed is 100. 
items: type: string type: array @@ -697,23 +692,23 @@ spec: type: string type: array srcRegionCodes: - description: The Unicode country codes whose IP addresses - will be used to match against the source of traffic. Can - only be specified if DIRECTION is ingress. + description: Region codes whose IP addresses will be used + to match for source of traffic. Should be specified as 2 + letter country code defined as per ISO 3166 alpha-2 country + codes. ex."US" Maximum number of source region codes allowed + is 5000. items: type: string type: array srcSecureTags: - description: List of secure tag values, which should be matched - at the source of the traffic. For INGRESS rule, if all the - srcSecureTag are INEFFECTIVE, and there is no srcIpRange, - this rule will be ignored. Maximum number of source tag - values allowed is 256. + description: |- + List of secure tag values, which should be matched at the source of the traffic. For INGRESS rule, if all the srcSecureTag are INEFFECTIVE, and there is no srcIpRange, this rule will be ignored. Maximum number of source tag values allowed is 256. + Structure is documented below. items: properties: name: description: Name of the secure tag, created with TagManager's - TagValue API. @pattern tagValues/[0-9]+ + TagValue API. type: string nameRef: description: Reference to a TagValue in tags to populate @@ -794,41 +789,38 @@ spec: type: object type: array srcThreatIntelligences: - description: Name of the Google Cloud Threat Intelligence - list. + description: Names of Network Threat Intelligence lists. The + IPs in these lists will be matched against traffic source. items: type: string type: array type: object project: - description: The project for the resource + description: |- + The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. type: string ruleName: description: An optional name for the rule. This field is not a unique identifier and can be updated. 
type: string securityProfileGroup: - description: 'A fully-qualified URL of a SecurityProfileGroup - resource. Example: https://networksecurity.googleapis.com/v1/organizations/{organizationId}/locations/global/securityProfileGroups/my-security-profile-group. - It must be specified if action = ''apply_security_profile_group'' - and cannot be specified for other actions.' + description: |- + A fully-qualified URL of a SecurityProfile resource instance. + Example: https://networksecurity.googleapis.com/v1/projects/{project}/locations/{location}/securityProfileGroups/my-security-profile-group + Must be specified if action = 'apply_security_profile_group' and cannot be specified for other actions. type: string targetSecureTags: - description: A list of secure tags that controls which instances - the firewall rule applies to. If targetSecureTag are specified, - then the firewall rule applies only to instances in the VPC - network that have one of those EFFECTIVE secure tags, if all - the target_secure_tag are in INEFFECTIVE state, then this rule - will be ignored. targetSecureTag may not be set at the same - time as targetServiceAccounts. If neither targetServiceAccounts - nor targetSecureTag are specified, the firewall rule applies - to all instances on the specified network. Maximum number of - target label tags allowed is 256. + description: |- + A list of secure tags that controls which instances the firewall rule applies to. + If targetSecureTag are specified, then the firewall rule applies only to instances in the VPC network that have one of those EFFECTIVE secure tags, if all the targetSecureTag are in INEFFECTIVE state, then this rule will be ignored. + targetSecureTag may not be set at the same time as targetServiceAccounts. If neither targetServiceAccounts nor targetSecureTag are specified, the firewall rule applies to all instances on the specified network. Maximum number of target label tags allowed is 256. + Structure is documented below. 
items: properties: name: description: Name of the secure tag, created with TagManager's - TagValue API. @pattern tagValues/[0-9]+ + TagValue API. type: string type: object type: array @@ -839,9 +831,9 @@ spec: type: string type: array tlsInspect: - description: Boolean flag indicating if the traffic should be - TLS decrypted. It can be set only if action = 'apply_security_profile_group' - and cannot be set for other actions. + description: |- + Boolean flag indicating if the traffic should be TLS decrypted. + Can be set only if action = 'apply_security_profile_group' and cannot be set for other actions. type: boolean type: object managementPolicies: @@ -1035,24 +1027,29 @@ spec: triggers the rule. Valid actions are "allow", "deny", "goto_next" and "apply_security_profile_group". type: string + creationTimestamp: + description: Creation timestamp in RFC3339 text format. + type: string description: description: An optional description for this resource. type: string direction: - description: 'The direction in which this rule applies. Possible - values: INGRESS, EGRESS' + description: |- + The direction in which this rule applies. + Possible values are: INGRESS, EGRESS. type: string disabled: - description: Denotes whether the firewall policy rule is disabled. - When set to true, the firewall policy rule is not enforced and - traffic behaves as if it did not exist. If this is unspecified, - the firewall policy rule will be enabled. + description: |- + Denotes whether the firewall policy rule is disabled. + When set to true, the firewall policy rule is not enforced and traffic behaves as if it did not exist. + If this is unspecified, the firewall policy rule will be enabled. type: boolean enableLogging: - description: 'Denotes whether to enable logging for a particular - rule. If logging is enabled, logs will be exported to the configured - export destination in Stackdriver. Logs may be exported to BigQuery - or Pub/Sub. 
Note: you cannot enable logging on "goto_next" rules.' + description: |- + Denotes whether to enable logging for a particular rule. + If logging is enabled, logs will be exported to the configured export destination in Stackdriver. + Logs may be exported to BigQuery or Pub/Sub. + Note: you cannot enable logging on "goto_next" rules. type: boolean firewallPolicy: description: The firewall policy of the resource. @@ -1065,22 +1062,21 @@ spec: for firewall policy rules type: string match: - description: A match condition that incoming traffic is evaluated - against. If it evaluates to true, the corresponding 'action' - is enforced. + description: |- + A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced. + Structure is documented below. properties: destAddressGroups: description: Address groups which should be matched against the traffic destination. Maximum number of destination address - groups is 10. Destination address groups is only supported - in Egress rules. + groups is 10. items: type: string type: array destFqdns: - description: Domain names that will be used to match against - the resolved domain name of destination of traffic. Can - only be specified if DIRECTION is egress. + description: Fully Qualified Domain Name (FQDN) which should + be matched against traffic destination. Maximum number of + destination fqdn allowed is 100. items: type: string type: array @@ -1091,37 +1087,35 @@ spec: type: string type: array destRegionCodes: - description: The Unicode country codes whose IP addresses - will be used to match against the source of traffic. Can - only be specified if DIRECTION is egress. + description: Region codes whose IP addresses will be used + to match for destination of traffic. Should be specified + as 2 letter country code defined as per ISO 3166 alpha-2 + country codes. ex."US" Maximum number of dest region codes + allowed is 5000. 
items: type: string type: array destThreatIntelligences: - description: Name of the Google Cloud Threat Intelligence - list. + description: Names of Network Threat Intelligence lists. The + IPs in these lists will be matched against traffic destination. items: type: string type: array layer4Configs: - description: Pairs of IP protocols and ports that the rule - should match. + description: |- + Pairs of IP protocols and ports that the rule should match. + Structure is documented below. items: properties: ipProtocol: - description: The IP protocol to which this rule applies. - The protocol type is required when creating a firewall - rule. This value can either be one of the following - well known protocol strings (tcp, udp, icmp, esp, - ah, ipip, sctp), or the IP protocol number. + description: |- + The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. + This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number. type: string ports: - description: 'An optional list of ports to which this - rule applies. This field is only applicable for UDP - or TCP protocol. Each entry must be either an integer - or a range. If not specified, this rule applies to - connections through any port. Example inputs include: - “.' + description: |- + An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. + Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. items: type: string type: array @@ -1130,15 +1124,14 @@ spec: srcAddressGroups: description: Address groups which should be matched against the traffic source. Maximum number of source address groups - is 10. Source address groups is only supported in Ingress - rules. + is 10. 
items: type: string type: array srcFqdns: - description: Domain names that will be used to match against - the resolved domain name of source of traffic. Can only - be specified if DIRECTION is ingress. + description: Fully Qualified Domain Name (FQDN) which should + be matched against traffic source. Maximum number of source + fqdn allowed is 100. items: type: string type: array @@ -1149,46 +1142,48 @@ spec: type: string type: array srcRegionCodes: - description: The Unicode country codes whose IP addresses - will be used to match against the source of traffic. Can - only be specified if DIRECTION is ingress. + description: Region codes whose IP addresses will be used + to match for source of traffic. Should be specified as 2 + letter country code defined as per ISO 3166 alpha-2 country + codes. ex."US" Maximum number of source region codes allowed + is 5000. items: type: string type: array srcSecureTags: - description: List of secure tag values, which should be matched - at the source of the traffic. For INGRESS rule, if all the - srcSecureTag are INEFFECTIVE, and there is no srcIpRange, - this rule will be ignored. Maximum number of source tag - values allowed is 256. + description: |- + List of secure tag values, which should be matched at the source of the traffic. For INGRESS rule, if all the srcSecureTag are INEFFECTIVE, and there is no srcIpRange, this rule will be ignored. Maximum number of source tag values allowed is 256. + Structure is documented below. items: properties: name: description: Name of the secure tag, created with TagManager's - TagValue API. @pattern tagValues/[0-9]+ + TagValue API. type: string state: - description: '[Output Only] State of the secure tag, - either EFFECTIVE or INEFFECTIVE. A secure tag is INEFFECTIVE - when it is deleted or its network is deleted.' + description: |- + (Output) + State of the secure tag, either EFFECTIVE or INEFFECTIVE. A secure tag is INEFFECTIVE when it is deleted or its network is deleted. 
type: string type: object type: array srcThreatIntelligences: - description: Name of the Google Cloud Threat Intelligence - list. + description: Names of Network Threat Intelligence lists. The + IPs in these lists will be matched against traffic source. items: type: string type: array type: object priority: - description: An integer indicating the priority of a rule in the - list. The priority must be a positive value between 0 and 2147483647. - Rules are evaluated from highest to lowest priority where 0 - is the highest priority and 2147483647 is the lowest prority. + description: |- + An integer indicating the priority of a rule in the list. + The priority must be a positive value between 0 and 2147483647. + Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest prority. type: number project: - description: The project for the resource + description: |- + The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. type: string ruleName: description: An optional name for the rule. This field is not @@ -1199,32 +1194,27 @@ spec: policy rule. type: number securityProfileGroup: - description: 'A fully-qualified URL of a SecurityProfileGroup - resource. Example: https://networksecurity.googleapis.com/v1/organizations/{organizationId}/locations/global/securityProfileGroups/my-security-profile-group. - It must be specified if action = ''apply_security_profile_group'' - and cannot be specified for other actions.' + description: |- + A fully-qualified URL of a SecurityProfile resource instance. + Example: https://networksecurity.googleapis.com/v1/projects/{project}/locations/{location}/securityProfileGroups/my-security-profile-group + Must be specified if action = 'apply_security_profile_group' and cannot be specified for other actions. type: string targetSecureTags: - description: A list of secure tags that controls which instances - the firewall rule applies to. 
If targetSecureTag are specified, - then the firewall rule applies only to instances in the VPC - network that have one of those EFFECTIVE secure tags, if all - the target_secure_tag are in INEFFECTIVE state, then this rule - will be ignored. targetSecureTag may not be set at the same - time as targetServiceAccounts. If neither targetServiceAccounts - nor targetSecureTag are specified, the firewall rule applies - to all instances on the specified network. Maximum number of - target label tags allowed is 256. + description: |- + A list of secure tags that controls which instances the firewall rule applies to. + If targetSecureTag are specified, then the firewall rule applies only to instances in the VPC network that have one of those EFFECTIVE secure tags, if all the targetSecureTag are in INEFFECTIVE state, then this rule will be ignored. + targetSecureTag may not be set at the same time as targetServiceAccounts. If neither targetServiceAccounts nor targetSecureTag are specified, the firewall rule applies to all instances on the specified network. Maximum number of target label tags allowed is 256. + Structure is documented below. items: properties: name: description: Name of the secure tag, created with TagManager's - TagValue API. @pattern tagValues/[0-9]+ + TagValue API. type: string state: - description: '[Output Only] State of the secure tag, either - EFFECTIVE or INEFFECTIVE. A secure tag is INEFFECTIVE - when it is deleted or its network is deleted.' + description: |- + (Output) + State of the secure tag, either EFFECTIVE or INEFFECTIVE. A secure tag is INEFFECTIVE when it is deleted or its network is deleted. type: string type: object type: array @@ -1235,9 +1225,9 @@ spec: type: string type: array tlsInspect: - description: Boolean flag indicating if the traffic should be - TLS decrypted. It can be set only if action = 'apply_security_profile_group' - and cannot be set for other actions. 
+ description: |- + Boolean flag indicating if the traffic should be TLS decrypted. + Can be set only if action = 'apply_security_profile_group' and cannot be set for other actions. type: boolean type: object conditions: diff --git a/package/crds/compute.gcp.upbound.io_networks.yaml b/package/crds/compute.gcp.upbound.io_networks.yaml index 454e55cd9..a32a69fda 100644 --- a/package/crds/compute.gcp.upbound.io_networks.yaml +++ b/package/crds/compute.gcp.upbound.io_networks.yaml @@ -431,10 +431,15 @@ spec: Default value is AFTER_CLASSIC_FIREWALL. Possible values are: BEFORE_CLASSIC_FIREWALL, AFTER_CLASSIC_FIREWALL. type: string - numericId: + networkId: description: The unique identifier for the resource. This identifier is defined by the server. type: string + numericId: + description: |- + (Deprecated) + The unique identifier for the resource. This identifier is defined by the server. + type: string project: description: |- The ID of the project in which the resource belongs. diff --git a/package/crds/compute.gcp.upbound.io_nodetemplates.yaml b/package/crds/compute.gcp.upbound.io_nodetemplates.yaml index 318323250..4fc2603e5 100644 --- a/package/crds/compute.gcp.upbound.io_nodetemplates.yaml +++ b/package/crds/compute.gcp.upbound.io_nodetemplates.yaml @@ -610,6 +610,25 @@ spec: type: string forProvider: properties: + accelerators: + description: |- + List of the type and count of accelerator cards attached to the + node template + Structure is documented below. + items: + properties: + acceleratorCount: + description: |- + The number of the guest accelerator cards exposed to this + node template. + type: number + acceleratorType: + description: |- + Full or partial URL of the accelerator type resource to expose + to this node template. + type: string + type: object + type: array cpuOvercommitType: description: |- CPU overcommit. @@ -619,6 +638,27 @@ spec: description: description: An optional textual description of the resource. 
type: string + disks: + description: |- + List of the type, size and count of disks attached to the + node template + Structure is documented below. + items: + properties: + diskCount: + description: Specifies the number of such disks. + type: number + diskSizeGb: + description: Specifies the size of the disk in base-2 GB. + type: number + diskType: + description: 'Specifies the desired disk type on the node. + This disk type must be a local storage type (e.g.: local-ssd). + Note that for nodeTemplates, this should be the name of + the disk type and not its URL.' + type: string + type: object + type: array nodeAffinityLabels: additionalProperties: type: string @@ -696,6 +736,25 @@ spec: for example because of an external controller is managing them, like an autoscaler. properties: + accelerators: + description: |- + List of the type and count of accelerator cards attached to the + node template + Structure is documented below. + items: + properties: + acceleratorCount: + description: |- + The number of the guest accelerator cards exposed to this + node template. + type: number + acceleratorType: + description: |- + Full or partial URL of the accelerator type resource to expose + to this node template. + type: string + type: object + type: array cpuOvercommitType: description: |- CPU overcommit. @@ -705,6 +764,27 @@ spec: description: description: An optional textual description of the resource. type: string + disks: + description: |- + List of the type, size and count of disks attached to the + node template + Structure is documented below. + items: + properties: + diskCount: + description: Specifies the number of such disks. + type: number + diskSizeGb: + description: Specifies the size of the disk in base-2 GB. + type: number + diskType: + description: 'Specifies the desired disk type on the node. + This disk type must be a local storage type (e.g.: local-ssd). + Note that for nodeTemplates, this should be the name of + the disk type and not its URL.' 
+ type: string + type: object + type: array nodeAffinityLabels: additionalProperties: type: string @@ -934,6 +1014,25 @@ spec: properties: atProvider: properties: + accelerators: + description: |- + List of the type and count of accelerator cards attached to the + node template + Structure is documented below. + items: + properties: + acceleratorCount: + description: |- + The number of the guest accelerator cards exposed to this + node template. + type: number + acceleratorType: + description: |- + Full or partial URL of the accelerator type resource to expose + to this node template. + type: string + type: object + type: array cpuOvercommitType: description: |- CPU overcommit. @@ -946,6 +1045,27 @@ spec: description: description: An optional textual description of the resource. type: string + disks: + description: |- + List of the type, size and count of disks attached to the + node template + Structure is documented below. + items: + properties: + diskCount: + description: Specifies the number of such disks. + type: number + diskSizeGb: + description: Specifies the size of the disk in base-2 GB. + type: number + diskType: + description: 'Specifies the desired disk type on the node. + This disk type must be a local storage type (e.g.: local-ssd). + Note that for nodeTemplates, this should be the name of + the disk type and not its URL.' + type: string + type: object + type: array id: description: an identifier for the resource with format projects/{{project}}/regions/{{region}}/nodeTemplates/{{name}} type: string diff --git a/package/crds/compute.gcp.upbound.io_regionautoscalers.yaml b/package/crds/compute.gcp.upbound.io_regionautoscalers.yaml index 91f7a673c..16915cd69 100644 --- a/package/crds/compute.gcp.upbound.io_regionautoscalers.yaml +++ b/package/crds/compute.gcp.upbound.io_regionautoscalers.yaml @@ -1367,7 +1367,7 @@ spec: properties: maxScaledInReplicas: description: |- - A nested object resource + A nested object resource. Structure is documented below. 
properties: fixed: @@ -1677,7 +1677,7 @@ spec: properties: maxScaledInReplicas: description: |- - A nested object resource + A nested object resource. Structure is documented below. properties: fixed: @@ -2146,7 +2146,7 @@ spec: properties: maxScaledInReplicas: description: |- - A nested object resource + A nested object resource. Structure is documented below. properties: fixed: diff --git a/package/crds/compute.gcp.upbound.io_regionbackendservices.yaml b/package/crds/compute.gcp.upbound.io_regionbackendservices.yaml index d49bfddc6..2718cb53a 100644 --- a/package/crds/compute.gcp.upbound.io_regionbackendservices.yaml +++ b/package/crds/compute.gcp.upbound.io_regionbackendservices.yaml @@ -2536,8 +2536,7 @@ spec: Specifies the balancing mode for this backend. See the Backend Services Overview for an explanation of load balancing modes. - From version 6.0.0 default value will be UTILIZATION to match default GCP value. - Default value is CONNECTION. + Default value is UTILIZATION. Possible values are: UTILIZATION, RATE, CONNECTION. type: string capacityScaler: @@ -2868,7 +2867,6 @@ spec: description: |- Time for which instance will be drained (not accept new connections, but still work to finish started). - From version 6.0.0 ConnectionDrainingTimeoutSec default value will be 300 to match default GCP value. type: number consistentHash: description: |- @@ -2889,22 +2887,15 @@ spec: Structure is documented below. properties: name: - description: |- - Name of the resource. Provided by the client when the resource is - created. The name must be 1-63 characters long, and comply with - RFC1035. Specifically, the name must be 1-63 characters long and match - the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the - first character must be a lowercase letter, and all following - characters must be a dash, lowercase letter, or digit, except the last - character, which cannot be a dash. + description: Name of the cookie. 
type: string path: description: Path to set for the cookie. type: string ttl: description: |- - The TTL (in seconds) for which to cache responses with the corresponding status code. The maximum allowed value is 1800s - (30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. + Lifetime of the cookie. + Structure is documented below. properties: nanos: description: |- @@ -3071,6 +3062,10 @@ spec: Settings for enabling Cloud Identity Aware Proxy Structure is documented below. properties: + enabled: + description: Whether the serving infrastructure will authenticate + and authorize all incoming requests. + type: boolean oauth2ClientId: description: OAuth2 Client ID for IAP type: string @@ -3094,6 +3089,11 @@ spec: - namespace type: object type: object + ipAddressSelectionPolicy: + description: |- + Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). + Possible values are: IPV4_ONLY, PREFER_IPV6, IPV6_ONLY. + type: string loadBalancingScheme: description: is set to INTERNAL_MANAGED type: string @@ -3130,8 +3130,6 @@ spec: Settings controlling eviction of unhealthy hosts from the load balancing pool. This field is applicable only when the load_balancing_scheme is set to INTERNAL_MANAGED and the protocol is set to HTTP, HTTPS, or HTTP2. - From version 6.0. - Default values are enforce by GCP without providing them. Structure is documented below. properties: baseEjectionTime: @@ -3260,8 +3258,38 @@ spec: description: |- Type of session affinity to use. The default is NONE. Session affinity is not applicable if the protocol is UDP. - Possible values are: NONE, CLIENT_IP, CLIENT_IP_PORT_PROTO, CLIENT_IP_PROTO, GENERATED_COOKIE, HEADER_FIELD, HTTP_COOKIE, CLIENT_IP_NO_DESTINATION. + Possible values are: NONE, CLIENT_IP, CLIENT_IP_PORT_PROTO, CLIENT_IP_PROTO, GENERATED_COOKIE, HEADER_FIELD, HTTP_COOKIE, CLIENT_IP_NO_DESTINATION, STRONG_COOKIE_AFFINITY. 
type: string + strongSessionAffinityCookie: + description: |- + Describes the HTTP cookie used for stateful session affinity. This field is applicable and required if the sessionAffinity is set to STRONG_COOKIE_AFFINITY. + Structure is documented below. + properties: + name: + description: Name of the cookie. + type: string + path: + description: Path to set for the cookie. + type: string + ttl: + description: |- + Lifetime of the cookie. + Structure is documented below. + properties: + nanos: + description: |- + Span of time that's a fraction of a second at nanosecond + resolution. Durations less than one second are represented + with a 0 seconds field and a positive nanos field. Must + be from 0 to 999,999,999 inclusive. + type: number + seconds: + description: |- + Span of time at a resolution of a second. + Must be from 0 to 315,576,000,000 inclusive. + type: number + type: object + type: object timeoutSec: description: |- The backend service timeout has a different meaning depending on the type of load balancer. @@ -3304,8 +3332,7 @@ spec: Specifies the balancing mode for this backend. See the Backend Services Overview for an explanation of load balancing modes. - From version 6.0.0 default value will be UTILIZATION to match default GCP value. - Default value is CONNECTION. + Default value is UTILIZATION. Possible values are: UTILIZATION, RATE, CONNECTION. type: string capacityScaler: @@ -3636,7 +3663,6 @@ spec: description: |- Time for which instance will be drained (not accept new connections, but still work to finish started). - From version 6.0.0 ConnectionDrainingTimeoutSec default value will be 300 to match default GCP value. type: number consistentHash: description: |- @@ -3657,22 +3683,15 @@ spec: Structure is documented below. properties: name: - description: |- - Name of the resource. Provided by the client when the resource is - created. The name must be 1-63 characters long, and comply with - RFC1035. 
Specifically, the name must be 1-63 characters long and match - the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the - first character must be a lowercase letter, and all following - characters must be a dash, lowercase letter, or digit, except the last - character, which cannot be a dash. + description: Name of the cookie. type: string path: description: Path to set for the cookie. type: string ttl: description: |- - The TTL (in seconds) for which to cache responses with the corresponding status code. The maximum allowed value is 1800s - (30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. + Lifetime of the cookie. + Structure is documented below. properties: nanos: description: |- @@ -3839,6 +3858,10 @@ spec: Settings for enabling Cloud Identity Aware Proxy Structure is documented below. properties: + enabled: + description: Whether the serving infrastructure will authenticate + and authorize all incoming requests. + type: boolean oauth2ClientId: description: OAuth2 Client ID for IAP type: string @@ -3861,9 +3884,12 @@ spec: - name - namespace type: object - required: - - oauth2ClientSecretSecretRef type: object + ipAddressSelectionPolicy: + description: |- + Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). + Possible values are: IPV4_ONLY, PREFER_IPV6, IPV6_ONLY. + type: string loadBalancingScheme: description: is set to INTERNAL_MANAGED type: string @@ -3900,8 +3926,6 @@ spec: Settings controlling eviction of unhealthy hosts from the load balancing pool. This field is applicable only when the load_balancing_scheme is set to INTERNAL_MANAGED and the protocol is set to HTTP, HTTPS, or HTTP2. - From version 6.0. - Default values are enforce by GCP without providing them. Structure is documented below. properties: baseEjectionTime: @@ -4025,8 +4049,38 @@ spec: description: |- Type of session affinity to use. The default is NONE. 
Session affinity is not applicable if the protocol is UDP. - Possible values are: NONE, CLIENT_IP, CLIENT_IP_PORT_PROTO, CLIENT_IP_PROTO, GENERATED_COOKIE, HEADER_FIELD, HTTP_COOKIE, CLIENT_IP_NO_DESTINATION. + Possible values are: NONE, CLIENT_IP, CLIENT_IP_PORT_PROTO, CLIENT_IP_PROTO, GENERATED_COOKIE, HEADER_FIELD, HTTP_COOKIE, CLIENT_IP_NO_DESTINATION, STRONG_COOKIE_AFFINITY. type: string + strongSessionAffinityCookie: + description: |- + Describes the HTTP cookie used for stateful session affinity. This field is applicable and required if the sessionAffinity is set to STRONG_COOKIE_AFFINITY. + Structure is documented below. + properties: + name: + description: Name of the cookie. + type: string + path: + description: Path to set for the cookie. + type: string + ttl: + description: |- + Lifetime of the cookie. + Structure is documented below. + properties: + nanos: + description: |- + Span of time that's a fraction of a second at nanosecond + resolution. Durations less than one second are represented + with a 0 seconds field and a positive nanos field. Must + be from 0 to 999,999,999 inclusive. + type: number + seconds: + description: |- + Span of time at a resolution of a second. + Must be from 0 to 315,576,000,000 inclusive. + type: number + type: object + type: object timeoutSec: description: |- The backend service timeout has a different meaning depending on the type of load balancer. @@ -4227,8 +4281,7 @@ spec: Specifies the balancing mode for this backend. See the Backend Services Overview for an explanation of load balancing modes. - From version 6.0.0 default value will be UTILIZATION to match default GCP value. - Default value is CONNECTION. + Default value is UTILIZATION. Possible values are: UTILIZATION, RATE, CONNECTION. type: string capacityScaler: @@ -4483,7 +4536,6 @@ spec: description: |- Time for which instance will be drained (not accept new connections, but still work to finish started). 
- From version 6.0.0 ConnectionDrainingTimeoutSec default value will be 300 to match default GCP value. type: number consistentHash: description: |- @@ -4504,22 +4556,15 @@ spec: Structure is documented below. properties: name: - description: |- - Name of the resource. Provided by the client when the resource is - created. The name must be 1-63 characters long, and comply with - RFC1035. Specifically, the name must be 1-63 characters long and match - the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the - first character must be a lowercase letter, and all following - characters must be a dash, lowercase letter, or digit, except the last - character, which cannot be a dash. + description: Name of the cookie. type: string path: description: Path to set for the cookie. type: string ttl: description: |- - The TTL (in seconds) for which to cache responses with the corresponding status code. The maximum allowed value is 1800s - (30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. + Lifetime of the cookie. + Structure is documented below. properties: nanos: description: |- @@ -4619,6 +4664,10 @@ spec: Settings for enabling Cloud Identity Aware Proxy Structure is documented below. properties: + enabled: + description: Whether the serving infrastructure will authenticate + and authorize all incoming requests. + type: boolean oauth2ClientId: description: OAuth2 Client ID for IAP type: string @@ -4626,6 +4675,11 @@ spec: id: description: an identifier for the resource with format projects/{{project}}/regions/{{region}}/backendServices/{{name}} type: string + ipAddressSelectionPolicy: + description: |- + Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). + Possible values are: IPV4_ONLY, PREFER_IPV6, IPV6_ONLY. 
+ type: string loadBalancingScheme: description: is set to INTERNAL_MANAGED type: string @@ -4662,8 +4716,6 @@ spec: Settings controlling eviction of unhealthy hosts from the load balancing pool. This field is applicable only when the load_balancing_scheme is set to INTERNAL_MANAGED and the protocol is set to HTTP, HTTPS, or HTTP2. - From version 6.0. - Default values are enforce by GCP without providing them. Structure is documented below. properties: baseEjectionTime: @@ -4795,8 +4847,38 @@ spec: description: |- Type of session affinity to use. The default is NONE. Session affinity is not applicable if the protocol is UDP. - Possible values are: NONE, CLIENT_IP, CLIENT_IP_PORT_PROTO, CLIENT_IP_PROTO, GENERATED_COOKIE, HEADER_FIELD, HTTP_COOKIE, CLIENT_IP_NO_DESTINATION. + Possible values are: NONE, CLIENT_IP, CLIENT_IP_PORT_PROTO, CLIENT_IP_PROTO, GENERATED_COOKIE, HEADER_FIELD, HTTP_COOKIE, CLIENT_IP_NO_DESTINATION, STRONG_COOKIE_AFFINITY. type: string + strongSessionAffinityCookie: + description: |- + Describes the HTTP cookie used for stateful session affinity. This field is applicable and required if the sessionAffinity is set to STRONG_COOKIE_AFFINITY. + Structure is documented below. + properties: + name: + description: Name of the cookie. + type: string + path: + description: Path to set for the cookie. + type: string + ttl: + description: |- + Lifetime of the cookie. + Structure is documented below. + properties: + nanos: + description: |- + Span of time that's a fraction of a second at nanosecond + resolution. Durations less than one second are represented + with a 0 seconds field and a positive nanos field. Must + be from 0 to 999,999,999 inclusive. + type: number + seconds: + description: |- + Span of time at a resolution of a second. + Must be from 0 to 315,576,000,000 inclusive. + type: number + type: object + type: object timeoutSec: description: |- The backend service timeout has a different meaning depending on the type of load balancer. 
diff --git a/package/crds/compute.gcp.upbound.io_regiondisks.yaml b/package/crds/compute.gcp.upbound.io_regiondisks.yaml index b403e16ed..4f2de4d7a 100644 --- a/package/crds/compute.gcp.upbound.io_regiondisks.yaml +++ b/package/crds/compute.gcp.upbound.io_regiondisks.yaml @@ -1168,7 +1168,7 @@ spec: properties: asyncPrimaryDisk: description: |- - A nested object resource + A nested object resource. Structure is documented below. properties: disk: @@ -1473,7 +1473,7 @@ spec: properties: asyncPrimaryDisk: description: |- - A nested object resource + A nested object resource. Structure is documented below. properties: disk: @@ -1937,7 +1937,7 @@ spec: properties: asyncPrimaryDisk: description: |- - A nested object resource + A nested object resource. Structure is documented below. properties: disk: diff --git a/package/crds/compute.gcp.upbound.io_regionhealthchecks.yaml b/package/crds/compute.gcp.upbound.io_regionhealthchecks.yaml index bff539263..c884ef43a 100644 --- a/package/crds/compute.gcp.upbound.io_regionhealthchecks.yaml +++ b/package/crds/compute.gcp.upbound.io_regionhealthchecks.yaml @@ -1316,7 +1316,7 @@ spec: type: string grpcHealthCheck: description: |- - A nested object resource + A nested object resource. Structure is documented below. properties: grpcServiceName: @@ -1348,7 +1348,7 @@ spec: type: number http2HealthCheck: description: |- - A nested object resource + A nested object resource. Structure is documented below. properties: host: @@ -1393,7 +1393,7 @@ spec: type: object httpHealthCheck: description: |- - A nested object resource + A nested object resource. Structure is documented below. properties: host: @@ -1438,7 +1438,7 @@ spec: type: object httpsHealthCheck: description: |- - A nested object resource + A nested object resource. Structure is documented below. properties: host: @@ -1504,7 +1504,7 @@ spec: type: string sslHealthCheck: description: |- - A nested object resource + A nested object resource. Structure is documented below. 
properties: port: @@ -1545,7 +1545,7 @@ spec: type: object tcpHealthCheck: description: |- - A nested object resource + A nested object resource. Structure is documented below. properties: port: @@ -1623,7 +1623,7 @@ spec: type: string grpcHealthCheck: description: |- - A nested object resource + A nested object resource. Structure is documented below. properties: grpcServiceName: @@ -1655,7 +1655,7 @@ spec: type: number http2HealthCheck: description: |- - A nested object resource + A nested object resource. Structure is documented below. properties: host: @@ -1700,7 +1700,7 @@ spec: type: object httpHealthCheck: description: |- - A nested object resource + A nested object resource. Structure is documented below. properties: host: @@ -1745,7 +1745,7 @@ spec: type: object httpsHealthCheck: description: |- - A nested object resource + A nested object resource. Structure is documented below. properties: host: @@ -1806,7 +1806,7 @@ spec: type: string sslHealthCheck: description: |- - A nested object resource + A nested object resource. Structure is documented below. properties: port: @@ -1847,7 +1847,7 @@ spec: type: object tcpHealthCheck: description: |- - A nested object resource + A nested object resource. Structure is documented below. properties: port: @@ -2085,7 +2085,7 @@ spec: type: string grpcHealthCheck: description: |- - A nested object resource + A nested object resource. Structure is documented below. properties: grpcServiceName: @@ -2110,6 +2110,10 @@ spec: following values: type: string type: object + healthCheckId: + description: The unique identifier number for the resource. This + identifier is defined by the server. + type: number healthyThreshold: description: |- A so-far unhealthy instance will be marked healthy after this many @@ -2117,7 +2121,7 @@ spec: type: number http2HealthCheck: description: |- - A nested object resource + A nested object resource. Structure is documented below. 
properties: host: @@ -2162,7 +2166,7 @@ spec: type: object httpHealthCheck: description: |- - A nested object resource + A nested object resource. Structure is documented below. properties: host: @@ -2207,7 +2211,7 @@ spec: type: object httpsHealthCheck: description: |- - A nested object resource + A nested object resource. Structure is documented below. properties: host: @@ -2279,7 +2283,7 @@ spec: type: string sslHealthCheck: description: |- - A nested object resource + A nested object resource. Structure is documented below. properties: port: @@ -2320,7 +2324,7 @@ spec: type: object tcpHealthCheck: description: |- - A nested object resource + A nested object resource. Structure is documented below. properties: port: diff --git a/package/crds/compute.gcp.upbound.io_regioninstancegroupmanagers.yaml b/package/crds/compute.gcp.upbound.io_regioninstancegroupmanagers.yaml index f75608ea9..7aad5d9bc 100644 --- a/package/crds/compute.gcp.upbound.io_regioninstancegroupmanagers.yaml +++ b/package/crds/compute.gcp.upbound.io_regioninstancegroupmanagers.yaml @@ -2097,6 +2097,45 @@ spec: type: string type: array x-kubernetes-list-type: set + instanceFlexibilityPolicy: + description: The flexibility policy for managed instance group. + Instance flexibility allows managed instance group to create + VMs from multiple types of machines. Instance flexibility configuration + on managed instance group overrides instance template configuration. + Structure is documented below. + properties: + instanceSelections: + description: ', Named instance selections configuring properties + that the group will use when creating new VMs. One can specify + multiple instance selection to allow managed instance group + to create VMs from multiple types of machines, based on + preference and availability. Structure is documented below.' + items: + properties: + machineTypes: + description: ', A list of full machine-type names, e.g. + "n1-standard-16".' 
+ items: + type: string + type: array + x-kubernetes-list-type: set + name: + description: ', Name of the instance selection, e.g. + instance_selection_with_n1_machines_types. Instance + selection names must be unique within the flexibility + policy.' + type: string + rank: + description: ', Preference of this instance selection. + Lower number means higher preference. Managed instance + group will first try to create a VM based on the machine-type + with lowest rank and fallback to next rank based on + availability. Machine types and instance selections + with the same rank have the same preference.' + type: number + type: object + type: array + type: object instanceLifecyclePolicy: properties: defaultActionOnFailure: @@ -2154,6 +2193,30 @@ spec: description: The region where the managed instance group resides. If not provided, the provider region is used. type: string + standbyPolicy: + description: The standby policy for stopped and suspended instances. + Structure is documented below. For more information, see the + official documentation. + properties: + initialDelaySec: + description: '- Specifies the number of seconds that the MIG + should wait to suspend or stop a VM after that VM was created. + The initial delay gives the initialization script the time + to prepare your VM for a quick scale out. The value of initial + delay must be between 0 and 3600 seconds. The default value + is 0.' + type: number + mode: + description: '- Defines how a MIG resumes or starts VMs from + a standby pool when the group scales out. Valid options + are: MANUAL, SCALE_OUT_POOL. If MANUAL(default), you have + full control over which VMs are stopped and suspended in + the MIG. If SCALE_OUT_POOL, the MIG uses the VMs from the + standby pools to accelerate the scale out by resuming or + starting them and then automatically replenishes the standby + pool with new VMs to maintain the target sizes.' 
+ type: string + type: object statefulDisk: description: Disks created on the instances that will be preserved on instance delete, update, etc. Structure is documented below. @@ -2315,6 +2378,14 @@ spec: when using one. If a value is required, such as to specify a creation-time target size for the MIG, lifecycle. Defaults to 0. type: number + targetStoppedSize: + description: The target number of stopped instances for this managed + instance group. + type: number + targetSuspendedSize: + description: The target number of suspended instances for this + managed instance group. + type: number updatePolicy: description: The update policy for this managed instance group. Structure is documented below. For more information, see the @@ -2658,6 +2729,45 @@ spec: type: string type: array x-kubernetes-list-type: set + instanceFlexibilityPolicy: + description: The flexibility policy for managed instance group. + Instance flexibility allows managed instance group to create + VMs from multiple types of machines. Instance flexibility configuration + on managed instance group overrides instance template configuration. + Structure is documented below. + properties: + instanceSelections: + description: ', Named instance selections configuring properties + that the group will use when creating new VMs. One can specify + multiple instance selection to allow managed instance group + to create VMs from multiple types of machines, based on + preference and availability. Structure is documented below.' + items: + properties: + machineTypes: + description: ', A list of full machine-type names, e.g. + "n1-standard-16".' + items: + type: string + type: array + x-kubernetes-list-type: set + name: + description: ', Name of the instance selection, e.g. + instance_selection_with_n1_machines_types. Instance + selection names must be unique within the flexibility + policy.' + type: string + rank: + description: ', Preference of this instance selection. + Lower number means higher preference. 
Managed instance + group will first try to create a VM based on the machine-type + with lowest rank and fallback to next rank based on + availability. Machine types and instance selections + with the same rank have the same preference.' + type: number + type: object + type: array + type: object instanceLifecyclePolicy: properties: defaultActionOnFailure: @@ -2715,6 +2825,30 @@ spec: description: The region where the managed instance group resides. If not provided, the provider region is used. type: string + standbyPolicy: + description: The standby policy for stopped and suspended instances. + Structure is documented below. For more information, see the + official documentation. + properties: + initialDelaySec: + description: '- Specifies the number of seconds that the MIG + should wait to suspend or stop a VM after that VM was created. + The initial delay gives the initialization script the time + to prepare your VM for a quick scale out. The value of initial + delay must be between 0 and 3600 seconds. The default value + is 0.' + type: number + mode: + description: '- Defines how a MIG resumes or starts VMs from + a standby pool when the group scales out. Valid options + are: MANUAL, SCALE_OUT_POOL. If MANUAL(default), you have + full control over which VMs are stopped and suspended in + the MIG. If SCALE_OUT_POOL, the MIG uses the VMs from the + standby pools to accelerate the scale out by resuming or + starting them and then automatically replenishes the standby + pool with new VMs to maintain the target sizes.' + type: string + type: object statefulDisk: description: Disks created on the instances that will be preserved on instance delete, update, etc. Structure is documented below. @@ -2876,6 +3010,14 @@ spec: when using one. If a value is required, such as to specify a creation-time target size for the MIG, lifecycle. Defaults to 0. type: number + targetStoppedSize: + description: The target number of stopped instances for this managed + instance group. 
+ type: number + targetSuspendedSize: + description: The target number of suspended instances for this + managed instance group. + type: number updatePolicy: description: The update policy for this managed instance group. Structure is documented below. For more information, see the @@ -3329,10 +3471,52 @@ spec: id: description: an identifier for the resource with format projects/{{project}}/regions/{{region}}/instanceGroupManagers/{{name}} type: string + instanceFlexibilityPolicy: + description: The flexibility policy for managed instance group. + Instance flexibility allows managed instance group to create + VMs from multiple types of machines. Instance flexibility configuration + on managed instance group overrides instance template configuration. + Structure is documented below. + properties: + instanceSelections: + description: ', Named instance selections configuring properties + that the group will use when creating new VMs. One can specify + multiple instance selection to allow managed instance group + to create VMs from multiple types of machines, based on + preference and availability. Structure is documented below.' + items: + properties: + machineTypes: + description: ', A list of full machine-type names, e.g. + "n1-standard-16".' + items: + type: string + type: array + x-kubernetes-list-type: set + name: + description: ', Name of the instance selection, e.g. + instance_selection_with_n1_machines_types. Instance + selection names must be unique within the flexibility + policy.' + type: string + rank: + description: ', Preference of this instance selection. + Lower number means higher preference. Managed instance + group will first try to create a VM based on the machine-type + with lowest rank and fallback to next rank based on + availability. Machine types and instance selections + with the same rank have the same preference.' 
+ type: number + type: object + type: array + type: object instanceGroup: description: The full URL of the instance group created by the manager. type: string + instanceGroupManagerId: + description: an identifier for the resource with format projects/{{project}}/regions/{{region}}/instanceGroupManagers/{{name}} + type: number instanceLifecyclePolicy: properties: defaultActionOnFailure: @@ -3393,6 +3577,30 @@ spec: selfLink: description: The URL of the created resource. type: string + standbyPolicy: + description: The standby policy for stopped and suspended instances. + Structure is documented below. For more information, see the + official documentation. + properties: + initialDelaySec: + description: '- Specifies the number of seconds that the MIG + should wait to suspend or stop a VM after that VM was created. + The initial delay gives the initialization script the time + to prepare your VM for a quick scale out. The value of initial + delay must be between 0 and 3600 seconds. The default value + is 0.' + type: number + mode: + description: '- Defines how a MIG resumes or starts VMs from + a standby pool when the group scales out. Valid options + are: MANUAL, SCALE_OUT_POOL. If MANUAL(default), you have + full control over which VMs are stopped and suspended in + the MIG. If SCALE_OUT_POOL, the MIG uses the VMs from the + standby pools to accelerate the scale out by resuming or + starting them and then automatically replenishes the standby + pool with new VMs to maintain the target sizes.' + type: string + type: object statefulDisk: description: Disks created on the instances that will be preserved on instance delete, update, etc. Structure is documented below. @@ -3546,6 +3754,14 @@ spec: when using one. If a value is required, such as to specify a creation-time target size for the MIG, lifecycle. Defaults to 0. type: number + targetStoppedSize: + description: The target number of stopped instances for this managed + instance group. 
+ type: number + targetSuspendedSize: + description: The target number of suspended instances for this + managed instance group. + type: number updatePolicy: description: The update policy for this managed instance group. Structure is documented below. For more information, see the diff --git a/package/crds/compute.gcp.upbound.io_regionnetworkendpointgroups.yaml b/package/crds/compute.gcp.upbound.io_regionnetworkendpointgroups.yaml index 446b69af9..77eab1f25 100644 --- a/package/crds/compute.gcp.upbound.io_regionnetworkendpointgroups.yaml +++ b/package/crds/compute.gcp.upbound.io_regionnetworkendpointgroups.yaml @@ -1843,6 +1843,19 @@ spec: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. type: string + pscData: + description: |- + This field is only used for PSC NEGs. + Structure is documented below. + properties: + producerPort: + description: |- + The PSC producer port to use when consumer PSC NEG connects to a producer. If + this flag isn't specified for a PSC NEG with endpoint type + private-service-connect, then PSC NEG will be connected to a first port in the + available PSC producer port range. + type: string + type: object pscTargetService: description: |- This field is only used for PSC and INTERNET NEGs. @@ -2355,6 +2368,19 @@ spec: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. type: string + pscData: + description: |- + This field is only used for PSC NEGs. + Structure is documented below. + properties: + producerPort: + description: |- + The PSC producer port to use when consumer PSC NEG connects to a producer. If + this flag isn't specified for a PSC NEG with endpoint type + private-service-connect, then PSC NEG will be connected to a first port in the + available PSC producer port range. + type: string + type: object pscTargetService: description: |- This field is only used for PSC and INTERNET NEGs. 
@@ -2798,6 +2824,19 @@ spec: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. type: string + pscData: + description: |- + This field is only used for PSC NEGs. + Structure is documented below. + properties: + producerPort: + description: |- + The PSC producer port to use when consumer PSC NEG connects to a producer. If + this flag isn't specified for a PSC NEG with endpoint type + private-service-connect, then PSC NEG will be connected to a first port in the + available PSC producer port range. + type: string + type: object pscTargetService: description: |- This field is only used for PSC and INTERNET NEGs. diff --git a/package/crds/compute.gcp.upbound.io_regionnetworkendpoints.yaml b/package/crds/compute.gcp.upbound.io_regionnetworkendpoints.yaml index b91b10c6d..2116ee71f 100644 --- a/package/crds/compute.gcp.upbound.io_regionnetworkendpoints.yaml +++ b/package/crds/compute.gcp.upbound.io_regionnetworkendpoints.yaml @@ -484,6 +484,10 @@ spec: IPv4 address external endpoint. This can only be specified when network_endpoint_type of the NEG is INTERNET_IP_PORT. type: string + networkEndpointId: + description: The unique identifier number for the resource. This + identifier is defined by the server. + type: number port: description: Port number of network endpoint. type: number diff --git a/package/crds/compute.gcp.upbound.io_regionnetworkfirewallpolicyassociations.yaml b/package/crds/compute.gcp.upbound.io_regionnetworkfirewallpolicyassociations.yaml index 5d37c0b37..7f5f8f314 100644 --- a/package/crds/compute.gcp.upbound.io_regionnetworkfirewallpolicyassociations.yaml +++ b/package/crds/compute.gcp.upbound.io_regionnetworkfirewallpolicyassociations.yaml @@ -153,7 +153,7 @@ spec: type: object type: object firewallPolicy: - description: The firewall policy ID of the association. + description: The firewall policy of the resource. 
type: string firewallPolicyRef: description: Reference to a RegionNetworkFirewallPolicy in compute @@ -232,7 +232,9 @@ spec: type: object type: object project: - description: The project for the resource + description: |- + The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. type: string region: description: The location of this resource. @@ -329,7 +331,9 @@ spec: type: object type: object project: - description: The project for the resource + description: |- + The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. type: string type: object managementPolicies: @@ -509,13 +513,15 @@ spec: description: The target that the firewall policy is attached to. type: string firewallPolicy: - description: The firewall policy ID of the association. + description: The firewall policy of the resource. type: string id: description: an identifier for the resource with format projects/{{project}}/regions/{{region}}/firewallPolicies/{{firewall_policy}}/associations/{{name}} type: string project: - description: The project for the resource + description: |- + The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. type: string region: description: The location of this resource. diff --git a/package/crds/compute.gcp.upbound.io_regiontargethttpproxies.yaml b/package/crds/compute.gcp.upbound.io_regiontargethttpproxies.yaml index 9147ebc1d..a80ff6d01 100644 --- a/package/crds/compute.gcp.upbound.io_regiontargethttpproxies.yaml +++ b/package/crds/compute.gcp.upbound.io_regiontargethttpproxies.yaml @@ -77,6 +77,14 @@ spec: description: description: An optional description of this resource. type: string + httpKeepAliveTimeoutSec: + description: |- + Specifies how long to keep a connection open, after completing a response, + while there is no matching traffic (in seconds). 
If an HTTP keepalive is + not specified, a default value (600 seconds) will be used. For Regional + HTTP(S) load balancer, the minimum allowed value is 5 seconds and the + maximum allowed value is 600 seconds. + type: number project: description: |- The ID of the project in which the resource belongs. @@ -187,6 +195,14 @@ spec: description: description: An optional description of this resource. type: string + httpKeepAliveTimeoutSec: + description: |- + Specifies how long to keep a connection open, after completing a response, + while there is no matching traffic (in seconds). If an HTTP keepalive is + not specified, a default value (600 seconds) will be used. For Regional + HTTP(S) load balancer, the minimum allowed value is 5 seconds and the + maximum allowed value is 600 seconds. + type: number project: description: |- The ID of the project in which the resource belongs. @@ -453,6 +469,14 @@ spec: description: description: An optional description of this resource. type: string + httpKeepAliveTimeoutSec: + description: |- + Specifies how long to keep a connection open, after completing a response, + while there is no matching traffic (in seconds). If an HTTP keepalive is + not specified, a default value (600 seconds) will be used. For Regional + HTTP(S) load balancer, the minimum allowed value is 5 seconds and the + maximum allowed value is 600 seconds. + type: number id: description: an identifier for the resource with format projects/{{project}}/regions/{{region}}/targetHttpProxies/{{name}} type: string diff --git a/package/crds/compute.gcp.upbound.io_regiontargethttpsproxies.yaml b/package/crds/compute.gcp.upbound.io_regiontargethttpsproxies.yaml index 084e2cc8c..78455fc81 100644 --- a/package/crds/compute.gcp.upbound.io_regiontargethttpsproxies.yaml +++ b/package/crds/compute.gcp.upbound.io_regiontargethttpsproxies.yaml @@ -85,6 +85,14 @@ spec: description: description: An optional description of this resource. 
type: string + httpKeepAliveTimeoutSec: + description: |- + Specifies how long to keep a connection open, after completing a response, + while there is no matching traffic (in seconds). If an HTTP keepalive is + not specified, a default value (600 seconds) will be used. For Regional + HTTP(S) load balancer, the minimum allowed value is 5 seconds and the + maximum allowed value is 600 seconds. + type: number project: description: |- The ID of the project in which the resource belongs. @@ -312,6 +320,14 @@ spec: description: description: An optional description of this resource. type: string + httpKeepAliveTimeoutSec: + description: |- + Specifies how long to keep a connection open, after completing a response, + while there is no matching traffic (in seconds). If an HTTP keepalive is + not specified, a default value (600 seconds) will be used. For Regional + HTTP(S) load balancer, the minimum allowed value is 5 seconds and the + maximum allowed value is 600 seconds. + type: number project: description: |- The ID of the project in which the resource belongs. @@ -695,6 +711,14 @@ spec: description: description: An optional description of this resource. type: string + httpKeepAliveTimeoutSec: + description: |- + Specifies how long to keep a connection open, after completing a response, + while there is no matching traffic (in seconds). If an HTTP keepalive is + not specified, a default value (600 seconds) will be used. For Regional + HTTP(S) load balancer, the minimum allowed value is 5 seconds and the + maximum allowed value is 600 seconds. 
+ type: number id: description: an identifier for the resource with format projects/{{project}}/regions/{{region}}/targetHttpsProxies/{{name}} type: string diff --git a/package/crds/compute.gcp.upbound.io_routernats.yaml b/package/crds/compute.gcp.upbound.io_routernats.yaml index 03e971928..337a7dbc4 100644 --- a/package/crds/compute.gcp.upbound.io_routernats.yaml +++ b/package/crds/compute.gcp.upbound.io_routernats.yaml @@ -1602,6 +1602,14 @@ spec: description: Timeout (in seconds) for ICMP connections. Defaults to 30s if not set. type: number + initialNatIps: + description: |- + Self-links of NAT IPs to be used as initial value for creation alongside a RouterNatAddress resource. + Conflicts with natIps and drainNatIps. Only valid if natIpAllocateOption is set to MANUAL_ONLY. + items: + type: string + type: array + x-kubernetes-list-type: set logConfig: description: |- Configuration for logging on NAT @@ -1637,6 +1645,9 @@ spec: description: |- Self-links of NAT IPs. Only valid if natIpAllocateOption is set to MANUAL_ONLY. + If this field is used alongside with a count created list of address resources google_compute_address.foobar.*.self_link, + the access level resource for the address resource must have a lifecycle block with create_before_destroy = true so + the number of resources can be increased/decreased without triggering the resourceInUseByAnotherResource error. items: type: string type: array @@ -2134,6 +2145,14 @@ spec: description: Timeout (in seconds) for ICMP connections. Defaults to 30s if not set. type: number + initialNatIps: + description: |- + Self-links of NAT IPs to be used as initial value for creation alongside a RouterNatAddress resource. + Conflicts with natIps and drainNatIps. Only valid if natIpAllocateOption is set to MANUAL_ONLY. + items: + type: string + type: array + x-kubernetes-list-type: set logConfig: description: |- Configuration for logging on NAT @@ -2169,6 +2188,9 @@ spec: description: |- Self-links of NAT IPs. 
Only valid if natIpAllocateOption is set to MANUAL_ONLY. + If this field is used alongside with a count created list of address resources google_compute_address.foobar.*.self_link, + the access level resource for the address resource must have a lifecycle block with create_before_destroy = true so + the number of resources can be increased/decreased without triggering the resourceInUseByAnotherResource error. items: type: string type: array @@ -2751,6 +2773,14 @@ spec: id: description: an identifier for the resource with format {{project}}/{{region}}/{{router}}/{{name}} type: string + initialNatIps: + description: |- + Self-links of NAT IPs to be used as initial value for creation alongside a RouterNatAddress resource. + Conflicts with natIps and drainNatIps. Only valid if natIpAllocateOption is set to MANUAL_ONLY. + items: + type: string + type: array + x-kubernetes-list-type: set logConfig: description: |- Configuration for logging on NAT @@ -2786,6 +2816,9 @@ spec: description: |- Self-links of NAT IPs. Only valid if natIpAllocateOption is set to MANUAL_ONLY. + If this field is used alongside with a count created list of address resources google_compute_address.foobar.*.self_link, + the access level resource for the address resource must have a lifecycle block with create_before_destroy = true so + the number of resources can be increased/decreased without triggering the resourceInUseByAnotherResource error. items: type: string type: array diff --git a/package/crds/compute.gcp.upbound.io_routerpeers.yaml b/package/crds/compute.gcp.upbound.io_routerpeers.yaml index 1ae32fee3..7694c5451 100644 --- a/package/crds/compute.gcp.upbound.io_routerpeers.yaml +++ b/package/crds/compute.gcp.upbound.io_routerpeers.yaml @@ -1699,16 +1699,26 @@ spec: type: string type: object customLearnedIpRanges: + description: |- + The custom learned route IP address range. Must be a valid CIDR-formatted prefix. 
+ If an IP address is provided without a subnet mask, it is interpreted as, for IPv4, + a /32 singular IP address range, and, for IPv6, /128. + Structure is documented below. items: properties: range: description: |- - The IP range to advertise. The value must be a + The IP range to learn. The value must be a CIDR-formatted string. type: string type: object type: array customLearnedRoutePriority: + description: |- + The user-defined custom learned route priority for a BGP session. + This value is applied to all custom learned route ranges for the session. + You can choose a value from 0 to 65335. If you don't provide a value, + Google Cloud assigns a priority of 100 to the ranges. type: number enable: description: |- @@ -2280,16 +2290,26 @@ spec: type: string type: object customLearnedIpRanges: + description: |- + The custom learned route IP address range. Must be a valid CIDR-formatted prefix. + If an IP address is provided without a subnet mask, it is interpreted as, for IPv4, + a /32 singular IP address range, and, for IPv6, /128. + Structure is documented below. items: properties: range: description: |- - The IP range to advertise. The value must be a + The IP range to learn. The value must be a CIDR-formatted string. type: string type: object type: array customLearnedRoutePriority: + description: |- + The user-defined custom learned route priority for a BGP session. + This value is applied to all custom learned route ranges for the session. + You can choose a value from 0 to 65335. If you don't provide a value, + Google Cloud assigns a priority of 100 to the ranges. type: number enable: description: |- @@ -2949,16 +2969,26 @@ spec: type: string type: object customLearnedIpRanges: + description: |- + The custom learned route IP address range. Must be a valid CIDR-formatted prefix. + If an IP address is provided without a subnet mask, it is interpreted as, for IPv4, + a /32 singular IP address range, and, for IPv6, /128. + Structure is documented below. 
items: properties: range: description: |- - The IP range to advertise. The value must be a + The IP range to learn. The value must be a CIDR-formatted string. type: string type: object type: array customLearnedRoutePriority: + description: |- + The user-defined custom learned route priority for a BGP session. + This value is applied to all custom learned route ranges for the session. + You can choose a value from 0 to 65335. If you don't provide a value, + Google Cloud assigns a priority of 100 to the ranges. type: number enable: description: |- diff --git a/package/crds/compute.gcp.upbound.io_securitypolicies.yaml b/package/crds/compute.gcp.upbound.io_securitypolicies.yaml index b286e2d8d..42daf9638 100644 --- a/package/crds/compute.gcp.upbound.io_securitypolicies.yaml +++ b/package/crds/compute.gcp.upbound.io_securitypolicies.yaml @@ -1347,6 +1347,70 @@ spec: ruleVisibility: description: 'Rule visibility can be one of the following:' type: string + thresholdConfigs: + description: Configuration options for layer7 adaptive + protection for various customizable thresholds. Structure + is documented below. + items: + properties: + autoDeployConfidenceThreshold: + description: Confidence threshold above which Adaptive + Protection's auto-deploy takes actions. + type: number + autoDeployExpirationSec: + description: Duration over which Adaptive Protection's + auto-deployed actions last. + type: number + autoDeployImpactedBaselineThreshold: + description: Impacted baseline threshold below which + Adaptive Protection's auto-deploy takes actions. + type: number + autoDeployLoadThreshold: + description: Load threshold above which Adaptive + Protection automatically deploy threshold based + on the backend load threshold and detect a new + rule during an alerted attack. + type: number + detectionAbsoluteQps: + description: Detection threshold based on absolute + QPS. 
+ type: number + detectionLoadThreshold: + description: Detection threshold based on the backend + service's load. + type: number + detectionRelativeToBaselineQps: + description: Detection threshold based on QPS relative + to the average of baseline traffic. + type: number + name: + description: The name of config. The name must be + 1-63 characters long, and comply with RFC1035. + The name must be unique within the security policy. + type: string + trafficGranularityConfigs: + description: Configuration options for enabling + Adaptive Protection to work on the specified service + granularity. Structure is documented below. + items: + properties: + enableEachUniqueValue: + description: If enabled, traffic matching + each unique value for the specified type + constitutes a separate traffic unit. It + can only be set to true if value is empty. + type: boolean + type: + description: Type of the redirect action. + type: string + value: + description: Requests that match this value + constitute a granular traffic unit. + type: string + type: object + type: array + type: object + type: array type: object type: object advancedOptionsConfig: @@ -1513,6 +1577,106 @@ spec: Available options: type: string type: object + preconfiguredWafConfig: + description: Preconfigured WAF configuration to be applied + for the rule. If the rule does not evaluate preconfigured + WAF rules, i.e., if evaluatePreconfiguredWaf() is not + used, this field will have no effect. Structure is documented + below. + properties: + exclusion: + description: An exclusion to apply during preconfigured + WAF evaluation. Structure is documented below. + items: + properties: + requestCookie: + description: Request cookie whose value will be + excluded from inspection during preconfigured + WAF evaluation. Structure is documented below. + items: + properties: + operator: + description: You can specify an exact match + or a partial match by using a field operator + and a field value. 
+ type: string + value: + description: Requests that match this value + constitute a granular traffic unit. + type: string + type: object + type: array + requestHeader: + description: Request header whose value will be + excluded from inspection during preconfigured + WAF evaluation. Structure is documented below. + items: + properties: + operator: + description: You can specify an exact match + or a partial match by using a field operator + and a field value. + type: string + value: + description: Requests that match this value + constitute a granular traffic unit. + type: string + type: object + type: array + requestQueryParam: + description: Request query parameter whose value + will be excluded from inspection during preconfigured + WAF evaluation. Note that the parameter can + be in the query string or in the POST body. + Structure is documented below. + items: + properties: + operator: + description: You can specify an exact match + or a partial match by using a field operator + and a field value. + type: string + value: + description: Requests that match this value + constitute a granular traffic unit. + type: string + type: object + type: array + requestUri: + description: Request URI from the request line + to be excluded from inspection during preconfigured + WAF evaluation. When specifying this field, + the query or fragment part should be excluded. + Structure is documented below. + items: + properties: + operator: + description: You can specify an exact match + or a partial match by using a field operator + and a field value. + type: string + value: + description: Requests that match this value + constitute a granular traffic unit. + type: string + type: object + type: array + targetRuleIds: + description: A list of target rule IDs under the + WAF rule set to apply the preconfigured WAF + exclusion. If omitted, it refers to all the + rule IDs under the WAF rule set. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + targetRuleSet: + description: Target WAF rule set to apply the + preconfigured WAF exclusion. + type: string + type: object + type: array + type: object preview: description: |- When set to true, the action specified above is not enforced. @@ -1557,6 +1721,27 @@ spec: description: Determines the key to enforce the rate_limit_threshold on. If not specified, defaults to ALL. type: string + enforceOnKeyConfigs: + description: If specified, any combination of values + of enforce_on_key_type/enforce_on_key_name is treated + as the key on which rate limit threshold/action is + enforced. You can specify up to 3 enforce_on_key_configs. + If enforce_on_key_configs is specified, enforce_on_key + must be set to an empty string. Structure is documented + below. + items: + properties: + enforceOnKeyName: + description: 'Rate limit key name applicable only + for the following key types:' + type: string + enforceOnKeyType: + description: Determines the key to enforce the + rate_limit_threshold on. If not specified, defaults + to ALL. + type: string + type: object + type: array enforceOnKeyName: description: 'Rate limit key name applicable only for the following key types:' @@ -1642,6 +1827,70 @@ spec: ruleVisibility: description: 'Rule visibility can be one of the following:' type: string + thresholdConfigs: + description: Configuration options for layer7 adaptive + protection for various customizable thresholds. Structure + is documented below. + items: + properties: + autoDeployConfidenceThreshold: + description: Confidence threshold above which Adaptive + Protection's auto-deploy takes actions. + type: number + autoDeployExpirationSec: + description: Duration over which Adaptive Protection's + auto-deployed actions last. + type: number + autoDeployImpactedBaselineThreshold: + description: Impacted baseline threshold below which + Adaptive Protection's auto-deploy takes actions. 
+ type: number + autoDeployLoadThreshold: + description: Load threshold above which Adaptive + Protection automatically deploy threshold based + on the backend load threshold and detect a new + rule during an alerted attack. + type: number + detectionAbsoluteQps: + description: Detection threshold based on absolute + QPS. + type: number + detectionLoadThreshold: + description: Detection threshold based on the backend + service's load. + type: number + detectionRelativeToBaselineQps: + description: Detection threshold based on QPS relative + to the average of baseline traffic. + type: number + name: + description: The name of config. The name must be + 1-63 characters long, and comply with RFC1035. + The name must be unique within the security policy. + type: string + trafficGranularityConfigs: + description: Configuration options for enabling + Adaptive Protection to work on the specified service + granularity. Structure is documented below. + items: + properties: + enableEachUniqueValue: + description: If enabled, traffic matching + each unique value for the specified type + constitutes a separate traffic unit. It + can only be set to true if value is empty. + type: boolean + type: + description: Type of the redirect action. + type: string + value: + description: Requests that match this value + constitute a granular traffic unit. + type: string + type: object + type: array + type: object + type: array type: object type: object advancedOptionsConfig: @@ -1808,6 +2057,106 @@ spec: Available options: type: string type: object + preconfiguredWafConfig: + description: Preconfigured WAF configuration to be applied + for the rule. If the rule does not evaluate preconfigured + WAF rules, i.e., if evaluatePreconfiguredWaf() is not + used, this field will have no effect. Structure is documented + below. + properties: + exclusion: + description: An exclusion to apply during preconfigured + WAF evaluation. Structure is documented below. 
+ items: + properties: + requestCookie: + description: Request cookie whose value will be + excluded from inspection during preconfigured + WAF evaluation. Structure is documented below. + items: + properties: + operator: + description: You can specify an exact match + or a partial match by using a field operator + and a field value. + type: string + value: + description: Requests that match this value + constitute a granular traffic unit. + type: string + type: object + type: array + requestHeader: + description: Request header whose value will be + excluded from inspection during preconfigured + WAF evaluation. Structure is documented below. + items: + properties: + operator: + description: You can specify an exact match + or a partial match by using a field operator + and a field value. + type: string + value: + description: Requests that match this value + constitute a granular traffic unit. + type: string + type: object + type: array + requestQueryParam: + description: Request query parameter whose value + will be excluded from inspection during preconfigured + WAF evaluation. Note that the parameter can + be in the query string or in the POST body. + Structure is documented below. + items: + properties: + operator: + description: You can specify an exact match + or a partial match by using a field operator + and a field value. + type: string + value: + description: Requests that match this value + constitute a granular traffic unit. + type: string + type: object + type: array + requestUri: + description: Request URI from the request line + to be excluded from inspection during preconfigured + WAF evaluation. When specifying this field, + the query or fragment part should be excluded. + Structure is documented below. + items: + properties: + operator: + description: You can specify an exact match + or a partial match by using a field operator + and a field value. 
+ type: string + value: + description: Requests that match this value + constitute a granular traffic unit. + type: string + type: object + type: array + targetRuleIds: + description: A list of target rule IDs under the + WAF rule set to apply the preconfigured WAF + exclusion. If omitted, it refers to all the + rule IDs under the WAF rule set. + items: + type: string + type: array + x-kubernetes-list-type: set + targetRuleSet: + description: Target WAF rule set to apply the + preconfigured WAF exclusion. + type: string + type: object + type: array + type: object preview: description: |- When set to true, the action specified above is not enforced. @@ -1852,6 +2201,27 @@ spec: description: Determines the key to enforce the rate_limit_threshold on. If not specified, defaults to ALL. type: string + enforceOnKeyConfigs: + description: If specified, any combination of values + of enforce_on_key_type/enforce_on_key_name is treated + as the key on which rate limit threshold/action is + enforced. You can specify up to 3 enforce_on_key_configs. + If enforce_on_key_configs is specified, enforce_on_key + must be set to an empty string. Structure is documented + below. + items: + properties: + enforceOnKeyName: + description: 'Rate limit key name applicable only + for the following key types:' + type: string + enforceOnKeyType: + description: Determines the key to enforce the + rate_limit_threshold on. If not specified, defaults + to ALL. + type: string + type: object + type: array enforceOnKeyName: description: 'Rate limit key name applicable only for the following key types:' @@ -2096,6 +2466,70 @@ spec: ruleVisibility: description: 'Rule visibility can be one of the following:' type: string + thresholdConfigs: + description: Configuration options for layer7 adaptive + protection for various customizable thresholds. Structure + is documented below. 
+ items: + properties: + autoDeployConfidenceThreshold: + description: Confidence threshold above which Adaptive + Protection's auto-deploy takes actions. + type: number + autoDeployExpirationSec: + description: Duration over which Adaptive Protection's + auto-deployed actions last. + type: number + autoDeployImpactedBaselineThreshold: + description: Impacted baseline threshold below which + Adaptive Protection's auto-deploy takes actions. + type: number + autoDeployLoadThreshold: + description: Load threshold above which Adaptive + Protection automatically deploy threshold based + on the backend load threshold and detect a new + rule during an alerted attack. + type: number + detectionAbsoluteQps: + description: Detection threshold based on absolute + QPS. + type: number + detectionLoadThreshold: + description: Detection threshold based on the backend + service's load. + type: number + detectionRelativeToBaselineQps: + description: Detection threshold based on QPS relative + to the average of baseline traffic. + type: number + name: + description: The name of config. The name must be + 1-63 characters long, and comply with RFC1035. + The name must be unique within the security policy. + type: string + trafficGranularityConfigs: + description: Configuration options for enabling + Adaptive Protection to work on the specified service + granularity. Structure is documented below. + items: + properties: + enableEachUniqueValue: + description: If enabled, traffic matching + each unique value for the specified type + constitutes a separate traffic unit. It + can only be set to true if value is empty. + type: boolean + type: + description: Type of the redirect action. + type: string + value: + description: Requests that match this value + constitute a granular traffic unit. 
+ type: string + type: object + type: array + type: object + type: array type: object type: object advancedOptionsConfig: @@ -2268,6 +2702,106 @@ spec: Available options: type: string type: object + preconfiguredWafConfig: + description: Preconfigured WAF configuration to be applied + for the rule. If the rule does not evaluate preconfigured + WAF rules, i.e., if evaluatePreconfiguredWaf() is not + used, this field will have no effect. Structure is documented + below. + properties: + exclusion: + description: An exclusion to apply during preconfigured + WAF evaluation. Structure is documented below. + items: + properties: + requestCookie: + description: Request cookie whose value will be + excluded from inspection during preconfigured + WAF evaluation. Structure is documented below. + items: + properties: + operator: + description: You can specify an exact match + or a partial match by using a field operator + and a field value. + type: string + value: + description: Requests that match this value + constitute a granular traffic unit. + type: string + type: object + type: array + requestHeader: + description: Request header whose value will be + excluded from inspection during preconfigured + WAF evaluation. Structure is documented below. + items: + properties: + operator: + description: You can specify an exact match + or a partial match by using a field operator + and a field value. + type: string + value: + description: Requests that match this value + constitute a granular traffic unit. + type: string + type: object + type: array + requestQueryParam: + description: Request query parameter whose value + will be excluded from inspection during preconfigured + WAF evaluation. Note that the parameter can + be in the query string or in the POST body. + Structure is documented below. + items: + properties: + operator: + description: You can specify an exact match + or a partial match by using a field operator + and a field value. 
+ type: string + value: + description: Requests that match this value + constitute a granular traffic unit. + type: string + type: object + type: array + requestUri: + description: Request URI from the request line + to be excluded from inspection during preconfigured + WAF evaluation. When specifying this field, + the query or fragment part should be excluded. + Structure is documented below. + items: + properties: + operator: + description: You can specify an exact match + or a partial match by using a field operator + and a field value. + type: string + value: + description: Requests that match this value + constitute a granular traffic unit. + type: string + type: object + type: array + targetRuleIds: + description: A list of target rule IDs under the + WAF rule set to apply the preconfigured WAF + exclusion. If omitted, it refers to all the + rule IDs under the WAF rule set. + items: + type: string + type: array + x-kubernetes-list-type: set + targetRuleSet: + description: Target WAF rule set to apply the + preconfigured WAF exclusion. + type: string + type: object + type: array + type: object preview: description: |- When set to true, the action specified above is not enforced. @@ -2312,6 +2846,27 @@ spec: description: Determines the key to enforce the rate_limit_threshold on. If not specified, defaults to ALL. type: string + enforceOnKeyConfigs: + description: If specified, any combination of values + of enforce_on_key_type/enforce_on_key_name is treated + as the key on which rate limit threshold/action is + enforced. You can specify up to 3 enforce_on_key_configs. + If enforce_on_key_configs is specified, enforce_on_key + must be set to an empty string. Structure is documented + below. + items: + properties: + enforceOnKeyName: + description: 'Rate limit key name applicable only + for the following key types:' + type: string + enforceOnKeyType: + description: Determines the key to enforce the + rate_limit_threshold on. If not specified, defaults + to ALL. 
+ type: string + type: object + type: array enforceOnKeyName: description: 'Rate limit key name applicable only for the following key types:' diff --git a/package/crds/compute.gcp.upbound.io_serviceattachments.yaml b/package/crds/compute.gcp.upbound.io_serviceattachments.yaml index 09883d794..48c3279dd 100644 --- a/package/crds/compute.gcp.upbound.io_serviceattachments.yaml +++ b/package/crds/compute.gcp.upbound.io_serviceattachments.yaml @@ -292,6 +292,14 @@ spec: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. type: string + propagatedConnectionLimit: + description: |- + The number of consumer spokes that connected Private Service Connect endpoints can be propagated to through Network Connectivity Center. + This limit lets the service producer limit how many propagated Private Service Connect connections can be established to this service attachment from a single consumer. + If the connection preference of the service attachment is ACCEPT_MANUAL, the limit applies to each project or network that is listed in the consumer accept list. + If the connection preference of the service attachment is ACCEPT_AUTOMATIC, the limit applies to each project that contains a connected endpoint. + If unspecified, the default propagated connection limit is 250. + type: number reconcileConnections: description: |- This flag determines whether a consumer accept/reject list change can reconcile the statuses of existing ACCEPTED or REJECTED PSC endpoints. @@ -616,6 +624,14 @@ spec: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. type: string + propagatedConnectionLimit: + description: |- + The number of consumer spokes that connected Private Service Connect endpoints can be propagated to through Network Connectivity Center. 
+ This limit lets the service producer limit how many propagated Private Service Connect connections can be established to this service attachment from a single consumer. + If the connection preference of the service attachment is ACCEPT_MANUAL, the limit applies to each project or network that is listed in the consumer accept list. + If the connection preference of the service attachment is ACCEPT_AUTOMATIC, the limit applies to each project that contains a connected endpoint. + If unspecified, the default propagated connection limit is 250. + type: number reconcileConnections: description: |- This flag determines whether a consumer accept/reject list change can reconcile the statuses of existing ACCEPTED or REJECTED PSC endpoints. @@ -891,11 +907,26 @@ spec: Structure is documented below. items: properties: + consumerNetwork: + description: |- + (Output) + The url of the consumer network. + type: string endpoint: description: |- (Output) The URL of the consumer forwarding rule. type: string + propagatedConnectionCount: + description: |- + (Output) + The number of consumer Network Connectivity Center spokes that the connected Private Service Connect endpoint has propagated to. + type: number + pscConnectionId: + description: |- + (Output) + The PSC connection id of the connected endpoint. + type: string status: description: |- (Output) @@ -977,6 +1008,14 @@ spec: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. type: string + propagatedConnectionLimit: + description: |- + The number of consumer spokes that connected Private Service Connect endpoints can be propagated to through Network Connectivity Center. + This limit lets the service producer limit how many propagated Private Service Connect connections can be established to this service attachment from a single consumer. 
+ If the connection preference of the service attachment is ACCEPT_MANUAL, the limit applies to each project or network that is listed in the consumer accept list. + If the connection preference of the service attachment is ACCEPT_AUTOMATIC, the limit applies to each project that contains a connected endpoint. + If unspecified, the default propagated connection limit is 250. + type: number reconcileConnections: description: |- This flag determines whether a consumer accept/reject list change can reconcile the statuses of existing ACCEPTED or REJECTED PSC endpoints. diff --git a/package/crds/compute.gcp.upbound.io_subnetworks.yaml b/package/crds/compute.gcp.upbound.io_subnetworks.yaml index de3ca2e59..5f9ff955c 100644 --- a/package/crds/compute.gcp.upbound.io_subnetworks.yaml +++ b/package/crds/compute.gcp.upbound.io_subnetworks.yaml @@ -1051,6 +1051,7 @@ spec: Provide this property when you create the subnetwork. For example, 10.0.0.0/8 or 192.168.0.0/16. Ranges must be unique and non-overlapping within a network. Only IPv4 is supported. + Field is optional when reserved_internal_range is defined, otherwise required. type: string ipv6AccessType: description: |- @@ -1202,17 +1203,22 @@ spec: type: string purpose: description: |- - The purpose of the resource. This field can be either PRIVATE_RFC_1918, REGIONAL_MANAGED_PROXY, GLOBAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT or PRIVATE_NAT(Beta). + The purpose of the resource. This field can be either PRIVATE, REGIONAL_MANAGED_PROXY, GLOBAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT or PRIVATE_NAT(Beta). A subnet with purpose set to REGIONAL_MANAGED_PROXY is a user-created subnetwork that is reserved for regional Envoy-based load balancers. A subnetwork in a given region with purpose set to GLOBAL_MANAGED_PROXY is a proxy-only subnet and is shared between all the cross-regional Envoy-based load balancers. 
A subnetwork with purpose set to PRIVATE_SERVICE_CONNECT reserves the subnet for hosting a Private Service Connect published service. A subnetwork with purpose set to PRIVATE_NAT is used as source range for Private NAT gateways. Note that REGIONAL_MANAGED_PROXY is the preferred setting for all regional Envoy load balancers. - If unspecified, the purpose defaults to PRIVATE_RFC_1918. + If unspecified, the purpose defaults to PRIVATE. type: string region: description: The GCP region for this subnetwork. type: string + reservedInternalRange: + description: |- + The ID of the reserved internal range. Must be prefixed with networkconnectivity.googleapis.com + E.g. networkconnectivity.googleapis.com/projects/{project}/locations/global/internalRanges/{rangeId} + type: string role: description: |- The role of subnetwork. @@ -1240,6 +1246,7 @@ spec: range. Provide this property when you create the subnetwork. Ranges must be unique and non-overlapping with all primary and secondary IP ranges within a network. Only IPv4 is supported. + Field is optional when reserved_internal_range is defined, otherwise required. type: string rangeName: description: |- @@ -1248,6 +1255,11 @@ spec: be 1-63 characters long, and comply with RFC1035. The name must be unique within the subnetwork. type: string + reservedInternalRange: + description: |- + The ID of the reserved internal range. Must be prefixed with networkconnectivity.googleapis.com + E.g. networkconnectivity.googleapis.com/projects/{project}/locations/global/internalRanges/{rangeId} + type: string type: object type: array sendSecondaryIpRangeIfEmpty: @@ -1297,6 +1309,7 @@ spec: Provide this property when you create the subnetwork. For example, 10.0.0.0/8 or 192.168.0.0/16. Ranges must be unique and non-overlapping within a network. Only IPv4 is supported. + Field is optional when reserved_internal_range is defined, otherwise required. 
type: string ipv6AccessType: description: |- @@ -1448,13 +1461,18 @@ spec: type: string purpose: description: |- - The purpose of the resource. This field can be either PRIVATE_RFC_1918, REGIONAL_MANAGED_PROXY, GLOBAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT or PRIVATE_NAT(Beta). + The purpose of the resource. This field can be either PRIVATE, REGIONAL_MANAGED_PROXY, GLOBAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT or PRIVATE_NAT(Beta). A subnet with purpose set to REGIONAL_MANAGED_PROXY is a user-created subnetwork that is reserved for regional Envoy-based load balancers. A subnetwork in a given region with purpose set to GLOBAL_MANAGED_PROXY is a proxy-only subnet and is shared between all the cross-regional Envoy-based load balancers. A subnetwork with purpose set to PRIVATE_SERVICE_CONNECT reserves the subnet for hosting a Private Service Connect published service. A subnetwork with purpose set to PRIVATE_NAT is used as source range for Private NAT gateways. Note that REGIONAL_MANAGED_PROXY is the preferred setting for all regional Envoy load balancers. - If unspecified, the purpose defaults to PRIVATE_RFC_1918. + If unspecified, the purpose defaults to PRIVATE. + type: string + reservedInternalRange: + description: |- + The ID of the reserved internal range. Must be prefixed with networkconnectivity.googleapis.com + E.g. networkconnectivity.googleapis.com/projects/{project}/locations/global/internalRanges/{rangeId} type: string role: description: |- @@ -1483,6 +1501,7 @@ spec: range. Provide this property when you create the subnetwork. Ranges must be unique and non-overlapping with all primary and secondary IP ranges within a network. Only IPv4 is supported. + Field is optional when reserved_internal_range is defined, otherwise required. type: string rangeName: description: |- @@ -1491,6 +1510,11 @@ spec: be 1-63 characters long, and comply with RFC1035. The name must be unique within the subnetwork. 
type: string + reservedInternalRange: + description: |- + The ID of the reserved internal range. Must be prefixed with networkconnectivity.googleapis.com + E.g. networkconnectivity.googleapis.com/projects/{project}/locations/global/internalRanges/{rangeId} + type: string type: object type: array sendSecondaryIpRangeIfEmpty: @@ -1676,11 +1700,6 @@ spec: required: - forProvider type: object - x-kubernetes-validations: - - message: spec.forProvider.ipCidrRange is a required parameter - rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies - || ''Update'' in self.managementPolicies) || has(self.forProvider.ipCidrRange) - || (has(self.initProvider) && has(self.initProvider.ipCidrRange))' status: description: SubnetworkStatus defines the observed state of Subnetwork. properties: @@ -1719,6 +1738,7 @@ spec: Provide this property when you create the subnetwork. For example, 10.0.0.0/8 or 192.168.0.0/16. Ranges must be unique and non-overlapping within a network. Only IPv4 is supported. + Field is optional when reserved_internal_range is defined, otherwise required. type: string ipv6AccessType: description: |- @@ -1800,17 +1820,22 @@ spec: type: string purpose: description: |- - The purpose of the resource. This field can be either PRIVATE_RFC_1918, REGIONAL_MANAGED_PROXY, GLOBAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT or PRIVATE_NAT(Beta). + The purpose of the resource. This field can be either PRIVATE, REGIONAL_MANAGED_PROXY, GLOBAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT or PRIVATE_NAT(Beta). A subnet with purpose set to REGIONAL_MANAGED_PROXY is a user-created subnetwork that is reserved for regional Envoy-based load balancers. A subnetwork in a given region with purpose set to GLOBAL_MANAGED_PROXY is a proxy-only subnet and is shared between all the cross-regional Envoy-based load balancers. A subnetwork with purpose set to PRIVATE_SERVICE_CONNECT reserves the subnet for hosting a Private Service Connect published service. 
A subnetwork with purpose set to PRIVATE_NAT is used as source range for Private NAT gateways. Note that REGIONAL_MANAGED_PROXY is the preferred setting for all regional Envoy load balancers. - If unspecified, the purpose defaults to PRIVATE_RFC_1918. + If unspecified, the purpose defaults to PRIVATE. type: string region: description: The GCP region for this subnetwork. type: string + reservedInternalRange: + description: |- + The ID of the reserved internal range. Must be prefixed with networkconnectivity.googleapis.com + E.g. networkconnectivity.googleapis.com/projects/{project}/locations/global/internalRanges/{rangeId} + type: string role: description: |- The role of subnetwork. @@ -1838,6 +1863,7 @@ spec: range. Provide this property when you create the subnetwork. Ranges must be unique and non-overlapping with all primary and secondary IP ranges within a network. Only IPv4 is supported. + Field is optional when reserved_internal_range is defined, otherwise required. type: string rangeName: description: |- @@ -1846,6 +1872,11 @@ spec: be 1-63 characters long, and comply with RFC1035. The name must be unique within the subnetwork. type: string + reservedInternalRange: + description: |- + The ID of the reserved internal range. Must be prefixed with networkconnectivity.googleapis.com + E.g. networkconnectivity.googleapis.com/projects/{project}/locations/global/internalRanges/{rangeId} + type: string type: object type: array selfLink: @@ -1866,6 +1897,10 @@ spec: If not specified IPV4_ONLY will be used. Possible values are: IPV4_ONLY, IPV4_IPV6. type: string + subnetworkId: + description: The unique identifier number for the resource. This + identifier is defined by the server. + type: number type: object conditions: description: Conditions of the resource. 
diff --git a/package/crds/compute.gcp.upbound.io_targethttpproxies.yaml b/package/crds/compute.gcp.upbound.io_targethttpproxies.yaml index 696dbf665..a252056bb 100644 --- a/package/crds/compute.gcp.upbound.io_targethttpproxies.yaml +++ b/package/crds/compute.gcp.upbound.io_targethttpproxies.yaml @@ -81,10 +81,13 @@ spec: description: |- Specifies how long to keep a connection open, after completing a response, while there is no matching traffic (in seconds). If an HTTP keepalive is - not specified, a default value (610 seconds) will be used. For Global - external HTTP(S) load balancer, the minimum allowed value is 5 seconds and - the maximum allowed value is 1200 seconds. For Global external HTTP(S) - load balancer (classic), this option is not available publicly. + not specified, a default value will be used. For Global + external HTTP(S) load balancer, the default value is 610 seconds, the + minimum allowed value is 5 seconds and the maximum allowed value is 1200 + seconds. For cross-region internal HTTP(S) load balancer, the default + value is 600 seconds, the minimum allowed value is 5 seconds, and the + maximum allowed value is 600 seconds. For Global external HTTP(S) load + balancer (classic), this option is not available publicly. type: number project: description: |- @@ -196,10 +199,13 @@ spec: description: |- Specifies how long to keep a connection open, after completing a response, while there is no matching traffic (in seconds). If an HTTP keepalive is - not specified, a default value (610 seconds) will be used. For Global - external HTTP(S) load balancer, the minimum allowed value is 5 seconds and - the maximum allowed value is 1200 seconds. For Global external HTTP(S) - load balancer (classic), this option is not available publicly. + not specified, a default value will be used. For Global + external HTTP(S) load balancer, the default value is 610 seconds, the + minimum allowed value is 5 seconds and the maximum allowed value is 1200 + seconds. 
For cross-region internal HTTP(S) load balancer, the default + value is 600 seconds, the minimum allowed value is 5 seconds, and the + maximum allowed value is 600 seconds. For Global external HTTP(S) load + balancer (classic), this option is not available publicly. type: number project: description: |- @@ -473,10 +479,13 @@ spec: description: |- Specifies how long to keep a connection open, after completing a response, while there is no matching traffic (in seconds). If an HTTP keepalive is - not specified, a default value (610 seconds) will be used. For Global - external HTTP(S) load balancer, the minimum allowed value is 5 seconds and - the maximum allowed value is 1200 seconds. For Global external HTTP(S) - load balancer (classic), this option is not available publicly. + not specified, a default value will be used. For Global + external HTTP(S) load balancer, the default value is 610 seconds, the + minimum allowed value is 5 seconds and the maximum allowed value is 1200 + seconds. For cross-region internal HTTP(S) load balancer, the default + value is 600 seconds, the minimum allowed value is 5 seconds, and the + maximum allowed value is 600 seconds. For Global external HTTP(S) load + balancer (classic), this option is not available publicly. type: number id: description: an identifier for the resource with format projects/{{project}}/global/targetHttpProxies/{{name}} diff --git a/package/crds/compute.gcp.upbound.io_targethttpsproxies.yaml b/package/crds/compute.gcp.upbound.io_targethttpsproxies.yaml index 4435c2008..137c1a34e 100644 --- a/package/crds/compute.gcp.upbound.io_targethttpsproxies.yaml +++ b/package/crds/compute.gcp.upbound.io_targethttpsproxies.yaml @@ -98,10 +98,13 @@ spec: description: |- Specifies how long to keep a connection open, after completing a response, while there is no matching traffic (in seconds). If an HTTP keepalive is - not specified, a default value (610 seconds) will be used. 
For Global - external HTTP(S) load balancer, the minimum allowed value is 5 seconds and - the maximum allowed value is 1200 seconds. For Global external HTTP(S) - load balancer (classic), this option is not available publicly. + not specified, a default value will be used. For Global + external HTTP(S) load balancer, the default value is 610 seconds, the + minimum allowed value is 5 seconds and the maximum allowed value is 1200 + seconds. For cross-region internal HTTP(S) load balancer, the default + value is 600 seconds, the minimum allowed value is 5 seconds, and the + maximum allowed value is 600 seconds. For Global external HTTP(S) load + balancer (classic), this option is not available publicly. type: number project: description: |- @@ -133,6 +136,10 @@ spec: INTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED loadBalancingScheme consult ServerTlsPolicy documentation. If left blank, communications are not encrypted. + If you remove this field from your configuration at the same time as + deleting or recreating a referenced ServerTlsPolicy resource, you will + receive a resourceInUseByAnotherResource error. Use lifecycle.create_before_destroy + within the ServerTlsPolicy resource to avoid this. type: string sslCertificates: description: |- @@ -352,10 +359,13 @@ spec: description: |- Specifies how long to keep a connection open, after completing a response, while there is no matching traffic (in seconds). If an HTTP keepalive is - not specified, a default value (610 seconds) will be used. For Global - external HTTP(S) load balancer, the minimum allowed value is 5 seconds and - the maximum allowed value is 1200 seconds. For Global external HTTP(S) - load balancer (classic), this option is not available publicly. + not specified, a default value will be used. For Global + external HTTP(S) load balancer, the default value is 610 seconds, the + minimum allowed value is 5 seconds and the maximum allowed value is 1200 + seconds. 
For cross-region internal HTTP(S) load balancer, the default + value is 600 seconds, the minimum allowed value is 5 seconds, and the + maximum allowed value is 600 seconds. For Global external HTTP(S) load + balancer (classic), this option is not available publicly. type: number project: description: |- @@ -387,6 +397,10 @@ spec: INTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED loadBalancingScheme consult ServerTlsPolicy documentation. If left blank, communications are not encrypted. + If you remove this field from your configuration at the same time as + deleting or recreating a referenced ServerTlsPolicy resource, you will + receive a resourceInUseByAnotherResource error. Use lifecycle.create_before_destroy + within the ServerTlsPolicy resource to avoid this. type: string sslCertificates: description: |- @@ -768,10 +782,13 @@ spec: description: |- Specifies how long to keep a connection open, after completing a response, while there is no matching traffic (in seconds). If an HTTP keepalive is - not specified, a default value (610 seconds) will be used. For Global - external HTTP(S) load balancer, the minimum allowed value is 5 seconds and - the maximum allowed value is 1200 seconds. For Global external HTTP(S) - load balancer (classic), this option is not available publicly. + not specified, a default value will be used. For Global + external HTTP(S) load balancer, the default value is 610 seconds, the + minimum allowed value is 5 seconds and the maximum allowed value is 1200 + seconds. For cross-region internal HTTP(S) load balancer, the default + value is 600 seconds, the minimum allowed value is 5 seconds, and the + maximum allowed value is 600 seconds. For Global external HTTP(S) load + balancer (classic), this option is not available publicly. 
type: number id: description: an identifier for the resource with format projects/{{project}}/global/targetHttpsProxies/{{name}} @@ -812,6 +829,10 @@ spec: INTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED loadBalancingScheme consult ServerTlsPolicy documentation. If left blank, communications are not encrypted. + If you remove this field from your configuration at the same time as + deleting or recreating a referenced ServerTlsPolicy resource, you will + receive a resourceInUseByAnotherResource error. Use lifecycle.create_before_destroy + within the ServerTlsPolicy resource to avoid this. type: string sslCertificates: description: |- diff --git a/package/crds/container.gcp.upbound.io_clusters.yaml b/package/crds/container.gcp.upbound.io_clusters.yaml index e42788496..e920424df 100644 --- a/package/crds/container.gcp.upbound.io_clusters.yaml +++ b/package/crds/container.gcp.upbound.io_clusters.yaml @@ -7391,6 +7391,18 @@ spec: cluster. It is disabled by default. Set disabled = false to enable. type: boolean type: object + parallelstoreCsiDriverConfig: + description: |- + The status of the Parallelstore CSI driver addon, + which allows the usage of a Parallelstore instances as volumes. + It is disabled by default for Standard clusters; set enabled = true to enable. + It is enabled by default for Autopilot clusters with version 1.29 or later; set enabled = true to enable it explicitly. + See Enable the Parallelstore CSI driver for more information. + properties: + enabled: + description: Enables vertical pod autoscaling + type: boolean + type: object rayOperatorConfig: description: |- . The status of the Ray Operator @@ -7674,6 +7686,24 @@ spec: enforce encryption of data in-use. type: boolean type: object + controlPlaneEndpointsConfig: + description: |- + Configuration for all of the cluster's control plane endpoints. + Structure is documented below. + properties: + dnsEndpointConfig: + description: DNS endpoint configuration. 
+ properties: + allowExternalTraffic: + description: Controls whether user traffic is allowed + over this endpoint. Note that GCP-managed services may + still use the endpoint even if this is false. + type: boolean + endpoint: + description: (Output) The cluster's DNS endpoint. + type: string + type: object + type: object costManagementConfig: description: |- Configuration for the @@ -7721,8 +7751,6 @@ spec: internal traffic type: boolean type: object - deletionProtection: - type: boolean description: description: Description of the cluster. type: string @@ -7730,6 +7758,12 @@ spec: description: Configuration for Using Cloud DNS for GKE. Structure is documented below. properties: + additiveVpcScopeDnsDomain: + description: This will enable Cloud DNS additive VPC scope. + Must provide a domain name that is unique within the VPC. + For this to work cluster_dns = "CLOUD_DNS" and cluster_dns_scope + = "CLUSTER_SCOPE" must both be set as well. + type: string clusterDns: description: Which in-cluster DNS provider should be used. PROVIDER_UNSPECIFIED (default) or PLATFORM_DEFAULT or CLOUD_DNS. @@ -7753,6 +7787,13 @@ spec: description: Whether CiliumClusterWideNetworkPolicy is enabled on this cluster. Defaults to false. type: boolean + enableFqdnNetworkPolicy: + description: Whether FQDN Network Policy is enabled on this cluster. + Users who enable this feature for existing Standard clusters + must restart the GKE Dataplane V2 anetd DaemonSet after enabling + it. See the Enable FQDN Network Policy in an existing cluster + for more information. + type: boolean enableIntranodeVisibility: description: Whether Intra-node visibility is enabled for this cluster. This makes same node pod to pod traffic visible for @@ -7803,6 +7844,15 @@ spec: Whether to enable Cloud TPU resources in this cluster. See the official documentation. 
type: boolean + enterpriseConfig: + description: Configuration for [Enterprise edition].(https://cloud.google.com/kubernetes-engine/enterprise/docs/concepts/gke-editions). + Structure is documented below. + properties: + desiredTier: + description: Sets the tier of the cluster. Available options + include STANDARD and ENTERPRISE. + type: string + type: object fleet: description: Fleet configuration for the cluster. Structure is documented below. @@ -8023,6 +8073,10 @@ spec: Whether Kubernetes master is accessible via Google Compute Engine Public IPs. type: boolean + privateEndpointEnforcementEnabled: + description: Whether authorized networks is enforced on the + private endpoint or not. + type: boolean type: object meshCertificates: description: Structure is documented below. @@ -8059,13 +8113,6 @@ spec: enableRelay: description: Whether or not Relay is enabled. type: boolean - relayMode: - description: Mode used to make Relay available. Deprecated - in favor of enable_relay field. Remove this attribute's - configuration as this field will be removed in the next - major release and enable_relay will become a required - field. - type: string type: object enableComponents: description: 'The GKE components exposing metrics. Supported @@ -8314,8 +8361,11 @@ spec: guestAccelerator: description: |- List of the type and count of accelerator cards attached to the instance. - Structure documented below.12 this field is an - Attribute as Block + Structure documented below. + Note: As of 6.0.0, argument syntax + is no longer supported for this field in favor of block syntax. + To dynamically set a list of guest accelerators, use dynamic blocks. + To set an empty list, use a single guest_accelerator block with count = 0. items: properties: count: @@ -8403,12 +8453,14 @@ spec: description: |- The CPU management policy on the node. See K8S CPU Management Policies. - One of "none" or "static". Defaults to none when kubelet_config is unset. + One of "none" or "static". 
If unset (or set to the empty string ""), the API will treat the field as if set to "none". + Prior to the 6.4.0 this field was marked as required. The workaround for the required field + is setting the empty string "", which will function identically to not setting this field. type: string insecureKubeletReadonlyPortEnabled: - description: 'Controls whether the kubelet read-only port - is enabled. It is strongly recommended to set this to - FALSE. Possible values: TRUE, FALSE.' + description: 'only port is enabled for newly created node + pools in the cluster. It is strongly recommended to + set this to FALSE. Possible values: TRUE, FALSE.' type: string podPidsLimit: description: Controls the maximum number of processes @@ -8433,6 +8485,17 @@ spec: Possible cgroup modes that can be used. Accepted values are: type: string + hugepagesConfig: + description: Amounts for 2M and 1G hugepages. Structure + is documented below. + properties: + hugepageSize1G: + description: Amount of 1G hugepages. + type: number + hugepageSize2M: + description: Amount of 2M hugepages. + type: number + type: object sysctls: additionalProperties: type: string @@ -8459,6 +8522,11 @@ spec: The amount of local SSD disks that will be attached to each cluster node. Defaults to 0. type: number + localSsdEncryptionMode: + description: |- + Possible Local SSD encryption modes: + Accepted values are: + type: string loggingVariant: description: wide default value. Valid values include DEFAULT and MAX_THROUGHPUT. See Increasing logging agent throughput @@ -8690,6 +8758,12 @@ spec: See the official documentation for more information. Defaults to false. type: boolean + storagePools: + description: The list of Storage Pools where boot disks are + provisioned. + items: + type: string + type: array tags: description: |- The list of instance tags applied to all nodes. Tags are used to identify @@ -8744,6 +8818,18 @@ spec: autopilot clusters and node auto-provisioning-enabled clusters. 
Structure is documented below. properties: + linuxNodeConfig: + description: Linux system configuration for the cluster's + automatically provisioned node pools. Only cgroup_mode field + is supported in node_pool_auto_config. Structure is documented + below. + properties: + cgroupMode: + description: |- + Possible cgroup modes that can be used. + Accepted values are: + type: string + type: object networkTags: description: The network tag config for the cluster's automatically provisioned node pools. Structure is documented below. @@ -8762,9 +8848,9 @@ spec: Structure is documented below. properties: insecureKubeletReadonlyPortEnabled: - description: 'Controls whether the kubelet read-only port - is enabled. It is strongly recommended to set this to - FALSE. Possible values: TRUE, FALSE.' + description: 'only port is enabled for newly created node + pools in the cluster. It is strongly recommended to + set this to FALSE. Possible values: TRUE, FALSE.' type: string type: object resourceManagerTags: @@ -8821,6 +8907,16 @@ spec: type: boolean type: object type: object + gcfsConfig: + description: The default Google Container Filesystem (GCFS) + configuration at the cluster level. e.g. enable image + streaming across all the node pools within the cluster. + Structure is documented below. + properties: + enabled: + description: Enables vertical pod autoscaling + type: boolean + type: object insecureKubeletReadonlyPortEnabled: description: 'only port is enabled for newly created node pools in the cluster. It is strongly recommended to @@ -9063,6 +9159,16 @@ spec: billing export. Defaults to true. type: boolean type: object + secretManagerConfig: + description: |- + Configuration for the + SecretManagerConfig feature. + Structure is documented below. + properties: + enabled: + description: Enable the Secret Manager add-on for this cluster. + type: boolean + type: object securityPostureConfig: description: Enable/Disable Security Posture API features for the cluster. 
Structure is documented below. @@ -9168,6 +9274,31 @@ spec: type: string type: object type: object + userManagedKeysConfig: + properties: + aggregationCa: + type: string + clusterCa: + type: string + controlPlaneDiskEncryptionKey: + type: string + etcdApiCa: + type: string + etcdPeerCa: + type: string + gkeopsEtcdBackupEncryptionKey: + type: string + serviceAccountSigningKeys: + items: + type: string + type: array + x-kubernetes-list-type: set + serviceAccountVerificationKeys: + items: + type: string + type: array + x-kubernetes-list-type: set + type: object verticalPodAutoscaling: description: |- Vertical Pod Autoscaling automatically adjusts the resources of pods controlled by it. @@ -9323,6 +9454,18 @@ spec: cluster. It is disabled by default. Set disabled = false to enable. type: boolean type: object + parallelstoreCsiDriverConfig: + description: |- + The status of the Parallelstore CSI driver addon, + which allows the usage of a Parallelstore instances as volumes. + It is disabled by default for Standard clusters; set enabled = true to enable. + It is enabled by default for Autopilot clusters with version 1.29 or later; set enabled = true to enable it explicitly. + See Enable the Parallelstore CSI driver for more information. + properties: + enabled: + description: Enables vertical pod autoscaling + type: boolean + type: object rayOperatorConfig: description: |- . The status of the Ray Operator @@ -9606,6 +9749,24 @@ spec: enforce encryption of data in-use. type: boolean type: object + controlPlaneEndpointsConfig: + description: |- + Configuration for all of the cluster's control plane endpoints. + Structure is documented below. + properties: + dnsEndpointConfig: + description: DNS endpoint configuration. + properties: + allowExternalTraffic: + description: Controls whether user traffic is allowed + over this endpoint. Note that GCP-managed services may + still use the endpoint even if this is false. 
+ type: boolean + endpoint: + description: (Output) The cluster's DNS endpoint. + type: string + type: object + type: object costManagementConfig: description: |- Configuration for the @@ -9653,8 +9814,6 @@ spec: internal traffic type: boolean type: object - deletionProtection: - type: boolean description: description: Description of the cluster. type: string @@ -9662,6 +9821,12 @@ spec: description: Configuration for Using Cloud DNS for GKE. Structure is documented below. properties: + additiveVpcScopeDnsDomain: + description: This will enable Cloud DNS additive VPC scope. + Must provide a domain name that is unique within the VPC. + For this to work cluster_dns = "CLOUD_DNS" and cluster_dns_scope + = "CLUSTER_SCOPE" must both be set as well. + type: string clusterDns: description: Which in-cluster DNS provider should be used. PROVIDER_UNSPECIFIED (default) or PLATFORM_DEFAULT or CLOUD_DNS. @@ -9685,6 +9850,13 @@ spec: description: Whether CiliumClusterWideNetworkPolicy is enabled on this cluster. Defaults to false. type: boolean + enableFqdnNetworkPolicy: + description: Whether FQDN Network Policy is enabled on this cluster. + Users who enable this feature for existing Standard clusters + must restart the GKE Dataplane V2 anetd DaemonSet after enabling + it. See the Enable FQDN Network Policy in an existing cluster + for more information. + type: boolean enableIntranodeVisibility: description: Whether Intra-node visibility is enabled for this cluster. This makes same node pod to pod traffic visible for @@ -9735,6 +9907,15 @@ spec: Whether to enable Cloud TPU resources in this cluster. See the official documentation. type: boolean + enterpriseConfig: + description: Configuration for [Enterprise edition].(https://cloud.google.com/kubernetes-engine/enterprise/docs/concepts/gke-editions). + Structure is documented below. + properties: + desiredTier: + description: Sets the tier of the cluster. Available options + include STANDARD and ENTERPRISE. 
+ type: string + type: object fleet: description: Fleet configuration for the cluster. Structure is documented below. @@ -9946,6 +10127,10 @@ spec: Whether Kubernetes master is accessible via Google Compute Engine Public IPs. type: boolean + privateEndpointEnforcementEnabled: + description: Whether authorized networks is enforced on the + private endpoint or not. + type: boolean type: object meshCertificates: description: Structure is documented below. @@ -9982,13 +10167,6 @@ spec: enableRelay: description: Whether or not Relay is enabled. type: boolean - relayMode: - description: Mode used to make Relay available. Deprecated - in favor of enable_relay field. Remove this attribute's - configuration as this field will be removed in the next - major release and enable_relay will become a required - field. - type: string type: object enableComponents: description: 'The GKE components exposing metrics. Supported @@ -10237,8 +10415,11 @@ spec: guestAccelerator: description: |- List of the type and count of accelerator cards attached to the instance. - Structure documented below.12 this field is an - Attribute as Block + Structure documented below. + Note: As of 6.0.0, argument syntax + is no longer supported for this field in favor of block syntax. + To dynamically set a list of guest accelerators, use dynamic blocks. + To set an empty list, use a single guest_accelerator block with count = 0. items: properties: count: @@ -10326,12 +10507,14 @@ spec: description: |- The CPU management policy on the node. See K8S CPU Management Policies. - One of "none" or "static". Defaults to none when kubelet_config is unset. + One of "none" or "static". If unset (or set to the empty string ""), the API will treat the field as if set to "none". + Prior to the 6.4.0 this field was marked as required. The workaround for the required field + is setting the empty string "", which will function identically to not setting this field. 
type: string insecureKubeletReadonlyPortEnabled: - description: 'Controls whether the kubelet read-only port - is enabled. It is strongly recommended to set this to - FALSE. Possible values: TRUE, FALSE.' + description: 'only port is enabled for newly created node + pools in the cluster. It is strongly recommended to + set this to FALSE. Possible values: TRUE, FALSE.' type: string podPidsLimit: description: Controls the maximum number of processes @@ -10356,6 +10539,17 @@ spec: Possible cgroup modes that can be used. Accepted values are: type: string + hugepagesConfig: + description: Amounts for 2M and 1G hugepages. Structure + is documented below. + properties: + hugepageSize1G: + description: Amount of 1G hugepages. + type: number + hugepageSize2M: + description: Amount of 2M hugepages. + type: number + type: object sysctls: additionalProperties: type: string @@ -10382,6 +10576,11 @@ spec: The amount of local SSD disks that will be attached to each cluster node. Defaults to 0. type: number + localSsdEncryptionMode: + description: |- + Possible Local SSD encryption modes: + Accepted values are: + type: string loggingVariant: description: wide default value. Valid values include DEFAULT and MAX_THROUGHPUT. See Increasing logging agent throughput @@ -10613,6 +10812,12 @@ spec: See the official documentation for more information. Defaults to false. type: boolean + storagePools: + description: The list of Storage Pools where boot disks are + provisioned. + items: + type: string + type: array tags: description: |- The list of instance tags applied to all nodes. Tags are used to identify @@ -10667,6 +10872,18 @@ spec: autopilot clusters and node auto-provisioning-enabled clusters. Structure is documented below. properties: + linuxNodeConfig: + description: Linux system configuration for the cluster's + automatically provisioned node pools. Only cgroup_mode field + is supported in node_pool_auto_config. Structure is documented + below. 
+ properties: + cgroupMode: + description: |- + Possible cgroup modes that can be used. + Accepted values are: + type: string + type: object networkTags: description: The network tag config for the cluster's automatically provisioned node pools. Structure is documented below. @@ -10685,9 +10902,9 @@ spec: Structure is documented below. properties: insecureKubeletReadonlyPortEnabled: - description: 'Controls whether the kubelet read-only port - is enabled. It is strongly recommended to set this to - FALSE. Possible values: TRUE, FALSE.' + description: 'only port is enabled for newly created node + pools in the cluster. It is strongly recommended to + set this to FALSE. Possible values: TRUE, FALSE.' type: string type: object resourceManagerTags: @@ -10744,6 +10961,16 @@ spec: type: boolean type: object type: object + gcfsConfig: + description: The default Google Container Filesystem (GCFS) + configuration at the cluster level. e.g. enable image + streaming across all the node pools within the cluster. + Structure is documented below. + properties: + enabled: + description: Enables vertical pod autoscaling + type: boolean + type: object insecureKubeletReadonlyPortEnabled: description: 'only port is enabled for newly created node pools in the cluster. It is strongly recommended to @@ -10986,6 +11213,16 @@ spec: billing export. Defaults to true. type: boolean type: object + secretManagerConfig: + description: |- + Configuration for the + SecretManagerConfig feature. + Structure is documented below. + properties: + enabled: + description: Enable the Secret Manager add-on for this cluster. + type: boolean + type: object securityPostureConfig: description: Enable/Disable Security Posture API features for the cluster. Structure is documented below. 
@@ -11091,6 +11328,31 @@ spec: type: string type: object type: object + userManagedKeysConfig: + properties: + aggregationCa: + type: string + clusterCa: + type: string + controlPlaneDiskEncryptionKey: + type: string + etcdApiCa: + type: string + etcdPeerCa: + type: string + gkeopsEtcdBackupEncryptionKey: + type: string + serviceAccountSigningKeys: + items: + type: string + type: array + x-kubernetes-list-type: set + serviceAccountVerificationKeys: + items: + type: string + type: array + x-kubernetes-list-type: set + type: object verticalPodAutoscaling: description: |- Vertical Pod Autoscaling automatically adjusts the resources of pods controlled by it. @@ -11403,6 +11665,18 @@ spec: cluster. It is disabled by default. Set disabled = false to enable. type: boolean type: object + parallelstoreCsiDriverConfig: + description: |- + The status of the Parallelstore CSI driver addon, + which allows the usage of a Parallelstore instances as volumes. + It is disabled by default for Standard clusters; set enabled = true to enable. + It is enabled by default for Autopilot clusters with version 1.29 or later; set enabled = true to enable it explicitly. + See Enable the Parallelstore CSI driver for more information. + properties: + enabled: + description: Enables vertical pod autoscaling + type: boolean + type: object rayOperatorConfig: description: |- . The status of the Ray Operator @@ -11698,6 +11972,24 @@ spec: enforce encryption of data in-use. type: boolean type: object + controlPlaneEndpointsConfig: + description: |- + Configuration for all of the cluster's control plane endpoints. + Structure is documented below. + properties: + dnsEndpointConfig: + description: DNS endpoint configuration. + properties: + allowExternalTraffic: + description: Controls whether user traffic is allowed + over this endpoint. Note that GCP-managed services may + still use the endpoint even if this is false. + type: boolean + endpoint: + description: (Output) The cluster's DNS endpoint. 
+ type: string + type: object + type: object costManagementConfig: description: |- Configuration for the @@ -11754,6 +12046,12 @@ spec: description: Configuration for Using Cloud DNS for GKE. Structure is documented below. properties: + additiveVpcScopeDnsDomain: + description: This will enable Cloud DNS additive VPC scope. + Must provide a domain name that is unique within the VPC. + For this to work cluster_dns = "CLOUD_DNS" and cluster_dns_scope + = "CLUSTER_SCOPE" must both be set as well. + type: string clusterDns: description: Which in-cluster DNS provider should be used. PROVIDER_UNSPECIFIED (default) or PLATFORM_DEFAULT or CLOUD_DNS. @@ -11766,6 +12064,11 @@ spec: (default) or CLUSTER_SCOPE or VPC_SCOPE. type: string type: object + effectiveLabels: + additionalProperties: + type: string + type: object + x-kubernetes-map-type: granular enableAutopilot: description: |- Enable Autopilot for this cluster. Defaults to false. @@ -11777,6 +12080,13 @@ spec: description: Whether CiliumClusterWideNetworkPolicy is enabled on this cluster. Defaults to false. type: boolean + enableFqdnNetworkPolicy: + description: Whether FQDN Network Policy is enabled on this cluster. + Users who enable this feature for existing Standard clusters + must restart the GKE Dataplane V2 anetd DaemonSet after enabling + it. See the Enable FQDN Network Policy in an existing cluster + for more information. + type: boolean enableIntranodeVisibility: description: Whether Intra-node visibility is enabled for this cluster. This makes same node pod to pod traffic visible for @@ -11830,6 +12140,18 @@ spec: endpoint: description: The IP address of this cluster's Kubernetes master. type: string + enterpriseConfig: + description: Configuration for [Enterprise edition].(https://cloud.google.com/kubernetes-engine/enterprise/docs/concepts/gke-editions). + Structure is documented below. + properties: + clusterTier: + description: The effective tier of the cluster. 
+ type: string + desiredTier: + description: Sets the tier of the cluster. Available options + include STANDARD and ENTERPRISE. + type: string + type: object fleet: description: Fleet configuration for the cluster. Structure is documented below. @@ -12089,6 +12411,10 @@ spec: Whether Kubernetes master is accessible via Google Compute Engine Public IPs. type: boolean + privateEndpointEnforcementEnabled: + description: Whether authorized networks is enforced on the + private endpoint or not. + type: boolean type: object masterVersion: description: |- @@ -12131,13 +12457,6 @@ spec: enableRelay: description: Whether or not Relay is enabled. type: boolean - relayMode: - description: Mode used to make Relay available. Deprecated - in favor of enable_relay field. Remove this attribute's - configuration as this field will be removed in the next - major release and enable_relay will become a required - field. - type: string type: object enableComponents: description: 'The GKE components exposing metrics. Supported @@ -12329,8 +12648,11 @@ spec: guestAccelerator: description: |- List of the type and count of accelerator cards attached to the instance. - Structure documented below.12 this field is an - Attribute as Block + Structure documented below. + Note: As of 6.0.0, argument syntax + is no longer supported for this field in favor of block syntax. + To dynamically set a list of guest accelerators, use dynamic blocks. + To set an empty list, use a single guest_accelerator block with count = 0. items: properties: count: @@ -12418,12 +12740,14 @@ spec: description: |- The CPU management policy on the node. See K8S CPU Management Policies. - One of "none" or "static". Defaults to none when kubelet_config is unset. + One of "none" or "static". If unset (or set to the empty string ""), the API will treat the field as if set to "none". + Prior to the 6.4.0 this field was marked as required. 
The workaround for the required field + is setting the empty string "", which will function identically to not setting this field. type: string insecureKubeletReadonlyPortEnabled: - description: 'Controls whether the kubelet read-only port - is enabled. It is strongly recommended to set this to - FALSE. Possible values: TRUE, FALSE.' + description: 'only port is enabled for newly created node + pools in the cluster. It is strongly recommended to + set this to FALSE. Possible values: TRUE, FALSE.' type: string podPidsLimit: description: Controls the maximum number of processes @@ -12448,6 +12772,17 @@ spec: Possible cgroup modes that can be used. Accepted values are: type: string + hugepagesConfig: + description: Amounts for 2M and 1G hugepages. Structure + is documented below. + properties: + hugepageSize1G: + description: Amount of 1G hugepages. + type: number + hugepageSize2M: + description: Amount of 2M hugepages. + type: number + type: object sysctls: additionalProperties: type: string @@ -12474,6 +12809,11 @@ spec: The amount of local SSD disks that will be attached to each cluster node. Defaults to 0. type: number + localSsdEncryptionMode: + description: |- + Possible Local SSD encryption modes: + Accepted values are: + type: string loggingVariant: description: wide default value. Valid values include DEFAULT and MAX_THROUGHPUT. See Increasing logging agent throughput @@ -12629,6 +12969,12 @@ spec: See the official documentation for more information. Defaults to false. type: boolean + storagePools: + description: The list of Storage Pools where boot disks are + provisioned. + items: + type: string + type: array tags: description: |- The list of instance tags applied to all nodes. Tags are used to identify @@ -12939,8 +13285,11 @@ spec: guestAccelerator: description: |- List of the type and count of accelerator cards attached to the instance. - Structure documented below.12 this field is an - Attribute as Block + Structure documented below. 
+ Note: As of 6.0.0, argument syntax + is no longer supported for this field in favor of block syntax. + To dynamically set a list of guest accelerators, use dynamic blocks. + To set an empty list, use a single guest_accelerator block with count = 0. items: properties: count: @@ -13028,12 +13377,14 @@ spec: description: |- The CPU management policy on the node. See K8S CPU Management Policies. - One of "none" or "static". Defaults to none when kubelet_config is unset. + One of "none" or "static". If unset (or set to the empty string ""), the API will treat the field as if set to "none". + Prior to the 6.4.0 this field was marked as required. The workaround for the required field + is setting the empty string "", which will function identically to not setting this field. type: string insecureKubeletReadonlyPortEnabled: - description: 'Controls whether the kubelet read-only - port is enabled. It is strongly recommended to - set this to FALSE. Possible values: TRUE, FALSE.' + description: 'only port is enabled for newly created + node pools in the cluster. It is strongly recommended + to set this to FALSE. Possible values: TRUE, FALSE.' type: string podPidsLimit: description: Controls the maximum number of processes @@ -13050,14 +13401,27 @@ spec: type: object x-kubernetes-map-type: granular linuxNodeConfig: - description: Parameters that can be configured on Linux - nodes. Structure is documented below. + description: Linux system configuration for the cluster's + automatically provisioned node pools. Only cgroup_mode + field is supported in node_pool_auto_config. Structure + is documented below. properties: cgroupMode: description: |- Possible cgroup modes that can be used. Accepted values are: type: string + hugepagesConfig: + description: Amounts for 2M and 1G hugepages. Structure + is documented below. + properties: + hugepageSize1G: + description: Amount of 1G hugepages. + type: number + hugepageSize2M: + description: Amount of 2M hugepages. 
+ type: number + type: object sysctls: additionalProperties: type: string @@ -13084,6 +13448,11 @@ spec: The amount of local SSD disks that will be attached to each cluster node. Defaults to 0. type: number + localSsdEncryptionMode: + description: |- + Possible Local SSD encryption modes: + Accepted values are: + type: string loggingVariant: description: The type of logging agent that is deployed by default for newly created node pools in the cluster. @@ -13242,6 +13611,12 @@ spec: See the official documentation for more information. Defaults to false. type: boolean + storagePools: + description: The list of Storage Pools where boot disks + are provisioned. + items: + type: string + type: array tags: description: |- The list of instance tags applied to all nodes. Tags are used to identify @@ -13381,6 +13756,18 @@ spec: autopilot clusters and node auto-provisioning-enabled clusters. Structure is documented below. properties: + linuxNodeConfig: + description: Linux system configuration for the cluster's + automatically provisioned node pools. Only cgroup_mode field + is supported in node_pool_auto_config. Structure is documented + below. + properties: + cgroupMode: + description: |- + Possible cgroup modes that can be used. + Accepted values are: + type: string + type: object networkTags: description: The network tag config for the cluster's automatically provisioned node pools. Structure is documented below. @@ -13399,9 +13786,9 @@ spec: Structure is documented below. properties: insecureKubeletReadonlyPortEnabled: - description: 'Controls whether the kubelet read-only port - is enabled. It is strongly recommended to set this to - FALSE. Possible values: TRUE, FALSE.' + description: 'only port is enabled for newly created node + pools in the cluster. It is strongly recommended to + set this to FALSE. Possible values: TRUE, FALSE.' 
type: string type: object resourceManagerTags: @@ -13458,6 +13845,16 @@ spec: type: boolean type: object type: object + gcfsConfig: + description: The default Google Container Filesystem (GCFS) + configuration at the cluster level. e.g. enable image + streaming across all the node pools within the cluster. + Structure is documented below. + properties: + enabled: + description: Enables vertical pod autoscaling + type: boolean + type: object insecureKubeletReadonlyPortEnabled: description: 'only port is enabled for newly created node pools in the cluster. It is strongly recommended to @@ -13638,6 +14035,16 @@ spec: billing export. Defaults to true. type: boolean type: object + secretManagerConfig: + description: |- + Configuration for the + SecretManagerConfig feature. + Structure is documented below. + properties: + enabled: + description: Enable the Secret Manager add-on for this cluster. + type: boolean + type: object securityPostureConfig: description: Enable/Disable Security Posture API features for the cluster. Structure is documented below. @@ -13677,12 +14084,44 @@ spec: The name or self_link of the Google Compute Engine subnetwork in which the cluster's instances are launched. type: string + terraformLabels: + additionalProperties: + type: string + description: The combination of labels configured directly on + the resource and default labels configured on the provider. + type: object + x-kubernetes-map-type: granular tpuIpv4CidrBlock: description: |- The IP address range of the Cloud TPUs in this cluster, in CIDR notation (e.g. 1.2.3.4/29). 
type: string + userManagedKeysConfig: + properties: + aggregationCa: + type: string + clusterCa: + type: string + controlPlaneDiskEncryptionKey: + type: string + etcdApiCa: + type: string + etcdPeerCa: + type: string + gkeopsEtcdBackupEncryptionKey: + type: string + serviceAccountSigningKeys: + items: + type: string + type: array + x-kubernetes-list-type: set + serviceAccountVerificationKeys: + items: + type: string + type: array + x-kubernetes-list-type: set + type: object verticalPodAutoscaling: description: |- Vertical Pod Autoscaling automatically adjusts the resources of pods controlled by it. diff --git a/package/crds/container.gcp.upbound.io_nodepools.yaml b/package/crds/container.gcp.upbound.io_nodepools.yaml index 7f088cc02..9eefe556b 100644 --- a/package/crds/container.gcp.upbound.io_nodepools.yaml +++ b/package/crds/container.gcp.upbound.io_nodepools.yaml @@ -2738,6 +2738,13 @@ spec: properties: cgroupMode: type: string + hugepagesConfig: + properties: + hugepageSize1G: + type: number + hugepageSize2M: + type: number + type: object sysctls: additionalProperties: type: string @@ -2751,6 +2758,11 @@ spec: type: object localSsdCount: type: number + localSsdEncryptionMode: + description: |- + Possible Local SSD encryption modes: + Accepted values are: + type: string loggingVariant: type: string machineType: @@ -2917,6 +2929,10 @@ spec: type: object spot: type: boolean + storagePools: + items: + type: string + type: array tags: items: type: string @@ -3351,6 +3367,13 @@ spec: properties: cgroupMode: type: string + hugepagesConfig: + properties: + hugepageSize1G: + type: number + hugepageSize2M: + type: number + type: object sysctls: additionalProperties: type: string @@ -3364,6 +3387,11 @@ spec: type: object localSsdCount: type: number + localSsdEncryptionMode: + description: |- + Possible Local SSD encryption modes: + Accepted values are: + type: string loggingVariant: type: string machineType: @@ -3530,6 +3558,10 @@ spec: type: object spot: type: boolean + 
storagePools: + items: + type: string + type: array tags: items: type: string @@ -4160,6 +4192,13 @@ spec: properties: cgroupMode: type: string + hugepagesConfig: + properties: + hugepageSize1G: + type: number + hugepageSize2M: + type: number + type: object sysctls: additionalProperties: type: string @@ -4173,6 +4212,11 @@ spec: type: object localSsdCount: type: number + localSsdEncryptionMode: + description: |- + Possible Local SSD encryption modes: + Accepted values are: + type: string loggingVariant: type: string machineType: @@ -4263,6 +4307,10 @@ spec: type: object spot: type: boolean + storagePools: + items: + type: string + type: array tags: items: type: string diff --git a/package/crds/containerattached.gcp.upbound.io_clusters.yaml b/package/crds/containerattached.gcp.upbound.io_clusters.yaml index f2322b3c6..b3c42b46c 100644 --- a/package/crds/containerattached.gcp.upbound.io_clusters.yaml +++ b/package/crds/containerattached.gcp.upbound.io_clusters.yaml @@ -1051,7 +1051,8 @@ spec: type: string type: object deletionPolicy: - description: Policy to determine what flags to send on delete. + description: 'Policy to determine what flags to send on delete. + Possible values: DELETE, DELETE_IGNORE_ERRORS' type: string description: description: |- @@ -1159,6 +1160,17 @@ spec: type: string type: object type: object + securityPostureConfig: + description: |- + Enable/Disable Security Posture API features for the cluster. + Structure is documented below. + properties: + vulnerabilityMode: + description: |- + Sets the mode of the Kubernetes security posture API's workload vulnerability scanning. + Possible values are: VULNERABILITY_DISABLED, VULNERABILITY_ENTERPRISE. + type: string + type: object required: - location type: object @@ -1225,7 +1237,8 @@ spec: type: string type: object deletionPolicy: - description: Policy to determine what flags to send on delete. + description: 'Policy to determine what flags to send on delete. 
+ Possible values: DELETE, DELETE_IGNORE_ERRORS' type: string description: description: |- @@ -1330,6 +1343,17 @@ spec: type: string type: object type: object + securityPostureConfig: + description: |- + Enable/Disable Security Posture API features for the cluster. + Structure is documented below. + properties: + vulnerabilityMode: + description: |- + Sets the mode of the Kubernetes security posture API's workload vulnerability scanning. + Possible values are: VULNERABILITY_DISABLED, VULNERABILITY_ENTERPRISE. + type: string + type: object type: object managementPolicies: default: @@ -1579,7 +1603,8 @@ spec: description: Output only. The time at which this cluster was created. type: string deletionPolicy: - description: Policy to determine what flags to send on delete. + description: 'Policy to determine what flags to send on delete. + Possible values: DELETE, DELETE_IGNORE_ERRORS' type: string description: description: |- @@ -1720,6 +1745,17 @@ spec: description: If set, there are currently changes in flight to the cluster. type: boolean + securityPostureConfig: + description: |- + Enable/Disable Security Posture API features for the cluster. + Structure is documented below. + properties: + vulnerabilityMode: + description: |- + Sets the mode of the Kubernetes security posture API's workload vulnerability scanning. + Possible values are: VULNERABILITY_DISABLED, VULNERABILITY_ENTERPRISE. + type: string + type: object state: description: |- The current state of the cluster. Possible values: diff --git a/package/crds/containeraws.gcp.upbound.io_nodepools.yaml b/package/crds/containeraws.gcp.upbound.io_nodepools.yaml index fca61b887..5ce4dea9b 100644 --- a/package/crds/containeraws.gcp.upbound.io_nodepools.yaml +++ b/package/crds/containeraws.gcp.upbound.io_nodepools.yaml @@ -1478,6 +1478,27 @@ spec: type: object type: array type: object + kubeletConfig: + description: The kubelet configuration for the node pool. 
+ properties: + cpuCfsQuota: + description: Whether or not to enable CPU CFS quota. Defaults + to true. + type: boolean + cpuCfsQuotaPeriod: + description: Optional. The CPU CFS quota period to use for + the node. Defaults to "100ms". + type: string + cpuManagerPolicy: + description: The CpuManagerPolicy to use for the node. Defaults + to "none". + type: string + podPidsLimit: + description: Optional. The maximum number of PIDs in each + pod running on the node. The limit scales automatically + based on underlying machine size if left unset. + type: number + type: object location: description: The location for the resource type: string @@ -1710,6 +1731,27 @@ spec: type: object type: array type: object + kubeletConfig: + description: The kubelet configuration for the node pool. + properties: + cpuCfsQuota: + description: Whether or not to enable CPU CFS quota. Defaults + to true. + type: boolean + cpuCfsQuotaPeriod: + description: Optional. The CPU CFS quota period to use for + the node. Defaults to "100ms". + type: string + cpuManagerPolicy: + description: The CpuManagerPolicy to use for the node. Defaults + to "none". + type: string + podPidsLimit: + description: Optional. The maximum number of PIDs in each + pod running on the node. The limit scales automatically + based on underlying machine size if left unset. + type: number + type: object management: description: The Management configuration for this node pool. properties: @@ -2138,6 +2180,27 @@ spec: id: description: an identifier for the resource with format projects/{{project}}/locations/{{location}}/awsClusters/{{cluster}}/awsNodePools/{{name}} type: string + kubeletConfig: + description: The kubelet configuration for the node pool. + properties: + cpuCfsQuota: + description: Whether or not to enable CPU CFS quota. Defaults + to true. + type: boolean + cpuCfsQuotaPeriod: + description: Optional. The CPU CFS quota period to use for + the node. Defaults to "100ms". 
+ type: string + cpuManagerPolicy: + description: The CpuManagerPolicy to use for the node. Defaults + to "none". + type: string + podPidsLimit: + description: Optional. The maximum number of PIDs in each + pod running on the node. The limit scales automatically + based on underlying machine size if left unset. + type: number + type: object location: description: The location for the resource type: string diff --git a/package/crds/dataproc.gcp.upbound.io_clusters.yaml b/package/crds/dataproc.gcp.upbound.io_clusters.yaml index a5cada2c4..9046e6fde 100644 --- a/package/crds/dataproc.gcp.upbound.io_clusters.yaml +++ b/package/crds/dataproc.gcp.upbound.io_clusters.yaml @@ -3632,6 +3632,15 @@ spec: Common config settings for resources of Google Compute Engine cluster instances, applicable to all instances in the cluster. Structure defined below. properties: + confidentialInstanceConfig: + description: Confidential Instance Config for clusters + using Confidential VMs + properties: + enableConfidentialCompute: + description: Defines whether the instance should have + confidential compute enabled. + type: boolean + type: object internalIpOnly: description: |- By default, clusters are not restricted to internal IP addresses, @@ -3994,6 +4003,13 @@ spec: type: number type: object type: array + provisioningModelMix: + properties: + standardCapacityBase: + type: number + standardCapacityPercentAboveBase: + type: number + type: object type: object numInstances: description: |- @@ -4597,6 +4613,15 @@ spec: Common config settings for resources of Google Compute Engine cluster instances, applicable to all instances in the cluster. Structure defined below. properties: + confidentialInstanceConfig: + description: Confidential Instance Config for clusters + using Confidential VMs + properties: + enableConfidentialCompute: + description: Defines whether the instance should have + confidential compute enabled. 
+ type: boolean + type: object internalIpOnly: description: |- By default, clusters are not restricted to internal IP addresses, @@ -4959,6 +4984,13 @@ spec: type: number type: object type: array + provisioningModelMix: + properties: + standardCapacityBase: + type: number + standardCapacityPercentAboveBase: + type: number + type: object type: object numInstances: description: |- @@ -5752,6 +5784,15 @@ spec: Common config settings for resources of Google Compute Engine cluster instances, applicable to all instances in the cluster. Structure defined below. properties: + confidentialInstanceConfig: + description: Confidential Instance Config for clusters + using Confidential VMs + properties: + enableConfidentialCompute: + description: Defines whether the instance should have + confidential compute enabled. + type: boolean + type: object internalIpOnly: description: |- By default, clusters are not restricted to internal IP addresses, @@ -6061,6 +6102,13 @@ spec: type: number type: object type: array + provisioningModelMix: + properties: + standardCapacityBase: + type: number + standardCapacityPercentAboveBase: + type: number + type: object type: object instanceNames: description: |- diff --git a/package/crds/dataproc.gcp.upbound.io_metastoreservices.yaml b/package/crds/dataproc.gcp.upbound.io_metastoreservices.yaml index 581ec82d0..3b5a6cebe 100644 --- a/package/crds/dataproc.gcp.upbound.io_metastoreservices.yaml +++ b/package/crds/dataproc.gcp.upbound.io_metastoreservices.yaml @@ -2510,6 +2510,10 @@ spec: Default value is MYSQL. Possible values are: MYSQL, SPANNER. type: string + deletionProtection: + description: Indicates if the dataproc metastore should be protected + against accidental deletions. 
+ type: boolean effectiveLabels: additionalProperties: type: string diff --git a/package/crds/dataproc.gcp.upbound.io_workflowtemplates.yaml b/package/crds/dataproc.gcp.upbound.io_workflowtemplates.yaml index d2fb2ff84..637c93aa7 100644 --- a/package/crds/dataproc.gcp.upbound.io_workflowtemplates.yaml +++ b/package/crds/dataproc.gcp.upbound.io_workflowtemplates.yaml @@ -5517,7 +5517,7 @@ spec: additionalProperties: type: string description: The Compute Engine metadata entries - to add to all instances (see (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)). + to add to all instances (see About VM metadata). type: object x-kubernetes-map-type: granular network: @@ -5608,7 +5608,7 @@ spec: type: string tags: description: The Compute Engine tags to add to - all instances (see (https://cloud.google.com/compute/docs/label-or-tag-resources#tags)). + all instances (see Manage tags for resources). items: type: string type: array @@ -5644,12 +5644,12 @@ spec: executionTimeout: description: Amount of time executable has to complete. Default is 10 minutes (see JSON - representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). - Cluster creation fails with an explanatory - error message (the name of the executable - that caused the error and the exceeded timeout - period) if the executable is not completed - at end of the timeout period. + representation of JSON Mapping - Language + Guide (proto 3)). Cluster creation fails with + an explanatory error message (the name of + the executable that caused the error and the + exceeded timeout period) if the executable + is not completed at end of the timeout period. type: string type: object type: array @@ -5658,14 +5658,15 @@ spec: properties: autoDeleteTime: description: The time when cluster will be auto-deleted - (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). 
+ (see JSON representation of JSON Mapping - Language + Guide (proto 3)). type: string autoDeleteTtl: description: The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation - of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + of JSON Mapping - Language Guide (proto 3)). type: string idleDeleteTtl: description: The duration to keep the cluster @@ -5673,7 +5674,7 @@ spec: Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of - (https://developers.google.com/protocol-buffers/docs/proto3#json). + JSON Mapping - Language Guide (proto 3). type: string type: object masterConfig: @@ -5741,7 +5742,7 @@ spec: type: string minCpuPlatform: description: Specifies the minimum cpu platform - for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + for the Instance Group. See Minimum CPU platform. type: string numInstances: description: The number of VM instances in the @@ -5822,7 +5823,7 @@ spec: type: string minCpuPlatform: description: Specifies the minimum cpu platform - for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + for the Instance Group. See Minimum CPU platform. type: string numInstances: description: The number of VM instances in the @@ -5968,7 +5969,8 @@ spec: (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage - this project-level, per-location bucket (see (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). + this project-level, per-location bucket (see Dataproc + staging and temp buckets). 
type: string tempBucket: description: A Cloud Storage bucket used to store @@ -6047,7 +6049,7 @@ spec: type: string minCpuPlatform: description: Specifies the minimum cpu platform - for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + for the Instance Group. See Minimum CPU platform. type: string numInstances: description: The number of VM instances in the @@ -6800,7 +6802,7 @@ spec: additionalProperties: type: string description: The Compute Engine metadata entries - to add to all instances (see (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)). + to add to all instances (see About VM metadata). type: object x-kubernetes-map-type: granular network: @@ -6891,7 +6893,7 @@ spec: type: string tags: description: The Compute Engine tags to add to - all instances (see (https://cloud.google.com/compute/docs/label-or-tag-resources#tags)). + all instances (see Manage tags for resources). items: type: string type: array @@ -6927,12 +6929,12 @@ spec: executionTimeout: description: Amount of time executable has to complete. Default is 10 minutes (see JSON - representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). - Cluster creation fails with an explanatory - error message (the name of the executable - that caused the error and the exceeded timeout - period) if the executable is not completed - at end of the timeout period. + representation of JSON Mapping - Language + Guide (proto 3)). Cluster creation fails with + an explanatory error message (the name of + the executable that caused the error and the + exceeded timeout period) if the executable + is not completed at end of the timeout period. type: string type: object type: array @@ -6941,14 +6943,15 @@ spec: properties: autoDeleteTime: description: The time when cluster will be auto-deleted - (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). 
+ (see JSON representation of JSON Mapping - Language + Guide (proto 3)). type: string autoDeleteTtl: description: The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation - of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + of JSON Mapping - Language Guide (proto 3)). type: string idleDeleteTtl: description: The duration to keep the cluster @@ -6956,7 +6959,7 @@ spec: Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of - (https://developers.google.com/protocol-buffers/docs/proto3#json). + JSON Mapping - Language Guide (proto 3). type: string type: object masterConfig: @@ -7024,7 +7027,7 @@ spec: type: string minCpuPlatform: description: Specifies the minimum cpu platform - for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + for the Instance Group. See Minimum CPU platform. type: string numInstances: description: The number of VM instances in the @@ -7105,7 +7108,7 @@ spec: type: string minCpuPlatform: description: Specifies the minimum cpu platform - for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + for the Instance Group. See Minimum CPU platform. type: string numInstances: description: The number of VM instances in the @@ -7251,7 +7254,8 @@ spec: (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage - this project-level, per-location bucket (see (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). + this project-level, per-location bucket (see Dataproc + staging and temp buckets). 
type: string tempBucket: description: A Cloud Storage bucket used to store @@ -7330,7 +7334,7 @@ spec: type: string minCpuPlatform: description: Specifies the minimum cpu platform - for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + for the Instance Group. See Minimum CPU platform. type: string numInstances: description: The number of VM instances in the @@ -8271,7 +8275,7 @@ spec: additionalProperties: type: string description: The Compute Engine metadata entries - to add to all instances (see (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)). + to add to all instances (see About VM metadata). type: object x-kubernetes-map-type: granular network: @@ -8362,7 +8366,7 @@ spec: type: string tags: description: The Compute Engine tags to add to - all instances (see (https://cloud.google.com/compute/docs/label-or-tag-resources#tags)). + all instances (see Manage tags for resources). items: type: string type: array @@ -8398,12 +8402,12 @@ spec: executionTimeout: description: Amount of time executable has to complete. Default is 10 minutes (see JSON - representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). - Cluster creation fails with an explanatory - error message (the name of the executable - that caused the error and the exceeded timeout - period) if the executable is not completed - at end of the timeout period. + representation of JSON Mapping - Language + Guide (proto 3)). Cluster creation fails with + an explanatory error message (the name of + the executable that caused the error and the + exceeded timeout period) if the executable + is not completed at end of the timeout period. type: string type: object type: array @@ -8412,14 +8416,15 @@ spec: properties: autoDeleteTime: description: The time when cluster will be auto-deleted - (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). 
+ (see JSON representation of JSON Mapping - Language + Guide (proto 3)). type: string autoDeleteTtl: description: The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation - of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + of JSON Mapping - Language Guide (proto 3)). type: string idleDeleteTtl: description: The duration to keep the cluster @@ -8427,13 +8432,14 @@ spec: Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of - (https://developers.google.com/protocol-buffers/docs/proto3#json). + JSON Mapping - Language Guide (proto 3). type: string idleStartTime: description: Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON - representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + representation of JSON Mapping - Language Guide + (proto 3)). type: string type: object masterConfig: @@ -8543,7 +8549,7 @@ spec: type: array minCpuPlatform: description: Specifies the minimum cpu platform - for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + for the Instance Group. See Minimum CPU platform. type: string numInstances: description: The number of VM instances in the @@ -8666,7 +8672,7 @@ spec: type: array minCpuPlatform: description: Specifies the minimum cpu platform - for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + for the Instance Group. See Minimum CPU platform. 
type: string numInstances: description: The number of VM instances in the @@ -8812,7 +8818,8 @@ spec: (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage - this project-level, per-location bucket (see (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). + this project-level, per-location bucket (see Dataproc + staging and temp buckets). type: string tempBucket: description: A Cloud Storage bucket used to store @@ -8933,7 +8940,7 @@ spec: type: array minCpuPlatform: description: Specifies the minimum cpu platform - for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + for the Instance Group. See Minimum CPU platform. type: string numInstances: description: The number of VM instances in the diff --git a/package/crds/datastore.gcp.upbound.io_indices.yaml b/package/crds/datastore.gcp.upbound.io_indices.yaml deleted file mode 100644 index 8d9b9df8c..000000000 --- a/package/crds/datastore.gcp.upbound.io_indices.yaml +++ /dev/null @@ -1,425 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.14.0 - name: indices.datastore.gcp.upbound.io -spec: - group: datastore.gcp.upbound.io - names: - categories: - - crossplane - - managed - - gcp - kind: Index - listKind: IndexList - plural: indices - singular: index - scope: Cluster - versions: - - additionalPrinterColumns: - - jsonPath: .status.conditions[?(@.type=='Synced')].status - name: SYNCED - type: string - - jsonPath: .status.conditions[?(@.type=='Ready')].status - name: READY - type: string - - jsonPath: .metadata.annotations.crossplane\.io/external-name - name: EXTERNAL-NAME - type: string - - jsonPath: .metadata.creationTimestamp - name: AGE - type: date - name: v1beta1 - schema: - openAPIV3Schema: - description: Index is the Schema for the 
Indexs API. Describes a composite - index for Firestore in Datastore Mode. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: IndexSpec defines the desired state of Index - properties: - deletionPolicy: - default: Delete - description: |- - DeletionPolicy specifies what will happen to the underlying external - when this managed resource is deleted - either "Delete" or "Orphan" the - external resource. - This field is planned to be deprecated in favor of the ManagementPolicies - field in a future release. Currently, both could be set independently and - non-default values would be honored if the feature flag is enabled. - See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 - enum: - - Orphan - - Delete - type: string - forProvider: - properties: - ancestor: - description: |- - Policy for including ancestors in the index. - Default value is NONE. - Possible values are: NONE, ALL_ANCESTORS. - type: string - kind: - description: The entity kind which the index applies to. - type: string - project: - description: |- - The ID of the project in which the resource belongs. - If it is not provided, the provider project is used. 
- type: string - properties: - description: |- - An ordered list of properties to index on. - Structure is documented below. - items: - properties: - direction: - description: |- - The direction the index should optimize for sorting. - Possible values are: ASCENDING, DESCENDING. - type: string - name: - description: The property name to index. - type: string - type: object - type: array - type: object - initProvider: - description: |- - THIS IS A BETA FIELD. It will be honored - unless the Management Policies feature flag is disabled. - InitProvider holds the same fields as ForProvider, with the exception - of Identifier and other resource reference fields. The fields that are - in InitProvider are merged into ForProvider when the resource is created. - The same fields are also added to the terraform ignore_changes hook, to - avoid updating them after creation. This is useful for fields that are - required on creation, but we do not desire to update them after creation, - for example because of an external controller is managing them, like an - autoscaler. - properties: - ancestor: - description: |- - Policy for including ancestors in the index. - Default value is NONE. - Possible values are: NONE, ALL_ANCESTORS. - type: string - kind: - description: The entity kind which the index applies to. - type: string - project: - description: |- - The ID of the project in which the resource belongs. - If it is not provided, the provider project is used. - type: string - properties: - description: |- - An ordered list of properties to index on. - Structure is documented below. - items: - properties: - direction: - description: |- - The direction the index should optimize for sorting. - Possible values are: ASCENDING, DESCENDING. - type: string - name: - description: The property name to index. - type: string - type: object - type: array - type: object - managementPolicies: - default: - - '*' - description: |- - THIS IS A BETA FIELD. 
It is on by default but can be opted out - through a Crossplane feature flag. - ManagementPolicies specify the array of actions Crossplane is allowed to - take on the managed and external resources. - This field is planned to replace the DeletionPolicy field in a future - release. Currently, both could be set independently and non-default - values would be honored if the feature flag is enabled. If both are - custom, the DeletionPolicy field will be ignored. - See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 - and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md - items: - description: |- - A ManagementAction represents an action that the Crossplane controllers - can take on an external resource. - enum: - - Observe - - Create - - Update - - Delete - - LateInitialize - - '*' - type: string - type: array - providerConfigRef: - default: - name: default - description: |- - ProviderConfigReference specifies how the provider that will be used to - create, observe, update, and delete this managed resource should be - configured. - properties: - name: - description: Name of the referenced object. - type: string - policy: - description: Policies for referencing. - properties: - resolution: - default: Required - description: |- - Resolution specifies whether resolution of this reference is required. - The default is 'Required', which means the reconcile will fail if the - reference cannot be resolved. 'Optional' means this reference will be - a no-op if it cannot be resolved. - enum: - - Required - - Optional - type: string - resolve: - description: |- - Resolve specifies when this reference should be resolved. The default - is 'IfNotPresent', which will attempt to resolve the reference only when - the corresponding field is not present. 
Use 'Always' to resolve the - reference on every reconcile. - enum: - - Always - - IfNotPresent - type: string - type: object - required: - - name - type: object - publishConnectionDetailsTo: - description: |- - PublishConnectionDetailsTo specifies the connection secret config which - contains a name, metadata and a reference to secret store config to - which any connection details for this managed resource should be written. - Connection details frequently include the endpoint, username, - and password required to connect to the managed resource. - properties: - configRef: - default: - name: default - description: |- - SecretStoreConfigRef specifies which secret store config should be used - for this ConnectionSecret. - properties: - name: - description: Name of the referenced object. - type: string - policy: - description: Policies for referencing. - properties: - resolution: - default: Required - description: |- - Resolution specifies whether resolution of this reference is required. - The default is 'Required', which means the reconcile will fail if the - reference cannot be resolved. 'Optional' means this reference will be - a no-op if it cannot be resolved. - enum: - - Required - - Optional - type: string - resolve: - description: |- - Resolve specifies when this reference should be resolved. The default - is 'IfNotPresent', which will attempt to resolve the reference only when - the corresponding field is not present. Use 'Always' to resolve the - reference on every reconcile. - enum: - - Always - - IfNotPresent - type: string - type: object - required: - - name - type: object - metadata: - description: Metadata is the metadata for connection secret. - properties: - annotations: - additionalProperties: - type: string - description: |- - Annotations are the annotations to be added to connection secret. - - For Kubernetes secrets, this will be used as "metadata.annotations". - - It is up to Secret Store implementation for others store types. 
- type: object - labels: - additionalProperties: - type: string - description: |- - Labels are the labels/tags to be added to connection secret. - - For Kubernetes secrets, this will be used as "metadata.labels". - - It is up to Secret Store implementation for others store types. - type: object - type: - description: |- - Type is the SecretType for the connection secret. - - Only valid for Kubernetes Secret Stores. - type: string - type: object - name: - description: Name is the name of the connection secret. - type: string - required: - - name - type: object - writeConnectionSecretToRef: - description: |- - WriteConnectionSecretToReference specifies the namespace and name of a - Secret to which any connection details for this managed resource should - be written. Connection details frequently include the endpoint, username, - and password required to connect to the managed resource. - This field is planned to be replaced in a future release in favor of - PublishConnectionDetailsTo. Currently, both could be set independently - and connection details would be published to both without affecting - each other. - properties: - name: - description: Name of the secret. - type: string - namespace: - description: Namespace of the secret. - type: string - required: - - name - - namespace - type: object - required: - - forProvider - type: object - x-kubernetes-validations: - - message: spec.forProvider.kind is a required parameter - rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies - || ''Update'' in self.managementPolicies) || has(self.forProvider.kind) - || (has(self.initProvider) && has(self.initProvider.kind))' - status: - description: IndexStatus defines the observed state of Index. - properties: - atProvider: - properties: - ancestor: - description: |- - Policy for including ancestors in the index. - Default value is NONE. - Possible values are: NONE, ALL_ANCESTORS. 
- type: string - id: - description: an identifier for the resource with format projects/{{project}}/indexes/{{index_id}} - type: string - indexId: - description: The index id. - type: string - kind: - description: The entity kind which the index applies to. - type: string - project: - description: |- - The ID of the project in which the resource belongs. - If it is not provided, the provider project is used. - type: string - properties: - description: |- - An ordered list of properties to index on. - Structure is documented below. - items: - properties: - direction: - description: |- - The direction the index should optimize for sorting. - Possible values are: ASCENDING, DESCENDING. - type: string - name: - description: The property name to index. - type: string - type: object - type: array - type: object - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. - properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition from - one status to another. - type: string - status: - description: Status of this condition; is it currently True, - False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. 
- type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - observedGeneration: - description: |- - ObservedGeneration is the latest metadata.generation - which resulted in either a ready state, or stalled due to error - it can not recover from without human intervention. - format: int64 - type: integer - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/package/crds/dialogflowcx.gcp.upbound.io_agents.yaml b/package/crds/dialogflowcx.gcp.upbound.io_agents.yaml index f5685376e..ea8028fe8 100644 --- a/package/crds/dialogflowcx.gcp.upbound.io_agents.yaml +++ b/package/crds/dialogflowcx.gcp.upbound.io_agents.yaml @@ -961,6 +961,50 @@ spec: description: Max length of DTMF digits. type: number type: object + loggingSettings: + description: 'Settings for logging. Settings for Dialogflow + History, Contact Center messages, StackDriver logs, and + speech logging. Exposed at the following levels:' + properties: + enableConsentBasedRedaction: + description: Enables consent-based end-user input redaction, + if true, a pre-defined session parameter $session.params.conversation-redaction + will be used to determine if the utterance should be + redacted. + type: boolean + enableInteractionLogging: + description: Enables DF Interaction logging. + type: boolean + enableStackdriverLogging: + description: Enables Google Cloud Logging. + type: boolean + type: object + speechSettings: + description: 'Settings for speech to text detection. Exposed + at the following levels:' + properties: + endpointerSensitivity: + description: Sensitivity of the speech model that detects + the end of speech. Scale from 0 to 100. + type: number + models: + additionalProperties: + type: string + description: |- + Mapping from language to Speech-to-Text model. 
The mapped Speech-to-Text model will be selected for requests from its corresponding language. For more information, see Speech models. + An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. + type: object + x-kubernetes-map-type: granular + noSpeechTimeout: + description: |- + Timeout before detecting no speech. + A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". + type: string + useTimeoutBasedEndpointing: + description: Use timeout based endpointing, interpreting + endpointer sensitivity as seconds of timeout value. + type: boolean + type: object type: object avatarUri: description: The URI of the agent's avatar. Avatars are used throughout @@ -1136,6 +1180,50 @@ spec: description: Max length of DTMF digits. type: number type: object + loggingSettings: + description: 'Settings for logging. Settings for Dialogflow + History, Contact Center messages, StackDriver logs, and + speech logging. Exposed at the following levels:' + properties: + enableConsentBasedRedaction: + description: Enables consent-based end-user input redaction, + if true, a pre-defined session parameter $session.params.conversation-redaction + will be used to determine if the utterance should be + redacted. + type: boolean + enableInteractionLogging: + description: Enables DF Interaction logging. + type: boolean + enableStackdriverLogging: + description: Enables Google Cloud Logging. + type: boolean + type: object + speechSettings: + description: 'Settings for speech to text detection. Exposed + at the following levels:' + properties: + endpointerSensitivity: + description: Sensitivity of the speech model that detects + the end of speech. Scale from 0 to 100. + type: number + models: + additionalProperties: + type: string + description: |- + Mapping from language to Speech-to-Text model. The mapped Speech-to-Text model will be selected for requests from its corresponding language. 
For more information, see Speech models. + An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. + type: object + x-kubernetes-map-type: granular + noSpeechTimeout: + description: |- + Timeout before detecting no speech. + A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". + type: string + useTimeoutBasedEndpointing: + description: Use timeout based endpointing, interpreting + endpointer sensitivity as seconds of timeout value. + type: boolean + type: object type: object avatarUri: description: The URI of the agent's avatar. Avatars are used throughout @@ -1487,6 +1575,50 @@ spec: description: Max length of DTMF digits. type: number type: object + loggingSettings: + description: 'Settings for logging. Settings for Dialogflow + History, Contact Center messages, StackDriver logs, and + speech logging. Exposed at the following levels:' + properties: + enableConsentBasedRedaction: + description: Enables consent-based end-user input redaction, + if true, a pre-defined session parameter $session.params.conversation-redaction + will be used to determine if the utterance should be + redacted. + type: boolean + enableInteractionLogging: + description: Enables DF Interaction logging. + type: boolean + enableStackdriverLogging: + description: Enables Google Cloud Logging. + type: boolean + type: object + speechSettings: + description: 'Settings for speech to text detection. Exposed + at the following levels:' + properties: + endpointerSensitivity: + description: Sensitivity of the speech model that detects + the end of speech. Scale from 0 to 100. + type: number + models: + additionalProperties: + type: string + description: |- + Mapping from language to Speech-to-Text model. The mapped Speech-to-Text model will be selected for requests from its corresponding language. For more information, see Speech models. + An object containing a list of "key": value pairs. 
Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. + type: object + x-kubernetes-map-type: granular + noSpeechTimeout: + description: |- + Timeout before detecting no speech. + A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". + type: string + useTimeoutBasedEndpointing: + description: Use timeout based endpointing, interpreting + endpointer sensitivity as seconds of timeout value. + type: boolean + type: object type: object avatarUri: description: The URI of the agent's avatar. Avatars are used throughout diff --git a/package/crds/dialogflowcx.gcp.upbound.io_flows.yaml b/package/crds/dialogflowcx.gcp.upbound.io_flows.yaml index 3a575ceef..4bfada5d2 100644 --- a/package/crds/dialogflowcx.gcp.upbound.io_flows.yaml +++ b/package/crds/dialogflowcx.gcp.upbound.io_flows.yaml @@ -2159,6 +2159,50 @@ spec: description: Max length of DTMF digits. type: number type: object + loggingSettings: + description: 'Settings for logging. Settings for Dialogflow + History, Contact Center messages, StackDriver logs, and + speech logging. Exposed at the following levels:' + properties: + enableConsentBasedRedaction: + description: Enables consent-based end-user input redaction, + if true, a pre-defined session parameter $session.params.conversation-redaction + will be used to determine if the utterance should be + redacted. + type: boolean + enableInteractionLogging: + description: Enables DF Interaction logging. + type: boolean + enableStackdriverLogging: + description: Enables Google Cloud Logging. + type: boolean + type: object + speechSettings: + description: 'Settings for speech to text detection. Exposed + at the following levels:' + properties: + endpointerSensitivity: + description: Sensitivity of the speech model that detects + the end of speech. Scale from 0 to 100. + type: number + models: + additionalProperties: + type: string + description: |- + Mapping from language to Speech-to-Text model. 
The mapped Speech-to-Text model will be selected for requests from its corresponding language. For more information, see Speech models. + An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. + type: object + x-kubernetes-map-type: granular + noSpeechTimeout: + description: |- + Timeout before detecting no speech. + A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". + type: string + useTimeoutBasedEndpointing: + description: Use timeout based endpointing, interpreting + endpointer sensitivity as seconds of timeout value. + type: boolean + type: object type: object description: description: The description of the flow. The maximum length is @@ -2747,6 +2791,50 @@ spec: description: Max length of DTMF digits. type: number type: object + loggingSettings: + description: 'Settings for logging. Settings for Dialogflow + History, Contact Center messages, StackDriver logs, and + speech logging. Exposed at the following levels:' + properties: + enableConsentBasedRedaction: + description: Enables consent-based end-user input redaction, + if true, a pre-defined session parameter $session.params.conversation-redaction + will be used to determine if the utterance should be + redacted. + type: boolean + enableInteractionLogging: + description: Enables DF Interaction logging. + type: boolean + enableStackdriverLogging: + description: Enables Google Cloud Logging. + type: boolean + type: object + speechSettings: + description: 'Settings for speech to text detection. Exposed + at the following levels:' + properties: + endpointerSensitivity: + description: Sensitivity of the speech model that detects + the end of speech. Scale from 0 to 100. + type: number + models: + additionalProperties: + type: string + description: |- + Mapping from language to Speech-to-Text model. The mapped Speech-to-Text model will be selected for requests from its corresponding language. 
For more information, see Speech models. + An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. + type: object + x-kubernetes-map-type: granular + noSpeechTimeout: + description: |- + Timeout before detecting no speech. + A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". + type: string + useTimeoutBasedEndpointing: + description: Use timeout based endpointing, interpreting + endpointer sensitivity as seconds of timeout value. + type: boolean + type: object type: object description: description: The description of the flow. The maximum length is @@ -3499,6 +3587,50 @@ spec: description: Max length of DTMF digits. type: number type: object + loggingSettings: + description: 'Settings for logging. Settings for Dialogflow + History, Contact Center messages, StackDriver logs, and + speech logging. Exposed at the following levels:' + properties: + enableConsentBasedRedaction: + description: Enables consent-based end-user input redaction, + if true, a pre-defined session parameter $session.params.conversation-redaction + will be used to determine if the utterance should be + redacted. + type: boolean + enableInteractionLogging: + description: Enables DF Interaction logging. + type: boolean + enableStackdriverLogging: + description: Enables Google Cloud Logging. + type: boolean + type: object + speechSettings: + description: 'Settings for speech to text detection. Exposed + at the following levels:' + properties: + endpointerSensitivity: + description: Sensitivity of the speech model that detects + the end of speech. Scale from 0 to 100. + type: number + models: + additionalProperties: + type: string + description: |- + Mapping from language to Speech-to-Text model. The mapped Speech-to-Text model will be selected for requests from its corresponding language. For more information, see Speech models. + An object containing a list of "key": value pairs. 
Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. + type: object + x-kubernetes-map-type: granular + noSpeechTimeout: + description: |- + Timeout before detecting no speech. + A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". + type: string + useTimeoutBasedEndpointing: + description: Use timeout based endpointing, interpreting + endpointer sensitivity as seconds of timeout value. + type: boolean + type: object type: object description: description: The description of the flow. The maximum length is diff --git a/package/crds/dns.gcp.upbound.io_recordsets.yaml b/package/crds/dns.gcp.upbound.io_recordsets.yaml index 56dcfbe18..5a8f344b7 100644 --- a/package/crds/dns.gcp.upbound.io_recordsets.yaml +++ b/package/crds/dns.gcp.upbound.io_recordsets.yaml @@ -2202,6 +2202,12 @@ spec: The list of targets to be health checked. Note that if DNSSEC is enabled for this zone, only one of rrdatas or health_checked_targets can be set. Structure is documented below. properties: + externalEndpoints: + description: The list of external endpoint addresses + to health check. + items: + type: string + type: array internalLoadBalancers: description: |- The list of internal load balancers to health check. @@ -2254,6 +2260,86 @@ spec: type: array type: object type: array + healthCheck: + description: Specifies the health check (used with external + endpoints). + type: string + healthCheckRef: + description: Reference to a HealthCheck in compute to populate + healthCheck. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + healthCheckSelector: + description: Selector for a HealthCheck in compute to populate + healthCheck. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object primaryBackup: description: |- The configuration for a failover policy with global to regional failover. Queries are responded to with the global primary targets, but if none of the primary targets are healthy, then we fallback to a regional failover policy. @@ -2270,6 +2356,12 @@ spec: The list of targets to be health checked. 
Note that if DNSSEC is enabled for this zone, only one of rrdatas or health_checked_targets can be set. Structure is documented below. properties: + externalEndpoints: + description: The list of external endpoint addresses + to health check. + items: + type: string + type: array internalLoadBalancers: description: |- The list of internal load balancers to health check. @@ -2335,6 +2427,12 @@ spec: The list of global primary targets to be health checked. Structure is documented below. properties: + externalEndpoints: + description: The list of external endpoint addresses + to health check. + items: + type: string + type: array internalLoadBalancers: description: |- The list of internal load balancers to health check. @@ -2698,6 +2796,12 @@ spec: The list of targets to be health checked. Note that if DNSSEC is enabled for this zone, only one of rrdatas or health_checked_targets can be set. Structure is documented below. properties: + externalEndpoints: + description: The list of external endpoint addresses + to health check. + items: + type: string + type: array internalLoadBalancers: description: |- The list of internal load balancers to health check. @@ -2885,6 +2989,12 @@ spec: The list of targets to be health checked. Note that if DNSSEC is enabled for this zone, only one of rrdatas or health_checked_targets can be set. Structure is documented below. properties: + externalEndpoints: + description: The list of external endpoint addresses + to health check. + items: + type: string + type: array internalLoadBalancers: description: |- The list of internal load balancers to health check. @@ -2937,6 +3047,86 @@ spec: type: array type: object type: array + healthCheck: + description: Specifies the health check (used with external + endpoints). + type: string + healthCheckRef: + description: Reference to a HealthCheck in compute to populate + healthCheck. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + healthCheckSelector: + description: Selector for a HealthCheck in compute to populate + healthCheck. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object primaryBackup: description: |- The configuration for a failover policy with global to regional failover. Queries are responded to with the global primary targets, but if none of the primary targets are healthy, then we fallback to a regional failover policy. @@ -2953,6 +3143,12 @@ spec: The list of targets to be health checked. Note that if DNSSEC is enabled for this zone, only one of rrdatas or health_checked_targets can be set. Structure is documented below. properties: + externalEndpoints: + description: The list of external endpoint addresses + to health check. + items: + type: string + type: array internalLoadBalancers: description: |- The list of internal load balancers to health check. @@ -3018,6 +3214,12 @@ spec: The list of global primary targets to be health checked. Structure is documented below. properties: + externalEndpoints: + description: The list of external endpoint addresses + to health check. + items: + type: string + type: array internalLoadBalancers: description: |- The list of internal load balancers to health check. @@ -3381,6 +3583,12 @@ spec: The list of targets to be health checked. Note that if DNSSEC is enabled for this zone, only one of rrdatas or health_checked_targets can be set. Structure is documented below. properties: + externalEndpoints: + description: The list of external endpoint addresses + to health check. + items: + type: string + type: array internalLoadBalancers: description: |- The list of internal load balancers to health check. @@ -3665,6 +3873,12 @@ spec: The list of targets to be health checked. Note that if DNSSEC is enabled for this zone, only one of rrdatas or health_checked_targets can be set. Structure is documented below. properties: + externalEndpoints: + description: The list of external endpoint addresses + to health check. 
+ items: + type: string + type: array internalLoadBalancers: description: |- The list of internal load balancers to health check. @@ -3717,6 +3931,10 @@ spec: type: array type: object type: array + healthCheck: + description: Specifies the health check (used with external + endpoints). + type: string primaryBackup: description: |- The configuration for a failover policy with global to regional failover. Queries are responded to with the global primary targets, but if none of the primary targets are healthy, then we fallback to a regional failover policy. @@ -3733,6 +3951,12 @@ spec: The list of targets to be health checked. Note that if DNSSEC is enabled for this zone, only one of rrdatas or health_checked_targets can be set. Structure is documented below. properties: + externalEndpoints: + description: The list of external endpoint addresses + to health check. + items: + type: string + type: array internalLoadBalancers: description: |- The list of internal load balancers to health check. @@ -3798,6 +4022,12 @@ spec: The list of global primary targets to be health checked. Structure is documented below. properties: + externalEndpoints: + description: The list of external endpoint addresses + to health check. + items: + type: string + type: array internalLoadBalancers: description: |- The list of internal load balancers to health check. @@ -3857,6 +4087,12 @@ spec: The list of targets to be health checked. Note that if DNSSEC is enabled for this zone, only one of rrdatas or health_checked_targets can be set. Structure is documented below. properties: + externalEndpoints: + description: The list of external endpoint addresses + to health check. + items: + type: string + type: array internalLoadBalancers: description: |- The list of internal load balancers to health check. 
diff --git a/package/crds/filestore.gcp.upbound.io_backups.yaml b/package/crds/filestore.gcp.upbound.io_backups.yaml index 2951eac31..6f8af00cd 100644 --- a/package/crds/filestore.gcp.upbound.io_backups.yaml +++ b/package/crds/filestore.gcp.upbound.io_backups.yaml @@ -177,6 +177,16 @@ spec: type: string type: object type: object + tags: + additionalProperties: + type: string + description: |- + A map of resource manager tags. + Resource manager tag keys and values have the same definition as resource manager tags. + Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/{tag_value_id}. + The field is ignored (both PUT & PATCH) when empty. + type: object + x-kubernetes-map-type: granular required: - location type: object @@ -293,6 +303,16 @@ spec: type: string type: object type: object + tags: + additionalProperties: + type: string + description: |- + A map of resource manager tags. + Resource manager tag keys and values have the same definition as resource manager tags. + Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/{tag_value_id}. + The field is ignored (both PUT & PATCH) when empty. + type: object + x-kubernetes-map-type: granular type: object managementPolicies: default: @@ -535,6 +555,16 @@ spec: share storage, this number is expected to change with backup creation/deletion. type: string + tags: + additionalProperties: + type: string + description: |- + A map of resource manager tags. + Resource manager tag keys and values have the same definition as resource manager tags. + Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/{tag_value_id}. + The field is ignored (both PUT & PATCH) when empty. 
+ type: object + x-kubernetes-map-type: granular terraformLabels: additionalProperties: type: string diff --git a/package/crds/filestore.gcp.upbound.io_instances.yaml b/package/crds/filestore.gcp.upbound.io_instances.yaml index 236109894..75ee40ce9 100644 --- a/package/crds/filestore.gcp.upbound.io_instances.yaml +++ b/package/crds/filestore.gcp.upbound.io_instances.yaml @@ -955,6 +955,13 @@ spec: type: string forProvider: properties: + deletionProtectionEnabled: + description: Indicates whether the instance is protected against + deletion. + type: boolean + deletionProtectionReason: + description: The reason for enabling deletion protection. + type: string description: description: A description of the instance. type: string @@ -1143,11 +1150,55 @@ spec: type: string type: object type: array + performanceConfig: + description: |- + Performance configuration for the instance. If not provided, + the default performance settings will be used. + Structure is documented below. + properties: + fixedIops: + description: |- + The instance will have a fixed provisioned IOPS value, + which will remain constant regardless of instance + capacity. + Structure is documented below. + properties: + maxIops: + description: |- + The number of IOPS to provision for the instance. + max_iops must be in multiple of 1000. + type: number + type: object + iopsPerTb: + description: |- + The instance provisioned IOPS will change dynamically + based on the capacity of the instance. + Structure is documented below. + properties: + maxIopsPerTb: + description: |- + The instance max IOPS will be calculated by multiplying + the capacity of the instance (TB) by max_iops_per_tb, + and rounding to the nearest 1000. The instance max IOPS + will be changed dynamically based on the instance + capacity. + type: number + type: object + type: object project: description: |- The ID of the project in which the resource belongs. If it is not provided, the provider project is used. 
type: string + protocol: + description: |- + Either NFSv3, for using NFS version 3 as file sharing protocol, + or NFSv4.1, for using NFS version 4.1 as file sharing protocol. + NFSv4.1 can be used with HIGH_SCALE_SSD, ZONAL, REGIONAL and ENTERPRISE. + The default is NFSv3. + Default value is NFS_V3. + Possible values are: NFS_V3, NFS_V4_1. + type: string tier: description: |- The service tier of the instance. @@ -1170,6 +1221,13 @@ spec: for example because of an external controller is managing them, like an autoscaler. properties: + deletionProtectionEnabled: + description: Indicates whether the instance is protected against + deletion. + type: boolean + deletionProtectionReason: + description: The reason for enabling deletion protection. + type: string description: description: A description of the instance. type: string @@ -1354,11 +1412,55 @@ spec: type: string type: object type: array + performanceConfig: + description: |- + Performance configuration for the instance. If not provided, + the default performance settings will be used. + Structure is documented below. + properties: + fixedIops: + description: |- + The instance will have a fixed provisioned IOPS value, + which will remain constant regardless of instance + capacity. + Structure is documented below. + properties: + maxIops: + description: |- + The number of IOPS to provision for the instance. + max_iops must be in multiple of 1000. + type: number + type: object + iopsPerTb: + description: |- + The instance provisioned IOPS will change dynamically + based on the capacity of the instance. + Structure is documented below. + properties: + maxIopsPerTb: + description: |- + The instance max IOPS will be calculated by multiplying + the capacity of the instance (TB) by max_iops_per_tb, + and rounding to the nearest 1000. The instance max IOPS + will be changed dynamically based on the instance + capacity. 
+ type: number + type: object + type: object project: description: |- The ID of the project in which the resource belongs. If it is not provided, the provider project is used. type: string + protocol: + description: |- + Either NFSv3, for using NFS version 3 as file sharing protocol, + or NFSv4.1, for using NFS version 4.1 as file sharing protocol. + NFSv4.1 can be used with HIGH_SCALE_SSD, ZONAL, REGIONAL and ENTERPRISE. + The default is NFSv3. + Default value is NFS_V3. + Possible values are: NFS_V3, NFS_V4_1. + type: string tier: description: |- The service tier of the instance. @@ -1556,6 +1658,13 @@ spec: createTime: description: Creation timestamp in RFC3339 text format. type: string + deletionProtectionEnabled: + description: Indicates whether the instance is protected against + deletion. + type: boolean + deletionProtectionReason: + description: The reason for enabling deletion protection. + type: string description: description: A description of the instance. type: string @@ -1690,11 +1799,55 @@ spec: type: string type: object type: array + performanceConfig: + description: |- + Performance configuration for the instance. If not provided, + the default performance settings will be used. + Structure is documented below. + properties: + fixedIops: + description: |- + The instance will have a fixed provisioned IOPS value, + which will remain constant regardless of instance + capacity. + Structure is documented below. + properties: + maxIops: + description: |- + The number of IOPS to provision for the instance. + max_iops must be in multiple of 1000. + type: number + type: object + iopsPerTb: + description: |- + The instance provisioned IOPS will change dynamically + based on the capacity of the instance. + Structure is documented below. + properties: + maxIopsPerTb: + description: |- + The instance max IOPS will be calculated by multiplying + the capacity of the instance (TB) by max_iops_per_tb, + and rounding to the nearest 1000. 
The instance max IOPS + will be changed dynamically based on the instance + capacity. + type: number + type: object + type: object project: description: |- The ID of the project in which the resource belongs. If it is not provided, the provider project is used. type: string + protocol: + description: |- + Either NFSv3, for using NFS version 3 as file sharing protocol, + or NFSv4.1, for using NFS version 4.1 as file sharing protocol. + NFSv4.1 can be used with HIGH_SCALE_SSD, ZONAL, REGIONAL and ENTERPRISE. + The default is NFSv3. + Default value is NFS_V3. + Possible values are: NFS_V3, NFS_V4_1. + type: string terraformLabels: additionalProperties: type: string diff --git a/package/crds/gkehub.gcp.upbound.io_memberships.yaml b/package/crds/gkehub.gcp.upbound.io_memberships.yaml index 05625926e..c8866b4a0 100644 --- a/package/crds/gkehub.gcp.upbound.io_memberships.yaml +++ b/package/crds/gkehub.gcp.upbound.io_memberships.yaml @@ -716,7 +716,7 @@ spec: issuer: description: |- A JSON Web Token (JWT) issuer URI. issuer must start with https:// and // be a valid - with length <2000 characters. For example: https://container.googleapis.com/v1/projects/my-project/locations/us-west1/clusters/my-cluster (must be locations rather than zones).googleapis.com/v1/${google_container_cluster.my-cluster.id}". + with length <2000 characters. For example: https://container.googleapis.com/v1/projects/my-project/locations/us-west1/clusters/my-cluster.googleapis.com/v1/${google_container_cluster.my-cluster.id}". type: string type: object endpoint: @@ -732,7 +732,7 @@ spec: resourceLink: description: |- Self-link of the GCP resource for the GKE cluster. - For example: //container.googleapis.com/projects/my-project/zones/us-west1-a/clusters/my-cluster. + For example: //container.googleapis.com/projects/my-project/locations/us-west1-a/clusters/my-cluster. 
It can be at the most 1000 characters in length.googleapis.com/${google_container_cluster.my-cluster.id}" or google_container_cluster.my-cluster.id. type: string @@ -854,7 +854,7 @@ spec: issuer: description: |- A JSON Web Token (JWT) issuer URI. issuer must start with https:// and // be a valid - with length <2000 characters. For example: https://container.googleapis.com/v1/projects/my-project/locations/us-west1/clusters/my-cluster (must be locations rather than zones).googleapis.com/v1/${google_container_cluster.my-cluster.id}". + with length <2000 characters. For example: https://container.googleapis.com/v1/projects/my-project/locations/us-west1/clusters/my-cluster.googleapis.com/v1/${google_container_cluster.my-cluster.id}". type: string type: object endpoint: @@ -870,7 +870,7 @@ spec: resourceLink: description: |- Self-link of the GCP resource for the GKE cluster. - For example: //container.googleapis.com/projects/my-project/zones/us-west1-a/clusters/my-cluster. + For example: //container.googleapis.com/projects/my-project/locations/us-west1-a/clusters/my-cluster. It can be at the most 1000 characters in length.googleapis.com/${google_container_cluster.my-cluster.id}" or google_container_cluster.my-cluster.id. type: string @@ -1146,7 +1146,7 @@ spec: issuer: description: |- A JSON Web Token (JWT) issuer URI. issuer must start with https:// and // be a valid - with length <2000 characters. For example: https://container.googleapis.com/v1/projects/my-project/locations/us-west1/clusters/my-cluster (must be locations rather than zones).googleapis.com/v1/${google_container_cluster.my-cluster.id}". + with length <2000 characters. For example: https://container.googleapis.com/v1/projects/my-project/locations/us-west1/clusters/my-cluster.googleapis.com/v1/${google_container_cluster.my-cluster.id}". type: string type: object effectiveLabels: @@ -1168,7 +1168,7 @@ spec: resourceLink: description: |- Self-link of the GCP resource for the GKE cluster. 
- For example: //container.googleapis.com/projects/my-project/zones/us-west1-a/clusters/my-cluster. + For example: //container.googleapis.com/projects/my-project/locations/us-west1-a/clusters/my-cluster. It can be at the most 1000 characters in length.googleapis.com/${google_container_cluster.my-cluster.id}" or google_container_cluster.my-cluster.id. type: string diff --git a/package/crds/healthcare.gcp.upbound.io_datasets.yaml b/package/crds/healthcare.gcp.upbound.io_datasets.yaml index 0aa96876a..9a983081c 100644 --- a/package/crds/healthcare.gcp.upbound.io_datasets.yaml +++ b/package/crds/healthcare.gcp.upbound.io_datasets.yaml @@ -74,7 +74,7 @@ spec: properties: encryptionSpec: description: |- - A nested object resource + A nested object resource. Structure is documented below. properties: kmsKeyName: @@ -192,7 +192,7 @@ spec: properties: encryptionSpec: description: |- - A nested object resource + A nested object resource. Structure is documented below. properties: kmsKeyName: @@ -478,7 +478,7 @@ spec: properties: encryptionSpec: description: |- - A nested object resource + A nested object resource. Structure is documented below. properties: kmsKeyName: diff --git a/package/crds/iam.gcp.upbound.io_workloadidentitypoolproviders.yaml b/package/crds/iam.gcp.upbound.io_workloadidentitypoolproviders.yaml index 7d1c7f0d5..0f051aab6 100644 --- a/package/crds/iam.gcp.upbound.io_workloadidentitypoolproviders.yaml +++ b/package/crds/iam.gcp.upbound.io_workloadidentitypoolproviders.yaml @@ -919,6 +919,53 @@ spec: type: string type: object type: object + x509: + description: |- + An X.509-type identity provider represents a CA. It is trusted to assert a + client identity if the client has a certificate that chains up to this CA. + Structure is documented below. 
+ properties: + trustStore: + description: |- + A Trust store, use this trust store as a wrapper to config the trust + anchor and optional intermediate cas to help build the trust chain for + the incoming end entity certificate. Follow the x509 guidelines to + define those PEM encoded certs. Only 1 trust store is currently + supported. + Structure is documented below. + properties: + intermediateCas: + description: |- + Set of intermediate CA certificates used for building the trust chain to + trust anchor. + IMPORTANT: Intermediate CAs are only supported when configuring x509 federation. + Structure is documented below. + items: + properties: + pemCertificate: + description: |- + PEM certificate of the PKI used for validation. Must only contain one + ca certificate(either root or intermediate cert). + type: string + type: object + type: array + trustAnchors: + description: |- + List of Trust Anchors to be used while performing validation + against a given TrustStore. The incoming end entity's certificate + must be chained up to one of the trust anchors here. + Structure is documented below. + items: + properties: + pemCertificate: + description: |- + PEM certificate of the PKI used for validation. Must only contain one + ca certificate(either root or intermediate cert). + type: string + type: object + type: array + type: object + type: object type: object initProvider: description: |- @@ -1018,6 +1065,53 @@ spec: xml doc. type: string type: object + x509: + description: |- + An X.509-type identity provider represents a CA. It is trusted to assert a + client identity if the client has a certificate that chains up to this CA. + Structure is documented below. + properties: + trustStore: + description: |- + A Trust store, use this trust store as a wrapper to config the trust + anchor and optional intermediate cas to help build the trust chain for + the incoming end entity certificate. Follow the x509 guidelines to + define those PEM encoded certs. 
Only 1 trust store is currently + supported. + Structure is documented below. + properties: + intermediateCas: + description: |- + Set of intermediate CA certificates used for building the trust chain to + trust anchor. + IMPORTANT: Intermediate CAs are only supported when configuring x509 federation. + Structure is documented below. + items: + properties: + pemCertificate: + description: |- + PEM certificate of the PKI used for validation. Must only contain one + ca certificate(either root or intermediate cert). + type: string + type: object + type: array + trustAnchors: + description: |- + List of Trust Anchors to be used while performing validation + against a given TrustStore. The incoming end entity's certificate + must be chained up to one of the trust anchors here. + Structure is documented below. + items: + properties: + pemCertificate: + description: |- + PEM certificate of the PKI used for validation. Must only contain one + ca certificate(either root or intermediate cert). + type: string + type: object + type: array + type: object + type: object type: object managementPolicies: default: @@ -1294,6 +1388,53 @@ spec: value should be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix gcp- is reserved for use by Google, and may not be specified. type: string + x509: + description: |- + An X.509-type identity provider represents a CA. It is trusted to assert a + client identity if the client has a certificate that chains up to this CA. + Structure is documented below. + properties: + trustStore: + description: |- + A Trust store, use this trust store as a wrapper to config the trust + anchor and optional intermediate cas to help build the trust chain for + the incoming end entity certificate. Follow the x509 guidelines to + define those PEM encoded certs. Only 1 trust store is currently + supported. + Structure is documented below. 
+ properties: + intermediateCas: + description: |- + Set of intermediate CA certificates used for building the trust chain to + trust anchor. + IMPORTANT: Intermediate CAs are only supported when configuring x509 federation. + Structure is documented below. + items: + properties: + pemCertificate: + description: |- + PEM certificate of the PKI used for validation. Must only contain one + ca certificate(either root or intermediate cert). + type: string + type: object + type: array + trustAnchors: + description: |- + List of Trust Anchors to be used while performing validation + against a given TrustStore. The incoming end entity's certificate + must be chained up to one of the trust anchors here. + Structure is documented below. + items: + properties: + pemCertificate: + description: |- + PEM certificate of the PKI used for validation. Must only contain one + ca certificate(either root or intermediate cert). + type: string + type: object + type: array + type: object + type: object type: object conditions: description: Conditions of the resource. 
diff --git a/package/crds/identityplatform.gcp.upbound.io_configs.yaml b/package/crds/identityplatform.gcp.upbound.io_configs.yaml new file mode 100644 index 000000000..688109ada --- /dev/null +++ b/package/crds/identityplatform.gcp.upbound.io_configs.yaml @@ -0,0 +1,1297 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: configs.identityplatform.gcp.upbound.io +spec: + group: identityplatform.gcp.upbound.io + names: + categories: + - crossplane + - managed + - gcp + kind: Config + listKind: ConfigList + plural: configs + singular: config + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: Config is the Schema for the Configs API. Identity Platform configuration + for a Cloud project. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ConfigSpec defines the desired state of Config + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + authorizedDomains: + description: List of domains authorized for OAuth redirects. + items: + type: string + type: array + autodeleteAnonymousUsers: + description: Whether anonymous users will be auto-deleted after + a period of 30 days + type: boolean + blockingFunctions: + description: |- + Configuration related to blocking functions. + Structure is documented below. + properties: + forwardInboundCredentials: + description: |- + The user credentials to include in the JWT payload that is sent to the registered Blocking Functions. + Structure is documented below. + properties: + accessToken: + description: Whether to pass the user's OAuth identity + provider's access token. + type: boolean + idToken: + description: Whether to pass the user's OIDC identity + provider's ID token. + type: boolean + refreshToken: + description: Whether to pass the user's OAuth identity + provider's refresh token. + type: boolean + type: object + triggers: + description: |- + Map of Trigger to event type. 
Key should be one of the supported event types: "beforeCreate", "beforeSignIn". + Structure is documented below. + items: + properties: + eventType: + description: The identifier for this object. Format + specified above. + type: string + functionUri: + description: HTTP URI trigger for the Cloud Function. + type: string + type: object + type: array + type: object + client: + description: |- + Options related to how clients making requests on behalf of a project should be configured. + Structure is documented below. + properties: + permissions: + description: |- + Configuration related to restricting a user's ability to affect their account. + Structure is documented below. + properties: + disabledUserDeletion: + description: When true, end users cannot delete their + account on the associated project through any of our + API methods + type: boolean + disabledUserSignup: + description: When true, end users cannot sign up for a + new account on the associated project through any of + our API methods + type: boolean + type: object + type: object + mfa: + description: |- + Options related to how clients making requests on behalf of a project should be configured. + Structure is documented below. + properties: + enabledProviders: + description: |- + A list of usable second factors for this project. + Each value may be one of: PHONE_SMS. + items: + type: string + type: array + providerConfigs: + description: |- + A list of usable second factors for this project along with their configurations. + This field does not support phone based MFA, for that use the 'enabledProviders' field. + Structure is documented below. + items: + properties: + state: + description: |- + Whether MultiFactor Authentication has been enabled for this project. + Possible values are: DISABLED, ENABLED, MANDATORY. + type: string + totpProviderConfig: + description: |- + TOTP MFA provider config for this project. + Structure is documented below. 
+ properties: + adjacentIntervals: + description: The allowed number of adjacent intervals + that will be used for verification to avoid clock + skew. + type: number + type: object + type: object + type: array + state: + description: |- + Whether MultiFactor Authentication has been enabled for this project. + Possible values are: DISABLED, ENABLED, MANDATORY. + type: string + type: object + monitoring: + description: |- + Configuration related to monitoring project activity. + Structure is documented below. + properties: + requestLogging: + description: |- + Configuration for logging requests made to this project to Stackdriver Logging + Structure is documented below. + properties: + enabled: + description: Whether logging is enabled for this project + or not. + type: boolean + type: object + type: object + multiTenant: + description: |- + Configuration related to multi-tenant functionality. + Structure is documented below. + properties: + allowTenants: + description: Whether this project can have tenants or not. + type: boolean + defaultTenantLocation: + description: |- + The default cloud parent org or folder that the tenant project should be created under. + The parent resource name should be in the format of "/", such as "folders/123" or "organizations/456". + If the value is not set, the tenant will be created under the same organization or folder as the agent project. + type: string + type: object + project: + description: |- + The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + type: string + projectRef: + description: Reference to a Project in cloudplatform to populate + project. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + projectSelector: + description: Selector for a Project in cloudplatform to populate + project. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + quota: + description: |- + Configuration related to quotas. + Structure is documented below. 
+ properties: + signUpQuotaConfig: + description: |- + Quota for the Signup endpoint, if overwritten. Signup quota is measured in sign ups per project per hour per IP. None of quota, startTime, or quotaDuration can be skipped. + Structure is documented below. + properties: + quota: + description: A sign up APIs quota that customers can override + temporarily. Value can be in between 1 and 1000. + type: number + quotaDuration: + description: 'How long this quota will be active for. + It is measured in seconds, e.g., Example: "9.615s".' + type: string + startTime: + description: When this quota will take effect. + type: string + type: object + type: object + signIn: + description: |- + Configuration related to local sign in methods. + Structure is documented below. + properties: + allowDuplicateEmails: + description: Whether to allow more than one account to have + the same email. + type: boolean + anonymous: + description: |- + Configuration options related to authenticating an anonymous user. + Structure is documented below. + properties: + enabled: + description: Whether phone number auth is enabled for + the project or not. + type: boolean + type: object + email: + description: |- + Configuration options related to authenticating a user by their email address. + Structure is documented below. + properties: + enabled: + description: Whether phone number auth is enabled for + the project or not. + type: boolean + passwordRequired: + description: |- + Whether a password is required for email auth or not. If true, both an email and + password must be provided to sign in. If false, a user may sign in via either + email/password or email link. + type: boolean + type: object + phoneNumber: + description: |- + Configuration options related to authenticating a user by their phone number. + Structure is documented below. + properties: + enabled: + description: Whether phone number auth is enabled for + the project or not.
+ type: boolean + testPhoneNumbers: + additionalProperties: + type: string + description: A map of test phone numbers that + can be used for phone auth testing. + type: object + x-kubernetes-map-type: granular + type: object + type: object + smsRegionConfig: + description: |- + Configures the regions where users are allowed to send verification SMS for the project or tenant. This is based on the calling code of the destination phone number. + Structure is documented below. + properties: + allowByDefault: + description: |- + A policy of allowing SMS to every region by default and adding disallowed regions to a disallow list. + Structure is documented below. + properties: + disallowedRegions: + description: 'Two letter unicode region codes to disallow + as defined by https://cldr.unicode.org/ The full list + of these region codes is here: https://github.com/unicode-cldr/cldr-localenames-full/blob/master/main/en/territories.json' + items: + type: string + type: array + type: object + allowlistOnly: + description: |- + A policy of only allowing regions by explicitly adding them to an allowlist. + Structure is documented below. + properties: + allowedRegions: + description: 'Two letter unicode region codes to allow + as defined by https://cldr.unicode.org/ The full list + of these region codes is here: https://github.com/unicode-cldr/cldr-localenames-full/blob/master/main/en/territories.json' + items: + type: string + type: array + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation.
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because an external controller is managing them, like an + autoscaler. + properties: + authorizedDomains: + description: List of domains authorized for OAuth redirects. + items: + type: string + type: array + autodeleteAnonymousUsers: + description: Whether anonymous users will be auto-deleted after + a period of 30 days + type: boolean + blockingFunctions: + description: |- + Configuration related to blocking functions. + Structure is documented below. + properties: + forwardInboundCredentials: + description: |- + The user credentials to include in the JWT payload that is sent to the registered Blocking Functions. + Structure is documented below. + properties: + accessToken: + description: Whether to pass the user's OAuth identity + provider's access token. + type: boolean + idToken: + description: Whether to pass the user's OIDC identity + provider's ID token. + type: boolean + refreshToken: + description: Whether to pass the user's OAuth identity + provider's refresh token. + type: boolean + type: object + triggers: + description: |- + Map of Trigger to event type. Key should be one of the supported event types: "beforeCreate", "beforeSignIn". + Structure is documented below. + items: + properties: + eventType: + description: The identifier for this object. Format + specified above. + type: string + functionUri: + description: HTTP URI trigger for the Cloud Function. + type: string + type: object + type: array + type: object + client: + description: |- + Options related to how clients making requests on behalf of a project should be configured. + Structure is documented below. + properties: + permissions: + description: |- + Configuration related to restricting a user's ability to affect their account. + Structure is documented below.
+ properties: + disabledUserDeletion: + description: When true, end users cannot delete their + account on the associated project through any of our + API methods + type: boolean + disabledUserSignup: + description: When true, end users cannot sign up for a + new account on the associated project through any of + our API methods + type: boolean + type: object + type: object + mfa: + description: |- + Options related to how clients making requests on behalf of a project should be configured. + Structure is documented below. + properties: + enabledProviders: + description: |- + A list of usable second factors for this project. + Each value may be one of: PHONE_SMS. + items: + type: string + type: array + providerConfigs: + description: |- + A list of usable second factors for this project along with their configurations. + This field does not support phone based MFA, for that use the 'enabledProviders' field. + Structure is documented below. + items: + properties: + state: + description: |- + Whether MultiFactor Authentication has been enabled for this project. + Possible values are: DISABLED, ENABLED, MANDATORY. + type: string + totpProviderConfig: + description: |- + TOTP MFA provider config for this project. + Structure is documented below. + properties: + adjacentIntervals: + description: The allowed number of adjacent intervals + that will be used for verification to avoid clock + skew. + type: number + type: object + type: object + type: array + state: + description: |- + Whether MultiFactor Authentication has been enabled for this project. + Possible values are: DISABLED, ENABLED, MANDATORY. + type: string + type: object + monitoring: + description: |- + Configuration related to monitoring project activity. + Structure is documented below. + properties: + requestLogging: + description: |- + Configuration for logging requests made to this project to Stackdriver Logging + Structure is documented below. 
+ properties: + enabled: + description: Whether logging is enabled for this project + or not. + type: boolean + type: object + type: object + multiTenant: + description: |- + Configuration related to multi-tenant functionality. + Structure is documented below. + properties: + allowTenants: + description: Whether this project can have tenants or not. + type: boolean + defaultTenantLocation: + description: |- + The default cloud parent org or folder that the tenant project should be created under. + The parent resource name should be in the format of "/", such as "folders/123" or "organizations/456". + If the value is not set, the tenant will be created under the same organization or folder as the agent project. + type: string + type: object + project: + description: |- + The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + type: string + projectRef: + description: Reference to a Project in cloudplatform to populate + project. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + projectSelector: + description: Selector for a Project in cloudplatform to populate + project. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + quota: + description: |- + Configuration related to quotas. + Structure is documented below. + properties: + signUpQuotaConfig: + description: |- + Quota for the Signup endpoint, if overwritten. Signup quota is measured in sign ups per project per hour per IP. None of quota, startTime, or quotaDuration can be skipped. + Structure is documented below. + properties: + quota: + description: A sign up APIs quota that customers can override + temporarily. Value can be in between 1 and 1000. + type: number + quotaDuration: + description: 'How long this quota will be active for. + It is measured in seconds, e.g., Example: "9.615s".' + type: string + startTime: + description: When this quota will take effect. + type: string + type: object + type: object + signIn: + description: |- + Configuration related to local sign in methods. + Structure is documented below.
+ properties: + allowDuplicateEmails: + description: Whether to allow more than one account to have + the same email. + type: boolean + anonymous: + description: |- + Configuration options related to authenticating an anonymous user. + Structure is documented below. + properties: + enabled: + description: Whether phone number auth is enabled for + the project or not. + type: boolean + type: object + email: + description: |- + Configuration options related to authenticating a user by their email address. + Structure is documented below. + properties: + enabled: + description: Whether phone number auth is enabled for + the project or not. + type: boolean + passwordRequired: + description: |- + Whether a password is required for email auth or not. If true, both an email and + password must be provided to sign in. If false, a user may sign in via either + email/password or email link. + type: boolean + type: object + phoneNumber: + description: |- + Configuration options related to authenticating a user by their phone number. + Structure is documented below. + properties: + enabled: + description: Whether phone number auth is enabled for + the project or not. + type: boolean + testPhoneNumbers: + additionalProperties: + type: string + description: A map of test phone numbers that + can be used for phone auth testing. + type: object + x-kubernetes-map-type: granular + type: object + type: object + smsRegionConfig: + description: |- + Configures the regions where users are allowed to send verification SMS for the project or tenant. This is based on the calling code of the destination phone number. + Structure is documented below. + properties: + allowByDefault: + description: |- + A policy of allowing SMS to every region by default and adding disallowed regions to a disallow list. + Structure is documented below.
+ properties: + disallowedRegions: + description: 'Two letter unicode region codes to disallow + as defined by https://cldr.unicode.org/ The full list + of these region codes is here: https://github.com/unicode-cldr/cldr-localenames-full/blob/master/main/en/territories.json' + items: + type: string + type: array + type: object + allowlistOnly: + description: |- + A policy of only allowing regions by explicitly adding them to an allowlist. + Structure is documented below. + properties: + allowedRegions: + description: 'Two letter unicode region codes to allow + as defined by https://cldr.unicode.org/ The full list + of these region codes is here: https://github.com/unicode-cldr/cldr-localenames-full/blob/master/main/en/territories.json' + items: + type: string + type: array + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: ConfigStatus defines the observed state of Config. + properties: + atProvider: + properties: + authorizedDomains: + description: List of domains authorized for OAuth redirects. + items: + type: string + type: array + autodeleteAnonymousUsers: + description: Whether anonymous users will be auto-deleted after + a period of 30 days + type: boolean + blockingFunctions: + description: |- + Configuration related to blocking functions. + Structure is documented below. + properties: + forwardInboundCredentials: + description: |- + The user credentials to include in the JWT payload that is sent to the registered Blocking Functions. + Structure is documented below. + properties: + accessToken: + description: Whether to pass the user's OAuth identity + provider's access token. + type: boolean + idToken: + description: Whether to pass the user's OIDC identity + provider's ID token. + type: boolean + refreshToken: + description: Whether to pass the user's OAuth identity + provider's refresh token. + type: boolean + type: object + triggers: + description: |- + Map of Trigger to event type. Key should be one of the supported event types: "beforeCreate", "beforeSignIn". + Structure is documented below. + items: + properties: + eventType: + description: The identifier for this object. Format + specified above. 
+ type: string + functionUri: + description: HTTP URI trigger for the Cloud Function. + type: string + updateTime: + description: |- + (Output) + When the trigger was changed. + type: string + type: object + type: array + type: object + client: + description: |- + Options related to how clients making requests on behalf of a project should be configured. + Structure is documented below. + properties: + firebaseSubdomain: + description: |- + (Output) + Firebase subdomain. + type: string + permissions: + description: |- + Configuration related to restricting a user's ability to affect their account. + Structure is documented below. + properties: + disabledUserDeletion: + description: When true, end users cannot delete their + account on the associated project through any of our + API methods + type: boolean + disabledUserSignup: + description: When true, end users cannot sign up for a + new account on the associated project through any of + our API methods + type: boolean + type: object + type: object + id: + description: an identifier for the resource with format projects/{{project}}/config + type: string + mfa: + description: |- + Options related to how clients making requests on behalf of a project should be configured. + Structure is documented below. + properties: + enabledProviders: + description: |- + A list of usable second factors for this project. + Each value may be one of: PHONE_SMS. + items: + type: string + type: array + providerConfigs: + description: |- + A list of usable second factors for this project along with their configurations. + This field does not support phone based MFA, for that use the 'enabledProviders' field. + Structure is documented below. + items: + properties: + state: + description: |- + Whether MultiFactor Authentication has been enabled for this project. + Possible values are: DISABLED, ENABLED, MANDATORY. + type: string + totpProviderConfig: + description: |- + TOTP MFA provider config for this project. 
+ Structure is documented below. + properties: + adjacentIntervals: + description: The allowed number of adjacent intervals + that will be used for verification to avoid clock + skew. + type: number + type: object + type: object + type: array + state: + description: |- + Whether MultiFactor Authentication has been enabled for this project. + Possible values are: DISABLED, ENABLED, MANDATORY. + type: string + type: object + monitoring: + description: |- + Configuration related to monitoring project activity. + Structure is documented below. + properties: + requestLogging: + description: |- + Configuration for logging requests made to this project to Stackdriver Logging + Structure is documented below. + properties: + enabled: + description: Whether logging is enabled for this project + or not. + type: boolean + type: object + type: object + multiTenant: + description: |- + Configuration related to multi-tenant functionality. + Structure is documented below. + properties: + allowTenants: + description: Whether this project can have tenants or not. + type: boolean + defaultTenantLocation: + description: |- + The default cloud parent org or folder that the tenant project should be created under. + The parent resource name should be in the format of "/", such as "folders/123" or "organizations/456". + If the value is not set, the tenant will be created under the same organization or folder as the agent project. + type: string + type: object + name: + description: The name of the Config resource + type: string + project: + description: |- + The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + type: string + quota: + description: |- + Configuration related to quotas. + Structure is documented below. + properties: + signUpQuotaConfig: + description: |- + Quota for the Signup endpoint, if overwritten. Signup quota is measured in sign ups per project per hour per IP. 
None of quota, startTime, or quotaDuration can be skipped. + Structure is documented below. + properties: + quota: + description: A sign up APIs quota that customers can override + temporarily. Value can be in between 1 and 1000. + type: number + quotaDuration: + description: 'How long this quota will be active for. + It is measured in seconds, e.g., Example: "9.615s".' + type: string + startTime: + description: When this quota will take effect. + type: string + type: object + type: object + signIn: + description: |- + Configuration related to local sign in methods. + Structure is documented below. + properties: + allowDuplicateEmails: + description: Whether to allow more than one account to have + the same email. + type: boolean + anonymous: + description: |- + Configuration options related to authenticating an anonymous user. + Structure is documented below. + properties: + enabled: + description: Whether phone number auth is enabled for + the project or not. + type: boolean + type: object + email: + description: |- + Configuration options related to authenticating a user by their email address. + Structure is documented below. + properties: + enabled: + description: Whether phone number auth is enabled for + the project or not. + type: boolean + passwordRequired: + description: |- + Whether a password is required for email auth or not. If true, both an email and + password must be provided to sign in. If false, a user may sign in via either + email/password or email link. + type: boolean + type: object + hashConfig: + description: |- + (Output) + Output only. Hash config information. + Structure is documented below. + items: + properties: + algorithm: + description: |- + (Output) + Different password hash algorithms used in Identity Toolkit. + type: string + memoryCost: + description: |- + (Output) + Memory cost for hash calculation. Used by scrypt and other similar password derivation algorithms.
See https://tools.ietf.org/html/rfc7914 for explanation of field. + type: number + rounds: + description: |- + (Output) + How many rounds for hash calculation. Used by scrypt and other similar password derivation algorithms. + type: number + saltSeparator: + description: |- + (Output) + Non-printable character to be inserted between the salt and plain text password in base64. + type: string + signerKey: + description: |- + (Output) + Signer key in base64. + type: string + type: object + type: array + phoneNumber: + description: |- + Configuration options related to authenticating a user by their phone number. + Structure is documented below. + properties: + enabled: + description: Whether phone number auth is enabled for + the project or not. + type: boolean + testPhoneNumbers: + additionalProperties: + type: string + description: A map of test phone numbers that + can be used for phone auth testing. + type: object + x-kubernetes-map-type: granular + type: object + type: object + smsRegionConfig: + description: |- + Configures the regions where users are allowed to send verification SMS for the project or tenant. This is based on the calling code of the destination phone number. + Structure is documented below. + properties: + allowByDefault: + description: |- + A policy of allowing SMS to every region by default and adding disallowed regions to a disallow list. + Structure is documented below. + properties: + disallowedRegions: + description: 'Two letter unicode region codes to disallow + as defined by https://cldr.unicode.org/ The full list + of these region codes is here: https://github.com/unicode-cldr/cldr-localenames-full/blob/master/main/en/territories.json' + items: + type: string + type: array + type: object + allowlistOnly: + description: |- + A policy of only allowing regions by explicitly adding them to an allowlist. + Structure is documented below.
+ properties: + allowedRegions: + description: 'Two letter unicode region codes to allow + as defined by https://cldr.unicode.org/ The full list + of these region codes is here: https://github.com/unicode-cldr/cldr-localenames-full/blob/master/main/en/territories.json' + items: + type: string + type: array + type: object + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/identityplatform.gcp.upbound.io_projectdefaultconfigs.yaml b/package/crds/identityplatform.gcp.upbound.io_projectdefaultconfigs.yaml deleted file mode 100644 index cd2d50cf3..000000000 --- a/package/crds/identityplatform.gcp.upbound.io_projectdefaultconfigs.yaml +++ /dev/null @@ -1,1082 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.14.0 - name: projectdefaultconfigs.identityplatform.gcp.upbound.io -spec: - group: identityplatform.gcp.upbound.io - names: - categories: - - crossplane - - managed - - gcp - kind: ProjectDefaultConfig - listKind: ProjectDefaultConfigList - plural: projectdefaultconfigs - singular: projectdefaultconfig - scope: Cluster - versions: - - additionalPrinterColumns: - - jsonPath: .status.conditions[?(@.type=='Synced')].status - name: SYNCED - type: string - - jsonPath: .status.conditions[?(@.type=='Ready')].status - name: READY - type: string - - jsonPath: .metadata.annotations.crossplane\.io/external-name - name: EXTERNAL-NAME - type: string - - jsonPath: .metadata.creationTimestamp - name: AGE - type: date - name: v1beta1 - schema: - openAPIV3Schema: - description: ProjectDefaultConfig is the Schema for the ProjectDefaultConfigs - API. There is no persistent data associated with this resource. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. 
- Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: ProjectDefaultConfigSpec defines the desired state of ProjectDefaultConfig - properties: - deletionPolicy: - default: Delete - description: |- - DeletionPolicy specifies what will happen to the underlying external - when this managed resource is deleted - either "Delete" or "Orphan" the - external resource. - This field is planned to be deprecated in favor of the ManagementPolicies - field in a future release. Currently, both could be set independently and - non-default values would be honored if the feature flag is enabled. - See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 - enum: - - Orphan - - Delete - type: string - forProvider: - properties: - project: - description: |- - The ID of the project in which the resource belongs. - If it is not provided, the provider project is used. - type: string - signIn: - description: |- - Configuration related to local sign in methods. - Structure is documented below. - items: - properties: - allowDuplicateEmails: - description: Whether to allow more than one account to have - the same email. - type: boolean - anonymous: - description: |- - Configuration options related to authenticating an anonymous user. - Structure is documented below. - items: - properties: - enabled: - description: Whether phone number auth is enabled - for the project or not. - type: boolean - type: object - type: array - email: - description: |- - Configuration options related to authenticating a user by their email address. - Structure is documented below. 
- items: - properties: - enabled: - description: Whether phone number auth is enabled - for the project or not. - type: boolean - passwordRequired: - description: |- - Whether a password is required for email auth or not. If true, both an email and - password must be provided to sign in. If false, a user may sign in via either - email/password or email link. - type: boolean - type: object - type: array - phoneNumber: - description: |- - Configuration options related to authenticated a user by their phone number. - Structure is documented below. - items: - properties: - enabled: - description: Whether phone number auth is enabled - for the project or not. - type: boolean - testPhoneNumbers: - additionalProperties: - type: string - description: A map of - that can be used for phone auth testing. - type: object - x-kubernetes-map-type: granular - type: object - type: array - type: object - type: array - type: object - initProvider: - description: |- - THIS IS A BETA FIELD. It will be honored - unless the Management Policies feature flag is disabled. - InitProvider holds the same fields as ForProvider, with the exception - of Identifier and other resource reference fields. The fields that are - in InitProvider are merged into ForProvider when the resource is created. - The same fields are also added to the terraform ignore_changes hook, to - avoid updating them after creation. This is useful for fields that are - required on creation, but we do not desire to update them after creation, - for example because of an external controller is managing them, like an - autoscaler. - properties: - project: - description: |- - The ID of the project in which the resource belongs. - If it is not provided, the provider project is used. - type: string - signIn: - description: |- - Configuration related to local sign in methods. - Structure is documented below. 
- items: - properties: - allowDuplicateEmails: - description: Whether to allow more than one account to have - the same email. - type: boolean - anonymous: - description: |- - Configuration options related to authenticating an anonymous user. - Structure is documented below. - items: - properties: - enabled: - description: Whether phone number auth is enabled - for the project or not. - type: boolean - type: object - type: array - email: - description: |- - Configuration options related to authenticating a user by their email address. - Structure is documented below. - items: - properties: - enabled: - description: Whether phone number auth is enabled - for the project or not. - type: boolean - passwordRequired: - description: |- - Whether a password is required for email auth or not. If true, both an email and - password must be provided to sign in. If false, a user may sign in via either - email/password or email link. - type: boolean - type: object - type: array - phoneNumber: - description: |- - Configuration options related to authenticated a user by their phone number. - Structure is documented below. - items: - properties: - enabled: - description: Whether phone number auth is enabled - for the project or not. - type: boolean - testPhoneNumbers: - additionalProperties: - type: string - description: A map of - that can be used for phone auth testing. - type: object - x-kubernetes-map-type: granular - type: object - type: array - type: object - type: array - type: object - managementPolicies: - default: - - '*' - description: |- - THIS IS A BETA FIELD. It is on by default but can be opted out - through a Crossplane feature flag. - ManagementPolicies specify the array of actions Crossplane is allowed to - take on the managed and external resources. - This field is planned to replace the DeletionPolicy field in a future - release. Currently, both could be set independently and non-default - values would be honored if the feature flag is enabled. 
If both are - custom, the DeletionPolicy field will be ignored. - See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 - and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md - items: - description: |- - A ManagementAction represents an action that the Crossplane controllers - can take on an external resource. - enum: - - Observe - - Create - - Update - - Delete - - LateInitialize - - '*' - type: string - type: array - providerConfigRef: - default: - name: default - description: |- - ProviderConfigReference specifies how the provider that will be used to - create, observe, update, and delete this managed resource should be - configured. - properties: - name: - description: Name of the referenced object. - type: string - policy: - description: Policies for referencing. - properties: - resolution: - default: Required - description: |- - Resolution specifies whether resolution of this reference is required. - The default is 'Required', which means the reconcile will fail if the - reference cannot be resolved. 'Optional' means this reference will be - a no-op if it cannot be resolved. - enum: - - Required - - Optional - type: string - resolve: - description: |- - Resolve specifies when this reference should be resolved. The default - is 'IfNotPresent', which will attempt to resolve the reference only when - the corresponding field is not present. Use 'Always' to resolve the - reference on every reconcile. 
- enum: - - Always - - IfNotPresent - type: string - type: object - required: - - name - type: object - publishConnectionDetailsTo: - description: |- - PublishConnectionDetailsTo specifies the connection secret config which - contains a name, metadata and a reference to secret store config to - which any connection details for this managed resource should be written. - Connection details frequently include the endpoint, username, - and password required to connect to the managed resource. - properties: - configRef: - default: - name: default - description: |- - SecretStoreConfigRef specifies which secret store config should be used - for this ConnectionSecret. - properties: - name: - description: Name of the referenced object. - type: string - policy: - description: Policies for referencing. - properties: - resolution: - default: Required - description: |- - Resolution specifies whether resolution of this reference is required. - The default is 'Required', which means the reconcile will fail if the - reference cannot be resolved. 'Optional' means this reference will be - a no-op if it cannot be resolved. - enum: - - Required - - Optional - type: string - resolve: - description: |- - Resolve specifies when this reference should be resolved. The default - is 'IfNotPresent', which will attempt to resolve the reference only when - the corresponding field is not present. Use 'Always' to resolve the - reference on every reconcile. - enum: - - Always - - IfNotPresent - type: string - type: object - required: - - name - type: object - metadata: - description: Metadata is the metadata for connection secret. - properties: - annotations: - additionalProperties: - type: string - description: |- - Annotations are the annotations to be added to connection secret. - - For Kubernetes secrets, this will be used as "metadata.annotations". - - It is up to Secret Store implementation for others store types. 
- type: object - labels: - additionalProperties: - type: string - description: |- - Labels are the labels/tags to be added to connection secret. - - For Kubernetes secrets, this will be used as "metadata.labels". - - It is up to Secret Store implementation for others store types. - type: object - type: - description: |- - Type is the SecretType for the connection secret. - - Only valid for Kubernetes Secret Stores. - type: string - type: object - name: - description: Name is the name of the connection secret. - type: string - required: - - name - type: object - writeConnectionSecretToRef: - description: |- - WriteConnectionSecretToReference specifies the namespace and name of a - Secret to which any connection details for this managed resource should - be written. Connection details frequently include the endpoint, username, - and password required to connect to the managed resource. - This field is planned to be replaced in a future release in favor of - PublishConnectionDetailsTo. Currently, both could be set independently - and connection details would be published to both without affecting - each other. - properties: - name: - description: Name of the secret. - type: string - namespace: - description: Namespace of the secret. - type: string - required: - - name - - namespace - type: object - required: - - forProvider - type: object - status: - description: ProjectDefaultConfigStatus defines the observed state of - ProjectDefaultConfig. - properties: - atProvider: - properties: - id: - description: an identifier for the resource with format {{project}} - type: string - name: - description: 'The name of the Config resource. Example: "projects/my-awesome-project/config"' - type: string - project: - description: |- - The ID of the project in which the resource belongs. - If it is not provided, the provider project is used. - type: string - signIn: - description: |- - Configuration related to local sign in methods. - Structure is documented below. 
- items: - properties: - allowDuplicateEmails: - description: Whether to allow more than one account to have - the same email. - type: boolean - anonymous: - description: |- - Configuration options related to authenticating an anonymous user. - Structure is documented below. - items: - properties: - enabled: - description: Whether phone number auth is enabled - for the project or not. - type: boolean - type: object - type: array - email: - description: |- - Configuration options related to authenticating a user by their email address. - Structure is documented below. - items: - properties: - enabled: - description: Whether phone number auth is enabled - for the project or not. - type: boolean - passwordRequired: - description: |- - Whether a password is required for email auth or not. If true, both an email and - password must be provided to sign in. If false, a user may sign in via either - email/password or email link. - type: boolean - type: object - type: array - hashConfig: - description: |- - (Output) - Output only. Hash config information. - Structure is documented below. - items: - properties: - algorithm: - description: |- - (Output) - Different password hash algorithms used in Identity Toolkit. - type: string - memoryCost: - description: |- - (Output) - Memory cost for hash calculation. Used by scrypt and other similar password derivation algorithms. See https://tools.ietf.org/html/rfc7914 for explanation of field. - type: number - rounds: - description: |- - (Output) - How many rounds for hash calculation. Used by scrypt and other similar password derivation algorithms. - type: number - saltSeparator: - description: |- - (Output) - Non-printable character to be inserted between the salt and plain text password in base64. - type: string - signerKey: - description: |- - (Output) - Signer key in base64. - type: string - type: object - type: array - phoneNumber: - description: |- - Configuration options related to authenticated a user by their phone number. 
- Structure is documented below. - items: - properties: - enabled: - description: Whether phone number auth is enabled - for the project or not. - type: boolean - testPhoneNumbers: - additionalProperties: - type: string - description: A map of - that can be used for phone auth testing. - type: object - x-kubernetes-map-type: granular - type: object - type: array - type: object - type: array - type: object - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. - properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition from - one status to another. - type: string - status: - description: Status of this condition; is it currently True, - False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. - type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - observedGeneration: - description: |- - ObservedGeneration is the latest metadata.generation - which resulted in either a ready state, or stalled due to error - it can not recover from without human intervention. 
- format: int64 - type: integer - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} - - additionalPrinterColumns: - - jsonPath: .status.conditions[?(@.type=='Synced')].status - name: SYNCED - type: string - - jsonPath: .status.conditions[?(@.type=='Ready')].status - name: READY - type: string - - jsonPath: .metadata.annotations.crossplane\.io/external-name - name: EXTERNAL-NAME - type: string - - jsonPath: .metadata.creationTimestamp - name: AGE - type: date - name: v1beta2 - schema: - openAPIV3Schema: - description: ProjectDefaultConfig is the Schema for the ProjectDefaultConfigs - API. There is no persistent data associated with this resource. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: ProjectDefaultConfigSpec defines the desired state of ProjectDefaultConfig - properties: - deletionPolicy: - default: Delete - description: |- - DeletionPolicy specifies what will happen to the underlying external - when this managed resource is deleted - either "Delete" or "Orphan" the - external resource. - This field is planned to be deprecated in favor of the ManagementPolicies - field in a future release. Currently, both could be set independently and - non-default values would be honored if the feature flag is enabled. 
- See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 - enum: - - Orphan - - Delete - type: string - forProvider: - properties: - project: - description: |- - The ID of the project in which the resource belongs. - If it is not provided, the provider project is used. - type: string - signIn: - description: |- - Configuration related to local sign in methods. - Structure is documented below. - properties: - allowDuplicateEmails: - description: Whether to allow more than one account to have - the same email. - type: boolean - anonymous: - description: |- - Configuration options related to authenticating an anonymous user. - Structure is documented below. - properties: - enabled: - description: Whether phone number auth is enabled for - the project or not. - type: boolean - type: object - email: - description: |- - Configuration options related to authenticating a user by their email address. - Structure is documented below. - properties: - enabled: - description: Whether phone number auth is enabled for - the project or not. - type: boolean - passwordRequired: - description: |- - Whether a password is required for email auth or not. If true, both an email and - password must be provided to sign in. If false, a user may sign in via either - email/password or email link. - type: boolean - type: object - phoneNumber: - description: |- - Configuration options related to authenticated a user by their phone number. - Structure is documented below. - properties: - enabled: - description: Whether phone number auth is enabled for - the project or not. - type: boolean - testPhoneNumbers: - additionalProperties: - type: string - description: A map of that - can be used for phone auth testing. - type: object - x-kubernetes-map-type: granular - type: object - type: object - type: object - initProvider: - description: |- - THIS IS A BETA FIELD. 
It will be honored - unless the Management Policies feature flag is disabled. - InitProvider holds the same fields as ForProvider, with the exception - of Identifier and other resource reference fields. The fields that are - in InitProvider are merged into ForProvider when the resource is created. - The same fields are also added to the terraform ignore_changes hook, to - avoid updating them after creation. This is useful for fields that are - required on creation, but we do not desire to update them after creation, - for example because of an external controller is managing them, like an - autoscaler. - properties: - project: - description: |- - The ID of the project in which the resource belongs. - If it is not provided, the provider project is used. - type: string - signIn: - description: |- - Configuration related to local sign in methods. - Structure is documented below. - properties: - allowDuplicateEmails: - description: Whether to allow more than one account to have - the same email. - type: boolean - anonymous: - description: |- - Configuration options related to authenticating an anonymous user. - Structure is documented below. - properties: - enabled: - description: Whether phone number auth is enabled for - the project or not. - type: boolean - type: object - email: - description: |- - Configuration options related to authenticating a user by their email address. - Structure is documented below. - properties: - enabled: - description: Whether phone number auth is enabled for - the project or not. - type: boolean - passwordRequired: - description: |- - Whether a password is required for email auth or not. If true, both an email and - password must be provided to sign in. If false, a user may sign in via either - email/password or email link. - type: boolean - type: object - phoneNumber: - description: |- - Configuration options related to authenticated a user by their phone number. - Structure is documented below. 
- properties: - enabled: - description: Whether phone number auth is enabled for - the project or not. - type: boolean - testPhoneNumbers: - additionalProperties: - type: string - description: A map of that - can be used for phone auth testing. - type: object - x-kubernetes-map-type: granular - type: object - type: object - type: object - managementPolicies: - default: - - '*' - description: |- - THIS IS A BETA FIELD. It is on by default but can be opted out - through a Crossplane feature flag. - ManagementPolicies specify the array of actions Crossplane is allowed to - take on the managed and external resources. - This field is planned to replace the DeletionPolicy field in a future - release. Currently, both could be set independently and non-default - values would be honored if the feature flag is enabled. If both are - custom, the DeletionPolicy field will be ignored. - See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 - and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md - items: - description: |- - A ManagementAction represents an action that the Crossplane controllers - can take on an external resource. - enum: - - Observe - - Create - - Update - - Delete - - LateInitialize - - '*' - type: string - type: array - providerConfigRef: - default: - name: default - description: |- - ProviderConfigReference specifies how the provider that will be used to - create, observe, update, and delete this managed resource should be - configured. - properties: - name: - description: Name of the referenced object. - type: string - policy: - description: Policies for referencing. - properties: - resolution: - default: Required - description: |- - Resolution specifies whether resolution of this reference is required. 
- The default is 'Required', which means the reconcile will fail if the - reference cannot be resolved. 'Optional' means this reference will be - a no-op if it cannot be resolved. - enum: - - Required - - Optional - type: string - resolve: - description: |- - Resolve specifies when this reference should be resolved. The default - is 'IfNotPresent', which will attempt to resolve the reference only when - the corresponding field is not present. Use 'Always' to resolve the - reference on every reconcile. - enum: - - Always - - IfNotPresent - type: string - type: object - required: - - name - type: object - publishConnectionDetailsTo: - description: |- - PublishConnectionDetailsTo specifies the connection secret config which - contains a name, metadata and a reference to secret store config to - which any connection details for this managed resource should be written. - Connection details frequently include the endpoint, username, - and password required to connect to the managed resource. - properties: - configRef: - default: - name: default - description: |- - SecretStoreConfigRef specifies which secret store config should be used - for this ConnectionSecret. - properties: - name: - description: Name of the referenced object. - type: string - policy: - description: Policies for referencing. - properties: - resolution: - default: Required - description: |- - Resolution specifies whether resolution of this reference is required. - The default is 'Required', which means the reconcile will fail if the - reference cannot be resolved. 'Optional' means this reference will be - a no-op if it cannot be resolved. - enum: - - Required - - Optional - type: string - resolve: - description: |- - Resolve specifies when this reference should be resolved. The default - is 'IfNotPresent', which will attempt to resolve the reference only when - the corresponding field is not present. Use 'Always' to resolve the - reference on every reconcile. 
- enum: - - Always - - IfNotPresent - type: string - type: object - required: - - name - type: object - metadata: - description: Metadata is the metadata for connection secret. - properties: - annotations: - additionalProperties: - type: string - description: |- - Annotations are the annotations to be added to connection secret. - - For Kubernetes secrets, this will be used as "metadata.annotations". - - It is up to Secret Store implementation for others store types. - type: object - labels: - additionalProperties: - type: string - description: |- - Labels are the labels/tags to be added to connection secret. - - For Kubernetes secrets, this will be used as "metadata.labels". - - It is up to Secret Store implementation for others store types. - type: object - type: - description: |- - Type is the SecretType for the connection secret. - - Only valid for Kubernetes Secret Stores. - type: string - type: object - name: - description: Name is the name of the connection secret. - type: string - required: - - name - type: object - writeConnectionSecretToRef: - description: |- - WriteConnectionSecretToReference specifies the namespace and name of a - Secret to which any connection details for this managed resource should - be written. Connection details frequently include the endpoint, username, - and password required to connect to the managed resource. - This field is planned to be replaced in a future release in favor of - PublishConnectionDetailsTo. Currently, both could be set independently - and connection details would be published to both without affecting - each other. - properties: - name: - description: Name of the secret. - type: string - namespace: - description: Namespace of the secret. - type: string - required: - - name - - namespace - type: object - required: - - forProvider - type: object - status: - description: ProjectDefaultConfigStatus defines the observed state of - ProjectDefaultConfig. 
- properties: - atProvider: - properties: - id: - description: an identifier for the resource with format {{project}} - type: string - name: - description: 'The name of the Config resource. Example: "projects/my-awesome-project/config"' - type: string - project: - description: |- - The ID of the project in which the resource belongs. - If it is not provided, the provider project is used. - type: string - signIn: - description: |- - Configuration related to local sign in methods. - Structure is documented below. - properties: - allowDuplicateEmails: - description: Whether to allow more than one account to have - the same email. - type: boolean - anonymous: - description: |- - Configuration options related to authenticating an anonymous user. - Structure is documented below. - properties: - enabled: - description: Whether phone number auth is enabled for - the project or not. - type: boolean - type: object - email: - description: |- - Configuration options related to authenticating a user by their email address. - Structure is documented below. - properties: - enabled: - description: Whether phone number auth is enabled for - the project or not. - type: boolean - passwordRequired: - description: |- - Whether a password is required for email auth or not. If true, both an email and - password must be provided to sign in. If false, a user may sign in via either - email/password or email link. - type: boolean - type: object - hashConfig: - description: |- - (Output) - Output only. Hash config information. - Structure is documented below. - items: - properties: - algorithm: - description: |- - (Output) - Different password hash algorithms used in Identity Toolkit. - type: string - memoryCost: - description: |- - (Output) - Memory cost for hash calculation. Used by scrypt and other similar password derivation algorithms. See https://tools.ietf.org/html/rfc7914 for explanation of field. 
- type: number - rounds: - description: |- - (Output) - How many rounds for hash calculation. Used by scrypt and other similar password derivation algorithms. - type: number - saltSeparator: - description: |- - (Output) - Non-printable character to be inserted between the salt and plain text password in base64. - type: string - signerKey: - description: |- - (Output) - Signer key in base64. - type: string - type: object - type: array - phoneNumber: - description: |- - Configuration options related to authenticated a user by their phone number. - Structure is documented below. - properties: - enabled: - description: Whether phone number auth is enabled for - the project or not. - type: boolean - testPhoneNumbers: - additionalProperties: - type: string - description: A map of that - can be used for phone auth testing. - type: object - x-kubernetes-map-type: granular - type: object - type: object - type: object - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. - properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition from - one status to another. - type: string - status: - description: Status of this condition; is it currently True, - False, or Unknown? 
- type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. - type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - observedGeneration: - description: |- - ObservedGeneration is the latest metadata.generation - which resulted in either a ready state, or stalled due to error - it can not recover from without human intervention. - format: int64 - type: integer - type: object - required: - - spec - type: object - served: true - storage: false - subresources: - status: {} diff --git a/package/crds/monitoring.gcp.upbound.io_alertpolicies.yaml b/package/crds/monitoring.gcp.upbound.io_alertpolicies.yaml index 5cf78d877..67aeb7327 100644 --- a/package/crds/monitoring.gcp.upbound.io_alertpolicies.yaml +++ b/package/crds/monitoring.gcp.upbound.io_alertpolicies.yaml @@ -3101,6 +3101,13 @@ spec: type: string type: object type: array + notificationPrompts: + description: |- + Control when notifications will be sent out. + Each value may be one of: NOTIFICATION_PROMPT_UNSPECIFIED, OPENED, CLOSED. + items: + type: string + type: array notificationRateLimit: description: |- Required for alert policies with a LogMatch condition. @@ -3420,6 +3427,12 @@ spec: This field is optional. If this field is not empty, then it must be a valid Prometheus label name. type: string + disableMetricValidation: + description: |- + Whether to disable metric existence validation for this condition. + Users with the monitoring.alertPolicyViewer role are able to see the + name of the non-existent metric in the alerting policy condition. + type: boolean duration: description: |- The amount of time that a time series must @@ -3972,6 +3985,13 @@ spec: type: string type: object type: array + notificationPrompts: + description: |- + Control when notifications will be sent out. 
+ Each value may be one of: NOTIFICATION_PROMPT_UNSPECIFIED, OPENED, CLOSED. + items: + type: string + type: array notificationRateLimit: description: |- Required for alert policies with a LogMatch condition. @@ -4291,6 +4311,12 @@ spec: This field is optional. If this field is not empty, then it must be a valid Prometheus label name. type: string + disableMetricValidation: + description: |- + Whether to disable metric existence validation for this condition. + Users with the monitoring.alertPolicyViewer role are able to see the + name of the non-existent metric in the alerting policy condition. + type: boolean duration: description: |- The amount of time that a time series must @@ -5015,6 +5041,13 @@ spec: type: string type: object type: array + notificationPrompts: + description: |- + Control when notifications will be sent out. + Each value may be one of: NOTIFICATION_PROMPT_UNSPECIFIED, OPENED, CLOSED. + items: + type: string + type: array notificationRateLimit: description: |- Required for alert policies with a LogMatch condition. @@ -5334,6 +5367,12 @@ spec: This field is optional. If this field is not empty, then it must be a valid Prometheus label name. type: string + disableMetricValidation: + description: |- + Whether to disable metric existence validation for this condition. + Users with the monitoring.alertPolicyViewer role are able to see the + name of the non-existent metric in the alerting policy condition. + type: boolean duration: description: |- The amount of time that a time series must diff --git a/package/crds/networkconnectivity.gcp.upbound.io_hubs.yaml b/package/crds/networkconnectivity.gcp.upbound.io_hubs.yaml index 56b39d160..e32f76eb6 100644 --- a/package/crds/networkconnectivity.gcp.upbound.io_hubs.yaml +++ b/package/crds/networkconnectivity.gcp.upbound.io_hubs.yaml @@ -95,6 +95,11 @@ spec: description: 'Immutable. The name of the hub. Hub names must be unique. 
They use the following form: projects/{project_number}/locations/global/hubs/{hub_id}' type: string + presetTopology: + description: |- + Optional. The topology implemented in this hub. Currently, this field is only used when policyMode = PRESET. The available preset topologies are MESH and STAR. If presetTopology is unspecified and policyMode = PRESET, the presetTopology defaults to MESH. When policyMode = CUSTOM, the presetTopology is set to PRESET_TOPOLOGY_UNSPECIFIED. + Possible values are: MESH, STAR. + type: string project: description: |- The ID of the project in which the resource belongs. @@ -136,6 +141,11 @@ spec: description: 'Immutable. The name of the hub. Hub names must be unique. They use the following form: projects/{project_number}/locations/global/hubs/{hub_id}' type: string + presetTopology: + description: |- + Optional. The topology implemented in this hub. Currently, this field is only used when policyMode = PRESET. The available preset topologies are MESH and STAR. If presetTopology is unspecified and policyMode = PRESET, the presetTopology defaults to MESH. When policyMode = CUSTOM, the presetTopology is set to PRESET_TOPOLOGY_UNSPECIFIED. + Possible values are: MESH, STAR. + type: string project: description: |- The ID of the project in which the resource belongs. @@ -347,6 +357,11 @@ spec: description: 'Immutable. The name of the hub. Hub names must be unique. They use the following form: projects/{project_number}/locations/global/hubs/{hub_id}' type: string + presetTopology: + description: |- + Optional. The topology implemented in this hub. Currently, this field is only used when policyMode = PRESET. The available preset topologies are MESH and STAR. If presetTopology is unspecified and policyMode = PRESET, the presetTopology defaults to MESH. When policyMode = CUSTOM, the presetTopology is set to PRESET_TOPOLOGY_UNSPECIFIED. + Possible values are: MESH, STAR. 
+ type: string project: description: |- The ID of the project in which the resource belongs. diff --git a/package/crds/networkconnectivity.gcp.upbound.io_spokes.yaml b/package/crds/networkconnectivity.gcp.upbound.io_spokes.yaml index 83de6d8e8..2b031877e 100644 --- a/package/crds/networkconnectivity.gcp.upbound.io_spokes.yaml +++ b/package/crds/networkconnectivity.gcp.upbound.io_spokes.yaml @@ -1174,6 +1174,10 @@ spec: description: description: An optional description of the spoke. type: string + group: + description: The name of the group that this spoke is associated + with. + type: string hub: description: Immutable. The URI of the hub that this spoke is attached to. @@ -1268,6 +1272,13 @@ spec: A collection of VLAN attachment resources. These resources should be redundant attachments that all advertise the same prefixes to Google Cloud. Alternatively, in active/passive configurations, all attachments should be capable of advertising the same prefixes. Structure is documented below. properties: + includeImportRanges: + description: |- + IP ranges allowed to be included during import from hub (does not control transit connectivity). + The only allowed value for now is "ALL_IPV4_RANGES". + items: + type: string + type: array siteToSiteDataTransfer: description: A value that controls whether site-to-site data transfer is enabled for these resources. Note that data @@ -1278,12 +1289,277 @@ spec: items: type: string type: array + urisRefs: + description: References to InterconnectAttachment in compute + to populate uris. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + urisSelector: + description: Selector for a list of InterconnectAttachment + in compute to populate uris. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + linkedProducerVpcNetwork: + description: |- + Producer VPC network that is associated with the spoke. + Structure is documented below. 
+ properties: + excludeExportRanges: + description: IP ranges encompassing the subnets to be excluded + from peering. + items: + type: string + type: array + includeExportRanges: + description: IP ranges allowed to be included from peering. + items: + type: string + type: array + network: + description: The URI of the Service Consumer VPC that the + Producer VPC is peered with. + type: string + networkRef: + description: Reference to a Network in compute to populate + network. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + networkSelector: + description: Selector for a Network in compute to populate + network. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + peering: + description: The name of the VPC peering between the Service + Consumer VPC and the Producer VPC (defined in the Tenant + project) which is added to the NCC hub. This peering must + be in ACTIVE state. + type: string + peeringRef: + description: Reference to a Connection in servicenetworking + to populate peering. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + peeringSelector: + description: Selector for a Connection in servicenetworking + to populate peering. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object type: object linkedRouterApplianceInstances: description: |- The URIs of linked Router appliance resources Structure is documented below. properties: + includeImportRanges: + description: |- + IP ranges allowed to be included during import from hub (does not control transit connectivity). + The only allowed value for now is "ALL_IPV4_RANGES". + items: + type: string + type: array instances: description: |- The list of router appliance instances @@ -1391,6 +1667,11 @@ spec: items: type: string type: array + includeExportRanges: + description: IP ranges allowed to be included from peering. + items: + type: string + type: array uri: description: The URI of the VPC network resource. type: string @@ -1476,6 +1757,13 @@ spec: The URIs of linked VPN tunnel resources Structure is documented below. 
properties: + includeImportRanges: + description: |- + IP ranges allowed to be included during import from hub (does not control transit connectivity). + The only allowed value for now is "ALL_IPV4_RANGES". + items: + type: string + type: array siteToSiteDataTransfer: description: A value that controls whether site-to-site data transfer is enabled for these resources. Note that data @@ -1486,6 +1774,85 @@ spec: items: type: string type: array + urisRefs: + description: References to VPNTunnel in compute to populate + uris. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + urisSelector: + description: Selector for a list of VPNTunnel in compute to + populate uris. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object type: object location: description: The location for the resource @@ -1516,6 +1883,10 @@ spec: description: description: An optional description of the spoke. type: string + group: + description: The name of the group that this spoke is associated + with. + type: string hub: description: Immutable. The URI of the hub that this spoke is attached to. @@ -1570,62 +1941,334 @@ spec: description: MatchLabels ensures an object with matching labels is selected. type: object - policy: - description: Policies for selection. + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: |- + Optional labels in key:value format. For more information about labels, see Requirements for labels. + Note: This field is non-authoritative, and will only manage the labels present in your configuration. + Please refer to the field effective_labels for all of the labels present on the resource. + type: object + x-kubernetes-map-type: granular + linkedInterconnectAttachments: + description: |- + A collection of VLAN attachment resources. These resources should be redundant attachments that all advertise the same prefixes to Google Cloud. Alternatively, in active/passive configurations, all attachments should be capable of advertising the same prefixes. + Structure is documented below. + properties: + includeImportRanges: + description: |- + IP ranges allowed to be included during import from hub (does not control transit connectivity). + The only allowed value for now is "ALL_IPV4_RANGES". + items: + type: string + type: array + siteToSiteDataTransfer: + description: A value that controls whether site-to-site data + transfer is enabled for these resources. Note that data + transfer is available only in supported locations. + type: boolean + uris: + description: The URIs of linked interconnect attachment resources + items: + type: string + type: array + urisRefs: + description: References to InterconnectAttachment in compute + to populate uris. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + urisSelector: + description: Selector for a list of InterconnectAttachment + in compute to populate uris. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + linkedProducerVpcNetwork: + description: |- + Producer VPC network that is associated with the spoke. + Structure is documented below. 
+ properties: + excludeExportRanges: + description: IP ranges encompassing the subnets to be excluded + from peering. + items: + type: string + type: array + includeExportRanges: + description: IP ranges allowed to be included from peering. + items: + type: string + type: array + network: + description: The URI of the Service Consumer VPC that the + Producer VPC is peered with. + type: string + networkRef: + description: Reference to a Network in compute to populate + network. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + networkSelector: + description: Selector for a Network in compute to populate + network. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + peering: + description: The name of the VPC peering between the Service + Consumer VPC and the Producer VPC (defined in the Tenant + project) which is added to the NCC hub. This peering must + be in ACTIVE state. + type: string + peeringRef: + description: Reference to a Connection in servicenetworking + to populate peering. properties: - resolution: - default: Required - description: |- - Resolution specifies whether resolution of this reference is required. - The default is 'Required', which means the reconcile will fail if the - reference cannot be resolved. 'Optional' means this reference will be - a no-op if it cannot be resolved. - enum: - - Required - - Optional + name: + description: Name of the referenced object. type: string - resolve: + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + peeringSelector: + description: Selector for a Connection in servicenetworking + to populate peering. + properties: + matchControllerRef: description: |- - Resolve specifies when this reference should be resolved. The default - is 'IfNotPresent', which will attempt to resolve the reference only when - the corresponding field is not present. Use 'Always' to resolve the - reference on every reconcile. - enum: - - Always - - IfNotPresent - type: string + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object type: object type: object - labels: - additionalProperties: - type: string - description: |- - Optional labels in key:value format. For more information about labels, see Requirements for labels. - Note: This field is non-authoritative, and will only manage the labels present in your configuration. 
- Please refer to the field effective_labels for all of the labels present on the resource. - type: object - x-kubernetes-map-type: granular - linkedInterconnectAttachments: + linkedRouterApplianceInstances: description: |- - A collection of VLAN attachment resources. These resources should be redundant attachments that all advertise the same prefixes to Google Cloud. Alternatively, in active/passive configurations, all attachments should be capable of advertising the same prefixes. + The URIs of linked Router appliance resources Structure is documented below. properties: - siteToSiteDataTransfer: - description: A value that controls whether site-to-site data - transfer is enabled for these resources. Note that data - transfer is available only in supported locations. - type: boolean - uris: - description: The URIs of linked interconnect attachment resources + includeImportRanges: + description: |- + IP ranges allowed to be included during import from hub (does not control transit connectivity). + The only allowed value for now is "ALL_IPV4_RANGES". items: type: string type: array - type: object - linkedRouterApplianceInstances: - description: |- - The URIs of linked Router appliance resources - Structure is documented below. - properties: instances: description: |- The list of router appliance instances @@ -1733,6 +2376,11 @@ spec: items: type: string type: array + includeExportRanges: + description: IP ranges allowed to be included from peering. + items: + type: string + type: array uri: description: The URI of the VPC network resource. type: string @@ -1818,6 +2466,13 @@ spec: The URIs of linked VPN tunnel resources Structure is documented below. properties: + includeImportRanges: + description: |- + IP ranges allowed to be included during import from hub (does not control transit connectivity). + The only allowed value for now is "ALL_IPV4_RANGES". 
+ items: + type: string + type: array siteToSiteDataTransfer: description: A value that controls whether site-to-site data transfer is enabled for these resources. Note that data @@ -1828,6 +2483,85 @@ spec: items: type: string type: array + urisRefs: + description: References to VPNTunnel in compute to populate + uris. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + urisSelector: + description: Selector for a list of VPNTunnel in compute to + populate uris. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object type: object location: description: The location for the resource @@ -2034,6 +2768,10 @@ spec: type: string type: object x-kubernetes-map-type: granular + group: + description: The name of the group that this spoke is associated + with. + type: string hub: description: Immutable. The URI of the hub that this spoke is attached to. @@ -2055,6 +2793,13 @@ spec: A collection of VLAN attachment resources. These resources should be redundant attachments that all advertise the same prefixes to Google Cloud. Alternatively, in active/passive configurations, all attachments should be capable of advertising the same prefixes. Structure is documented below. properties: + includeImportRanges: + description: |- + IP ranges allowed to be included during import from hub (does not control transit connectivity). + The only allowed value for now is "ALL_IPV4_RANGES". + items: + type: string + type: array siteToSiteDataTransfer: description: A value that controls whether site-to-site data transfer is enabled for these resources. Note that data @@ -2066,11 +2811,50 @@ spec: type: string type: array type: object + linkedProducerVpcNetwork: + description: |- + Producer VPC network that is associated with the spoke. + Structure is documented below. + properties: + excludeExportRanges: + description: IP ranges encompassing the subnets to be excluded + from peering. + items: + type: string + type: array + includeExportRanges: + description: IP ranges allowed to be included from peering. 
+ items: + type: string + type: array + network: + description: The URI of the Service Consumer VPC that the + Producer VPC is peered with. + type: string + peering: + description: The name of the VPC peering between the Service + Consumer VPC and the Producer VPC (defined in the Tenant + project) which is added to the NCC hub. This peering must + be in ACTIVE state. + type: string + producerNetwork: + description: |- + (Output) + The URI of the Producer VPC. + type: string + type: object linkedRouterApplianceInstances: description: |- The URIs of linked Router appliance resources Structure is documented below. properties: + includeImportRanges: + description: |- + IP ranges allowed to be included during import from hub (does not control transit connectivity). + The only allowed value for now is "ALL_IPV4_RANGES". + items: + type: string + type: array instances: description: |- The list of router appliance instances @@ -2102,6 +2886,11 @@ spec: items: type: string type: array + includeExportRanges: + description: IP ranges allowed to be included from peering. + items: + type: string + type: array uri: description: The URI of the VPC network resource. type: string @@ -2111,6 +2900,13 @@ spec: The URIs of linked VPN tunnel resources Structure is documented below. properties: + includeImportRanges: + description: |- + IP ranges allowed to be included during import from hub (does not control transit connectivity). + The only allowed value for now is "ALL_IPV4_RANGES". + items: + type: string + type: array siteToSiteDataTransfer: description: A value that controls whether site-to-site data transfer is enabled for these resources. 
Note that data diff --git a/package/crds/orgpolicy.gcp.upbound.io_policies.yaml b/package/crds/orgpolicy.gcp.upbound.io_policies.yaml index 95b0f9416..583cf804c 100644 --- a/package/crds/orgpolicy.gcp.upbound.io_policies.yaml +++ b/package/crds/orgpolicy.gcp.upbound.io_policies.yaml @@ -144,6 +144,14 @@ spec: This field can be set only in Policies for boolean constraints. type: string + parameters: + description: 'Optional. Required for Managed Constraints + if parameters defined in constraints. Pass parameter + values when policy enforcement is enabled. Ensure + that parameter value types match those defined in + the constraint definition. For example: { "allowedLocations" + : ["us-east1", "us-west1"], "allowAll" : true }' + type: string values: description: |- List of values to be used for this PolicyRule. This field can be set only in Policies for list constraints. @@ -266,7 +274,7 @@ spec: type: boolean rules: description: |- - Up to 10 PolicyRules are allowed. In Policies for boolean constraints, the following requirements apply: - There must be one and only one PolicyRule where condition is unset. - BooleanPolicyRules with conditions must set enforced to the opposite of the PolicyRule without a condition. - During policy evaluation, PolicyRules with conditions that are true for a target resource take precedence. + In Policies for boolean constraints, the following requirements apply: - There must be one and only one PolicyRule where condition is unset. - BooleanPolicyRules with conditions must set enforced to the opposite of the PolicyRule without a condition. - During policy evaluation, PolicyRules with conditions that are true for a target resource take precedence. Structure is documented below. items: properties: @@ -312,6 +320,14 @@ spec: This field can be set only in Policies for boolean constraints. type: string + parameters: + description: 'Optional. Required for Managed Constraints + if parameters defined in constraints. 
Pass parameter + values when policy enforcement is enabled. Ensure + that parameter value types match those defined in + the constraint definition. For example: { "allowedLocations" + : ["us-east1", "us-west1"], "allowAll" : true }' + type: string values: description: |- List of values to be used for this PolicyRule. This field can be set only in Policies for list constraints. @@ -415,6 +431,14 @@ spec: This field can be set only in Policies for boolean constraints. type: string + parameters: + description: 'Optional. Required for Managed Constraints + if parameters defined in constraints. Pass parameter + values when policy enforcement is enabled. Ensure + that parameter value types match those defined in + the constraint definition. For example: { "allowedLocations" + : ["us-east1", "us-west1"], "allowAll" : true }' + type: string values: description: |- List of values to be used for this PolicyRule. This field can be set only in Policies for list constraints. @@ -458,7 +482,7 @@ spec: type: boolean rules: description: |- - Up to 10 PolicyRules are allowed. In Policies for boolean constraints, the following requirements apply: - There must be one and only one PolicyRule where condition is unset. - BooleanPolicyRules with conditions must set enforced to the opposite of the PolicyRule without a condition. - During policy evaluation, PolicyRules with conditions that are true for a target resource take precedence. + In Policies for boolean constraints, the following requirements apply: - There must be one and only one PolicyRule where condition is unset. - BooleanPolicyRules with conditions must set enforced to the opposite of the PolicyRule without a condition. - During policy evaluation, PolicyRules with conditions that are true for a target resource take precedence. Structure is documented below. items: properties: @@ -504,6 +528,14 @@ spec: This field can be set only in Policies for boolean constraints. type: string + parameters: + description: 'Optional. 
Required for Managed Constraints + if parameters defined in constraints. Pass parameter + values when policy enforcement is enabled. Ensure + that parameter value types match those defined in + the constraint definition. For example: { "allowedLocations" + : ["us-east1", "us-west1"], "allowAll" : true }' + type: string values: description: |- List of values to be used for this PolicyRule. This field can be set only in Policies for list constraints. @@ -771,6 +803,14 @@ spec: This field can be set only in Policies for boolean constraints. type: string + parameters: + description: 'Optional. Required for Managed Constraints + if parameters defined in constraints. Pass parameter + values when policy enforcement is enabled. Ensure + that parameter value types match those defined in + the constraint definition. For example: { "allowedLocations" + : ["us-east1", "us-west1"], "allowAll" : true }' + type: string values: description: |- List of values to be used for this PolicyRule. This field can be set only in Policies for list constraints. @@ -837,7 +877,7 @@ spec: type: boolean rules: description: |- - Up to 10 PolicyRules are allowed. In Policies for boolean constraints, the following requirements apply: - There must be one and only one PolicyRule where condition is unset. - BooleanPolicyRules with conditions must set enforced to the opposite of the PolicyRule without a condition. - During policy evaluation, PolicyRules with conditions that are true for a target resource take precedence. + In Policies for boolean constraints, the following requirements apply: - There must be one and only one PolicyRule where condition is unset. - BooleanPolicyRules with conditions must set enforced to the opposite of the PolicyRule without a condition. - During policy evaluation, PolicyRules with conditions that are true for a target resource take precedence. Structure is documented below. 
items: properties: @@ -883,6 +923,14 @@ spec: This field can be set only in Policies for boolean constraints. type: string + parameters: + description: 'Optional. Required for Managed Constraints + if parameters defined in constraints. Pass parameter + values when policy enforcement is enabled. Ensure + that parameter value types match those defined in + the constraint definition. For example: { "allowedLocations" + : ["us-east1", "us-west1"], "allowAll" : true }' + type: string values: description: |- List of values to be used for this PolicyRule. This field can be set only in Policies for list constraints. diff --git a/package/crds/privateca.gcp.upbound.io_certificateauthorities.yaml b/package/crds/privateca.gcp.upbound.io_certificateauthorities.yaml index 40b1b3d53..0853e37e3 100644 --- a/package/crds/privateca.gcp.upbound.io_certificateauthorities.yaml +++ b/package/crds/privateca.gcp.upbound.io_certificateauthorities.yaml @@ -2241,7 +2241,7 @@ spec: Structure is documented below. properties: keyId: - description: The value of the KeyId in lowercase hexidecimal. + description: The value of the KeyId in lowercase hexadecimal. type: string type: object x509Config: @@ -2504,13 +2504,10 @@ spec: type: array type: object type: object - deletionProtection: - description: When the field is set to false, deleting the CertificateAuthority - is allowed. - type: boolean desiredState: - description: Desired state of the CertificateAuthority. Set this - field to STAGED to create a STAGED root CA. + description: |- + Desired state of the CertificateAuthority. Set this field to STAGED to create a STAGED root CA. + Possible values: ENABLED, DISABLED, STAGED. type: string gcsBucket: description: |- @@ -2863,7 +2860,7 @@ spec: Structure is documented below. properties: keyId: - description: The value of the KeyId in lowercase hexidecimal. + description: The value of the KeyId in lowercase hexadecimal. 
type: string type: object x509Config: @@ -3126,13 +3123,10 @@ spec: type: array type: object type: object - deletionProtection: - description: When the field is set to false, deleting the CertificateAuthority - is allowed. - type: boolean desiredState: - description: Desired state of the CertificateAuthority. Set this - field to STAGED to create a STAGED root CA. + description: |- + Desired state of the CertificateAuthority. Set this field to STAGED to create a STAGED root CA. + Possible values: ENABLED, DISABLED, STAGED. type: string gcsBucket: description: |- @@ -3591,7 +3585,7 @@ spec: Structure is documented below. properties: keyId: - description: The value of the KeyId in lowercase hexidecimal. + description: The value of the KeyId in lowercase hexadecimal. type: string type: object x509Config: @@ -3865,8 +3859,9 @@ spec: is allowed. type: boolean desiredState: - description: Desired state of the CertificateAuthority. Set this - field to STAGED to create a STAGED root CA. + description: |- + Desired state of the CertificateAuthority. Set this field to STAGED to create a STAGED root CA. + Possible values: ENABLED, DISABLED, STAGED. type: string effectiveLabels: additionalProperties: diff --git a/package/crds/privateca.gcp.upbound.io_certificates.yaml b/package/crds/privateca.gcp.upbound.io_certificates.yaml index d815592bd..08292632e 100644 --- a/package/crds/privateca.gcp.upbound.io_certificates.yaml +++ b/package/crds/privateca.gcp.upbound.io_certificates.yaml @@ -2962,7 +2962,7 @@ spec: Structure is documented below. properties: keyId: - description: The value of the KeyId in lowercase hexidecimal. + description: The value of the KeyId in lowercase hexadecimal. type: string type: object x509Config: @@ -3631,7 +3631,7 @@ spec: Structure is documented below. properties: keyId: - description: The value of the KeyId in lowercase hexidecimal. + description: The value of the KeyId in lowercase hexadecimal. 
type: string type: object x509Config: @@ -4123,7 +4123,7 @@ spec: items: properties: keyId: - description: The value of the KeyId in lowercase hexidecimal. + description: The value of the KeyId in lowercase hexadecimal. type: string type: object type: array @@ -4311,7 +4311,7 @@ spec: items: properties: keyId: - description: The value of the KeyId in lowercase hexidecimal. + description: The value of the KeyId in lowercase hexadecimal. type: string type: object type: array @@ -4695,7 +4695,7 @@ spec: Structure is documented below. properties: keyId: - description: The value of the KeyId in lowercase hexidecimal. + description: The value of the KeyId in lowercase hexadecimal. type: string type: object x509Config: diff --git a/package/crds/pubsub.gcp.upbound.io_subscriptions.yaml b/package/crds/pubsub.gcp.upbound.io_subscriptions.yaml index fa3acff02..ce0896b59 100644 --- a/package/crds/pubsub.gcp.upbound.io_subscriptions.yaml +++ b/package/crds/pubsub.gcp.upbound.io_subscriptions.yaml @@ -1791,6 +1791,11 @@ spec: If set, message data will be written to Cloud Storage in Avro format. Structure is documented below. properties: + useTopicSchema: + description: |- + When true, use the topic's schema as the columns to write to in BigQuery, if it exists. + Only one of use_topic_schema and use_table_schema can be set. + type: boolean writeMetadata: description: |- When true, writes the Pub/Sub message metadata to @@ -1825,6 +1830,11 @@ spec: May not exceed the subscription's acknowledgement deadline. A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". type: string + maxMessages: + description: The maximum messages that can be written to a + Cloud Storage file before a new file is created. Min 1000 + messages. + type: number serviceAccountEmail: description: |- The service account to use to write to Cloud Storage. 
If not specified, the Pub/Sub @@ -2069,7 +2079,7 @@ spec: retain_acked_messages is true, then this also configures the retention of acknowledged messages, and thus configures how far back in time a subscriptions.seek can be done. Defaults to 7 days. Cannot be more - than 7 days ("604800s") or less than 10 minutes ("600s"). + than 31 days ("2678400s") or less than 10 minutes ("600s"). A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "600.5s". type: string @@ -2414,6 +2424,11 @@ spec: If set, message data will be written to Cloud Storage in Avro format. Structure is documented below. properties: + useTopicSchema: + description: |- + When true, use the topic's schema as the columns to write to in BigQuery, if it exists. + Only one of use_topic_schema and use_table_schema can be set. + type: boolean writeMetadata: description: |- When true, writes the Pub/Sub message metadata to @@ -2448,6 +2463,11 @@ spec: May not exceed the subscription's acknowledgement deadline. A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". type: string + maxMessages: + description: The maximum messages that can be written to a + Cloud Storage file before a new file is created. Min 1000 + messages. + type: number serviceAccountEmail: description: |- The service account to use to write to Cloud Storage. If not specified, the Pub/Sub @@ -2692,7 +2712,7 @@ spec: retain_acked_messages is true, then this also configures the retention of acknowledged messages, and thus configures how far back in time a subscriptions.seek can be done. Defaults to 7 days. Cannot be more - than 7 days ("604800s") or less than 10 minutes ("600s"). + than 31 days ("2678400s") or less than 10 minutes ("600s"). A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "600.5s". type: string @@ -3120,6 +3140,11 @@ spec: If set, message data will be written to Cloud Storage in Avro format. Structure is documented below. 
properties: + useTopicSchema: + description: |- + When true, use the topic's schema as the columns to write to in BigQuery, if it exists. + Only one of use_topic_schema and use_table_schema can be set. + type: boolean writeMetadata: description: |- When true, writes the Pub/Sub message metadata to @@ -3154,6 +3179,11 @@ spec: May not exceed the subscription's acknowledgement deadline. A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". type: string + maxMessages: + description: The maximum messages that can be written to a + Cloud Storage file before a new file is created. Min 1000 + messages. + type: number serviceAccountEmail: description: |- The service account to use to write to Cloud Storage. If not specified, the Pub/Sub @@ -3262,7 +3292,7 @@ spec: retain_acked_messages is true, then this also configures the retention of acknowledged messages, and thus configures how far back in time a subscriptions.seek can be done. Defaults to 7 days. Cannot be more - than 7 days ("604800s") or less than 10 minutes ("600s"). + than 31 days ("2678400s") or less than 10 minutes ("600s"). A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "600.5s". type: string diff --git a/package/crds/pubsub.gcp.upbound.io_topics.yaml b/package/crds/pubsub.gcp.upbound.io_topics.yaml index 68a25f884..7e80adf03 100644 --- a/package/crds/pubsub.gcp.upbound.io_topics.yaml +++ b/package/crds/pubsub.gcp.upbound.io_topics.yaml @@ -911,6 +911,70 @@ spec: description: The Kinesis stream ARN to ingest data from. type: string type: object + cloudStorage: + description: |- + Settings for ingestion from Cloud Storage. + Structure is documented below. + properties: + avroFormat: + description: |- + Configuration for reading Cloud Storage data in Avro binary format. The + bytes of each object will be set to the data field of a Pub/Sub message. + type: object + bucket: + description: |- + Cloud Storage bucket. 
The bucket name must be without any + prefix like "gs://". See the bucket naming requirements: + https://cloud.google.com/storage/docs/buckets#naming. + type: string + matchGlob: + description: |- + Glob pattern used to match objects that will be ingested. If unset, all + objects will be ingested. See the supported patterns: + https://cloud.google.com/storage/docs/json_api/v1/objects/list#list-objects-and-prefixes-using-glob + type: string + minimumObjectCreateTime: + description: |- + The timestamp set in RFC3339 text format. If set, only objects with a + larger or equal timestamp will be ingested. Unset by default, meaning + all objects will be ingested. + type: string + pubsubAvroFormat: + description: |- + Configuration for reading Cloud Storage data written via Cloud Storage + subscriptions(See https://cloud.google.com/pubsub/docs/cloudstorage). The + data and attributes fields of the originally exported Pub/Sub message + will be restored when publishing. + type: object + textFormat: + description: |- + Configuration for reading Cloud Storage data in text format. Each line of + text as specified by the delimiter will be set to the data field of a + Pub/Sub message. + Structure is documented below. + properties: + delimiter: + description: |- + The delimiter to use when using the 'text' format. Each line of text as + specified by the delimiter will be set to the 'data' field of a Pub/Sub + message. When unset, '\n' is used. + type: string + type: object + type: object + platformLogsSettings: + description: |- + Settings for Platform Logs regarding ingestion to Pub/Sub. If unset, + no Platform Logs will be generated. + Structure is documented below. + properties: + severity: + description: |- + The minimum severity level of Platform Logs that will be written. If unspecified, + no Platform Logs will be written. + Default value is SEVERITY_UNSPECIFIED. + Possible values are: SEVERITY_UNSPECIFIED, DISABLED, DEBUG, INFO, WARNING, ERROR.
+ type: string + type: object type: object kmsKeyName: description: |- @@ -1102,6 +1166,70 @@ spec: description: The Kinesis stream ARN to ingest data from. type: string type: object + cloudStorage: + description: |- + Settings for ingestion from Cloud Storage. + Structure is documented below. + properties: + avroFormat: + description: |- + Configuration for reading Cloud Storage data in Avro binary format. The + bytes of each object will be set to the data field of a Pub/Sub message. + type: object + bucket: + description: |- + Cloud Storage bucket. The bucket name must be without any + prefix like "gs://". See the bucket naming requirements: + https://cloud.google.com/storage/docs/buckets#naming. + type: string + matchGlob: + description: |- + Glob pattern used to match objects that will be ingested. If unset, all + objects will be ingested. See the supported patterns: + https://cloud.google.com/storage/docs/json_api/v1/objects/list#list-objects-and-prefixes-using-glob + type: string + minimumObjectCreateTime: + description: |- + The timestamp set in RFC3339 text format. If set, only objects with a + larger or equal timestamp will be ingested. Unset by default, meaning + all objects will be ingested. + type: string + pubsubAvroFormat: + description: |- + Configuration for reading Cloud Storage data written via Cloud Storage + subscriptions(See https://cloud.google.com/pubsub/docs/cloudstorage). The + data and attributes fields of the originally exported Pub/Sub message + will be restored when publishing. + type: object + textFormat: + description: |- + Configuration for reading Cloud Storage data in text format. Each line of + text as specified by the delimiter will be set to the data field of a + Pub/Sub message. + Structure is documented below. + properties: + delimiter: + description: |- + The delimiter to use when using the 'text' format. Each line of text as + specified by the delimiter will be set to the 'data' field of a Pub/Sub + message. 
When unset, '\n' is used. + type: string + type: object + type: object + platformLogsSettings: + description: |- + Settings for Platform Logs regarding ingestion to Pub/Sub. If unset, + no Platform Logs will be generated. + Structure is documented below. + properties: + severity: + description: |- + The minimum severity level of Platform Logs that will be written. If unspecified, + no Platform Logs will be written. + Default value is SEVERITY_UNSPECIFIED. + Possible values are: SEVERITY_UNSPECIFIED, DISABLED, DEBUG, INFO, WARNING, ERROR. + type: string + type: object type: object kmsKeyName: description: |- @@ -1461,6 +1589,70 @@ spec: description: The Kinesis stream ARN to ingest data from. type: string type: object + cloudStorage: + description: |- + Settings for ingestion from Cloud Storage. + Structure is documented below. + properties: + avroFormat: + description: |- + Configuration for reading Cloud Storage data in Avro binary format. The + bytes of each object will be set to the data field of a Pub/Sub message. + type: object + bucket: + description: |- + Cloud Storage bucket. The bucket name must be without any + prefix like "gs://". See the bucket naming requirements: + https://cloud.google.com/storage/docs/buckets#naming. + type: string + matchGlob: + description: |- + Glob pattern used to match objects that will be ingested. If unset, all + objects will be ingested. See the supported patterns: + https://cloud.google.com/storage/docs/json_api/v1/objects/list#list-objects-and-prefixes-using-glob + type: string + minimumObjectCreateTime: + description: |- + The timestamp set in RFC3339 text format. If set, only objects with a + larger or equal timestamp will be ingested. Unset by default, meaning + all objects will be ingested. + type: string + pubsubAvroFormat: + description: |- + Configuration for reading Cloud Storage data written via Cloud Storage + subscriptions(See https://cloud.google.com/pubsub/docs/cloudstorage).
The + data and attributes fields of the originally exported Pub/Sub message + will be restored when publishing. + type: object + textFormat: + description: |- + Configuration for reading Cloud Storage data in text format. Each line of + text as specified by the delimiter will be set to the data field of a + Pub/Sub message. + Structure is documented below. + properties: + delimiter: + description: |- + The delimiter to use when using the 'text' format. Each line of text as + specified by the delimiter will be set to the 'data' field of a Pub/Sub + message. When unset, '\n' is used. + type: string + type: object + type: object + platformLogsSettings: + description: |- + Settings for Platform Logs regarding ingestion to Pub/Sub. If unset, + no Platform Logs will be generated. + Structure is documented below. + properties: + severity: + description: |- + The minimum severity level of Platform Logs that will be written. If unspecified, + no Platform Logs will be written. + Default value is SEVERITY_UNSPECIFIED. + Possible values are: SEVERITY_UNSPECIFIED, DISABLED, DEBUG, INFO, WARNING, ERROR. + type: string + type: object type: object kmsKeyName: description: |- diff --git a/package/crds/redis.gcp.upbound.io_clusters.yaml b/package/crds/redis.gcp.upbound.io_clusters.yaml index fcb58b097..e9a772c07 100644 --- a/package/crds/redis.gcp.upbound.io_clusters.yaml +++ b/package/crds/redis.gcp.upbound.io_clusters.yaml @@ -79,12 +79,202 @@ spec: Default value is AUTH_MODE_DISABLED. Possible values are: AUTH_MODE_UNSPECIFIED, AUTH_MODE_IAM_AUTH, AUTH_MODE_DISABLED. type: string + crossClusterReplicationConfig: + description: field to the configuration file to match the latest + value in the state. + properties: + clusterRole: + description: from SECONDARY to PRIMARY. + type: string + primaryCluster: + description: field.
+ properties: + cluster: + description: 'The full resource path of the secondary + cluster in the format: projects/{project}/locations/{region}/clusters/{cluster-id}' + type: string + clusterRef: + description: Reference to a Cluster in redis to populate + cluster. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + clusterSelector: + description: Selector for a Cluster in redis to populate + cluster. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + secondaryClusters: + description: list with the new secondaries. The new secondaries + are the current primary and other secondary clusters(if + any). + items: + properties: + cluster: + description: 'The full resource path of the secondary + cluster in the format: projects/{project}/locations/{region}/clusters/{cluster-id}' + type: string + type: object + type: array + type: object + deletionProtectionEnabled: + description: |- + Optional. Indicates if the cluster is deletion protected or not. + If the value if set to true, any delete cluster operation will fail. + Default value is true. + type: boolean + maintenancePolicy: + description: |- + Maintenance policy for a cluster + Structure is documented below. + properties: + weeklyMaintenanceWindow: + description: |- + Optional. Maintenance window that is applied to resources covered by this policy. + Minimum 1. For the current version, the maximum number + of weekly_window is expected to be one. + Structure is documented below. + items: + properties: + day: + description: Required. The day of week that maintenance + updates occur. + type: string + startTime: + description: |- + Required. Start time of the window in UTC time. + Structure is documented below. + properties: + hours: + description: |- + Hours of day in 24 hour format. Should be from 0 to 23. + An API may choose to allow the value "24:00:00" for scenarios like business closing time. + type: number + minutes: + description: Minutes of hour of day. Must be from + 0 to 59. 
+ type: number + nanos: + description: Fractions of seconds in nanoseconds. + Must be from 0 to 999,999,999. + type: number + seconds: + description: |- + Seconds of minutes of the time. Must normally be from 0 to 59. + An API may allow the value 60 if it allows leap-seconds. + type: number + type: object + type: object + type: array + type: object nodeType: description: |- The nodeType for the Redis cluster. If not provided, REDIS_HIGHMEM_MEDIUM will be used as default Possible values are: REDIS_SHARED_CORE_NANO, REDIS_HIGHMEM_MEDIUM, REDIS_HIGHMEM_XLARGE, REDIS_STANDARD_SMALL. type: string + persistenceConfig: + description: |- + Persistence config (RDB, AOF) for the cluster. + Structure is documented below. + properties: + aofConfig: + description: |- + AOF configuration. This field will be ignored if mode is not AOF. + Structure is documented below. + properties: + appendFsync: + description: Optional. Available fsync modes. + type: string + type: object + mode: + description: Optional. Controls whether Persistence features + are enabled. If not provided, the existing value will be + used. + type: string + rdbConfig: + description: |- + RDB configuration. This field will be ignored if mode is not RDB. + Structure is documented below. + properties: + rdbSnapshotPeriod: + description: Optional. Available snapshot periods for + scheduling. + type: string + rdbSnapshotStartTime: + description: |- + The time that the first snapshot was/will be attempted, and to which + future snapshots will be aligned. + If not provided, the current time will be used. + type: string + type: object + type: object project: description: |- The ID of the project in which the resource belongs. @@ -245,12 +435,202 @@ spec: Default value is AUTH_MODE_DISABLED. Possible values are: AUTH_MODE_UNSPECIFIED, AUTH_MODE_IAM_AUTH, AUTH_MODE_DISABLED. type: string + crossClusterReplicationConfig: + description: field to the configuration file to match the latest + value in the state. 
+ properties: + clusterRole: + description: from SECONDARY to PRIMARY. + type: string + primaryCluster: + description: field. + properties: + cluster: + description: 'The full resource path of the secondary + cluster in the format: projects/{project}/locations/{region}/clusters/{cluster-id}' + type: string + clusterRef: + description: Reference to a Cluster in redis to populate + cluster. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + clusterSelector: + description: Selector for a Cluster in redis to populate + cluster. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + secondaryClusters: + description: list with the new secondaries. The new secondaries + are the current primary and other secondary clusters(if + any). + items: + properties: + cluster: + description: 'The full resource path of the secondary + cluster in the format: projects/{project}/locations/{region}/clusters/{cluster-id}' + type: string + type: object + type: array + type: object + deletionProtectionEnabled: + description: |- + Optional. Indicates if the cluster is deletion protected or not. + If the value if set to true, any delete cluster operation will fail. + Default value is true. + type: boolean + maintenancePolicy: + description: |- + Maintenance policy for a cluster + Structure is documented below. + properties: + weeklyMaintenanceWindow: + description: |- + Optional. Maintenance window that is applied to resources covered by this policy. + Minimum 1. For the current version, the maximum number + of weekly_window is expected to be one. + Structure is documented below. + items: + properties: + day: + description: Required. The day of week that maintenance + updates occur. + type: string + startTime: + description: |- + Required. Start time of the window in UTC time. + Structure is documented below. + properties: + hours: + description: |- + Hours of day in 24 hour format. Should be from 0 to 23. + An API may choose to allow the value "24:00:00" for scenarios like business closing time. + type: number + minutes: + description: Minutes of hour of day. 
Must be from + 0 to 59. + type: number + nanos: + description: Fractions of seconds in nanoseconds. + Must be from 0 to 999,999,999. + type: number + seconds: + description: |- + Seconds of minutes of the time. Must normally be from 0 to 59. + An API may allow the value 60 if it allows leap-seconds. + type: number + type: object + type: object + type: array + type: object nodeType: description: |- The nodeType for the Redis cluster. If not provided, REDIS_HIGHMEM_MEDIUM will be used as default Possible values are: REDIS_SHARED_CORE_NANO, REDIS_HIGHMEM_MEDIUM, REDIS_HIGHMEM_XLARGE, REDIS_STANDARD_SMALL. type: string + persistenceConfig: + description: |- + Persistence config (RDB, AOF) for the cluster. + Structure is documented below. + properties: + aofConfig: + description: |- + AOF configuration. This field will be ignored if mode is not AOF. + Structure is documented below. + properties: + appendFsync: + description: Optional. Available fsync modes. + type: string + type: object + mode: + description: Optional. Controls whether Persistence features + are enabled. If not provided, the existing value will be + used. + type: string + rdbConfig: + description: |- + RDB configuration. This field will be ignored if mode is not RDB. + Structure is documented below. + properties: + rdbSnapshotPeriod: + description: Optional. Available snapshot periods for + scheduling. + type: string + rdbSnapshotStartTime: + description: |- + The time that the first snapshot was/will be attempted, and to which + future snapshots will be aligned. + If not provided, the current time will be used. + type: string + type: object + type: object project: description: |- The ID of the project in which the resource belongs. @@ -580,6 +960,96 @@ spec: RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". 
type: string + crossClusterReplicationConfig: + description: field to the configuration file to match the latest + value in the state. + properties: + clusterRole: + description: from SECONDARY to PRIMARY. + type: string + membership: + description: |- + (Output) + An output only view of all the member clusters participating in cross cluster replication. This field is populated for all the member clusters irrespective of their cluster role. + Structure is documented below. + items: + properties: + primaryCluster: + description: |- + (Output) + Details of the primary cluster that is used as the replication source for all the secondary clusters. + Structure is documented below. + items: + properties: + cluster: + description: 'The full resource path of the secondary + cluster in the format: projects/{project}/locations/{region}/clusters/{cluster-id}' + type: string + uid: + description: System assigned, unique identifier + for the cluster. + type: string + type: object + type: array + secondaryClusters: + description: |- + (Output) + List of secondary clusters that are replicating from the primary cluster. + Structure is documented below. + items: + properties: + cluster: + description: 'The full resource path of the secondary + cluster in the format: projects/{project}/locations/{region}/clusters/{cluster-id}' + type: string + uid: + description: System assigned, unique identifier + for the cluster. + type: string + type: object + type: array + type: object + type: array + primaryCluster: + description: field. + properties: + cluster: + description: 'The full resource path of the secondary + cluster in the format: projects/{project}/locations/{region}/clusters/{cluster-id}' + type: string + uid: + description: System assigned, unique identifier for the + cluster. + type: string + type: object + secondaryClusters: + description: list with the new secondaries. The new secondaries + are the current primary and other secondary clusters(if + any). 
+ items: + properties: + cluster: + description: 'The full resource path of the secondary + cluster in the format: projects/{project}/locations/{region}/clusters/{cluster-id}' + type: string + uid: + description: System assigned, unique identifier for + the cluster. + type: string + type: object + type: array + updateTime: + description: |- + (Output) + The last time cross cluster replication config was updated. + type: string + type: object + deletionProtectionEnabled: + description: |- + Optional. Indicates if the cluster is deletion protected or not. + If the value if set to true, any delete cluster operation will fail. + Default value is true. + type: boolean discoveryEndpoints: description: |- Output only. Endpoints created on each given network, @@ -612,12 +1082,144 @@ spec: id: description: an identifier for the resource with format projects/{{project}}/locations/{{region}}/clusters/{{name}} type: string + maintenancePolicy: + description: |- + Maintenance policy for a cluster + Structure is documented below. + properties: + createTime: + description: |- + (Output) + Output only. The time when the policy was created. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. + type: string + updateTime: + description: |- + (Output) + Output only. The time when the policy was last updated. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. + type: string + weeklyMaintenanceWindow: + description: |- + Optional. Maintenance window that is applied to resources covered by this policy. + Minimum 1. For the current version, the maximum number + of weekly_window is expected to be one. + Structure is documented below. + items: + properties: + day: + description: Required. The day of week that maintenance + updates occur. + type: string + duration: + description: |- + (Output) + Output only. Duration of the maintenance window. 
+ The current window is fixed at 1 hour. + A duration in seconds with up to nine fractional digits, + terminated by 's'. Example: "3.5s". + type: string + startTime: + description: |- + Required. Start time of the window in UTC time. + Structure is documented below. + properties: + hours: + description: |- + Hours of day in 24 hour format. Should be from 0 to 23. + An API may choose to allow the value "24:00:00" for scenarios like business closing time. + type: number + minutes: + description: Minutes of hour of day. Must be from + 0 to 59. + type: number + nanos: + description: Fractions of seconds in nanoseconds. + Must be from 0 to 999,999,999. + type: number + seconds: + description: |- + Seconds of minutes of the time. Must normally be from 0 to 59. + An API may allow the value 60 if it allows leap-seconds. + type: number + type: object + type: object + type: array + type: object + maintenanceSchedule: + description: |- + Upcoming maintenance schedule. + Structure is documented below. + items: + properties: + endTime: + description: |- + (Output) + Output only. The end time of any upcoming scheduled maintenance for this cluster. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. + type: string + scheduleDeadlineTime: + description: |- + (Output) + Output only. The deadline that the maintenance schedule start time + can not go beyond, including reschedule. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. + type: string + startTime: + description: |- + (Output) + Output only. The start time of any upcoming scheduled maintenance for this cluster. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. + type: string + type: object + type: array nodeType: description: |- The nodeType for the Redis cluster. 
If not provided, REDIS_HIGHMEM_MEDIUM will be used as default Possible values are: REDIS_SHARED_CORE_NANO, REDIS_HIGHMEM_MEDIUM, REDIS_HIGHMEM_XLARGE, REDIS_STANDARD_SMALL. type: string + persistenceConfig: + description: |- + Persistence config (RDB, AOF) for the cluster. + Structure is documented below. + properties: + aofConfig: + description: |- + AOF configuration. This field will be ignored if mode is not AOF. + Structure is documented below. + properties: + appendFsync: + description: Optional. Available fsync modes. + type: string + type: object + mode: + description: Optional. Controls whether Persistence features + are enabled. If not provided, the existing value will be + used. + type: string + rdbConfig: + description: |- + RDB configuration. This field will be ignored if mode is not RDB. + Structure is documented below. + properties: + rdbSnapshotPeriod: + description: Optional. Available snapshot periods for + scheduling. + type: string + rdbSnapshotStartTime: + description: |- + The time that the first snapshot was/will be attempted, and to which + future snapshots will be aligned. + If not provided, the current time will be used. + type: string + type: object + type: object preciseSizeGb: description: Output only. Redis memory precise size in GB for the entire cluster. @@ -705,7 +1307,7 @@ spec: properties: updateInfo: description: |- - A nested object resource + A nested object resource. Structure is documented below. properties: targetReplicaCount: diff --git a/package/crds/sourcerepo.gcp.upbound.io_repositories.yaml b/package/crds/sourcerepo.gcp.upbound.io_repositories.yaml index f51e6e50c..cdb85869e 100644 --- a/package/crds/sourcerepo.gcp.upbound.io_repositories.yaml +++ b/package/crds/sourcerepo.gcp.upbound.io_repositories.yaml @@ -73,6 +73,10 @@ spec: type: string forProvider: properties: + createIgnoreAlreadyExists: + description: If set to true, skip repository creation if a repository + with the same name already exists. 
+ type: boolean project: description: |- The ID of the project in which the resource belongs. @@ -267,6 +271,10 @@ spec: for example because of an external controller is managing them, like an autoscaler. properties: + createIgnoreAlreadyExists: + description: If set to true, skip repository creation if a repository + with the same name already exists. + type: boolean project: description: |- The ID of the project in which the resource belongs. @@ -620,6 +628,10 @@ spec: properties: atProvider: properties: + createIgnoreAlreadyExists: + description: If set to true, skip repository creation if a repository + with the same name already exists. + type: boolean id: description: an identifier for the resource with format projects/{{project}}/repos/{{name}} type: string diff --git a/package/crds/spanner.gcp.upbound.io_databases.yaml b/package/crds/spanner.gcp.upbound.io_databases.yaml index 93617e126..2a5df173b 100644 --- a/package/crds/spanner.gcp.upbound.io_databases.yaml +++ b/package/crds/spanner.gcp.upbound.io_databases.yaml @@ -631,11 +631,6 @@ spec: items: type: string type: array - deletionProtection: - description: |- - Defaults to true. - When the field is set to false, deleting the database is allowed. - type: boolean enableDropProtection: description: |- Whether drop protection is enabled for this database. Defaults to false. @@ -653,6 +648,13 @@ spec: Fully qualified name of the KMS key to use to encrypt this database. This key must exist in the same location as the Spanner Database. type: string + kmsKeyNames: + description: |- + Fully qualified name of the KMS keys to use to encrypt this database. The keys must exist + in the same locations as the Spanner Database. + items: + type: string + type: array type: object instance: description: The instance to create the database on. @@ -773,11 +775,6 @@ spec: items: type: string type: array - deletionProtection: - description: |- - Defaults to true. 
- When the field is set to false, deleting the database is allowed. - type: boolean enableDropProtection: description: |- Whether drop protection is enabled for this database. Defaults to false. @@ -795,6 +792,13 @@ spec: Fully qualified name of the KMS key to use to encrypt this database. This key must exist in the same location as the Spanner Database. type: string + kmsKeyNames: + description: |- + Fully qualified name of the KMS keys to use to encrypt this database. The keys must exist + in the same locations as the Spanner Database. + items: + type: string + type: array type: object project: description: |- @@ -1019,6 +1023,13 @@ spec: Fully qualified name of the KMS key to use to encrypt this database. This key must exist in the same location as the Spanner Database. type: string + kmsKeyNames: + description: |- + Fully qualified name of the KMS keys to use to encrypt this database. The keys must exist + in the same locations as the Spanner Database. + items: + type: string + type: array type: object id: description: an identifier for the resource with format {{instance}}/{{name}} diff --git a/package/crds/spanner.gcp.upbound.io_instances.yaml b/package/crds/spanner.gcp.upbound.io_instances.yaml index 318c3aa10..ee9cce40f 100644 --- a/package/crds/spanner.gcp.upbound.io_instances.yaml +++ b/package/crds/spanner.gcp.upbound.io_instances.yaml @@ -750,6 +750,44 @@ spec: the instance. Structure is documented below. properties: + asymmetricAutoscalingOptions: + description: |- + Asymmetric autoscaling options for specific replicas. + Structure is documented below. + items: + properties: + overrides: + description: |- + A nested object resource. + Structure is documented below. + properties: + autoscalingLimits: + description: |- + A nested object resource. + Structure is documented below. + properties: + maxNodes: + description: The maximum number of nodes for + this specific replica. 
+ type: number + minNodes: + description: The minimum number of nodes for + this specific replica. + type: number + type: object + type: object + replicaSelection: + description: |- + A nested object resource. + Structure is documented below. + properties: + location: + description: The location of the replica to apply + asymmetric autoscaling options. + type: string + type: object + type: object + type: array autoscalingLimits: description: |- Defines scale in controls to reduce the risk of response latency @@ -761,9 +799,8 @@ spec: Structure is documented below. properties: maxNodes: - description: |- - Specifies maximum number of nodes allocated to the instance. If set, this number - should be greater than or equal to min_nodes. + description: The maximum number of nodes for this specific + replica. type: number maxProcessingUnits: description: |- @@ -772,9 +809,8 @@ spec: min_processing_units. type: number minNodes: - description: |- - Specifies number of nodes allocated to the instance. If set, this number - should be greater than or equal to 1. + description: The minimum number of nodes for this specific + replica. type: number minProcessingUnits: description: |- @@ -823,6 +859,11 @@ spec: The descriptive name for this instance as it appears in UIs. Must be unique per project and between 4 and 30 characters in length. type: string + edition: + description: |- + The edition selected for this instance. Different editions provide different capabilities at different price points. + Possible values are: EDITION_UNSPECIFIED, STANDARD, ENTERPRISE, ENTERPRISE_PLUS. + type: string forceDestroy: description: |- When deleting a spanner instance, this boolean option will delete all backups of this instance. @@ -870,6 +911,44 @@ spec: the instance. Structure is documented below. properties: + asymmetricAutoscalingOptions: + description: |- + Asymmetric autoscaling options for specific replicas. + Structure is documented below. 
+ items: + properties: + overrides: + description: |- + A nested object resource. + Structure is documented below. + properties: + autoscalingLimits: + description: |- + A nested object resource. + Structure is documented below. + properties: + maxNodes: + description: The maximum number of nodes for + this specific replica. + type: number + minNodes: + description: The minimum number of nodes for + this specific replica. + type: number + type: object + type: object + replicaSelection: + description: |- + A nested object resource. + Structure is documented below. + properties: + location: + description: The location of the replica to apply + asymmetric autoscaling options. + type: string + type: object + type: object + type: array autoscalingLimits: description: |- Defines scale in controls to reduce the risk of response latency @@ -881,9 +960,8 @@ spec: Structure is documented below. properties: maxNodes: - description: |- - Specifies maximum number of nodes allocated to the instance. If set, this number - should be greater than or equal to min_nodes. + description: The maximum number of nodes for this specific + replica. type: number maxProcessingUnits: description: |- @@ -892,9 +970,8 @@ spec: min_processing_units. type: number minNodes: - description: |- - Specifies number of nodes allocated to the instance. If set, this number - should be greater than or equal to 1. + description: The minimum number of nodes for this specific + replica. type: number minProcessingUnits: description: |- @@ -943,6 +1020,11 @@ spec: The descriptive name for this instance as it appears in UIs. Must be unique per project and between 4 and 30 characters in length. type: string + edition: + description: |- + The edition selected for this instance. Different editions provide different capabilities at different price points. + Possible values are: EDITION_UNSPECIFIED, STANDARD, ENTERPRISE, ENTERPRISE_PLUS. 
+ type: string forceDestroy: description: |- When deleting a spanner instance, this boolean option will delete all backups of this instance. @@ -1158,6 +1240,44 @@ spec: the instance. Structure is documented below. properties: + asymmetricAutoscalingOptions: + description: |- + Asymmetric autoscaling options for specific replicas. + Structure is documented below. + items: + properties: + overrides: + description: |- + A nested object resource. + Structure is documented below. + properties: + autoscalingLimits: + description: |- + A nested object resource. + Structure is documented below. + properties: + maxNodes: + description: The maximum number of nodes for + this specific replica. + type: number + minNodes: + description: The minimum number of nodes for + this specific replica. + type: number + type: object + type: object + replicaSelection: + description: |- + A nested object resource. + Structure is documented below. + properties: + location: + description: The location of the replica to apply + asymmetric autoscaling options. + type: string + type: object + type: object + type: array autoscalingLimits: description: |- Defines scale in controls to reduce the risk of response latency @@ -1169,9 +1289,8 @@ spec: Structure is documented below. properties: maxNodes: - description: |- - Specifies maximum number of nodes allocated to the instance. If set, this number - should be greater than or equal to min_nodes. + description: The maximum number of nodes for this specific + replica. type: number maxProcessingUnits: description: |- @@ -1180,9 +1299,8 @@ spec: min_processing_units. type: number minNodes: - description: |- - Specifies number of nodes allocated to the instance. If set, this number - should be greater than or equal to 1. + description: The minimum number of nodes for this specific + replica. type: number minProcessingUnits: description: |- @@ -1231,6 +1349,11 @@ spec: The descriptive name for this instance as it appears in UIs. 
Must be unique per project and between 4 and 30 characters in length. type: string + edition: + description: |- + The edition selected for this instance. Different editions provide different capabilities at different price points. + Possible values are: EDITION_UNSPECIFIED, STANDARD, ENTERPRISE, ENTERPRISE_PLUS. + type: string effectiveLabels: additionalProperties: type: string diff --git a/package/crds/sql.gcp.upbound.io_databaseinstances.yaml b/package/crds/sql.gcp.upbound.io_databaseinstances.yaml index ba69edee3..9b7622080 100644 --- a/package/crds/sql.gcp.upbound.io_databaseinstances.yaml +++ b/package/crds/sql.gcp.upbound.io_databaseinstances.yaml @@ -2427,18 +2427,14 @@ spec: description: |- The MySQL, PostgreSQL or SQL Server version to use. Supported values include MYSQL_5_6, - MYSQL_5_7, MYSQL_8_0, POSTGRES_9_6,POSTGRES_10, POSTGRES_11, - POSTGRES_12, POSTGRES_13, POSTGRES_14, POSTGRES_15, SQLSERVER_2017_STANDARD, - SQLSERVER_2017_ENTERPRISE, SQLSERVER_2017_EXPRESS, SQLSERVER_2017_WEB. + MYSQL_5_7, MYSQL_8_0, MYSQL_8_4, POSTGRES_9_6,POSTGRES_10, POSTGRES_11, + POSTGRES_12, POSTGRES_13, POSTGRES_14, POSTGRES_15, POSTGRES_16, POSTGRES_17, + SQLSERVER_2017_STANDARD, SQLSERVER_2017_ENTERPRISE, SQLSERVER_2017_EXPRESS, SQLSERVER_2017_WEB. SQLSERVER_2019_STANDARD, SQLSERVER_2019_ENTERPRISE, SQLSERVER_2019_EXPRESS, SQLSERVER_2019_WEB. Database Version Policies includes an up-to-date reference of supported versions. type: string - deletionProtection: - description: When the field is set to false, deleting the instance - is allowed. - type: boolean encryptionKeyName: description: |- The full path to the encryption key used for the CMEK disk encryption. @@ -2476,13 +2472,17 @@ spec: replicaConfiguration: description: |- The configuration for replication. The - configuration is detailed below. Valid only for MySQL instances. + configuration is detailed below. properties: caCertificate: description: |- PEM representation of the trusted CA's x509 certificate. 
type: string + cascadableReplica: + description: Specifies if the replica is a cascadable replica. + If true, instance must be in different region from primary. + type: boolean clientCertificate: description: |- PEM representation of the replica's x509 @@ -2501,7 +2501,8 @@ spec: dumpFilePath: description: |- Path to a SQL file in GCS from which replica - instances are created. Format is gs://bucket/filename. + instances are created. Format is gs://bucket/filename. Note, if the master + instance is a source representation instance this field must be present. type: string failoverTarget: description: |- @@ -2545,6 +2546,11 @@ spec: value is checked during the SSL handshake. type: boolean type: object + replicaNames: + description: List of replica names. Can be updated. + items: + type: string + type: array restoreBackupContext: description: |- The context needed to restore the database to a backup run. The configuration is detailed below. Adding or modifying this @@ -2664,9 +2670,10 @@ spec: description: The name of server instance collation. type: string connectorEnforcement: - description: Enables the enforcement of Cloud SQL Auth Proxy - or Cloud SQL connectors for all the connections. If enabled, - all the direct connections are rejected. + description: Control the enforcement of Cloud SQL Auth Proxy + or Cloud SQL connectors for all the connections, can be + REQUIRED or NOT_REQUIRED. If enabled, all the direct connections + are rejected. type: string dataCacheConfig: properties: @@ -2920,28 +2927,52 @@ spec: type: string type: array x-kubernetes-list-type: set + pscAutoConnections: + description: A comma-separated list of networks + or a comma-separated list of network-project pairs. + Each project in this list is represented by a + project number (numeric) or by a project ID (alphanumeric). + This allows Private Service Connect connections + to be created automatically for the specified + networks. 
+ items: + properties: + consumerNetwork: + description: '"The consumer network of this + consumer endpoint. This must be a resource + path that includes both the host project + and the network name. For example, projects/project1/global/networks/network1. + The consumer host project of this network + might be different from the consumer service + project."' + type: string + consumerServiceProjectId: + description: The project ID of consumer service + project of this consumer endpoint. + type: string + type: object + type: array pscEnabled: description: Whether PSC connectivity is enabled for this instance. type: boolean type: object type: array - requireSsl: - description: Whether SSL connections over IP are enforced - or not. To change this field, also set the corresponding - value in ssl_mode. It will be fully deprecated in a - future major release. For now, please use ssl_mode with - a compatible require_ssl value instead. - type: boolean serverCaMode: description: Specify how the server certificate's Certificate - Authority is hosted. Supported value is GOOGLE_MANAGED_INTERNAL_CA. + Authority is hosted. Supported values are GOOGLE_MANAGED_INTERNAL_CA + and GOOGLE_MANAGED_CAS_CA. + type: string + serverCaPool: + description: The resource name of the server CA pool for + an instance with CUSTOMER_MANAGED_CAS_CA as the server_ca_mode. type: string sslMode: description: Specify how SSL connection should be enforced - in DB connections. This field provides more SSL enforcement - options compared to require_ssl. To change this field, - also set the correspoding value in require_ssl. + in DB connections. Supported values are ALLOW_UNENCRYPTED_AND_ENCRYPTED, + ENCRYPTED_ONLY, and TRUSTED_CLIENT_CERTIFICATE_REQUIRED + (not supported for SQL Server). See API reference doc + for details. type: string type: object locationPreference: @@ -3093,18 +3124,14 @@ spec: description: |- The MySQL, PostgreSQL or SQL Server version to use. 
Supported values include MYSQL_5_6, - MYSQL_5_7, MYSQL_8_0, POSTGRES_9_6,POSTGRES_10, POSTGRES_11, - POSTGRES_12, POSTGRES_13, POSTGRES_14, POSTGRES_15, SQLSERVER_2017_STANDARD, - SQLSERVER_2017_ENTERPRISE, SQLSERVER_2017_EXPRESS, SQLSERVER_2017_WEB. + MYSQL_5_7, MYSQL_8_0, MYSQL_8_4, POSTGRES_9_6,POSTGRES_10, POSTGRES_11, + POSTGRES_12, POSTGRES_13, POSTGRES_14, POSTGRES_15, POSTGRES_16, POSTGRES_17, + SQLSERVER_2017_STANDARD, SQLSERVER_2017_ENTERPRISE, SQLSERVER_2017_EXPRESS, SQLSERVER_2017_WEB. SQLSERVER_2019_STANDARD, SQLSERVER_2019_ENTERPRISE, SQLSERVER_2019_EXPRESS, SQLSERVER_2019_WEB. Database Version Policies includes an up-to-date reference of supported versions. type: string - deletionProtection: - description: When the field is set to false, deleting the instance - is allowed. - type: boolean encryptionKeyName: description: |- The full path to the encryption key used for the CMEK disk encryption. @@ -3142,13 +3169,17 @@ spec: replicaConfiguration: description: |- The configuration for replication. The - configuration is detailed below. Valid only for MySQL instances. + configuration is detailed below. properties: caCertificate: description: |- PEM representation of the trusted CA's x509 certificate. type: string + cascadableReplica: + description: Specifies if the replica is a cascadable replica. + If true, instance must be in different region from primary. + type: boolean clientCertificate: description: |- PEM representation of the replica's x509 @@ -3167,7 +3198,8 @@ spec: dumpFilePath: description: |- Path to a SQL file in GCS from which replica - instances are created. Format is gs://bucket/filename. + instances are created. Format is gs://bucket/filename. Note, if the master + instance is a source representation instance this field must be present. type: string failoverTarget: description: |- @@ -3211,6 +3243,11 @@ spec: value is checked during the SSL handshake. type: boolean type: object + replicaNames: + description: List of replica names. 
Can be updated. + items: + type: string + type: array restoreBackupContext: description: |- The context needed to restore the database to a backup run. The configuration is detailed below. Adding or modifying this @@ -3330,9 +3367,10 @@ spec: description: The name of server instance collation. type: string connectorEnforcement: - description: Enables the enforcement of Cloud SQL Auth Proxy - or Cloud SQL connectors for all the connections. If enabled, - all the direct connections are rejected. + description: Control the enforcement of Cloud SQL Auth Proxy + or Cloud SQL connectors for all the connections, can be + REQUIRED or NOT_REQUIRED. If enabled, all the direct connections + are rejected. type: string dataCacheConfig: properties: @@ -3586,28 +3624,52 @@ spec: type: string type: array x-kubernetes-list-type: set + pscAutoConnections: + description: A comma-separated list of networks + or a comma-separated list of network-project pairs. + Each project in this list is represented by a + project number (numeric) or by a project ID (alphanumeric). + This allows Private Service Connect connections + to be created automatically for the specified + networks. + items: + properties: + consumerNetwork: + description: '"The consumer network of this + consumer endpoint. This must be a resource + path that includes both the host project + and the network name. For example, projects/project1/global/networks/network1. + The consumer host project of this network + might be different from the consumer service + project."' + type: string + consumerServiceProjectId: + description: The project ID of consumer service + project of this consumer endpoint. + type: string + type: object + type: array pscEnabled: description: Whether PSC connectivity is enabled for this instance. type: boolean type: object type: array - requireSsl: - description: Whether SSL connections over IP are enforced - or not. To change this field, also set the corresponding - value in ssl_mode. 
It will be fully deprecated in a - future major release. For now, please use ssl_mode with - a compatible require_ssl value instead. - type: boolean serverCaMode: description: Specify how the server certificate's Certificate - Authority is hosted. Supported value is GOOGLE_MANAGED_INTERNAL_CA. + Authority is hosted. Supported values are GOOGLE_MANAGED_INTERNAL_CA + and GOOGLE_MANAGED_CAS_CA. + type: string + serverCaPool: + description: The resource name of the server CA pool for + an instance with CUSTOMER_MANAGED_CAS_CA as the server_ca_mode. type: string sslMode: description: Specify how SSL connection should be enforced - in DB connections. This field provides more SSL enforcement - options compared to require_ssl. To change this field, - also set the correspoding value in require_ssl. + in DB connections. Supported values are ALLOW_UNENCRYPTED_AND_ENCRYPTED, + ENCRYPTED_ONLY, and TRUSTED_CLIENT_CERTIFICATE_REQUIRED + (not supported for SQL Server). See API reference doc + for details. type: string type: object locationPreference: @@ -3934,9 +3996,9 @@ spec: description: |- The MySQL, PostgreSQL or SQL Server version to use. Supported values include MYSQL_5_6, - MYSQL_5_7, MYSQL_8_0, POSTGRES_9_6,POSTGRES_10, POSTGRES_11, - POSTGRES_12, POSTGRES_13, POSTGRES_14, POSTGRES_15, SQLSERVER_2017_STANDARD, - SQLSERVER_2017_ENTERPRISE, SQLSERVER_2017_EXPRESS, SQLSERVER_2017_WEB. + MYSQL_5_7, MYSQL_8_0, MYSQL_8_4, POSTGRES_9_6,POSTGRES_10, POSTGRES_11, + POSTGRES_12, POSTGRES_13, POSTGRES_14, POSTGRES_15, POSTGRES_16, POSTGRES_17, + SQLSERVER_2017_STANDARD, SQLSERVER_2017_ENTERPRISE, SQLSERVER_2017_EXPRESS, SQLSERVER_2017_WEB. SQLSERVER_2019_STANDARD, SQLSERVER_2019_ENTERPRISE, SQLSERVER_2019_EXPRESS, SQLSERVER_2019_WEB. Database Version Policies @@ -3947,9 +4009,8 @@ spec: is allowed. type: boolean dnsName: - description: |- - The name of the instance. This is done because after a name is used, it cannot be reused for - up to one week. 
+ description: The DNS name of the instance. See Connect to an instance + using Private Service Connect for more details. type: string encryptionKeyName: description: |- @@ -4025,13 +4086,17 @@ spec: replicaConfiguration: description: |- The configuration for replication. The - configuration is detailed below. Valid only for MySQL instances. + configuration is detailed below. properties: caCertificate: description: |- PEM representation of the trusted CA's x509 certificate. type: string + cascadableReplica: + description: Specifies if the replica is a cascadable replica. + If true, instance must be in different region from primary. + type: boolean clientCertificate: description: |- PEM representation of the replica's x509 @@ -4050,7 +4115,8 @@ spec: dumpFilePath: description: |- Path to a SQL file in GCS from which replica - instances are created. Format is gs://bucket/filename. + instances are created. Format is gs://bucket/filename. Note, if the master + instance is a source representation instance this field must be present. type: string failoverTarget: description: |- @@ -4077,6 +4143,11 @@ spec: value is checked during the SSL handshake. type: boolean type: object + replicaNames: + description: List of replica names. Can be updated. + items: + type: string + type: array restoreBackupContext: description: |- The context needed to restore the database to a backup run. The configuration is detailed below. Adding or modifying this @@ -4186,9 +4257,10 @@ spec: description: The name of server instance collation. type: string connectorEnforcement: - description: Enables the enforcement of Cloud SQL Auth Proxy - or Cloud SQL connectors for all the connections. If enabled, - all the direct connections are rejected. + description: Control the enforcement of Cloud SQL Auth Proxy + or Cloud SQL connectors for all the connections, can be + REQUIRED or NOT_REQUIRED. If enabled, all the direct connections + are rejected. 
type: string dataCacheConfig: properties: @@ -4366,28 +4438,52 @@ spec: type: string type: array x-kubernetes-list-type: set + pscAutoConnections: + description: A comma-separated list of networks + or a comma-separated list of network-project pairs. + Each project in this list is represented by a + project number (numeric) or by a project ID (alphanumeric). + This allows Private Service Connect connections + to be created automatically for the specified + networks. + items: + properties: + consumerNetwork: + description: '"The consumer network of this + consumer endpoint. This must be a resource + path that includes both the host project + and the network name. For example, projects/project1/global/networks/network1. + The consumer host project of this network + might be different from the consumer service + project."' + type: string + consumerServiceProjectId: + description: The project ID of consumer service + project of this consumer endpoint. + type: string + type: object + type: array pscEnabled: description: Whether PSC connectivity is enabled for this instance. type: boolean type: object type: array - requireSsl: - description: Whether SSL connections over IP are enforced - or not. To change this field, also set the corresponding - value in ssl_mode. It will be fully deprecated in a - future major release. For now, please use ssl_mode with - a compatible require_ssl value instead. - type: boolean serverCaMode: description: Specify how the server certificate's Certificate - Authority is hosted. Supported value is GOOGLE_MANAGED_INTERNAL_CA. + Authority is hosted. Supported values are GOOGLE_MANAGED_INTERNAL_CA + and GOOGLE_MANAGED_CAS_CA. + type: string + serverCaPool: + description: The resource name of the server CA pool for + an instance with CUSTOMER_MANAGED_CAS_CA as the server_ca_mode. type: string sslMode: description: Specify how SSL connection should be enforced - in DB connections. 
This field provides more SSL enforcement - options compared to require_ssl. To change this field, - also set the correspoding value in require_ssl. + in DB connections. Supported values are ALLOW_UNENCRYPTED_AND_ENCRYPTED, + ENCRYPTED_ONLY, and TRUSTED_CLIENT_CERTIFICATE_REQUIRED + (not supported for SQL Server). See API reference doc + for details. type: string type: object locationPreference: diff --git a/package/crds/sql.gcp.upbound.io_users.yaml b/package/crds/sql.gcp.upbound.io_users.yaml index cfd2b7c16..49279452d 100644 --- a/package/crds/sql.gcp.upbound.io_users.yaml +++ b/package/crds/sql.gcp.upbound.io_users.yaml @@ -890,9 +890,10 @@ spec: description: |- The user type. It determines the method to authenticate the user during login. The default is the database's built-in user type. Flags - include "BUILT_IN", "CLOUD_IAM_USER", and "CLOUD_IAM_SERVICE_ACCOUNT" for both - Postgres and MySQL. - MySQL also includes "CLOUD_IAM_GROUP", "CLOUD_IAM_GROUP_USER" and "CLOUD_IAM_GROUP_SERVICE_ACCOUNT". + include "BUILT_IN", "CLOUD_IAM_USER", "CLOUD_IAM_SERVICE_ACCOUNT", "CLOUD_IAM_GROUP", + "CLOUD_IAM_GROUP_USER" and "CLOUD_IAM_GROUP_SERVICE_ACCOUNT" for + Postgres + and MySQL. type: string type: object initProvider: @@ -1051,9 +1052,10 @@ spec: description: |- The user type. It determines the method to authenticate the user during login. The default is the database's built-in user type. Flags - include "BUILT_IN", "CLOUD_IAM_USER", and "CLOUD_IAM_SERVICE_ACCOUNT" for both - Postgres and MySQL. - MySQL also includes "CLOUD_IAM_GROUP", "CLOUD_IAM_GROUP_USER" and "CLOUD_IAM_GROUP_SERVICE_ACCOUNT". + include "BUILT_IN", "CLOUD_IAM_USER", "CLOUD_IAM_SERVICE_ACCOUNT", "CLOUD_IAM_GROUP", + "CLOUD_IAM_GROUP_USER" and "CLOUD_IAM_GROUP_SERVICE_ACCOUNT" for + Postgres + and MySQL. type: string type: object managementPolicies: @@ -1300,9 +1302,10 @@ spec: description: |- The user type. It determines the method to authenticate the user during login. 
The default is the database's built-in user type. Flags - include "BUILT_IN", "CLOUD_IAM_USER", and "CLOUD_IAM_SERVICE_ACCOUNT" for both - Postgres and MySQL. - MySQL also includes "CLOUD_IAM_GROUP", "CLOUD_IAM_GROUP_USER" and "CLOUD_IAM_GROUP_SERVICE_ACCOUNT". + include "BUILT_IN", "CLOUD_IAM_USER", "CLOUD_IAM_SERVICE_ACCOUNT", "CLOUD_IAM_GROUP", + "CLOUD_IAM_GROUP_USER" and "CLOUD_IAM_GROUP_SERVICE_ACCOUNT" for + Postgres + and MySQL. type: string type: object conditions: diff --git a/package/crds/storage.gcp.upbound.io_buckets.yaml b/package/crds/storage.gcp.upbound.io_buckets.yaml index 819343ff9..48b77f088 100644 --- a/package/crds/storage.gcp.upbound.io_buckets.yaml +++ b/package/crds/storage.gcp.upbound.io_buckets.yaml @@ -1529,6 +1529,16 @@ spec: When deleting a bucket, this boolean option will delete all contained objects. type: boolean + hierarchicalNamespace: + description: The bucket's hierarchical namespace policy, which + defines the bucket capability to handle folders in logical structure. + Structure is documented below. To use this configuration, uniform_bucket_level_access + must be enabled on bucket. + properties: + enabled: + description: Enables hierarchical namespace for the bucket. + type: boolean + type: object labels: additionalProperties: type: string @@ -1563,9 +1573,9 @@ spec: properties: age: description: Minimum age of an object in days to satisfy - this condition. If not supplied alongside another - condition and without setting no_age to true, a default - age of 0 will be set. + this condition. Note To set 0 value of age, send_age_if_zero + should be set true otherwise 0 value of age field + will be ignored. type: number createdBefore: description: A date in the RFC 3339 format YYYY-MM-DD. @@ -1612,17 +1622,6 @@ spec: items: type: string type: array - noAge: - description: While set true, age value will be omitted - from requests. 
This prevents a default age of 0 from - being applied, and if you do not have an age value - set, setting this to true is strongly recommended. - When unset and other conditions are set to zero values, - this can result in a rule that applies your action - to all files in the bucket. no_age is deprecated and - will be removed in a future major release. Use send_age_if_zero - instead. - type: boolean noncurrentTimeBefore: description: Relevant only for versioned objects. The date in RFC 3339 (e.g. 2017-06-13) when the object @@ -1880,6 +1879,16 @@ spec: When deleting a bucket, this boolean option will delete all contained objects. type: boolean + hierarchicalNamespace: + description: The bucket's hierarchical namespace policy, which + defines the bucket capability to handle folders in logical structure. + Structure is documented below. To use this configuration, uniform_bucket_level_access + must be enabled on bucket. + properties: + enabled: + description: Enables hierarchical namespace for the bucket. + type: boolean + type: object labels: additionalProperties: type: string @@ -1914,9 +1923,9 @@ spec: properties: age: description: Minimum age of an object in days to satisfy - this condition. If not supplied alongside another - condition and without setting no_age to true, a default - age of 0 will be set. + this condition. Note To set 0 value of age, send_age_if_zero + should be set true otherwise 0 value of age field + will be ignored. type: number createdBefore: description: A date in the RFC 3339 format YYYY-MM-DD. @@ -1963,17 +1972,6 @@ spec: items: type: string type: array - noAge: - description: While set true, age value will be omitted - from requests. This prevents a default age of 0 from - being applied, and if you do not have an age value - set, setting this to true is strongly recommended. - When unset and other conditions are set to zero values, - this can result in a rule that applies your action - to all files in the bucket. 
no_age is deprecated and - will be removed in a future major release. Use send_age_if_zero - instead. - type: boolean noncurrentTimeBefore: description: Relevant only for versioned objects. The date in RFC 3339 (e.g. 2017-06-13) when the object @@ -2401,6 +2399,16 @@ spec: When deleting a bucket, this boolean option will delete all contained objects. type: boolean + hierarchicalNamespace: + description: The bucket's hierarchical namespace policy, which + defines the bucket capability to handle folders in logical structure. + Structure is documented below. To use this configuration, uniform_bucket_level_access + must be enabled on bucket. + properties: + enabled: + description: Enables hierarchical namespace for the bucket. + type: boolean + type: object id: type: string labels: @@ -2437,9 +2445,9 @@ spec: properties: age: description: Minimum age of an object in days to satisfy - this condition. If not supplied alongside another - condition and without setting no_age to true, a default - age of 0 will be set. + this condition. Note To set 0 value of age, send_age_if_zero + should be set true otherwise 0 value of age field + will be ignored. type: number createdBefore: description: A date in the RFC 3339 format YYYY-MM-DD. @@ -2486,17 +2494,6 @@ spec: items: type: string type: array - noAge: - description: While set true, age value will be omitted - from requests. This prevents a default age of 0 from - being applied, and if you do not have an age value - set, setting this to true is strongly recommended. - When unset and other conditions are set to zero values, - this can result in a rule that applies your action - to all files in the bucket. no_age is deprecated and - will be removed in a future major release. Use send_age_if_zero - instead. - type: boolean noncurrentTimeBefore: description: Relevant only for versioned objects. The date in RFC 3339 (e.g. 
2017-06-13) when the object diff --git a/package/crds/tags.gcp.upbound.io_tagkeys.yaml b/package/crds/tags.gcp.upbound.io_tagkeys.yaml index fa8848ade..e5b33d5aa 100644 --- a/package/crds/tags.gcp.upbound.io_tagkeys.yaml +++ b/package/crds/tags.gcp.upbound.io_tagkeys.yaml @@ -98,7 +98,7 @@ spec: shortName: description: |- Input only. The user friendly name for a TagKey. The short name should be unique for TagKeys within the same tag namespace. - The short name must be 1-63 characters, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between. + The short name can have a maximum length of 256 characters. The permitted character set for the shortName includes all UTF-8 encoded Unicode characters except single quotes ('), double quotes ("), backslashes (\), and forward slashes (/). type: string type: object initProvider: @@ -139,7 +139,7 @@ spec: shortName: description: |- Input only. The user friendly name for a TagKey. The short name should be unique for TagKeys within the same tag namespace. - The short name must be 1-63 characters, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between. + The short name can have a maximum length of 256 characters. The permitted character set for the shortName includes all UTF-8 encoded Unicode characters except single quotes ('), double quotes ("), backslashes (\), and forward slashes (/). type: string type: object managementPolicies: @@ -362,7 +362,7 @@ spec: shortName: description: |- Input only. The user friendly name for a TagKey. The short name should be unique for TagKeys within the same tag namespace. - The short name must be 1-63 characters, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between. + The short name can have a maximum length of 256 characters. 
The permitted character set for the shortName includes all UTF-8 encoded Unicode characters except single quotes ('), double quotes ("), backslashes (\), and forward slashes (/). type: string updateTime: description: |- diff --git a/package/crds/tags.gcp.upbound.io_tagvalues.yaml b/package/crds/tags.gcp.upbound.io_tagvalues.yaml index 21ecf5133..82fb121c9 100644 --- a/package/crds/tags.gcp.upbound.io_tagvalues.yaml +++ b/package/crds/tags.gcp.upbound.io_tagvalues.yaml @@ -158,7 +158,7 @@ spec: shortName: description: |- Input only. User-assigned short name for TagValue. The short name should be unique for TagValues within the same parent TagKey. - The short name must be 63 characters or less, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between. + The short name can have a maximum length of 256 characters. The permitted character set for the shortName includes all UTF-8 encoded Unicode characters except single quotes ('), double quotes ("), backslashes (\), and forward slashes (/). type: string type: object initProvider: @@ -259,7 +259,7 @@ spec: shortName: description: |- Input only. User-assigned short name for TagValue. The short name should be unique for TagValues within the same parent TagKey. - The short name must be 63 characters or less, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between. + The short name can have a maximum length of 256 characters. The permitted character set for the shortName includes all UTF-8 encoded Unicode characters except single quotes ('), double quotes ("), backslashes (\), and forward slashes (/). type: string type: object managementPolicies: @@ -465,7 +465,7 @@ spec: shortName: description: |- Input only. User-assigned short name for TagValue. The short name should be unique for TagValues within the same parent TagKey. 
- The short name must be 63 characters or less, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between. + The short name can have a maximum length of 256 characters. The permitted character set for the shortName includes all UTF-8 encoded Unicode characters except single quotes ('), double quotes ("), backslashes (\), and forward slashes (/). type: string updateTime: description: |- diff --git a/package/crds/vpcaccess.gcp.upbound.io_connectors.yaml b/package/crds/vpcaccess.gcp.upbound.io_connectors.yaml index 79909ec9c..4556631b0 100644 --- a/package/crds/vpcaccess.gcp.upbound.io_connectors.yaml +++ b/package/crds/vpcaccess.gcp.upbound.io_connectors.yaml @@ -871,8 +871,7 @@ spec: description: |- Maximum throughput of the connector in Mbps, must be greater than min_throughput. Default is 300. Refers to the expected throughput when using an e2-micro machine type. Value must be a multiple of 100 from 300 through 1000. Must be higher than the value specified by - min_throughput. If both max_throughput and max_instances are provided, max_instances takes precedence over max_throughput. The use of - max_throughput is discouraged in favor of max_instances. + min_throughput. Only one of max_throughput and max_instances can be specified. The use of max_throughput is discouraged in favor of max_instances. type: number minInstances: description: |- @@ -882,8 +881,8 @@ spec: minThroughput: description: |- Minimum throughput of the connector in Mbps. Default and min is 200. Refers to the expected throughput when using an e2-micro machine type. - Value must be a multiple of 100 from 200 through 900. Must be lower than the value specified by max_throughput. If both min_throughput and - min_instances are provided, min_instances takes precedence over min_throughput. The use of min_throughput is discouraged in favor of min_instances. + Value must be a multiple of 100 from 200 through 900. 
Must be lower than the value specified by max_throughput. + Only one of min_throughput and min_instances can be specified. The use of min_throughput is discouraged in favor of min_instances. type: number network: description: Name or self_link of the VPC network. Required if @@ -1097,8 +1096,7 @@ spec: description: |- Maximum throughput of the connector in Mbps, must be greater than min_throughput. Default is 300. Refers to the expected throughput when using an e2-micro machine type. Value must be a multiple of 100 from 300 through 1000. Must be higher than the value specified by - min_throughput. If both max_throughput and max_instances are provided, max_instances takes precedence over max_throughput. The use of - max_throughput is discouraged in favor of max_instances. + min_throughput. Only one of max_throughput and max_instances can be specified. The use of max_throughput is discouraged in favor of max_instances. type: number minInstances: description: |- @@ -1108,8 +1106,8 @@ spec: minThroughput: description: |- Minimum throughput of the connector in Mbps. Default and min is 200. Refers to the expected throughput when using an e2-micro machine type. - Value must be a multiple of 100 from 200 through 900. Must be lower than the value specified by max_throughput. If both min_throughput and - min_instances are provided, min_instances takes precedence over min_throughput. The use of min_throughput is discouraged in favor of min_instances. + Value must be a multiple of 100 from 200 through 900. Must be lower than the value specified by max_throughput. + Only one of min_throughput and min_instances can be specified. The use of min_throughput is discouraged in favor of min_instances. type: number network: description: Name or self_link of the VPC network. Required if @@ -1484,8 +1482,7 @@ spec: description: |- Maximum throughput of the connector in Mbps, must be greater than min_throughput. Default is 300. 
Refers to the expected throughput when using an e2-micro machine type. Value must be a multiple of 100 from 300 through 1000. Must be higher than the value specified by - min_throughput. If both max_throughput and max_instances are provided, max_instances takes precedence over max_throughput. The use of - max_throughput is discouraged in favor of max_instances. + min_throughput. Only one of max_throughput and max_instances can be specified. The use of max_throughput is discouraged in favor of max_instances. type: number minInstances: description: |- @@ -1495,8 +1492,8 @@ spec: minThroughput: description: |- Minimum throughput of the connector in Mbps. Default and min is 200. Refers to the expected throughput when using an e2-micro machine type. - Value must be a multiple of 100 from 200 through 900. Must be lower than the value specified by max_throughput. If both min_throughput and - min_instances are provided, min_instances takes precedence over min_throughput. The use of min_throughput is discouraged in favor of min_instances. + Value must be a multiple of 100 from 200 through 900. Must be lower than the value specified by max_throughput. + Only one of min_throughput and min_instances can be specified. The use of min_throughput is discouraged in favor of min_instances. type: number network: description: Name or self_link of the VPC network. Required if diff --git a/package/crds/workflows.gcp.upbound.io_workflows.yaml b/package/crds/workflows.gcp.upbound.io_workflows.yaml index 42142cd73..ea39753ee 100644 --- a/package/crds/workflows.gcp.upbound.io_workflows.yaml +++ b/package/crds/workflows.gcp.upbound.io_workflows.yaml @@ -551,6 +551,11 @@ spec: The KMS key used to encrypt workflow and execution data. Format: projects/{project}/locations/{location}/keyRings/{keyRing}/cryptoKeys/{cryptoKey} type: string + deletionProtection: + description: |- + Defaults to true. + When the field is set to false, deleting the workflow is allowed. 
+ type: boolean description: description: Description of the workflow provided by the user. Must be at most 1000 unicode characters long. diff --git a/provider-gcp-storage.yaml b/provider-gcp-storage.yaml new file mode 100644 index 000000000..df344c355 --- /dev/null +++ b/provider-gcp-storage.yaml @@ -0,0 +1,34 @@ +apiVersion: pkg.crossplane.io/v1 +kind: Provider +metadata: + annotations: + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"pkg.crossplane.io/v1","kind":"Provider","metadata":{"annotations":{},"name":"provider-gcp-storage"},"spec":{"package":"xpkg.upbound.io/upbound/provider-gcp-storage:v0.41.0"}} + creationTimestamp: "2024-09-12T09:04:05Z" + generation: 1 + name: provider-gcp-storage + resourceVersion: "7130" + uid: 401591d8-e4de-4be6-a9d8-578fe8b75dfe +spec: + ignoreCrossplaneConstraints: false + package: xpkg.upbound.io/upbound/provider-gcp-storage:v0.41.0 + packagePullPolicy: IfNotPresent + revisionActivationPolicy: Automatic + revisionHistoryLimit: 1 + runtimeConfigRef: + apiVersion: pkg.crossplane.io/v1beta1 + kind: DeploymentRuntimeConfig + name: default + skipDependencyResolution: false +status: + conditions: + - lastTransitionTime: "2024-09-12T09:04:22Z" + reason: HealthyPackageRevision + status: "True" + type: Healthy + - lastTransitionTime: "2024-09-12T09:04:05Z" + reason: ActivePackageRevision + status: "True" + type: Installed + currentIdentifier: xpkg.upbound.io/upbound/provider-gcp-storage:v0.41.0 + currentRevision: provider-gcp-storage-70a994bdf770