Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
26 changes: 14 additions & 12 deletions providers/gcp/resources/cloudrun.go
Original file line number Diff line number Diff line change
Expand Up @@ -278,6 +278,7 @@ func (g *mqlGcpProjectCloudRunService) operations() ([]any, error) {
}
if err != nil {
log.Error().Err(err).Send()
break
}
mqlOp, err := CreateResource(g.MqlRuntime, "gcp.project.cloudRunService.operation", map[string]*llx.RawData{
"projectId": llx.StringData(projectId),
Expand All @@ -286,6 +287,7 @@ func (g *mqlGcpProjectCloudRunService) operations() ([]any, error) {
})
if err != nil {
log.Error().Err(err).Send()
continue
}
mux.Lock()
operations = append(operations, mqlOp)
Expand Down Expand Up @@ -386,7 +388,7 @@ func (g *mqlGcpProjectCloudRunService) services() ([]any, error) {
"annotations": llx.MapData(convert.MapToInterfaceMap(s.Template.Annotations), types.String),
"scaling": llx.DictData(scalingCfg),
"vpcAccess": llx.DictData(vpcCfg),
"timeout": llx.TimeData(llx.DurationToTime((s.Template.Timeout.Seconds))),
"timeout": llx.TimeDataPtr(durationSecondsToTimePtr(s.Template.Timeout)),
"serviceAccountEmail": llx.StringData(s.Template.ServiceAccount),
"containers": llx.ArrayData(mqlContainers, "gcp.project.cloudRunService.container"),
"volumes": llx.ArrayData(mqlVolumes(s.Template.Volumes), types.Dict),
Expand Down Expand Up @@ -443,10 +445,10 @@ func (g *mqlGcpProjectCloudRunService) services() ([]any, error) {
"generation": llx.IntData(s.Generation),
"labels": llx.MapData(convert.MapToInterfaceMap(s.Labels), types.String),
"annotations": llx.MapData(convert.MapToInterfaceMap(s.Annotations), types.String),
"created": llx.TimeData(s.CreateTime.AsTime()),
"updated": llx.TimeData(s.UpdateTime.AsTime()),
"deleted": llx.TimeData(s.DeleteTime.AsTime()),
"expired": llx.TimeData(s.ExpireTime.AsTime()),
"created": llx.TimeDataPtr(timestampAsTimePtr(s.CreateTime)),
"updated": llx.TimeDataPtr(timestampAsTimePtr(s.UpdateTime)),
"deleted": llx.TimeDataPtr(timestampAsTimePtr(s.DeleteTime)),
"expired": llx.TimeDataPtr(timestampAsTimePtr(s.ExpireTime)),
"creator": llx.StringData(s.Creator),
"lastModifier": llx.StringData(s.LastModifier),
"ingress": llx.StringData(s.Ingress.String()),
Expand Down Expand Up @@ -586,7 +588,7 @@ func (g *mqlGcpProjectCloudRunService) jobs() ([]any, error) {
"id": llx.StringData(fmt.Sprintf("%s/template", templateId)),
"projectId": llx.StringData(projectId),
"vpcAccess": llx.DictData(vpcAccess),
"timeout": llx.TimeData(llx.DurationToTime((j.Template.Template.Timeout.Seconds))),
"timeout": llx.TimeDataPtr(durationSecondsToTimePtr(j.Template.Template.Timeout)),
"serviceAccountEmail": llx.StringData(j.Template.Template.ServiceAccount),
"containers": llx.ArrayData(mqlContainers, types.Resource("gcp.project.cloudRunService.container")),
"volumes": llx.ArrayData(mqlVolumes(j.Template.Template.Volumes), types.Dict),
Expand Down Expand Up @@ -638,10 +640,10 @@ func (g *mqlGcpProjectCloudRunService) jobs() ([]any, error) {
"generation": llx.IntData(j.Generation),
"labels": llx.MapData(convert.MapToInterfaceMap(j.Labels), types.String),
"annotations": llx.MapData(convert.MapToInterfaceMap(j.Annotations), types.String),
"created": llx.TimeData(j.CreateTime.AsTime()),
"updated": llx.TimeData(j.UpdateTime.AsTime()),
"deleted": llx.TimeData(j.DeleteTime.AsTime()),
"expired": llx.TimeData(j.ExpireTime.AsTime()),
"created": llx.TimeDataPtr(timestampAsTimePtr(j.CreateTime)),
"updated": llx.TimeDataPtr(timestampAsTimePtr(j.UpdateTime)),
"deleted": llx.TimeDataPtr(timestampAsTimePtr(j.DeleteTime)),
"expired": llx.TimeDataPtr(timestampAsTimePtr(j.ExpireTime)),
"creator": llx.StringData(j.Creator),
"lastModifier": llx.StringData(j.LastModifier),
"client": llx.StringData(j.Client),
Expand Down Expand Up @@ -717,7 +719,7 @@ func mqlCondition(runtime *plugin.Runtime, c *runpb.Condition, parentId, suffix
"type": llx.StringData(c.Type),
"state": llx.StringData(c.String()),
"message": llx.StringData(c.Message),
"lastTransitionTime": llx.TimeData(c.LastTransitionTime.AsTime()),
"lastTransitionTime": llx.TimeDataPtr(timestampAsTimePtr(c.LastTransitionTime)),
"severity": llx.StringData(c.Severity.String()),
})
}
Expand All @@ -743,7 +745,7 @@ func mqlContainers(runtime *plugin.Runtime, containers []*runpb.Container, templ
for _, e := range c.Env {
valueSource := e.GetValueSource()
var mqlValueSource map[string]any
if valueSource != nil {
if valueSource != nil && valueSource.SecretKeyRef != nil {
mqlValueSource = map[string]any{
"secretKeyRef": map[string]any{
"secret": valueSource.SecretKeyRef.Secret,
Expand Down
21 changes: 21 additions & 0 deletions providers/gcp/resources/common.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ import (

"google.golang.org/protobuf/encoding/protojson"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/durationpb"
"google.golang.org/protobuf/types/known/timestamppb"
"google.golang.org/protobuf/types/known/wrapperspb"
)
Expand Down Expand Up @@ -79,6 +80,14 @@ func timestampAsTimePtr(t *timestamppb.Timestamp) *time.Time {
return &tm
}

// durationSecondsToTimePtr converts a protobuf Duration into a *time.Time
// suitable for llx.TimeDataPtr, so an unset duration field can surface as a
// null value instead of a zero time.
//
// Returns nil when d is nil (i.e. the API left the field unset).
// NOTE(review): only d.Seconds is passed to llx.DurationToTime — d.Nanos is
// discarded, so any sub-second component of the duration is lost. Confirm
// that whole-second precision is acceptable for the timeout fields this
// helper serves.
func durationSecondsToTimePtr(d *durationpb.Duration) *time.Time {
	if d == nil {
		return nil
	}
	t := llx.DurationToTime(d.Seconds)
	return &t
}

func boolValueToPtr(b *wrapperspb.BoolValue) *bool {
if b == nil {
return nil
Expand Down Expand Up @@ -117,6 +126,9 @@ func getAssetIdentifier(runtime *plugin.Runtime) *assetIdentifier {
if !ok {
return nil
}
if conn.Asset() == nil || len(conn.Asset().PlatformIds) == 0 {
return nil
}
id := conn.Asset().PlatformIds[0]

if strings.HasPrefix(id, "//platformid.api.mondoo.app/runtime/gcp/") {
Expand Down Expand Up @@ -184,6 +196,9 @@ func getNetworkByUrl(networkUrl string, runtime *plugin.Runtime) (*mqlGcpProject
params := strings.TrimPrefix(networkUrl, "https://www.googleapis.com/compute/v1/")
params = strings.TrimPrefix(params, "https://compute.googleapis.com/compute/v1/")
parts := strings.Split(params, "/")
if len(parts) < 5 {
return nil, errors.New("malformed network URL: " + networkUrl)
}
resId := resourceId{Project: parts[1], Region: parts[2], Name: parts[4]}

res, err := CreateResource(runtime, "gcp.project.computeService.network", map[string]*llx.RawData{
Expand All @@ -207,6 +222,9 @@ func getSubnetworkByUrl(subnetUrl string, runtime *plugin.Runtime) (*mqlGcpProje
params := strings.TrimPrefix(subnetUrl, "https://www.googleapis.com/compute/v1/")
params = strings.TrimPrefix(params, "https://compute.googleapis.com/compute/v1/")
parts := strings.Split(params, "/")
if len(parts) < 6 {
return nil, errors.New("malformed subnetwork URL: " + subnetUrl)
}
resId := resourceId{Project: parts[1], Region: parts[3], Name: parts[5]}
// regionUrl is the full URL up to and including the region segment
regionUrl := "https://www.googleapis.com/compute/v1/projects/" + resId.Project + "/regions/" + resId.Region
Expand All @@ -233,5 +251,8 @@ func getDiskIdByUrl(diskUrl string) (*resourceId, error) {
params := strings.TrimPrefix(diskUrl, "https://www.googleapis.com/compute/v1/")
params = strings.TrimPrefix(params, "https://compute.googleapis.com/compute/v1/")
parts := strings.Split(params, "/")
if len(parts) < 6 {
return nil, errors.New("malformed disk URL: " + diskUrl)
}
return &resourceId{Project: parts[1], Region: parts[3], Name: parts[5]}, nil
}
107 changes: 65 additions & 42 deletions providers/gcp/resources/dataproc.go
Original file line number Diff line number Diff line change
Expand Up @@ -525,10 +525,16 @@ func (g *mqlGcpProjectDataprocService) clusters() ([]any, error) {
SparkHistoryServerConfig mqlSparkHistoryServerConfig `json:"sparkHistoryServerConfig"`
}

mqlAuxServices, err := convert.JsonToDict(mqlAuxiliaryServices{
MetastoreConfig: mqlMetastoreConfig{DataprocMetastoreService: c.VirtualClusterConfig.AuxiliaryServicesConfig.MetastoreConfig.DataprocMetastoreService},
SparkHistoryServerConfig: mqlSparkHistoryServerConfig{DataprocCluster: c.VirtualClusterConfig.AuxiliaryServicesConfig.SparkHistoryServerConfig.DataprocCluster},
})
var auxSvc mqlAuxiliaryServices
if c.VirtualClusterConfig.AuxiliaryServicesConfig != nil {
if c.VirtualClusterConfig.AuxiliaryServicesConfig.MetastoreConfig != nil {
auxSvc.MetastoreConfig = mqlMetastoreConfig{DataprocMetastoreService: c.VirtualClusterConfig.AuxiliaryServicesConfig.MetastoreConfig.DataprocMetastoreService}
}
if c.VirtualClusterConfig.AuxiliaryServicesConfig.SparkHistoryServerConfig != nil {
auxSvc.SparkHistoryServerConfig = mqlSparkHistoryServerConfig{DataprocCluster: c.VirtualClusterConfig.AuxiliaryServicesConfig.SparkHistoryServerConfig.DataprocCluster}
}
}
mqlAuxServices, err := convert.JsonToDict(auxSvc)
if err != nil {
log.Error().Err(err).Send()
}
Expand All @@ -552,20 +558,32 @@ func (g *mqlGcpProjectDataprocService) clusters() ([]any, error) {
KubernetesNamespace string `json:"kubernetesNamespace"`
}

npTargets := make([]mqlGkeNodePoolTarget, 0, len(c.VirtualClusterConfig.KubernetesClusterConfig.GkeClusterConfig.NodePoolTarget))
for _, npt := range c.VirtualClusterConfig.KubernetesClusterConfig.GkeClusterConfig.NodePoolTarget {
npTargets = append(npTargets, nodePoolTargetToMql(npt))
var k8sClusterCfg mqlKubernetesClusterConfig
if k8sCfg := c.VirtualClusterConfig.KubernetesClusterConfig; k8sCfg != nil {
if gkeCfg := k8sCfg.GkeClusterConfig; gkeCfg != nil {
npTargets := make([]mqlGkeNodePoolTarget, 0, len(gkeCfg.NodePoolTarget))
for _, npt := range gkeCfg.NodePoolTarget {
npTargets = append(npTargets, nodePoolTargetToMql(npt))
}
var nsTarget mqlNamespacedGkeDeploymentTarget
if gkeCfg.NamespacedGkeDeploymentTarget != nil {
nsTarget = mqlNamespacedGkeDeploymentTarget{
ClusterNamespace: gkeCfg.NamespacedGkeDeploymentTarget.ClusterNamespace,
TargetGkeCluster: gkeCfg.NamespacedGkeDeploymentTarget.TargetGkeCluster,
}
}
k8sClusterCfg.GkeClusterConfig = mqlGkeClusterConfig{
TargetCluster: gkeCfg.GkeClusterTarget,
NamespacedGkeDeploymentTarget: nsTarget,
NodePoolTarget: npTargets,
}
}
k8sClusterCfg.KubernetesNamespace = k8sCfg.KubernetesNamespace
}

mqlK8sClusterCfg, err := convert.JsonToDict(mqlKubernetesClusterConfig{
GkeClusterConfig: mqlGkeClusterConfig{
TargetCluster: c.VirtualClusterConfig.KubernetesClusterConfig.GkeClusterConfig.GkeClusterTarget,
NamespacedGkeDeploymentTarget: mqlNamespacedGkeDeploymentTarget{
ClusterNamespace: c.VirtualClusterConfig.KubernetesClusterConfig.GkeClusterConfig.NamespacedGkeDeploymentTarget.ClusterNamespace,
TargetGkeCluster: c.VirtualClusterConfig.KubernetesClusterConfig.GkeClusterConfig.NamespacedGkeDeploymentTarget.TargetGkeCluster,
},
NodePoolTarget: npTargets,
},
GkeClusterConfig: k8sClusterCfg.GkeClusterConfig,
KubernetesNamespace: k8sClusterCfg.KubernetesNamespace,
})
if err != nil {
log.Error().Err(err).Send()
Expand Down Expand Up @@ -736,35 +754,40 @@ func (g *mqlGcpProjectDataprocServiceClusterConfigInstanceDiskConfig) id() (stri
}

func nodePoolTargetToMql(npt *dataproc.GkeNodePoolTarget) mqlGkeNodePoolTarget {
accs := make([]mqlGkeNodePoolAccelerator, 0, len(npt.NodePoolConfig.Config.Accelerators))
for _, acc := range npt.NodePoolConfig.Config.Accelerators {
accs = append(accs, mqlGkeNodePoolAccelerator{
AcceleratorCount: acc.AcceleratorCount,
AcceleratorType: acc.AcceleratorType,
GpuPartitionSize: acc.GpuPartitionSize,
})
}

return mqlGkeNodePoolTarget{
result := mqlGkeNodePoolTarget{
NodePool: npt.NodePool,
NodePoolConfig: mqlGkeNodePoolConfig{
Autoscaling: mqlGkeNodePoolAutoscalingConfig{
MaxNodeCount: npt.NodePoolConfig.Autoscaling.MaxNodeCount,
MinNodeCount: npt.NodePoolConfig.Autoscaling.MinNodeCount,
},
Config: mqlGkeNodeConfig{
Accelerators: accs,
BootDiskKmsKey: npt.NodePoolConfig.Config.BootDiskKmsKey,
LocalSsdCount: npt.NodePoolConfig.Config.LocalSsdCount,
MachineType: npt.NodePoolConfig.Config.MachineType,
MinCpuPlatform: npt.NodePoolConfig.Config.MinCpuPlatform,
Preemptible: npt.NodePoolConfig.Config.Preemptible,
Spot: npt.NodePoolConfig.Config.Spot,
},
Locations: npt.NodePoolConfig.Locations,
},
Roles: npt.Roles,
Roles: npt.Roles,
}
if npt.NodePoolConfig == nil {
return result
}
result.NodePoolConfig.Locations = npt.NodePoolConfig.Locations
if npt.NodePoolConfig.Autoscaling != nil {
result.NodePoolConfig.Autoscaling = mqlGkeNodePoolAutoscalingConfig{
MaxNodeCount: npt.NodePoolConfig.Autoscaling.MaxNodeCount,
MinNodeCount: npt.NodePoolConfig.Autoscaling.MinNodeCount,
}
}
if npt.NodePoolConfig.Config != nil {
accs := make([]mqlGkeNodePoolAccelerator, 0, len(npt.NodePoolConfig.Config.Accelerators))
for _, acc := range npt.NodePoolConfig.Config.Accelerators {
accs = append(accs, mqlGkeNodePoolAccelerator{
AcceleratorCount: acc.AcceleratorCount,
AcceleratorType: acc.AcceleratorType,
GpuPartitionSize: acc.GpuPartitionSize,
})
}
result.NodePoolConfig.Config = mqlGkeNodeConfig{
Accelerators: accs,
BootDiskKmsKey: npt.NodePoolConfig.Config.BootDiskKmsKey,
LocalSsdCount: npt.NodePoolConfig.Config.LocalSsdCount,
MachineType: npt.NodePoolConfig.Config.MachineType,
MinCpuPlatform: npt.NodePoolConfig.Config.MinCpuPlatform,
Preemptible: npt.NodePoolConfig.Config.Preemptible,
Spot: npt.NodePoolConfig.Config.Spot,
}
}
return result
}

func instanceGroupConfigToMql(runtime *plugin.Runtime, igc *dataproc.InstanceGroupConfig, id string) (plugin.Resource, error) {
Expand Down
6 changes: 3 additions & 3 deletions providers/gcp/resources/discovery.go
Original file line number Diff line number Diff line change
Expand Up @@ -794,7 +794,7 @@ func discoverProject(conn *connection.GcpConnection, gcpProject *mqlGcpProject,
return nil, storage.Error
}
buckets := storage.Data.GetBuckets()
if buckets == nil {
if buckets.Error != nil {
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🟡 warning — The original code checked if buckets == nil to guard against a nil buckets value. The new code if buckets.Error != nil will panic if buckets itself is nil (i.e., if GetBuckets() returns a nil plugin result). The original nil check was likely intentional — consider keeping a nil guard:

if buckets == nil || buckets.Error != nil {
    if buckets != nil {
        return nil, buckets.Error
    }
    return nil, nil
}

Or verify that GetBuckets() is guaranteed to never return a nil result struct.

return nil, buckets.Error
}
for i := range buckets.Data {
Expand Down Expand Up @@ -1205,12 +1205,12 @@ func (a *GcrImages) Name() string {
func (a *GcrImages) ListRepository(repository string, recursive bool) ([]*inventory.Asset, error) {
repo, err := name.NewRepository(repository)
if err != nil {
log.Fatal().Err(err).Str("repository", repository).Msg("could not create repository")
return nil, fmt.Errorf("could not create repository %s: %w", repository, err)
}

auth, err := google.Keychain.Resolve(repo.Registry)
if err != nil {
log.Fatal().Err(err).Str("repository", repository).Msg("failed to get auth for repository")
return nil, fmt.Errorf("failed to get auth for repository %s: %w", repository, err)
}

imgs := []*inventory.Asset{}
Expand Down
2 changes: 1 addition & 1 deletion providers/gcp/resources/filestore.go
Original file line number Diff line number Diff line change
Expand Up @@ -115,7 +115,7 @@ func (g *mqlGcpProjectFilestoreService) instances() ([]any, error) {
"description": llx.StringData(instance.Description),
"tier": llx.StringData(instance.Tier.String()),
"state": llx.StringData(instance.State.String()),
"createTime": llx.TimeData(instance.CreateTime.AsTime()),
"createTime": llx.TimeDataPtr(timestampAsTimePtr(instance.CreateTime)),
"labels": llx.MapData(convert.MapToInterfaceMap(instance.Labels), types.String),
"fileShares": llx.ArrayData(fileShares, types.Resource("gcp.project.filestoreService.instance.fileShare")),
"networks": llx.ArrayData(networks, types.Resource("gcp.project.filestoreService.instance.network")),
Expand Down
6 changes: 6 additions & 0 deletions providers/gcp/resources/gke.go
Original file line number Diff line number Diff line change
Expand Up @@ -1028,6 +1028,9 @@ func (g *mqlGcpProjectGkeServiceClusterNetworkConfig) network() (*mqlGcpProjectC

// Format is projects/project-1/global/networks/net-1
params := strings.Split(networkPath, "/")
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🔵 suggestion — The bounds check len(params) < 2 only protects params[1], but params[len(params)-1] a few lines above would also need len(params) >= 1. More importantly, the path format is projects/project-1/global/networks/net-1 (5 segments), so a stricter check like len(params) < 5 would better validate the expected structure, consistent with the checks added in common.go.

if len(params) < 2 {
return nil, errors.New("malformed network path: " + networkPath)
}
res, err := CreateResource(g.MqlRuntime, "gcp.project.computeService.network", map[string]*llx.RawData{
"name": llx.StringData(params[len(params)-1]),
"projectId": llx.StringData(params[1]),
Expand All @@ -1046,6 +1049,9 @@ func (g *mqlGcpProjectGkeServiceClusterNetworkConfig) subnetwork() (*mqlGcpProje

// Format is projects/project-1/regions/us-central1/subnetworks/subnet-1
params := strings.Split(subnetPath, "/")
if len(params) < 6 {
return nil, errors.New("malformed subnetwork path: " + subnetPath)
}
regionUrl := strings.SplitN(subnetPath, "/subnetworks", 2)
res, err := NewResource(g.MqlRuntime, "gcp.project.computeService.subnetwork", map[string]*llx.RawData{
"name": llx.StringData(params[len(params)-1]),
Expand Down
22 changes: 17 additions & 5 deletions providers/gcp/resources/kms.go
Original file line number Diff line number Diff line change
Expand Up @@ -338,15 +338,18 @@ func (g *mqlGcpProjectKmsServiceKeyring) cryptokeys() ([]any, error) {
"name": llx.StringData(parseResourceName(k.Name)),
"primary": llx.ResourceData(mqlPrimary, "gcp.project.kmsService.keyring.cryptokey.version"),
"purpose": llx.StringData(k.Purpose.String()),
"created": llx.TimeData(k.CreateTime.AsTime()),
"nextRotation": llx.TimeData(k.NextRotationTime.AsTime()),
"created": llx.TimeDataPtr(timestampAsTimePtr(k.CreateTime)),
"nextRotation": llx.TimeDataPtr(timestampAsTimePtr(k.NextRotationTime)),
"rotationPeriod": llx.TimeDataPtr(mqlRotationPeriod),
"versionTemplate": llx.DictData(versionTemplate),
"labels": llx.MapData(convert.MapToInterfaceMap(k.Labels), types.String),
"importOnly": llx.BoolData(k.ImportOnly),
"destroyScheduledDuration": llx.TimeDataPtr(mqlDestroyScheduledDuration),
"cryptoKeyBackend": llx.StringData(k.CryptoKeyBackend),
})
if err != nil {
return nil, err
}

keys = append(keys, mqlKey)
}
Expand Down Expand Up @@ -390,6 +393,9 @@ func (g *mqlGcpProjectKmsServiceKeyringCryptokey) versions() ([]any, error) {
}

mqlVersion, err := cryptoKeyVersionToMql(g.MqlRuntime, v)
if err != nil {
return nil, err
}
versions = append(versions, mqlVersion)
}
return versions, nil
Expand Down Expand Up @@ -438,11 +444,17 @@ func (g *mqlGcpProjectKmsServiceKeyringCryptokey) iamPolicy() ([]any, error) {
func cryptoKeyVersionToMql(runtime *plugin.Runtime, v *kmspb.CryptoKeyVersion) (plugin.Resource, error) {
var mqlAttestation plugin.Resource
if v.Attestation != nil {
var caviumCerts, googleCardCerts, googlePartitionCerts []any
if v.Attestation.CertChains != nil {
caviumCerts = convert.SliceAnyToInterface(v.Attestation.CertChains.CaviumCerts)
googleCardCerts = convert.SliceAnyToInterface(v.Attestation.CertChains.GoogleCardCerts)
googlePartitionCerts = convert.SliceAnyToInterface(v.Attestation.CertChains.GooglePartitionCerts)
}
mqlAttestationCertChains, err := CreateResource(runtime, "gcp.project.kmsService.keyring.cryptokey.version.attestation.certificatechains", map[string]*llx.RawData{
"cryptoKeyVersionName": llx.StringData(v.Name),
"caviumCerts": llx.ArrayData(convert.SliceAnyToInterface(v.Attestation.CertChains.CaviumCerts), types.String),
"googleCardCerts": llx.ArrayData(convert.SliceAnyToInterface(v.Attestation.CertChains.GoogleCardCerts), types.String),
"googlePartitionCerts": llx.ArrayData(convert.SliceAnyToInterface(v.Attestation.CertChains.GooglePartitionCerts), types.String),
"caviumCerts": llx.ArrayData(caviumCerts, types.String),
"googleCardCerts": llx.ArrayData(googleCardCerts, types.String),
"googlePartitionCerts": llx.ArrayData(googlePartitionCerts, types.String),
})
if err != nil {
return nil, err
Expand Down
Loading
Loading