diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index a264b568c..dd5d79afe 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -85,7 +85,6 @@ rules: resources: - jobs verbs: - - delete - deletecollection - get - list diff --git a/controllers/container_image/deployment_handler.go b/controllers/container_image/deployment_handler.go index 70a21041a..f9ec41373 100644 --- a/controllers/container_image/deployment_handler.go +++ b/controllers/container_image/deployment_handler.go @@ -5,7 +5,6 @@ package container_image import ( "context" - "reflect" "go.mondoo.com/mondoo-operator/api/v1alpha2" "go.mondoo.com/mondoo-operator/pkg/utils/k8s" @@ -16,6 +15,7 @@ import ( "k8s.io/apimachinery/pkg/labels" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" ) var logger = ctrl.Log.WithName("k8s-images-scanning") @@ -71,20 +71,10 @@ func (n *DeploymentHandler) syncCronJob(ctx context.Context) error { return err } - updated, err := n.syncConfigMap(ctx, clusterUid) - if err != nil { + if err := n.syncConfigMap(ctx, clusterUid); err != nil { return err } - // TODO: for CronJob we might consider triggering the CronJob now after the ConfigMap has been changed. It will make sense from the - // user perspective to want to run the jobs after you have updated the config. - if updated { - logger.Info( - "Inventory ConfigMap was just updated. 
The job will use the new config during the next scheduled run.", - "namespace", n.Mondoo.Namespace, - "name", CronJobName(n.Mondoo.Name)) - } - // Reconcile private registry secrets (merges multiple secrets if needed) privateRegistrySecretName, err := k8s.ReconcilePrivateRegistriesSecret(ctx, n.KubeClient, n.Mondoo) if err != nil { @@ -92,35 +82,20 @@ func (n *DeploymentHandler) syncCronJob(ctx context.Context) error { return err } - existing := &batchv1.CronJob{} desired := CronJob(mondooClientImage, integrationMrn, clusterUid, privateRegistrySecretName, n.Mondoo, *n.MondooOperatorConfig) - if err := ctrl.SetControllerReference(n.Mondoo, desired, n.KubeClient.Scheme()); err != nil { - logger.Error(err, "Failed to set ControllerReference", "namespace", desired.Namespace, "name", desired.Name) - return err - } - - created, err := k8s.CreateIfNotExist(ctx, n.KubeClient, existing, desired) + obj := &batchv1.CronJob{ObjectMeta: metav1.ObjectMeta{Name: desired.Name, Namespace: desired.Namespace}} + op, err := k8s.CreateOrUpdate(ctx, n.KubeClient, obj, n.Mondoo, logger, func() error { + k8s.UpdateCronJobFields(obj, desired) + return nil + }) if err != nil { - logger.Error(err, "Failed to create CronJob", "namespace", desired.Namespace, "name", desired.Name) return err } - if created { - logger.Info("Created CronJob", "namespace", desired.Namespace, "name", desired.Name) - } else if !k8s.AreCronJobsEqual(*existing, *desired) { - existing.Spec.JobTemplate = desired.Spec.JobTemplate - existing.Spec.Schedule = desired.Spec.Schedule - existing.Spec.ConcurrencyPolicy = desired.Spec.ConcurrencyPolicy - existing.SetOwnerReferences(desired.GetOwnerReferences()) - - // Remove completed/failed jobs because they won't be updated when the cronjob changes. - // Active jobs are preserved to avoid killing in-progress scans. 
+ // When a CronJob is updated, remove completed Jobs so they don't linger with stale config + if op == controllerutil.OperationResultUpdated { if err := k8s.DeleteCompletedJobs(ctx, n.KubeClient, n.Mondoo.Namespace, CronJobLabels(*n.Mondoo), logger); err != nil { - return err - } - - if err := n.KubeClient.Update(ctx, existing); err != nil { - logger.Error(err, "Failed to update CronJob", "namespace", existing.Namespace, "name", existing.Name) + logger.Error(err, "Failed to clean up completed Jobs after CronJob update") return err } } @@ -154,53 +129,29 @@ func (n *DeploymentHandler) syncCronJob(ctx context.Context) error { return nil } -// syncConfigMap syncs the inventory ConfigMap. Returns a boolean indicating whether the ConfigMap has been updated. It -// can only be "true", if the ConfigMap existed before this reconcile cycle and the inventory was different from the -// desired state. -func (n *DeploymentHandler) syncConfigMap(ctx context.Context, clusterUid string) (bool, error) { - existing := &corev1.ConfigMap{} - +func (n *DeploymentHandler) syncConfigMap(ctx context.Context, clusterUid string) error { integrationMrn, err := k8s.TryGetIntegrationMrnForAuditConfig(ctx, n.KubeClient, *n.Mondoo) if err != nil { logger.Error(err, "failed to retrieve IntegrationMRN") - return false, err + return err } desired, err := ConfigMap(integrationMrn, clusterUid, *n.Mondoo, *n.MondooOperatorConfig) if err != nil { logger.Error(err, "failed to generate desired ConfigMap with inventory") - return false, err - } - - if err := ctrl.SetControllerReference(n.Mondoo, desired, n.KubeClient.Scheme()); err != nil { - logger.Error(err, "Failed to set ControllerReference", "namespace", desired.Namespace, "name", desired.Name) - return false, err - } - - created, err := k8s.CreateIfNotExist(ctx, n.KubeClient, existing, desired) - if err != nil { - logger.Error(err, "Failed to create inventory ConfigMap", "namespace", desired.Namespace, "name", desired.Name) - return false, err + 
return err } - if created { - logger.Info("Created inventory ConfigMap", "namespace", desired.Namespace, "name", desired.Name) - return false, nil + obj := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: desired.Name, Namespace: desired.Namespace}} + if _, err := k8s.CreateOrUpdate(ctx, n.KubeClient, obj, n.Mondoo, logger, func() error { + obj.Labels = desired.Labels + obj.Data = desired.Data + return nil + }); err != nil { + return err } - updated := false - if existing.Data["inventory"] != desired.Data["inventory"] || - !reflect.DeepEqual(existing.GetOwnerReferences(), desired.GetOwnerReferences()) { - existing.Data["inventory"] = desired.Data["inventory"] - existing.SetOwnerReferences(desired.GetOwnerReferences()) - - if err := n.KubeClient.Update(ctx, existing); err != nil { - logger.Error(err, "Failed to update inventory ConfigMap", "namespace", existing.Namespace, "name", existing.Name) - return false, err - } - updated = true - } - return updated, nil + return nil } func (n *DeploymentHandler) getCronJobsForAuditConfig(ctx context.Context) ([]batchv1.CronJob, error) { diff --git a/controllers/container_image/deployment_handler_test.go b/controllers/container_image/deployment_handler_test.go index ebfbb153d..7dab54f3b 100644 --- a/controllers/container_image/deployment_handler_test.go +++ b/controllers/container_image/deployment_handler_test.go @@ -5,7 +5,6 @@ package container_image import ( "context" - "fmt" "testing" "time" @@ -16,7 +15,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" - ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -65,17 +63,13 @@ func (s *DeploymentHandlerSuite) TestReconcile_Create() { s.NoError(err) expected := CronJob(image, "", test.KubeSystemNamespaceUid, "", &s.auditConfig, mondoov1alpha2.MondooOperatorConfig{}) - 
s.NoError(ctrl.SetControllerReference(&s.auditConfig, expected, d.KubeClient.Scheme())) - - // Set some fields that the kube client sets - expected.ResourceVersion = "1" created := &batchv1.CronJob{} created.Name = expected.Name created.Namespace = expected.Namespace s.NoError(d.KubeClient.Get(s.ctx, client.ObjectKeyFromObject(created), created)) - s.Equal(expected, created) + s.Equal(expected.Spec, created.Spec) } func (s *DeploymentHandlerSuite) TestReconcile_Create_CustomEnvVars() { @@ -93,10 +87,6 @@ func (s *DeploymentHandlerSuite) TestReconcile_Create_CustomEnvVars() { s.NoError(err) expected := CronJob(image, "", test.KubeSystemNamespaceUid, "", &s.auditConfig, mondoov1alpha2.MondooOperatorConfig{}) - s.NoError(ctrl.SetControllerReference(&s.auditConfig, expected, d.KubeClient.Scheme())) - - // Set some fields that the kube client sets - expected.ResourceVersion = "1" created := &batchv1.CronJob{} created.Name = expected.Name @@ -107,7 +97,7 @@ func (s *DeploymentHandlerSuite) TestReconcile_Create_CustomEnvVars() { utils.SortEnvVars(expected.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Env) utils.SortEnvVars(created.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Env) - s.Equal(expected, created) + s.Equal(expected.Spec, created.Spec) } func (s *DeploymentHandlerSuite) TestReconcile_CreateWithCustomImage() { @@ -126,17 +116,13 @@ func (s *DeploymentHandlerSuite) TestReconcile_CreateWithCustomImage() { s.NoError(err) expected := CronJob(image, "", test.KubeSystemNamespaceUid, "", &s.auditConfig, mondoov1alpha2.MondooOperatorConfig{}) - s.NoError(ctrl.SetControllerReference(&s.auditConfig, expected, d.KubeClient.Scheme())) - - // Set some fields that the kube client sets - expected.ResourceVersion = "1" created := &batchv1.CronJob{} created.Name = expected.Name created.Namespace = expected.Namespace s.NoError(d.KubeClient.Get(s.ctx, client.ObjectKeyFromObject(created), created)) - s.Equal(expected, created) + s.Equal(expected.Spec, created.Spec) } func 
(s *DeploymentHandlerSuite) TestReconcile_CreateWithCustomSchedule() { @@ -192,17 +178,13 @@ func (s *DeploymentHandlerSuite) TestReconcile_Create_PrivateRegistriesSecret() s.NoError(err) expected := CronJob(image, "", test.KubeSystemNamespaceUid, s.auditConfig.Spec.Scanner.PrivateRegistriesPullSecretRef.Name, &s.auditConfig, mondoov1alpha2.MondooOperatorConfig{}) - s.NoError(ctrl.SetControllerReference(&s.auditConfig, expected, d.KubeClient.Scheme())) - - // Set some fields that the kube client sets - expected.ResourceVersion = "1" created := &batchv1.CronJob{} created.Name = expected.Name created.Namespace = expected.Namespace s.NoError(d.KubeClient.Get(s.ctx, client.ObjectKeyFromObject(created), created)) - s.Equal(expected, created) + s.Equal(expected.Spec, created.Spec) } func (s *DeploymentHandlerSuite) TestReconcile_Create_MultiplePrivateRegistriesSecrets() { @@ -292,17 +274,13 @@ func (s *DeploymentHandlerSuite) TestReconcile_Create_ConsoleIntegration() { s.NoError(err) expected := CronJob(image, integrationMrn, test.KubeSystemNamespaceUid, "", &s.auditConfig, mondoov1alpha2.MondooOperatorConfig{}) - s.NoError(ctrl.SetControllerReference(&s.auditConfig, expected, d.KubeClient.Scheme())) - - // Set some fields that the kube client sets - expected.ResourceVersion = "1" created := &batchv1.CronJob{} created.Name = expected.Name created.Namespace = expected.Namespace s.NoError(d.KubeClient.Get(s.ctx, client.ObjectKeyFromObject(created), created)) - s.Equal(expected, created) + s.Equal(expected.Spec, created.Spec) } func (s *DeploymentHandlerSuite) TestReconcile_Update() { @@ -324,17 +302,13 @@ func (s *DeploymentHandlerSuite) TestReconcile_Update() { s.True(result.IsZero()) expected := CronJob(image, "", test.KubeSystemNamespaceUid, "", &s.auditConfig, mondoov1alpha2.MondooOperatorConfig{}) - s.NoError(ctrl.SetControllerReference(&s.auditConfig, expected, d.KubeClient.Scheme())) - - // The second node has an updated cron job so resource version is +1 - 
expected.ResourceVersion = fmt.Sprintf("%d", 2) created := &batchv1.CronJob{} created.Name = expected.Name created.Namespace = expected.Namespace s.NoError(d.KubeClient.Get(s.ctx, client.ObjectKeyFromObject(created), created)) - s.Equal(expected, created) + s.Equal(expected.Spec, created.Spec) } func (s *DeploymentHandlerSuite) TestReconcile_K8sContainerImageScanningStatus() { diff --git a/controllers/container_image/resources.go b/controllers/container_image/resources.go index 527aed3df..5080b184a 100644 --- a/controllers/container_image/resources.go +++ b/controllers/container_image/resources.go @@ -98,7 +98,9 @@ func CronJob(image, integrationMrn, clusterUid, privateRegistrySecretName string MountPath: "/tmp", }, }, - Env: envVars, + Env: envVars, + TerminationMessagePath: "/dev/termination-log", + TerminationMessagePolicy: corev1.TerminationMessageReadFile, }, }, ServiceAccountName: m.Spec.Scanner.ServiceAccountName, diff --git a/controllers/k8s_scan/deployment_handler.go b/controllers/k8s_scan/deployment_handler.go index f6191dde2..705a76867 100644 --- a/controllers/k8s_scan/deployment_handler.go +++ b/controllers/k8s_scan/deployment_handler.go @@ -6,7 +6,6 @@ package k8s_scan import ( "context" "fmt" - "reflect" appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" @@ -19,6 +18,7 @@ import ( "go.mondoo.com/mondoo-operator/api/v1alpha2" "go.mondoo.com/mondoo-operator/pkg/utils/k8s" "go.mondoo.com/mondoo-operator/pkg/utils/mondoo" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" ) var logger = ctrl.Log.WithName("k8s-resources-scanning") @@ -156,48 +156,24 @@ func (n *DeploymentHandler) syncCronJob(ctx context.Context) error { return err } - updated, err := n.syncConfigMap(ctx, integrationMrn, clusterUid) - if err != nil { + if err := n.syncConfigMap(ctx, integrationMrn, clusterUid); err != nil { return err } - // If ConfigMap was just updated, the job will pick up the new config during the next scheduled run - if updated { - logger.Info( - 
"Inventory ConfigMap was just updated. The job will use the new config during the next scheduled run.", - "namespace", n.Mondoo.Namespace, - "name", CronJobName(n.Mondoo.Name)) - } - - existing := &batchv1.CronJob{} desired := CronJob(mondooOperatorImage, integrationMrn, clusterUid, n.Mondoo, *n.MondooOperatorConfig) - if err := ctrl.SetControllerReference(n.Mondoo, desired, n.KubeClient.Scheme()); err != nil { - logger.Error(err, "Failed to set ControllerReference", "namespace", desired.Namespace, "name", desired.Name) - return err - } - - created, err := k8s.CreateIfNotExist(ctx, n.KubeClient, existing, desired) + obj := &batchv1.CronJob{ObjectMeta: metav1.ObjectMeta{Name: desired.Name, Namespace: desired.Namespace}} + op, err := k8s.CreateOrUpdate(ctx, n.KubeClient, obj, n.Mondoo, logger, func() error { + k8s.UpdateCronJobFields(obj, desired) + return nil + }) if err != nil { - logger.Error(err, "Failed to create CronJob", "namespace", desired.Namespace, "name", desired.Name) return err } - if created { - logger.Info("Created CronJob", "namespace", desired.Namespace, "name", desired.Name) - } else if !k8s.AreCronJobsEqual(*existing, *desired) { - existing.Spec.JobTemplate = desired.Spec.JobTemplate - existing.Spec.Schedule = desired.Spec.Schedule - existing.Spec.ConcurrencyPolicy = desired.Spec.ConcurrencyPolicy - existing.SetOwnerReferences(desired.GetOwnerReferences()) - - // Remove completed/failed jobs because they won't be updated when the cronjob changes. - // Active jobs are preserved to avoid killing in-progress scans. 
+ // When a CronJob is updated, remove completed Jobs so they don't linger with stale config + if op == controllerutil.OperationResultUpdated { if err := k8s.DeleteCompletedJobs(ctx, n.KubeClient, n.Mondoo.Namespace, CronJobLabels(*n.Mondoo), logger); err != nil { - return err - } - - if err := n.KubeClient.Update(ctx, existing); err != nil { - logger.Error(err, "Failed to update CronJob", "namespace", existing.Namespace, "name", existing.Name) + logger.Error(err, "Failed to clean up completed Jobs after CronJob update") return err } } @@ -225,45 +201,23 @@ func (n *DeploymentHandler) syncCronJob(ctx context.Context) error { return n.cleanupWorkloadDeployment(ctx) } -// syncConfigMap syncs the inventory ConfigMap. Returns a boolean indicating whether the ConfigMap has been updated. -func (n *DeploymentHandler) syncConfigMap(ctx context.Context, integrationMrn, clusterUid string) (bool, error) { - existing := &corev1.ConfigMap{} - +func (n *DeploymentHandler) syncConfigMap(ctx context.Context, integrationMrn, clusterUid string) error { desired, err := ConfigMap(integrationMrn, clusterUid, *n.Mondoo, *n.MondooOperatorConfig) if err != nil { logger.Error(err, "failed to generate desired ConfigMap with inventory") - return false, err - } - - if err := ctrl.SetControllerReference(n.Mondoo, desired, n.KubeClient.Scheme()); err != nil { - logger.Error(err, "Failed to set ControllerReference", "namespace", desired.Namespace, "name", desired.Name) - return false, err - } - - created, err := k8s.CreateIfNotExist(ctx, n.KubeClient, existing, desired) - if err != nil { - logger.Error(err, "Failed to create inventory ConfigMap", "namespace", desired.Namespace, "name", desired.Name) - return false, err + return err } - if created { - logger.Info("Created inventory ConfigMap", "namespace", desired.Namespace, "name", desired.Name) - return false, nil + obj := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: desired.Name, Namespace: desired.Namespace}} + if _, err := 
k8s.CreateOrUpdate(ctx, n.KubeClient, obj, n.Mondoo, logger, func() error { + obj.Labels = desired.Labels + obj.Data = desired.Data + return nil + }); err != nil { + return err } - updated := false - if existing.Data["inventory"] != desired.Data["inventory"] || - !reflect.DeepEqual(existing.GetOwnerReferences(), desired.GetOwnerReferences()) { - existing.Data["inventory"] = desired.Data["inventory"] - existing.SetOwnerReferences(desired.GetOwnerReferences()) - - if err := n.KubeClient.Update(ctx, existing); err != nil { - logger.Error(err, "Failed to update inventory ConfigMap", "namespace", existing.Namespace, "name", existing.Name) - return false, err - } - updated = true - } - return updated, nil + return nil } // reconcileExternalClusters reconciles CronJobs for external clusters @@ -333,78 +287,40 @@ func (n *DeploymentHandler) reconcileExternalClusters(ctx context.Context) error } func (n *DeploymentHandler) syncExternalClusterConfigMap(ctx context.Context, integrationMrn, clusterUid string, cluster v1alpha2.ExternalCluster) error { - existing := &corev1.ConfigMap{} - desired, err := ExternalClusterConfigMap(integrationMrn, clusterUid, cluster, *n.Mondoo, *n.MondooOperatorConfig) if err != nil { logger.Error(err, "failed to generate desired ConfigMap for external cluster", "cluster", cluster.Name) return err } - if err := ctrl.SetControllerReference(n.Mondoo, desired, n.KubeClient.Scheme()); err != nil { - logger.Error(err, "Failed to set ControllerReference", "namespace", desired.Namespace, "name", desired.Name) - return err - } - - created, err := k8s.CreateIfNotExist(ctx, n.KubeClient, existing, desired) - if err != nil { - logger.Error(err, "Failed to create inventory ConfigMap for external cluster", "cluster", cluster.Name) - return err - } - - if created { - logger.Info("Created inventory ConfigMap for external cluster", "cluster", cluster.Name) + obj := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: desired.Name, Namespace: desired.Namespace}} + 
if _, err := k8s.CreateOrUpdate(ctx, n.KubeClient, obj, n.Mondoo, logger, func() error { + obj.Labels = desired.Labels + obj.Data = desired.Data return nil - } - - if existing.Data["inventory"] != desired.Data["inventory"] || - !reflect.DeepEqual(existing.GetOwnerReferences(), desired.GetOwnerReferences()) { - existing.Data["inventory"] = desired.Data["inventory"] - existing.SetOwnerReferences(desired.GetOwnerReferences()) - - if err := n.KubeClient.Update(ctx, existing); err != nil { - logger.Error(err, "Failed to update inventory ConfigMap for external cluster", "cluster", cluster.Name) - return err - } + }); err != nil { + return err } return nil } func (n *DeploymentHandler) syncExternalClusterCronJob(ctx context.Context, image string, cluster v1alpha2.ExternalCluster) error { - existing := &batchv1.CronJob{} desired := ExternalClusterCronJob(image, cluster, n.Mondoo, *n.MondooOperatorConfig) - if err := ctrl.SetControllerReference(n.Mondoo, desired, n.KubeClient.Scheme()); err != nil { - logger.Error(err, "Failed to set ControllerReference", "namespace", desired.Namespace, "name", desired.Name) - return err - } - - created, err := k8s.CreateIfNotExist(ctx, n.KubeClient, existing, desired) + obj := &batchv1.CronJob{ObjectMeta: metav1.ObjectMeta{Name: desired.Name, Namespace: desired.Namespace}} + op, err := k8s.CreateOrUpdate(ctx, n.KubeClient, obj, n.Mondoo, logger, func() error { + k8s.UpdateCronJobFields(obj, desired) + return nil + }) if err != nil { - logger.Error(err, "Failed to create CronJob for external cluster", "cluster", cluster.Name) return err } - if created { - logger.Info("Created CronJob for external cluster", "cluster", cluster.Name) - return nil - } - - if !k8s.AreCronJobsEqual(*existing, *desired) { - existing.Spec.JobTemplate = desired.Spec.JobTemplate - existing.Spec.Schedule = desired.Spec.Schedule - existing.Spec.ConcurrencyPolicy = desired.Spec.ConcurrencyPolicy - existing.SetOwnerReferences(desired.GetOwnerReferences()) - - // Remove 
completed/failed jobs because they won't be updated when the cronjob changes. - // Active jobs are preserved to avoid killing in-progress scans. + // When a CronJob is updated, remove completed Jobs so they don't linger with stale config + if op == controllerutil.OperationResultUpdated { if err := k8s.DeleteCompletedJobs(ctx, n.KubeClient, n.Mondoo.Namespace, ExternalClusterCronJobLabels(*n.Mondoo, cluster.Name), logger); err != nil { - return err - } - - if err := n.KubeClient.Update(ctx, existing); err != nil { - logger.Error(err, "Failed to update CronJob for external cluster", "cluster", cluster.Name) + logger.Error(err, "Failed to clean up completed Jobs after CronJob update for external cluster", "cluster", cluster.Name) return err } } @@ -553,34 +469,15 @@ func (n *DeploymentHandler) cleanupWorkloadDeployment(ctx context.Context) error // syncExternalClusterSAKubeconfigConfigMap syncs a ConfigMap containing the generated kubeconfig for ServiceAccountAuth func (n *DeploymentHandler) syncExternalClusterSAKubeconfigConfigMap(ctx context.Context, cluster v1alpha2.ExternalCluster) error { - existing := &corev1.ConfigMap{} desired := ExternalClusterSAKubeconfigConfigMap(cluster, n.Mondoo) - if err := ctrl.SetControllerReference(n.Mondoo, desired, n.KubeClient.Scheme()); err != nil { - logger.Error(err, "Failed to set ControllerReference", "namespace", desired.Namespace, "name", desired.Name) - return err - } - - created, err := k8s.CreateIfNotExist(ctx, n.KubeClient, existing, desired) - if err != nil { - logger.Error(err, "Failed to create SA kubeconfig ConfigMap for external cluster", "cluster", cluster.Name) - return err - } - - if created { - logger.Info("Created SA kubeconfig ConfigMap for external cluster", "cluster", cluster.Name) + obj := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: desired.Name, Namespace: desired.Namespace}} + if _, err := k8s.CreateOrUpdate(ctx, n.KubeClient, obj, n.Mondoo, logger, func() error { + obj.Labels = desired.Labels + 
obj.Data = desired.Data return nil - } - - if existing.Data["kubeconfig"] != desired.Data["kubeconfig"] || - !reflect.DeepEqual(existing.GetOwnerReferences(), desired.GetOwnerReferences()) { - existing.Data["kubeconfig"] = desired.Data["kubeconfig"] - existing.SetOwnerReferences(desired.GetOwnerReferences()) - - if err := n.KubeClient.Update(ctx, existing); err != nil { - logger.Error(err, "Failed to update SA kubeconfig ConfigMap for external cluster", "cluster", cluster.Name) - return err - } + }); err != nil { + return err } return nil @@ -590,43 +487,13 @@ func (n *DeploymentHandler) syncExternalClusterSAKubeconfigConfigMap(ctx context func (n *DeploymentHandler) syncWIFServiceAccount(ctx context.Context, cluster v1alpha2.ExternalCluster) error { desired := WIFServiceAccount(cluster, n.Mondoo) - if err := ctrl.SetControllerReference(n.Mondoo, desired, n.KubeClient.Scheme()); err != nil { - logger.Error(err, "Failed to set ControllerReference", "namespace", desired.Namespace, "name", desired.Name) - return err - } - - existing := &corev1.ServiceAccount{} - created, err := k8s.CreateIfNotExist(ctx, n.KubeClient, existing, desired) - if err != nil { - logger.Error(err, "Failed to create WIF ServiceAccount for external cluster", "cluster", cluster.Name) - return err - } - - if created { - logger.Info("Created WIF ServiceAccount for external cluster", "cluster", cluster.Name) + obj := &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: desired.Name, Namespace: desired.Namespace}} + if _, err := k8s.CreateOrUpdate(ctx, n.KubeClient, obj, n.Mondoo, logger, func() error { + obj.Labels = desired.Labels + obj.Annotations = desired.Annotations return nil - } - - // Update if annotations or labels changed - needsUpdate := false - if !reflect.DeepEqual(existing.Annotations, desired.Annotations) { - existing.Annotations = desired.Annotations - needsUpdate = true - } - if !reflect.DeepEqual(existing.Labels, desired.Labels) { - existing.Labels = desired.Labels - 
needsUpdate = true - } - if !reflect.DeepEqual(existing.GetOwnerReferences(), desired.GetOwnerReferences()) { - existing.SetOwnerReferences(desired.GetOwnerReferences()) - needsUpdate = true - } - - if needsUpdate { - if err := n.KubeClient.Update(ctx, existing); err != nil { - logger.Error(err, "Failed to update WIF ServiceAccount for external cluster", "cluster", cluster.Name) - return err - } + }); err != nil { + return err } return nil diff --git a/controllers/k8s_scan/resources.go b/controllers/k8s_scan/resources.go index 413cf7ac4..d6df4353b 100644 --- a/controllers/k8s_scan/resources.go +++ b/controllers/k8s_scan/resources.go @@ -120,7 +120,9 @@ func CronJob(image, integrationMrn, clusterUid string, m *v1alpha2.MondooAuditCo MountPath: "/tmp", }, }, - Env: envVars, + Env: envVars, + TerminationMessagePath: "/dev/termination-log", + TerminationMessagePolicy: corev1.TerminationMessageReadFile, }, }, ServiceAccountName: m.Spec.Scanner.ServiceAccountName, @@ -431,8 +433,10 @@ func ExternalClusterCronJob(image string, cluster v1alpha2.ExternalCluster, m *v RunAsUser: ptr.To(int64(101)), Privileged: ptr.To(false), }, - VolumeMounts: volumeMounts, - Env: envVars, + VolumeMounts: volumeMounts, + Env: envVars, + TerminationMessagePath: "/dev/termination-log", + TerminationMessagePolicy: corev1.TerminationMessageReadFile, }, }, Volumes: volumes, @@ -722,6 +726,8 @@ retry az aks get-credentials \ corev1.ResourceMemory: resource.MustParse("256Mi"), }, }, + TerminationMessagePath: "/dev/termination-log", + TerminationMessagePolicy: corev1.TerminationMessageReadFile, SecurityContext: &corev1.SecurityContext{ AllowPrivilegeEscalation: ptr.To(false), ReadOnlyRootFilesystem: ptr.To(true), @@ -848,6 +854,8 @@ kill $HELPER_PID 2>/dev/null || true corev1.ResourceMemory: resource.MustParse("128Mi"), }, }, + TerminationMessagePath: "/dev/termination-log", + TerminationMessagePolicy: corev1.TerminationMessageReadFile, SecurityContext: &corev1.SecurityContext{ 
AllowPrivilegeEscalation: ptr.To(false), ReadOnlyRootFilesystem: ptr.To(true), diff --git a/controllers/nodes/deployment_handler.go b/controllers/nodes/deployment_handler.go index 6f371730b..c5497b3b2 100644 --- a/controllers/nodes/deployment_handler.go +++ b/controllers/nodes/deployment_handler.go @@ -86,23 +86,14 @@ func (n *DeploymentHandler) syncCronJob(ctx context.Context) error { return err } - updated, err := n.syncConfigMap(ctx, clusterUid) - if err != nil { + if err := n.syncConfigMap(ctx, clusterUid); err != nil { return err } - // TODO: for CronJob we might consider triggering the CronJob now after the ConfigMap has been changed. It will make sense from the - // user perspective to want to run the jobs after you have updated the config. - if updated { - logger.Info( - "Inventory ConfigMap was just updated. The job will use the new config during the next scheduled run.", - "namespace", n.Mondoo.Namespace, - "name", CronJobName(n.Mondoo.Name, node.Name)) - } - - cronJob := &batchv1.CronJob{ObjectMeta: metav1.ObjectMeta{Name: CronJobName(n.Mondoo.Name, node.Name), Namespace: n.Mondoo.Namespace}} + desired := CronJob(mondooClientImage, node, n.Mondoo, n.IsOpenshift, *n.MondooOperatorConfig) + cronJob := &batchv1.CronJob{ObjectMeta: metav1.ObjectMeta{Name: desired.Name, Namespace: desired.Namespace}} op, err := k8s.CreateOrUpdate(ctx, n.KubeClient, cronJob, n.Mondoo, logger, func() error { - UpdateCronJob(cronJob, mondooClientImage, node, n.Mondoo, n.IsOpenshift, *n.MondooOperatorConfig) + k8s.UpdateCronJobFields(cronJob, desired) return nil }) if err != nil { @@ -111,15 +102,14 @@ func (n *DeploymentHandler) syncCronJob(ctx context.Context) error { switch op { case controllerutil.OperationResultCreated: - if err = mondoo.UpdateMondooAuditConfig(ctx, n.KubeClient, n.Mondoo, logger); err != nil { + if err := mondoo.UpdateMondooAuditConfig(ctx, n.KubeClient, n.Mondoo, logger); err != nil { logger.Error(err, "Failed to update MondooAuditConfig", "namespace", 
n.Mondoo.Namespace, "name", n.Mondoo.Name) return err } - continue case controllerutil.OperationResultUpdated: - // Remove completed/failed jobs because they won't be updated when the cronjob changes. - // Active jobs are preserved to avoid killing in-progress scans. + // Remove completed Jobs so they don't linger with stale config if err := k8s.DeleteCompletedJobs(ctx, n.KubeClient, n.Mondoo.Namespace, NodeScanningLabels(*n.Mondoo), logger); err != nil { + logger.Error(err, "Failed to clean up completed Jobs after CronJob update") return err } } @@ -201,18 +191,10 @@ func (n *DeploymentHandler) syncDaemonSet(ctx context.Context) error { return err } - updated, err := n.syncConfigMap(ctx, clusterUid) - if err != nil { + if err := n.syncConfigMap(ctx, clusterUid); err != nil { return err } - if updated { - logger.Info( - "Inventory ConfigMap was just updated. The daemonset will use the new config during the next scheduled run.", - "namespace", n.Mondoo.Namespace, - "name", DeploymentName(n.Mondoo.Name, node.Name)) - } - if n.Mondoo.Spec.Nodes.Style == v1alpha2.NodeScanStyle_Deployment { dep := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: DeploymentName(n.Mondoo.Name, node.Name), Namespace: n.Mondoo.Namespace}} if err := k8s.DeleteIfExists(ctx, n.KubeClient, dep); err != nil { @@ -228,10 +210,10 @@ func (n *DeploymentHandler) syncDaemonSet(ctx context.Context) error { } } - ds := &appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{Name: DaemonSetName(n.Mondoo.Name), Namespace: n.Mondoo.Namespace}} + desired := DaemonSet(*n.Mondoo, n.IsOpenshift, mondooClientImage, *n.MondooOperatorConfig, slices.Collect(maps.Keys(tolerations))) + ds := &appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{Name: desired.Name, Namespace: desired.Namespace}} op, err := k8s.CreateOrUpdate(ctx, n.KubeClient, ds, n.Mondoo, logger, func() error { - UpdateDaemonSet(ds, *n.Mondoo, n.IsOpenshift, mondooClientImage, *n.MondooOperatorConfig, - slices.Collect(maps.Keys(tolerations))) + 
k8s.UpdateDaemonSetFields(ds, desired) return nil }) if err != nil { @@ -239,8 +221,7 @@ func (n *DeploymentHandler) syncDaemonSet(ctx context.Context) error { } if op == controllerutil.OperationResultCreated { - err = mondoo.UpdateMondooAuditConfig(ctx, n.KubeClient, n.Mondoo, logger) - if err != nil { + if err := mondoo.UpdateMondooAuditConfig(ctx, n.KubeClient, n.Mondoo, logger); err != nil { logger.Error(err, "Failed to update MondooAuditConfig", "namespace", n.Mondoo.Namespace, "name", n.Mondoo.Name) return err } @@ -276,25 +257,29 @@ func (n *DeploymentHandler) syncDaemonSet(ctx context.Context) error { return nil } -// syncConfigMap syncs the inventory ConfigMap. Returns a boolean indicating whether the ConfigMap has been updated. It -// can only be "true", if the ConfigMap existed before this reconcile cycle and the inventory was different from the -// desired state. -func (n *DeploymentHandler) syncConfigMap(ctx context.Context, clusterUid string) (bool, error) { +func (n *DeploymentHandler) syncConfigMap(ctx context.Context, clusterUid string) error { integrationMrn, err := k8s.TryGetIntegrationMrnForAuditConfig(ctx, n.KubeClient, *n.Mondoo) if err != nil { logger.Error(err, "failed to retrieve IntegrationMRN") - return false, err + return err } - cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: ConfigMapName(n.Mondoo.Name), Namespace: n.Mondoo.Namespace}} - op, err := k8s.CreateOrUpdate(ctx, n.KubeClient, cm, n.Mondoo, logger, func() error { - return UpdateConfigMap(cm, integrationMrn, clusterUid, *n.Mondoo) - }) + desired, err := ConfigMap(integrationMrn, clusterUid, *n.Mondoo) if err != nil { - return false, err + logger.Error(err, "failed to generate ConfigMap") + return err + } + + obj := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: desired.Name, Namespace: desired.Namespace}} + if _, err := k8s.CreateOrUpdate(ctx, n.KubeClient, obj, n.Mondoo, logger, func() error { + obj.Labels = desired.Labels + obj.Data = desired.Data + return 
nil + }); err != nil { + return err } - return op == controllerutil.OperationResultUpdated, nil + return nil } // cleanupCronJobsForDeletedNodes deletes dangling CronJobs for nodes that have been deleted from the cluster. diff --git a/controllers/nodes/deployment_handler_test.go b/controllers/nodes/deployment_handler_test.go index d30ea245d..87d6858e9 100644 --- a/controllers/nodes/deployment_handler_test.go +++ b/controllers/nodes/deployment_handler_test.go @@ -20,7 +20,6 @@ import ( appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -70,9 +69,9 @@ func (s *DeploymentHandlerSuite) TestReconcile_CreateConfigMap() { }} s.NoError(d.KubeClient.Get(s.ctx, client.ObjectKeyFromObject(cfgMap), cfgMap)) - cfgMapExpected := cfgMap.DeepCopy() - s.Require().NoError(UpdateConfigMap(cfgMapExpected, "", testClusterUID, s.auditConfig)) - s.True(equality.Semantic.DeepEqual(cfgMapExpected, cfgMap)) + cfgMapExpected, err := ConfigMap("", testClusterUID, s.auditConfig) + s.Require().NoError(err) + s.Equal(cfgMapExpected.Data, cfgMap.Data) } func (s *DeploymentHandlerSuite) TestReconcile_CreateConfigMapWithIntegrationMRN() { @@ -109,9 +108,9 @@ func (s *DeploymentHandlerSuite) TestReconcile_CreateConfigMapWithIntegrationMRN }} s.NoError(d.KubeClient.Get(s.ctx, client.ObjectKeyFromObject(cfgMap), cfgMap)) - cfgMapExpected := cfgMap.DeepCopy() - s.Require().NoError(UpdateConfigMap(cfgMapExpected, testIntegrationMRN, testClusterUID, s.auditConfig)) - s.True(equality.Semantic.DeepEqual(cfgMapExpected, cfgMap)) + cfgMapExpected, err := ConfigMap(testIntegrationMRN, testClusterUID, s.auditConfig) + s.Require().NoError(err) + s.Equal(cfgMapExpected.Data, cfgMap.Data) } func (s *DeploymentHandlerSuite) TestReconcile_UpdateConfigMap() { @@ -123,10 +122,8 @@ func (s *DeploymentHandlerSuite) 
TestReconcile_UpdateConfigMap() { nodes := &corev1.NodeList{} s.NoError(d.KubeClient.List(s.ctx, nodes)) - cfgMap := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{ - Name: ConfigMapName(s.auditConfig.Name), Namespace: s.auditConfig.Namespace, - }} - s.Require().NoError(UpdateConfigMap(cfgMap, "", testClusterUID, s.auditConfig)) + cfgMap, err := ConfigMap("", testClusterUID, s.auditConfig) + s.Require().NoError(err) cfgMap.Data["inventory"] = "" s.NoError(d.KubeClient.Create(s.ctx, cfgMap)) @@ -139,9 +136,9 @@ func (s *DeploymentHandlerSuite) TestReconcile_UpdateConfigMap() { }} s.NoError(d.KubeClient.Get(s.ctx, client.ObjectKeyFromObject(cfgMap), cfgMap)) - cfgMapExpected := cfgMap.DeepCopy() - s.Require().NoError(UpdateConfigMap(cfgMapExpected, "", testClusterUID, s.auditConfig)) - s.True(equality.Semantic.DeepEqual(cfgMapExpected, cfgMap)) + cfgMapExpected, err := ConfigMap("", testClusterUID, s.auditConfig) + s.Require().NoError(err) + s.Equal(cfgMapExpected.Data, cfgMap.Data) } func (s *DeploymentHandlerSuite) TestReconcile_CronJob_CleanConfigMapsForDeletedNodes() { @@ -176,9 +173,9 @@ func (s *DeploymentHandlerSuite) TestReconcile_CronJob_CleanConfigMapsForDeleted }} s.NoError(d.KubeClient.Get(s.ctx, client.ObjectKeyFromObject(cfgMap), cfgMap)) - cfgMapExpected := cfgMap.DeepCopy() - s.Require().NoError(UpdateConfigMap(cfgMapExpected, "", testClusterUID, s.auditConfig)) - s.True(equality.Semantic.DeepEqual(cfgMapExpected, cfgMap)) + cfgMapExpected, err := ConfigMap("", testClusterUID, s.auditConfig) + s.Require().NoError(err) + s.Equal(cfgMapExpected.Data, cfgMap.Data) } func (s *DeploymentHandlerSuite) TestReconcile_Deployment_CleanConfigMapsForDeletedNodes() { @@ -214,9 +211,9 @@ func (s *DeploymentHandlerSuite) TestReconcile_Deployment_CleanConfigMapsForDele }} s.NoError(d.KubeClient.Get(s.ctx, client.ObjectKeyFromObject(cfgMap), cfgMap)) - cfgMapExpected := cfgMap.DeepCopy() - s.Require().NoError(UpdateConfigMap(cfgMapExpected, "", testClusterUID, 
s.auditConfig)) - s.True(equality.Semantic.DeepEqual(cfgMapExpected, cfgMap)) + cfgMapExpected, err := ConfigMap("", testClusterUID, s.auditConfig) + s.Require().NoError(err) + s.Equal(cfgMapExpected.Data, cfgMap.Data) } func (s *DeploymentHandlerSuite) TestReconcile_CreateCronJobs() { @@ -240,12 +237,11 @@ func (s *DeploymentHandlerSuite) TestReconcile_CreateCronJobs() { cj := &batchv1.CronJob{ObjectMeta: metav1.ObjectMeta{Name: CronJobName(s.auditConfig.Name, n.Name), Namespace: s.auditConfig.Namespace}} s.NoError(d.KubeClient.Get(s.ctx, client.ObjectKeyFromObject(cj), cj)) - cjExpected := cj.DeepCopy() - UpdateCronJob(cjExpected, image, n, &s.auditConfig, false, v1alpha2.MondooOperatorConfig{}) + cjExpected := CronJob(image, n, &s.auditConfig, false, v1alpha2.MondooOperatorConfig{}) // Make sure the env vars for both are sorted utils.SortEnvVars(cjExpected.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Env) utils.SortEnvVars(cj.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Env) - s.True(equality.Semantic.DeepEqual(cjExpected, cj)) + s.Equal(cjExpected.Spec, cj.Spec) } // Verify node garbage collection cronjob does not exist (removed in simplification) @@ -275,12 +271,11 @@ func (s *DeploymentHandlerSuite) TestReconcile_CreateCronJobs_CustomEnvVars() { cj := &batchv1.CronJob{ObjectMeta: metav1.ObjectMeta{Name: CronJobName(s.auditConfig.Name, n.Name), Namespace: s.auditConfig.Namespace}} s.NoError(d.KubeClient.Get(s.ctx, client.ObjectKeyFromObject(cj), cj)) - cjExpected := cj.DeepCopy() - UpdateCronJob(cjExpected, image, n, &s.auditConfig, false, v1alpha2.MondooOperatorConfig{}) + cjExpected := CronJob(image, n, &s.auditConfig, false, v1alpha2.MondooOperatorConfig{}) // Make sure the env vars for both are sorted utils.SortEnvVars(cjExpected.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Env) utils.SortEnvVars(cj.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Env) - s.True(equality.Semantic.DeepEqual(cjExpected, cj)) + s.Equal(cjExpected.Spec, cj.Spec) 
} // Verify node garbage collection cronjob does not exist (removed in simplification) @@ -309,12 +304,11 @@ func (s *DeploymentHandlerSuite) TestReconcile_CreateCronJobs_Switch() { cj := &batchv1.CronJob{ObjectMeta: metav1.ObjectMeta{Name: CronJobName(s.auditConfig.Name, n.Name), Namespace: s.auditConfig.Namespace}} s.NoError(d.KubeClient.Get(s.ctx, client.ObjectKeyFromObject(cj), cj)) - cjExpected := cj.DeepCopy() - UpdateCronJob(cjExpected, image, n, &s.auditConfig, false, v1alpha2.MondooOperatorConfig{}) + cjExpected := CronJob(image, n, &s.auditConfig, false, v1alpha2.MondooOperatorConfig{}) // Make sure the env vars for both are sorted utils.SortEnvVars(cjExpected.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Env) utils.SortEnvVars(cj.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Env) - s.True(equality.Semantic.DeepEqual(cjExpected, cj)) + s.Equal(cjExpected.Spec, cj.Spec) } mondooAuditConfig.Spec.Nodes.Style = v1alpha2.NodeScanStyle_Deployment // TODO: Change to DaemonSet (no effect on reconsile logic) @@ -350,8 +344,7 @@ func (s *DeploymentHandlerSuite) TestReconcile_UpdateCronJobs() { s.NoError(err) // Make sure a cron job exists for one of the nodes - cj := &batchv1.CronJob{ObjectMeta: metav1.ObjectMeta{Name: CronJobName(s.auditConfig.Name, nodes.Items[1].Name), Namespace: s.auditConfig.Namespace}} - UpdateCronJob(cj, image, nodes.Items[1], &s.auditConfig, false, v1alpha2.MondooOperatorConfig{}) + cj := CronJob(image, nodes.Items[1], &s.auditConfig, false, v1alpha2.MondooOperatorConfig{}) cj.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Command = []string{"test-command"} s.NoError(d.KubeClient.Create(s.ctx, cj)) @@ -363,12 +356,11 @@ func (s *DeploymentHandlerSuite) TestReconcile_UpdateCronJobs() { cj := &batchv1.CronJob{ObjectMeta: metav1.ObjectMeta{Name: CronJobName(s.auditConfig.Name, n.Name), Namespace: s.auditConfig.Namespace}} s.NoError(d.KubeClient.Get(s.ctx, client.ObjectKeyFromObject(cj), cj)) - cjExpected := cj.DeepCopy() - 
UpdateCronJob(cjExpected, image, n, &s.auditConfig, false, v1alpha2.MondooOperatorConfig{}) + cjExpected := CronJob(image, n, &s.auditConfig, false, v1alpha2.MondooOperatorConfig{}) // Make sure the env vars for both are sorted utils.SortEnvVars(cjExpected.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Env) utils.SortEnvVars(cj.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Env) - s.True(equality.Semantic.DeepEqual(cjExpected, cj)) + s.Equal(cjExpected.Spec, cj.Spec) } } @@ -410,12 +402,11 @@ func (s *DeploymentHandlerSuite) TestReconcile_CleanCronJobsForDeletedNodes() { cj := &batchv1.CronJob{ObjectMeta: metav1.ObjectMeta{Name: CronJobName(s.auditConfig.Name, nodes.Items[0].Name), Namespace: s.auditConfig.Namespace}} s.NoError(d.KubeClient.Get(s.ctx, client.ObjectKeyFromObject(cj), cj)) - cjExpected := cj.DeepCopy() - UpdateCronJob(cjExpected, image, nodes.Items[0], &s.auditConfig, false, v1alpha2.MondooOperatorConfig{}) + cjExpected := CronJob(image, nodes.Items[0], &s.auditConfig, false, v1alpha2.MondooOperatorConfig{}) // Make sure the env vars for both are sorted utils.SortEnvVars(cjExpected.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Env) utils.SortEnvVars(cj.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Env) - s.True(equality.Semantic.DeepEqual(cjExpected, cj)) + s.Equal(cjExpected.Spec, cj.Spec) } func (s *DeploymentHandlerSuite) TestReconcile_CreateDaemonSets() { @@ -439,13 +430,12 @@ func (s *DeploymentHandlerSuite) TestReconcile_CreateDaemonSets() { ds := &appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{Name: DaemonSetName(s.auditConfig.Name), Namespace: s.auditConfig.Namespace}} s.NoError(d.KubeClient.Get(s.ctx, client.ObjectKeyFromObject(ds), ds)) - dsExpected := ds.DeepCopy() - UpdateDaemonSet(dsExpected, s.auditConfig, false, image, v1alpha2.MondooOperatorConfig{}, + dsExpected := DaemonSet(s.auditConfig, false, image, v1alpha2.MondooOperatorConfig{}, []corev1.Toleration{{Key: "node-role.kubernetes.io/master", Value: "true", Effect: 
corev1.TaintEffectNoExecute}}) // Make sure the env vars for both are sorted utils.SortEnvVars(dsExpected.Spec.Template.Spec.Containers[0].Env) utils.SortEnvVars(ds.Spec.Template.Spec.Containers[0].Env) - s.True(equality.Semantic.DeepEqual(dsExpected, ds)) + s.Equal(dsExpected.Spec, ds.Spec) // Verify node garbage collection cronjob does not exist (removed in simplification) gcCj := &batchv1.CronJob{ObjectMeta: metav1.ObjectMeta{Name: GarbageCollectCronJobName(s.auditConfig.Name), Namespace: s.auditConfig.Namespace}} @@ -473,10 +463,9 @@ func (s *DeploymentHandlerSuite) TestReconcile_CreateDaemonSets_Switch() { ds := &appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{Name: DaemonSetName(s.auditConfig.Name), Namespace: s.auditConfig.Namespace}} s.NoError(d.KubeClient.Get(s.ctx, client.ObjectKeyFromObject(ds), ds)) - dsExpected := ds.DeepCopy() - UpdateDaemonSet(dsExpected, s.auditConfig, false, image, v1alpha2.MondooOperatorConfig{}, + dsExpected := DaemonSet(s.auditConfig, false, image, v1alpha2.MondooOperatorConfig{}, []corev1.Toleration{{Key: "node-role.kubernetes.io/master", Value: "true", Effect: corev1.TaintEffectNoExecute}}) - s.True(equality.Semantic.DeepEqual(dsExpected, ds)) + s.Equal(dsExpected.Spec, ds.Spec) mondooAuditConfig.Spec.Nodes.Style = v1alpha2.NodeScanStyle_CronJob result, err = d.Reconcile(s.ctx) @@ -512,8 +501,7 @@ func (s *DeploymentHandlerSuite) TestReconcile_UpdateDaemonSets() { s.NoError(err) // Make sure a daemonset exists - ds := &appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{Name: DaemonSetName(s.auditConfig.Name), Namespace: s.auditConfig.Namespace}} - UpdateDaemonSet(ds, s.auditConfig, false, image, v1alpha2.MondooOperatorConfig{}, nil) + ds := DaemonSet(s.auditConfig, false, image, v1alpha2.MondooOperatorConfig{}, nil) ds.Spec.Template.Spec.Containers[0].Command = []string{"test-command"} s.NoError(d.KubeClient.Create(s.ctx, ds)) @@ -524,10 +512,9 @@ func (s *DeploymentHandlerSuite) TestReconcile_UpdateDaemonSets() { ds = 
&appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{Name: DaemonSetName(s.auditConfig.Name), Namespace: s.auditConfig.Namespace}} s.NoError(d.KubeClient.Get(s.ctx, client.ObjectKeyFromObject(ds), ds)) - depExpected := ds.DeepCopy() - UpdateDaemonSet(depExpected, s.auditConfig, false, image, v1alpha2.MondooOperatorConfig{}, + depExpected := DaemonSet(s.auditConfig, false, image, v1alpha2.MondooOperatorConfig{}, []corev1.Toleration{{Key: "node-role.kubernetes.io/master", Value: "true", Effect: corev1.TaintEffectNoExecute}}) - s.True(equality.Semantic.DeepEqual(depExpected, ds)) + s.Equal(depExpected.Spec, ds.Spec) } func (s *DeploymentHandlerSuite) TestReconcile_CronJob_NodeScanningStatus() { diff --git a/controllers/nodes/resources.go b/controllers/nodes/resources.go index cce02a15b..dfea08149 100644 --- a/controllers/nodes/resources.go +++ b/controllers/nodes/resources.go @@ -37,7 +37,8 @@ const ( ignoreAnnotationValue = "ignore" ) -func UpdateCronJob(cj *batchv1.CronJob, image string, node corev1.Node, m *v1alpha2.MondooAuditConfig, isOpenshift bool, cfg v1alpha2.MondooOperatorConfig) { +// CronJob creates a CronJob for node scanning +func CronJob(image string, node corev1.Node, m *v1alpha2.MondooAuditConfig, isOpenshift bool, cfg v1alpha2.MondooOperatorConfig) *batchv1.CronJob { ls := NodeScanningLabels(*m) cmd := []string{ "cnspec", "scan", "local", @@ -49,148 +50,126 @@ func UpdateCronJob(cj *batchv1.CronJob, image string, node corev1.Node, m *v1alp cmd = append(cmd, []string{"--api-proxy", *cfg.Spec.HttpProxy}...) 
} - cj.Labels = ls - cj.Annotations = map[string]string{ - ignoreQueryAnnotationPrefix + "mondoo-kubernetes-security-cronjob-runasnonroot": ignoreAnnotationValue, - } - cj.Spec.Schedule = m.Spec.Nodes.Schedule - cj.Spec.ConcurrencyPolicy = batchv1.ForbidConcurrent - cj.Spec.SuccessfulJobsHistoryLimit = ptr.To(int32(1)) - cj.Spec.FailedJobsHistoryLimit = ptr.To(int32(1)) - // Allow one retry for node scanning (transient issues possible) - cj.Spec.JobTemplate.Spec.BackoffLimit = ptr.To(int32(1)) - cj.Spec.JobTemplate.Annotations = map[string]string{ - ignoreQueryAnnotationPrefix + "mondoo-kubernetes-security-job-runasnonroot": ignoreAnnotationValue, - } - cj.Spec.JobTemplate.Labels = ls - cj.Spec.JobTemplate.Spec.Template.Annotations = map[string]string{ - ignoreQueryAnnotationPrefix + "mondoo-kubernetes-security-pod-runasnonroot": ignoreAnnotationValue, - } - cj.Spec.JobTemplate.Spec.Template.Labels = ls - cj.Spec.JobTemplate.Spec.Template.Spec.NodeName = node.Name - cj.Spec.JobTemplate.Spec.Template.Spec.RestartPolicy = corev1.RestartPolicyOnFailure - cj.Spec.JobTemplate.Spec.Template.Spec.Tolerations = k8s.TaintsToTolerations(node.Spec.Taints) - // The node scanning does not use the Kubernetes API at all, therefore the service account token - // should not be mounted at all. 
- cj.Spec.JobTemplate.Spec.Template.Spec.AutomountServiceAccountToken = ptr.To(false) containerResources := k8s.ResourcesRequirementsWithDefaults(m.Spec.Nodes.Resources, k8s.DefaultNodeScanningResources) gcLimit := gomemlimit.CalculateGoMemLimit(containerResources) - cj.Spec.JobTemplate.Spec.Template.Spec.Containers = []corev1.Container{ - { - Image: image, - Name: "cnspec", - Command: cmd, - Resources: containerResources, - SecurityContext: &corev1.SecurityContext{ - AllowPrivilegeEscalation: ptr.To(isOpenshift), - ReadOnlyRootFilesystem: ptr.To(true), - RunAsNonRoot: ptr.To(false), - RunAsUser: ptr.To(int64(0)), - Capabilities: &corev1.Capabilities{ - Drop: []corev1.Capability{ - "ALL", - }, - }, - // RHCOS requires to run as privileged to properly do node scanning. If the container - // is not privileged, then we have no access to /proc. - Privileged: ptr.To(isOpenshift), - }, - VolumeMounts: []corev1.VolumeMount{ - { - Name: "root", - ReadOnly: true, - MountPath: "/mnt/host/", - }, - { - Name: "config", - ReadOnly: true, - MountPath: "/etc/opt/", - }, - { - Name: "temp", - MountPath: "/tmp", - }, + return &batchv1.CronJob{ + ObjectMeta: metav1.ObjectMeta{ + Name: CronJobName(m.Name, node.Name), + Namespace: m.Namespace, + Labels: ls, + Annotations: map[string]string{ + ignoreQueryAnnotationPrefix + "mondoo-kubernetes-security-cronjob-runasnonroot": ignoreAnnotationValue, }, - Env: k8s.MergeEnv([]corev1.EnvVar{ - { - Name: "DEBUG", - Value: "false", - }, - { - Name: "MONDOO_PROCFS", - Value: "on", - }, - { - Name: "MONDOO_AUTO_UPDATE", - Value: "false", - }, - { - Name: "NODE_NAME", - Value: node.Name, - }, - { - Name: "GOMEMLIMIT", - Value: gcLimit, - }, - }, m.Spec.Nodes.Env), - TerminationMessagePath: "/dev/termination-log", - TerminationMessagePolicy: corev1.TerminationMessageReadFile, - ImagePullPolicy: corev1.PullIfNotPresent, }, - } - cj.Spec.JobTemplate.Spec.Template.Spec.Volumes = []corev1.Volume{ - { - Name: "root", - VolumeSource: corev1.VolumeSource{ 
- HostPath: &corev1.HostPathVolumeSource{Path: "/", Type: ptr.To(corev1.HostPathUnset)}, - }, - }, - { - Name: "config", - VolumeSource: corev1.VolumeSource{ - Projected: &corev1.ProjectedVolumeSource{ - DefaultMode: ptr.To(corev1.ProjectedVolumeSourceDefaultMode), - Sources: []corev1.VolumeProjection{ - { - ConfigMap: &corev1.ConfigMapProjection{ - LocalObjectReference: corev1.LocalObjectReference{Name: ConfigMapName(m.Name)}, - Items: []corev1.KeyToPath{{ - Key: "inventory", - Path: "mondoo/inventory_template.yml", - }}, + Spec: batchv1.CronJobSpec{ + Schedule: m.Spec.Nodes.Schedule, + ConcurrencyPolicy: batchv1.ForbidConcurrent, + SuccessfulJobsHistoryLimit: ptr.To(int32(1)), + FailedJobsHistoryLimit: ptr.To(int32(1)), + JobTemplate: batchv1.JobTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: ls, + Annotations: map[string]string{ + ignoreQueryAnnotationPrefix + "mondoo-kubernetes-security-job-runasnonroot": ignoreAnnotationValue, + }, + }, + Spec: batchv1.JobSpec{ + // Allow one retry for node scanning (transient issues possible) + BackoffLimit: ptr.To(int32(1)), + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: ls, + Annotations: map[string]string{ + ignoreQueryAnnotationPrefix + "mondoo-kubernetes-security-pod-runasnonroot": ignoreAnnotationValue, }, }, - { - Secret: &corev1.SecretProjection{ - LocalObjectReference: m.Spec.MondooCredsSecretRef, - Items: []corev1.KeyToPath{{ - Key: "config", - Path: "mondoo/mondoo.yml", - }}, + Spec: corev1.PodSpec{ + NodeName: node.Name, + RestartPolicy: corev1.RestartPolicyOnFailure, + Tolerations: k8s.TaintsToTolerations(node.Spec.Taints), + // The node scanning does not use the Kubernetes API at all, therefore the service account token + // should not be mounted at all. 
+ AutomountServiceAccountToken: ptr.To(false), + Containers: []corev1.Container{ + { + Image: image, + Name: "cnspec", + Command: cmd, + Resources: containerResources, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: ptr.To(isOpenshift), + ReadOnlyRootFilesystem: ptr.To(true), + RunAsNonRoot: ptr.To(false), + RunAsUser: ptr.To(int64(0)), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + // RHCOS requires to run as privileged to properly do node scanning. If the container + // is not privileged, then we have no access to /proc. + Privileged: ptr.To(isOpenshift), + }, + VolumeMounts: []corev1.VolumeMount{ + {Name: "root", ReadOnly: true, MountPath: "/mnt/host/"}, + {Name: "config", ReadOnly: true, MountPath: "/etc/opt/"}, + {Name: "temp", MountPath: "/tmp"}, + }, + Env: k8s.MergeEnv([]corev1.EnvVar{ + {Name: "DEBUG", Value: "false"}, + {Name: "MONDOO_PROCFS", Value: "on"}, + {Name: "MONDOO_AUTO_UPDATE", Value: "false"}, + {Name: "NODE_NAME", Value: node.Name}, + {Name: "GOMEMLIMIT", Value: gcLimit}, + }, m.Spec.Nodes.Env), + TerminationMessagePath: "/dev/termination-log", + TerminationMessagePolicy: corev1.TerminationMessageReadFile, + ImagePullPolicy: corev1.PullIfNotPresent, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "root", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{Path: "/", Type: ptr.To(corev1.HostPathUnset)}, + }, + }, + { + Name: "config", + VolumeSource: corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{ + DefaultMode: ptr.To(corev1.ProjectedVolumeSourceDefaultMode), + Sources: []corev1.VolumeProjection{ + { + ConfigMap: &corev1.ConfigMapProjection{ + LocalObjectReference: corev1.LocalObjectReference{Name: ConfigMapName(m.Name)}, + Items: []corev1.KeyToPath{{Key: "inventory", Path: "mondoo/inventory_template.yml"}}, + }, + }, + { + Secret: &corev1.SecretProjection{ + LocalObjectReference: m.Spec.MondooCredsSecretRef, + Items: []corev1.KeyToPath{{Key: 
"config", Path: "mondoo/mondoo.yml"}}, + }, + }, + }, + }, + }, + }, + { + Name: "temp", + VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}, + }, }, }, }, }, }, }, - { - Name: "temp", - VolumeSource: corev1.VolumeSource{ - EmptyDir: &corev1.EmptyDirVolumeSource{}, - }, - }, } } -func UpdateDaemonSet( - ds *appsv1.DaemonSet, - m v1alpha2.MondooAuditConfig, - isOpenshift bool, - image string, - cfg v1alpha2.MondooOperatorConfig, - tolerations []corev1.Toleration, -) { +// DaemonSet creates a DaemonSet for node scanning +func DaemonSet(m v1alpha2.MondooAuditConfig, isOpenshift bool, image string, cfg v1alpha2.MondooOperatorConfig, tolerations []corev1.Toleration) *appsv1.DaemonSet { labels := NodeScanningLabels(m) cmd := []string{ "cnspec", "serve", @@ -202,147 +181,121 @@ func UpdateDaemonSet( cmd = append(cmd, []string{"--api-proxy", *cfg.Spec.HttpProxy}...) } - ds.Labels = labels - if ds.Annotations == nil { - ds.Annotations = map[string]string{} - } - ds.Annotations[ignoreQueryAnnotationPrefix+"mondoo-kubernetes-security-deployment-runasnonroot"] = ignoreAnnotationValue - ds.Spec.Selector = &metav1.LabelSelector{ - MatchLabels: labels, - } - ds.Spec.Template.Labels = labels - if ds.Spec.Template.Annotations == nil { - ds.Spec.Template.Annotations = map[string]string{} - } - ds.Spec.Template.Annotations[ignoreQueryAnnotationPrefix+"mondoo-kubernetes-security-pod-runasnonroot"] = ignoreAnnotationValue - ds.Spec.Template.Spec.PriorityClassName = m.Spec.Nodes.PriorityClassName - // The node scanning does not use the Kubernetes API at all, therefore the service account token - // should not be mounted at all. 
- ds.Spec.Template.Spec.AutomountServiceAccountToken = ptr.To(false) containerResources := k8s.ResourcesRequirementsWithDefaults(m.Spec.Nodes.Resources, k8s.DefaultNodeScanningResources) - - ds.Spec.Template.Spec.Tolerations = tolerations - gcLimit := gomemlimit.CalculateGoMemLimit(containerResources) - ds.Spec.Template.Spec.Containers = []corev1.Container{ - { - Image: image, - Name: "cnspec", - Command: cmd, - Resources: containerResources, - SecurityContext: &corev1.SecurityContext{ - AllowPrivilegeEscalation: ptr.To(isOpenshift), - ReadOnlyRootFilesystem: ptr.To(true), - RunAsNonRoot: ptr.To(false), - RunAsUser: ptr.To(int64(0)), - Capabilities: &corev1.Capabilities{ - Drop: []corev1.Capability{ - "ALL", - }, - }, - // RHCOS requires to run as privileged to properly do node scanning. If the container - // is not privileged, then we have no access to /proc. - Privileged: ptr.To(isOpenshift), + return &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: DaemonSetName(m.Name), + Namespace: m.Namespace, + Labels: labels, + Annotations: map[string]string{ + ignoreQueryAnnotationPrefix + "mondoo-kubernetes-security-deployment-runasnonroot": ignoreAnnotationValue, }, - TerminationMessagePath: "/dev/termination-log", - TerminationMessagePolicy: corev1.TerminationMessageReadFile, - ImagePullPolicy: corev1.PullIfNotPresent, - VolumeMounts: []corev1.VolumeMount{ - { - Name: "root", - ReadOnly: true, - MountPath: "/mnt/host/", - }, - { - Name: "config", - ReadOnly: true, - MountPath: "/etc/opt/", - }, - { - Name: "temp", - MountPath: "/tmp", - }, - }, - Env: k8s.MergeEnv([]corev1.EnvVar{ - { - Name: "DEBUG", - Value: "false", - }, - { - Name: "MONDOO_PROCFS", - Value: "on", - }, - { - Name: "MONDOO_AUTO_UPDATE", - Value: "false", - }, - { - Name: "GOMEMLIMIT", - Value: gcLimit, + }, + Spec: appsv1.DaemonSetSpec{ + Selector: &metav1.LabelSelector{MatchLabels: labels}, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labels, + 
Annotations: map[string]string{ + ignoreQueryAnnotationPrefix + "mondoo-kubernetes-security-pod-runasnonroot": ignoreAnnotationValue, + }, }, - { - Name: "NODE_NAME", - ValueFrom: &corev1.EnvVarSource{ - FieldRef: &corev1.ObjectFieldSelector{ - FieldPath: "spec.nodeName", + Spec: corev1.PodSpec{ + PriorityClassName: m.Spec.Nodes.PriorityClassName, + // The node scanning does not use the Kubernetes API at all, therefore the service account token + // should not be mounted at all. + AutomountServiceAccountToken: ptr.To(false), + Tolerations: tolerations, + Containers: []corev1.Container{ + { + Image: image, + Name: "cnspec", + Command: cmd, + Resources: containerResources, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: ptr.To(isOpenshift), + ReadOnlyRootFilesystem: ptr.To(true), + RunAsNonRoot: ptr.To(false), + RunAsUser: ptr.To(int64(0)), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + // RHCOS requires to run as privileged to properly do node scanning. If the container + // is not privileged, then we have no access to /proc. 
+ Privileged: ptr.To(isOpenshift), + }, + TerminationMessagePath: "/dev/termination-log", + TerminationMessagePolicy: corev1.TerminationMessageReadFile, + ImagePullPolicy: corev1.PullIfNotPresent, + VolumeMounts: []corev1.VolumeMount{ + {Name: "root", ReadOnly: true, MountPath: "/mnt/host/"}, + {Name: "config", ReadOnly: true, MountPath: "/etc/opt/"}, + {Name: "temp", MountPath: "/tmp"}, + }, + Env: k8s.MergeEnv([]corev1.EnvVar{ + {Name: "DEBUG", Value: "false"}, + {Name: "MONDOO_PROCFS", Value: "on"}, + {Name: "MONDOO_AUTO_UPDATE", Value: "false"}, + {Name: "GOMEMLIMIT", Value: gcLimit}, + {Name: "NODE_NAME", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{FieldPath: "spec.nodeName"}}}, + }, m.Spec.Nodes.Env), }, }, - }, - }, m.Spec.Nodes.Env), - }, - } - ds.Spec.Template.Spec.Volumes = []corev1.Volume{ - { - Name: "root", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{Path: "/", Type: ptr.To(corev1.HostPathUnset)}, - }, - }, - { - Name: "config", - VolumeSource: corev1.VolumeSource{ - Projected: &corev1.ProjectedVolumeSource{ - DefaultMode: ptr.To(corev1.ProjectedVolumeSourceDefaultMode), - Sources: []corev1.VolumeProjection{ + Volumes: []corev1.Volume{ { - ConfigMap: &corev1.ConfigMapProjection{ - LocalObjectReference: corev1.LocalObjectReference{Name: ConfigMapName(m.Name)}, - Items: []corev1.KeyToPath{{ - Key: "inventory", - Path: "mondoo/inventory_template.yml", - }}, + Name: "root", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{Path: "/", Type: ptr.To(corev1.HostPathUnset)}, }, }, { - Secret: &corev1.SecretProjection{ - LocalObjectReference: m.Spec.MondooCredsSecretRef, - Items: []corev1.KeyToPath{{ - Key: "config", - Path: "mondoo/mondoo.yml", - }}, + Name: "config", + VolumeSource: corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{ + DefaultMode: ptr.To(corev1.ProjectedVolumeSourceDefaultMode), + Sources: []corev1.VolumeProjection{ + { + ConfigMap: 
&corev1.ConfigMapProjection{ + LocalObjectReference: corev1.LocalObjectReference{Name: ConfigMapName(m.Name)}, + Items: []corev1.KeyToPath{{Key: "inventory", Path: "mondoo/inventory_template.yml"}}, + }, + }, + { + Secret: &corev1.SecretProjection{ + LocalObjectReference: m.Spec.MondooCredsSecretRef, + Items: []corev1.KeyToPath{{Key: "config", Path: "mondoo/mondoo.yml"}}, + }, + }, + }, + }, }, }, + { + Name: "temp", + VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}, + }, }, }, }, }, - { - Name: "temp", - VolumeSource: corev1.VolumeSource{ - EmptyDir: &corev1.EmptyDirVolumeSource{}, - }, - }, } } -func UpdateConfigMap(cm *corev1.ConfigMap, integrationMRN, clusterUID string, m v1alpha2.MondooAuditConfig) error { +// ConfigMap creates a ConfigMap for node scanning inventory +func ConfigMap(integrationMRN, clusterUID string, m v1alpha2.MondooAuditConfig) (*corev1.ConfigMap, error) { inv, err := Inventory(integrationMRN, clusterUID, m) if err != nil { - return err + return nil, err } - cm.Data = map[string]string{"inventory": inv} - return nil + return &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: ConfigMapName(m.Name), + Namespace: m.Namespace, + }, + Data: map[string]string{"inventory": inv}, + }, nil } func CronJobName(prefix, suffix string) string { diff --git a/controllers/nodes/resources_test.go b/controllers/nodes/resources_test.go index 471f5e5ca..b2953a6bc 100644 --- a/controllers/nodes/resources_test.go +++ b/controllers/nodes/resources_test.go @@ -17,8 +17,6 @@ import ( "go.mondoo.com/mondoo-operator/pkg/constants" "go.mondoo.com/mondoo-operator/pkg/utils/k8s" "go.mondoo.com/mondoo-operator/tests/framework/utils" - appsv1 "k8s.io/api/apps/v1" - batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -114,14 +112,13 @@ func TestResources(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - testNode 
:= &corev1.Node{ + testNode := corev1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: "test-node-name", }, } - mac := *test.mondooauditconfig() - cj := &batchv1.CronJob{ObjectMeta: metav1.ObjectMeta{Name: "name", Namespace: mac.Namespace}} - UpdateCronJob(cj, "test123", *testNode, &mac, false, v1alpha2.MondooOperatorConfig{}) + mac := test.mondooauditconfig() + cj := CronJob("test123", testNode, mac, false, v1alpha2.MondooOperatorConfig{}) assert.Equal(t, test.expectedResources, cj.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Resources) }) } @@ -170,14 +167,13 @@ func TestResources_GOMEMLIMIT(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - testNode := &corev1.Node{ + testNode := corev1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: "test-node-name", }, } - mac := *test.mondooauditconfig() - cj := &batchv1.CronJob{ObjectMeta: metav1.ObjectMeta{Name: "name", Namespace: mac.Namespace}} - UpdateCronJob(cj, "test123", *testNode, &mac, false, v1alpha2.MondooOperatorConfig{}) + mac := test.mondooauditconfig() + cj := CronJob("test123", testNode, mac, false, v1alpha2.MondooOperatorConfig{}) goMemLimitEnv := corev1.EnvVar{} for _, env := range cj.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Env { if env.Name == "GOMEMLIMIT" { @@ -192,8 +188,7 @@ func TestResources_GOMEMLIMIT(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { mac := *test.mondooauditconfig() - ds := &appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{Name: "name", Namespace: mac.Namespace}} - UpdateDaemonSet(ds, mac, false, "test123", v1alpha2.MondooOperatorConfig{}, nil) + ds := DaemonSet(mac, false, "test123", v1alpha2.MondooOperatorConfig{}, nil) goMemLimitEnv := corev1.EnvVar{} for _, env := range ds.Spec.Template.Spec.Containers[0].Env { if env.Name == "GOMEMLIMIT" { @@ -207,27 +202,25 @@ func TestResources_GOMEMLIMIT(t *testing.T) { } func TestCronJob_PrivilegedOpenshift(t *testing.T) { - testNode := &corev1.Node{ + testNode := 
corev1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: "test-node-name", }, } mac := testMondooAuditConfig() - cj := &batchv1.CronJob{ObjectMeta: metav1.ObjectMeta{Name: "name", Namespace: mac.Namespace}} - UpdateCronJob(cj, "test123", *testNode, mac, true, v1alpha2.MondooOperatorConfig{}) + cj := CronJob("test123", testNode, mac, true, v1alpha2.MondooOperatorConfig{}) assert.True(t, *cj.Spec.JobTemplate.Spec.Template.Spec.Containers[0].SecurityContext.Privileged) assert.True(t, *cj.Spec.JobTemplate.Spec.Template.Spec.Containers[0].SecurityContext.AllowPrivilegeEscalation) } func TestCronJob_Privileged(t *testing.T) { - testNode := &corev1.Node{ + testNode := corev1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: "test-node-name", }, } mac := testMondooAuditConfig() - cj := &batchv1.CronJob{ObjectMeta: metav1.ObjectMeta{Name: "name", Namespace: mac.Namespace}} - UpdateCronJob(cj, "test123", *testNode, mac, false, v1alpha2.MondooOperatorConfig{}) + cj := CronJob("test123", testNode, mac, false, v1alpha2.MondooOperatorConfig{}) assert.False(t, *cj.Spec.JobTemplate.Spec.Template.Spec.Containers[0].SecurityContext.Privileged) assert.False(t, *cj.Spec.JobTemplate.Spec.Template.Spec.Containers[0].SecurityContext.AllowPrivilegeEscalation) } diff --git a/controllers/resource_watcher/deployment_handler.go b/controllers/resource_watcher/deployment_handler.go index 24701e9a4..0cb70e918 100644 --- a/controllers/resource_watcher/deployment_handler.go +++ b/controllers/resource_watcher/deployment_handler.go @@ -5,7 +5,6 @@ package resource_watcher import ( "context" - "reflect" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -63,33 +62,15 @@ func (h *DeploymentHandler) syncDeployment(ctx context.Context) error { deploymentHandlerLogger.Info("Failed to get integration MRN, continuing without it", "error", err) } - existing := &appsv1.Deployment{} desired := Deployment(mondooClientImage, integrationMRN, clusterUID, h.Mondoo, *h.MondooOperatorConfig) - - if err := 
ctrl.SetControllerReference(h.Mondoo, desired, h.KubeClient.Scheme()); err != nil { - deploymentHandlerLogger.Error(err, "Failed to set ControllerReference", "namespace", desired.Namespace, "name", desired.Name) - return err - } - - created, err := k8s.CreateIfNotExist(ctx, h.KubeClient, existing, desired) - if err != nil { - deploymentHandlerLogger.Error(err, "Failed to create resource watcher Deployment", "namespace", desired.Namespace, "name", desired.Name) + obj := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: desired.Name, Namespace: desired.Namespace}} + if _, err := k8s.CreateOrUpdate(ctx, h.KubeClient, obj, h.Mondoo, deploymentHandlerLogger, func() error { + k8s.UpdateDeploymentFields(obj, desired) + return nil + }); err != nil { return err } - if created { - deploymentHandlerLogger.Info("Created resource watcher Deployment", "namespace", desired.Namespace, "name", desired.Name) - } else if !areDeploymentsEqual(*existing, *desired) { - existing.Spec = desired.Spec - existing.SetOwnerReferences(desired.GetOwnerReferences()) - - if err := h.KubeClient.Update(ctx, existing); err != nil { - deploymentHandlerLogger.Error(err, "Failed to update resource watcher Deployment", "namespace", existing.Namespace, "name", existing.Name) - return err - } - deploymentHandlerLogger.Info("Updated resource watcher Deployment", "namespace", existing.Namespace, "name", existing.Name) - } - // Get deployment status for condition updates deployments, err := h.getDeploymentsForAuditConfig(ctx) if err != nil { @@ -154,24 +135,3 @@ func (h *DeploymentHandler) down(ctx context.Context) error { return nil } - -// areDeploymentsEqual compares two deployments for equality in the fields we care about. 
-func areDeploymentsEqual(existing, desired appsv1.Deployment) bool { - // Compare specs (excluding status and metadata that changes) - if !reflect.DeepEqual(existing.Spec.Template.Spec.Containers, desired.Spec.Template.Spec.Containers) { - return false - } - if !reflect.DeepEqual(existing.Spec.Template.Spec.Volumes, desired.Spec.Template.Spec.Volumes) { - return false - } - if existing.Spec.Template.Spec.ServiceAccountName != desired.Spec.Template.Spec.ServiceAccountName { - return false - } - if !reflect.DeepEqual(existing.Spec.Selector, desired.Spec.Selector) { - return false - } - if !reflect.DeepEqual(existing.GetOwnerReferences(), desired.GetOwnerReferences()) { - return false - } - return true -} diff --git a/controllers/resource_watcher/resources.go b/controllers/resource_watcher/resources.go index 14968abf8..f5204d646 100644 --- a/controllers/resource_watcher/resources.go +++ b/controllers/resource_watcher/resources.go @@ -150,7 +150,9 @@ func Deployment(image, integrationMRN, clusterUID string, m *v1alpha2.MondooAudi MountPath: "/tmp", }, }, - Env: envVars, + Env: envVars, + TerminationMessagePath: "/dev/termination-log", + TerminationMessagePolicy: corev1.TerminationMessageReadFile, }, }, ServiceAccountName: m.Spec.Scanner.ServiceAccountName, diff --git a/go.mod b/go.mod index cd892085b..207e48392 100644 --- a/go.mod +++ b/go.mod @@ -223,7 +223,7 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect github.com/golang/protobuf v1.5.4 // indirect - github.com/google/go-cmp v0.7.0 + github.com/google/go-cmp v0.7.0 // indirect github.com/google/go-containerregistry v0.20.7 github.com/google/uuid v1.6.0 // indirect github.com/json-iterator/go v1.1.12 // indirect diff --git a/pkg/utils/k8s/equality.go b/pkg/utils/k8s/equality.go deleted file mode 100644 index a99eed801..000000000 --- a/pkg/utils/k8s/equality.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright Mondoo, Inc. 
2026 -// SPDX-License-Identifier: BUSL-1.1 - -package k8s - -import ( - "reflect" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - appsv1 "k8s.io/api/apps/v1" - batchv1 "k8s.io/api/batch/v1" - corev1 "k8s.io/api/core/v1" -) - -// AreDeploymentsEqual returns a value indicating whether 2 deployments are equal. Note that it does not perform a full -// comparison but checks just some of the properties of a deployment (only the ones we are currently interested at). -func AreDeploymentsEqual(a, b appsv1.Deployment) bool { - return len(a.Spec.Template.Spec.Containers) == len(b.Spec.Template.Spec.Containers) && - reflect.DeepEqual(a.Spec.Replicas, b.Spec.Replicas) && - reflect.DeepEqual(a.Spec.Selector, b.Spec.Selector) && - a.Spec.Template.Spec.ServiceAccountName == b.Spec.Template.Spec.ServiceAccountName && - reflect.DeepEqual(a.Spec.Template.Spec.Containers[0].Image, b.Spec.Template.Spec.Containers[0].Image) && - reflect.DeepEqual(a.Spec.Template.Spec.Containers[0].Command, b.Spec.Template.Spec.Containers[0].Command) && - reflect.DeepEqual(a.Spec.Template.Spec.Containers[0].Args, b.Spec.Template.Spec.Containers[0].Args) && - reflect.DeepEqual(a.Spec.Template.Spec.Containers[0].VolumeMounts, b.Spec.Template.Spec.Containers[0].VolumeMounts) && - AreEnvVarsEqual(a.Spec.Template.Spec.Containers[0].Env, b.Spec.Template.Spec.Containers[0].Env) && - AreResouceRequirementsEqual(a.Spec.Template.Spec.Containers[0].Resources, b.Spec.Template.Spec.Containers[0].Resources) && - reflect.DeepEqual(a.Spec.Template.Spec.Volumes, b.Spec.Template.Spec.Volumes) && - reflect.DeepEqual(a.Spec.Template.Spec.Affinity, b.Spec.Template.Spec.Affinity) && - AreSecurityContextsEqual(a.Spec.Template.Spec.Containers[0].SecurityContext, b.Spec.Template.Spec.Containers[0].SecurityContext) && - reflect.DeepEqual(a.GetOwnerReferences(), b.GetOwnerReferences()) -} - -// AreSecurityContextsEqual checks whether the provided Pod SecurityContexts are equal -// for the fields we 
are interested in. -func AreSecurityContextsEqual(a, b *corev1.SecurityContext) bool { - // If both left undefined, then they're equal to us - if a == nil && b == nil { - return true - } - // If not both are undefined, but one is, then unequal - if a == nil || b == nil { - return false - } - - // Finally do the field comparisons for the filds we care about - return reflect.DeepEqual(a.AllowPrivilegeEscalation, b.AllowPrivilegeEscalation) && - reflect.DeepEqual(a.ReadOnlyRootFilesystem, b.ReadOnlyRootFilesystem) && - reflect.DeepEqual(a.RunAsNonRoot, b.RunAsNonRoot) && - reflect.DeepEqual(a.Capabilities, b.Capabilities) && - reflect.DeepEqual(a.RunAsUser, b.RunAsUser) -} - -// AreServicesEqual return a value indicating whether 2 services are equal. Note that it -// does not perform a full comparison but checks just some of the properties of a deployment -// (only the ones we are currently interested at). -func AreServicesEqual(a, b corev1.Service) bool { - return reflect.DeepEqual(a.Spec.Ports, b.Spec.Ports) && - reflect.DeepEqual(a.Spec.Selector, b.Spec.Selector) && - reflect.DeepEqual(a.GetOwnerReferences(), b.GetOwnerReferences()) && - a.Spec.Type == b.Spec.Type -} - -// AreCronJobsEqual returns a value indicating whether 2 cron jobs are equal. Note that it does not perform a full -// comparison but checks just some of the properties of a deployment (only the ones we are currently interested at). 
-func AreCronJobsEqual(a, b batchv1.CronJob) bool { - aPodSpec := a.Spec.JobTemplate.Spec.Template.Spec - bPodSpec := b.Spec.JobTemplate.Spec.Template.Spec - - if len(aPodSpec.Containers) != len(bPodSpec.Containers) { - return false - } - if aPodSpec.ServiceAccountName != bPodSpec.ServiceAccountName { - return false - } - if !reflect.DeepEqual(aPodSpec.Tolerations, bPodSpec.Tolerations) { - return false - } - if !reflect.DeepEqual(aPodSpec.NodeName, bPodSpec.NodeName) { - return false - } - if !reflect.DeepEqual(aPodSpec.Containers[0].Image, bPodSpec.Containers[0].Image) { - return false - } - if !reflect.DeepEqual(aPodSpec.Containers[0].Command, bPodSpec.Containers[0].Command) { - return false - } - if !reflect.DeepEqual(aPodSpec.Containers[0].Args, bPodSpec.Containers[0].Args) { - return false - } - if !reflect.DeepEqual(aPodSpec.Containers[0].VolumeMounts, bPodSpec.Containers[0].VolumeMounts) { - return false - } - if !AreEnvVarsEqual(aPodSpec.Containers[0].Env, bPodSpec.Containers[0].Env) { - return false - } - if !AreResouceRequirementsEqual(aPodSpec.Containers[0].Resources, bPodSpec.Containers[0].Resources) { - return false - } - if !AreSecurityContextsEqual(aPodSpec.Containers[0].SecurityContext, bPodSpec.Containers[0].SecurityContext) { - return false - } - if !reflect.DeepEqual(aPodSpec.Volumes, bPodSpec.Volumes) { - return false - } - if !reflect.DeepEqual(a.Spec.SuccessfulJobsHistoryLimit, b.Spec.SuccessfulJobsHistoryLimit) { - return false - } - if a.Spec.ConcurrencyPolicy != b.Spec.ConcurrencyPolicy { - return false - } - if a.Spec.Schedule != b.Spec.Schedule { - return false - } - if !reflect.DeepEqual(a.Spec.FailedJobsHistoryLimit, b.Spec.FailedJobsHistoryLimit) { - return false - } - if !reflect.DeepEqual(a.GetOwnerReferences(), b.GetOwnerReferences()) { - return false - } - - return true -} - -// AreResouceRequirementsEqual returns a value indicating whether 2 resource requirements are equal. 
-func AreResouceRequirementsEqual(x corev1.ResourceRequirements, y corev1.ResourceRequirements) bool { - if x.Limits.Cpu().Equal(*y.Limits.Cpu()) && - x.Limits.Memory().Equal(*y.Limits.Memory()) && - x.Requests.Cpu().Equal(*y.Requests.Cpu()) && - x.Requests.Memory().Equal(*y.Requests.Memory()) { - return true - } - return false -} - -// AreEnvVarsEqual returns a value indicating whether 2 slices of environment variables are equal. Ordering -// is ignored. -func AreEnvVarsEqual(a, b []corev1.EnvVar) bool { - return cmp.Equal(a, b, cmpopts.SortSlices(func(a, b corev1.EnvVar) bool { return a.Name < b.Name })) -} diff --git a/pkg/utils/k8s/equality_test.go b/pkg/utils/k8s/equality_test.go deleted file mode 100644 index a427e39e6..000000000 --- a/pkg/utils/k8s/equality_test.go +++ /dev/null @@ -1,662 +0,0 @@ -// Copyright Mondoo, Inc. 2026 -// SPDX-License-Identifier: BUSL-1.1 - -package k8s - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/assert" - appsv1 "k8s.io/api/apps/v1" - batchv1 "k8s.io/api/batch/v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/utils/ptr" - - "k8s.io/client-go/kubernetes/scheme" - ctrl "sigs.k8s.io/controller-runtime" -) - -func TestAreDeploymentsEqual(t *testing.T) { - labels := map[string]string{"label": "value"} - a := appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "deployment", - Namespace: "ns", - Labels: labels, - }, - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: labels, - }, - Replicas: ptr.To(int32(1)), - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: labels, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{{ - Image: "test-image:latest", - Name: "mondoo-client", - Command: []string{"mondoo", "serve", "--api", "--config", "/etc/opt/mondoo/mondoo.yml"}, - Args: []string{"argA", "argB", "argC"}, - Resources: 
DefaultCnspecResources, - ReadinessProbe: &corev1.Probe{ - ProbeHandler: corev1.ProbeHandler{ - HTTPGet: &corev1.HTTPGetAction{ - Path: "/Health/Check", - Port: intstr.FromInt(443), - }, - }, - InitialDelaySeconds: 5, - PeriodSeconds: 300, - TimeoutSeconds: 5, - }, - StartupProbe: &corev1.Probe{ - ProbeHandler: corev1.ProbeHandler{ - HTTPGet: &corev1.HTTPGetAction{ - Path: "/Health/Check", - Port: intstr.FromInt(443), - }, - }, - InitialDelaySeconds: 5, - PeriodSeconds: 5, - FailureThreshold: 5, - }, - SecurityContext: &corev1.SecurityContext{ - Privileged: ptr.To(false), - Capabilities: &corev1.Capabilities{ - Add: []corev1.Capability{"NET_ADMIN"}, - }, - }, - VolumeMounts: []corev1.VolumeMount{ - { - Name: "config", - ReadOnly: true, - MountPath: "/etc/opt/", - }, - }, - Ports: []corev1.ContainerPort{ - {ContainerPort: 443, Protocol: corev1.ProtocolTCP}, - }, - Env: []corev1.EnvVar{ - {Name: "DEBUG", Value: "false"}, - {Name: "MONDOO_PROCFS", Value: "on"}, - {Name: "PORT", Value: fmt.Sprintf("%d", 443)}, - }, - }}, - ServiceAccountName: "service-account", - Volumes: []corev1.Volume{ - { - Name: "config", - VolumeSource: corev1.VolumeSource{ - Projected: &corev1.ProjectedVolumeSource{ - Sources: []corev1.VolumeProjection{ - { - Secret: &corev1.SecretProjection{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "secret", - }, - Items: []corev1.KeyToPath{{ - Key: "config", - Path: "mondoo/mondoo.yml", - }}, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - } - - tests := []struct { - name string - createB func(appsv1.Deployment) appsv1.Deployment - shouldBeEqual bool - }{ - { - name: "should be equal when identical", - createB: func(a appsv1.Deployment) appsv1.Deployment { - return *a.DeepCopy() - }, - shouldBeEqual: true, - }, - { - name: "should not be equal when container count differ", - createB: func(a appsv1.Deployment) appsv1.Deployment { - b := *a.DeepCopy() - b.Spec.Template.Spec.Containers = append( - b.Spec.Template.Spec.Containers, 
b.Spec.Template.Spec.Containers[0]) - return b - }, - shouldBeEqual: false, - }, - { - name: "should not be equal when replicas differ", - createB: func(a appsv1.Deployment) appsv1.Deployment { - b := *a.DeepCopy() - b.Spec.Replicas = ptr.To(int32(3)) - return b - }, - shouldBeEqual: false, - }, - { - name: "should not be equal when selectors differ", - createB: func(a appsv1.Deployment) appsv1.Deployment { - b := *a.DeepCopy() - b.Spec.Selector.MatchLabels["newLabel"] = "newValue" - return b - }, - shouldBeEqual: false, - }, - { - name: "should not be equal when service accounts differ", - createB: func(a appsv1.Deployment) appsv1.Deployment { - b := *a.DeepCopy() - b.Spec.Template.Spec.ServiceAccountName = "test" - return b - }, - shouldBeEqual: false, - }, - { - name: "should not be equal when container images differ", - createB: func(a appsv1.Deployment) appsv1.Deployment { - b := *a.DeepCopy() - b.Spec.Template.Spec.Containers[0].Image = "test" - return b - }, - shouldBeEqual: false, - }, - { - name: "should not be equal when container commands differ", - createB: func(a appsv1.Deployment) appsv1.Deployment { - b := *a.DeepCopy() - b.Spec.Template.Spec.Containers[0].Command = []string{"test"} - return b - }, - shouldBeEqual: false, - }, - { - name: "should not be equal when volume mounts differ", - createB: func(a appsv1.Deployment) appsv1.Deployment { - b := *a.DeepCopy() - b.Spec.Template.Spec.Containers[0].VolumeMounts = make([]corev1.VolumeMount, 0) - return b - }, - shouldBeEqual: false, - }, - { - name: "should not be equal when env vars differ", - createB: func(a appsv1.Deployment) appsv1.Deployment { - b := *a.DeepCopy() - b.Spec.Template.Spec.Containers[0].Env = make([]corev1.EnvVar, 0) - return b - }, - shouldBeEqual: false, - }, - { - name: "should not be equal when container resource requirements differ", - createB: func(a appsv1.Deployment) appsv1.Deployment { - b := *a.DeepCopy() - 
b.Spec.Template.Spec.Containers[0].Resources.Requests[corev1.ResourceCPU] = resource.MustParse("233m") - return b - }, - shouldBeEqual: false, - }, - { - name: "should not be equal when owner references differ", - createB: func(a appsv1.Deployment) appsv1.Deployment { - b := *a.DeepCopy() - assert.NoError(t, ctrl.SetControllerReference(&a, &b, scheme.Scheme)) - return b - }, - shouldBeEqual: false, - }, - { - name: "should not be equal when container args differ", - createB: func(a appsv1.Deployment) appsv1.Deployment { - b := *a.DeepCopy() - b.Spec.Template.Spec.Containers[0].Args = []string{"some", "different", "args"} - return b - }, - shouldBeEqual: false, - }, - { - name: "should not be equal when Pod volume definition(s) differ", - createB: func(a appsv1.Deployment) appsv1.Deployment { - b := *a.DeepCopy() - b.Spec.Template.Spec.Volumes[0].VolumeSource.Projected.Sources[0].Secret.Items[0].Key = "differentkey" - return b - }, - shouldBeEqual: false, - }, - { - name: "should not be equal when securityContext differ", - createB: func(a appsv1.Deployment) appsv1.Deployment { - b := *a.DeepCopy() - b.Spec.Template.Spec.Containers[0].SecurityContext = &corev1.SecurityContext{} - return b - }, - shouldBeEqual: false, - }, - { - name: "should not be equal when securityContext.capabilities differ", - createB: func(a appsv1.Deployment) appsv1.Deployment { - b := *a.DeepCopy() - b.Spec.Template.Spec.Containers[0].SecurityContext.Capabilities.Add = []corev1.Capability{"NET_RAW"} - return b - }, - shouldBeEqual: false, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - if test.shouldBeEqual { - assert.True(t, AreDeploymentsEqual(a, test.createB(a))) - } else { - assert.False(t, AreDeploymentsEqual(a, test.createB(a))) - } - }) - } -} - -func TestAreServicesEqual(t *testing.T) { - a := corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "name", - Namespace: "ns", - }, - Spec: corev1.ServiceSpec{ - Ports: []corev1.ServicePort{ - { - 
Port: 443, - Protocol: corev1.ProtocolTCP, - TargetPort: intstr.FromInt(443), - }, - }, - Selector: map[string]string{"label": "value"}, - Type: corev1.ServiceTypeClusterIP, - }, - } - - tests := []struct { - name string - createB func(corev1.Service) corev1.Service - shouldBeEqual bool - }{ - { - name: "should be equal when identical", - createB: func(a corev1.Service) corev1.Service { - return *a.DeepCopy() - }, - shouldBeEqual: true, - }, - { - name: "should not be equal when ports differ", - createB: func(a corev1.Service) corev1.Service { - b := *a.DeepCopy() - b.Spec.Ports[0].Name = "test" - return b - }, - shouldBeEqual: false, - }, - { - name: "should not be equal when selectors differ", - createB: func(a corev1.Service) corev1.Service { - b := *a.DeepCopy() - b.Spec.Selector["newLabel"] = "newValue" - return b - }, - shouldBeEqual: false, - }, - { - name: "should not be equal when types differ", - createB: func(a corev1.Service) corev1.Service { - b := *a.DeepCopy() - b.Spec.Type = corev1.ServiceTypeExternalName - return b - }, - shouldBeEqual: false, - }, - { - name: "should not be equal when owner references differ", - createB: func(a corev1.Service) corev1.Service { - b := *a.DeepCopy() - assert.NoError(t, ctrl.SetControllerReference(&a, &b, scheme.Scheme)) - return b - }, - shouldBeEqual: false, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - if test.shouldBeEqual { - assert.True(t, AreServicesEqual(a, test.createB(a))) - } else { - assert.False(t, AreServicesEqual(a, test.createB(a))) - } - }) - } -} - -func TestAreCronJobsEqual(t *testing.T) { - labels := map[string]string{"label": "value"} - a := batchv1.CronJob{ - ObjectMeta: metav1.ObjectMeta{ - Name: "cronjob", - Namespace: "ns", - Labels: labels, - }, - Spec: batchv1.CronJobSpec{ - Schedule: "0 * * * *", - ConcurrencyPolicy: batchv1.AllowConcurrent, - JobTemplate: batchv1.JobTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{Labels: labels}, - Spec: 
batchv1.JobSpec{ - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{Labels: labels}, - Spec: corev1.PodSpec{ - NodeName: "node01", - RestartPolicy: corev1.RestartPolicyOnFailure, - Tolerations: []corev1.Toleration{{ - Key: "key", - Effect: corev1.TaintEffectNoExecute, - Value: "value", - }}, - // The node scanning does not use the Kubernetes API at all, therefore the service account token - // should not be mounted at all. - AutomountServiceAccountToken: ptr.To(false), - Containers: []corev1.Container{ - { - Image: "test-image:latest", - Name: "mondoo-client", - Command: []string{ - "mondoo", "scan", - "--config", "/etc/opt/mondoo/mondoo.yml", - "--inventory-file", "/etc/opt/mondoo/inventory.yml", - "--exit-0-on-success", - }, - Resources: DefaultCnspecResources, - VolumeMounts: []corev1.VolumeMount{ - { - Name: "root", - ReadOnly: true, - MountPath: "/mnt/host/", - }, - { - Name: "config", - ReadOnly: true, - MountPath: "/etc/opt/", - }, - }, - Env: []corev1.EnvVar{ - { - Name: "DEBUG", - Value: "false", - }, - { - Name: "MONDOO_PROCFS", - Value: "on", - }, - }, - }, - }, - Volumes: []corev1.Volume{ - { - Name: "config", - VolumeSource: corev1.VolumeSource{ - Projected: &corev1.ProjectedVolumeSource{ - Sources: []corev1.VolumeProjection{ - { - ConfigMap: &corev1.ConfigMapProjection{ - LocalObjectReference: corev1.LocalObjectReference{Name: "configMap"}, - Items: []corev1.KeyToPath{{ - Key: "inventory", - Path: "mondoo/inventory.yml", - }}, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - SuccessfulJobsHistoryLimit: ptr.To(int32(1)), - FailedJobsHistoryLimit: ptr.To(int32(1)), - }, - } - - tests := []struct { - name string - createB func(batchv1.CronJob) batchv1.CronJob - shouldBeEqual bool - }{ - { - name: "should be equal when identical", - createB: func(a batchv1.CronJob) batchv1.CronJob { - return *a.DeepCopy() - }, - shouldBeEqual: true, - }, - { - name: "should not be equal when container count differ", - createB: func(a 
batchv1.CronJob) batchv1.CronJob { - b := *a.DeepCopy() - b.Spec.JobTemplate.Spec.Template.Spec.Containers = append( - b.Spec.JobTemplate.Spec.Template.Spec.Containers, b.Spec.JobTemplate.Spec.Template.Spec.Containers[0]) - return b - }, - shouldBeEqual: false, - }, - { - name: "should not be equal when service accounts differ", - createB: func(a batchv1.CronJob) batchv1.CronJob { - b := *a.DeepCopy() - b.Spec.JobTemplate.Spec.Template.Spec.ServiceAccountName = "test" - return b - }, - shouldBeEqual: false, - }, - { - name: "should not be equal when tolerations differ", - createB: func(a batchv1.CronJob) batchv1.CronJob { - b := *a.DeepCopy() - b.Spec.JobTemplate.Spec.Template.Spec.Tolerations = append(b.Spec.JobTemplate.Spec.Template.Spec.Tolerations, b.Spec.JobTemplate.Spec.Template.Spec.Tolerations[0]) - return b - }, - shouldBeEqual: false, - }, - { - name: "should not be equal when concurrency policy differ", - createB: func(a batchv1.CronJob) batchv1.CronJob { - b := *a.DeepCopy() - b.Spec.ConcurrencyPolicy = batchv1.ForbidConcurrent - return b - }, - shouldBeEqual: false, - }, - { - name: "should not be equal when node names differ", - createB: func(a batchv1.CronJob) batchv1.CronJob { - b := *a.DeepCopy() - b.Spec.JobTemplate.Spec.Template.Spec.NodeName = "test-node" - return b - }, - shouldBeEqual: false, - }, - { - name: "should not be equal when container images differ", - createB: func(a batchv1.CronJob) batchv1.CronJob { - b := *a.DeepCopy() - b.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Image = "test" - return b - }, - shouldBeEqual: false, - }, - { - name: "should not be equal when container commands differ", - createB: func(a batchv1.CronJob) batchv1.CronJob { - b := *a.DeepCopy() - b.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Command = []string{"test"} - return b - }, - shouldBeEqual: false, - }, - { - name: "should not be equal when volume mounts differ", - createB: func(a batchv1.CronJob) batchv1.CronJob { - b := *a.DeepCopy() - 
b.Spec.JobTemplate.Spec.Template.Spec.Containers[0].VolumeMounts = make([]corev1.VolumeMount, 0) - return b - }, - shouldBeEqual: false, - }, - { - name: "should not be equal when env vars differ", - createB: func(a batchv1.CronJob) batchv1.CronJob { - b := *a.DeepCopy() - b.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Env = make([]corev1.EnvVar, 0) - return b - }, - shouldBeEqual: false, - }, - { - name: "should not be equal when container resource requirements differ", - createB: func(a batchv1.CronJob) batchv1.CronJob { - b := *a.DeepCopy() - b.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Resources.Requests[corev1.ResourceCPU] = resource.MustParse("233m") - return b - }, - shouldBeEqual: false, - }, - { - name: "should not be equal when owner references differ", - createB: func(a batchv1.CronJob) batchv1.CronJob { - b := *a.DeepCopy() - assert.NoError(t, ctrl.SetControllerReference(&a, &b, scheme.Scheme)) - return b - }, - shouldBeEqual: false, - }, - { - name: "should not be equal when container args differ", - createB: func(a batchv1.CronJob) batchv1.CronJob { - b := *a.DeepCopy() - b.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Args = []string{"some", "different", "args"} - return b - }, - shouldBeEqual: false, - }, - { - name: "should not be equal when Pod volume definition(s) differ", - createB: func(a batchv1.CronJob) batchv1.CronJob { - b := *a.DeepCopy() - b.Spec.JobTemplate.Spec.Template.Spec.Volumes[0].VolumeSource.Projected.Sources[0].ConfigMap.Items[0].Key = "differentkey" - return b - }, - shouldBeEqual: false, - }, - { - name: "should not be equal when successful jobs history limits differ", - createB: func(a batchv1.CronJob) batchv1.CronJob { - b := *a.DeepCopy() - b.Spec.SuccessfulJobsHistoryLimit = ptr.To(int32(100)) - return b - }, - shouldBeEqual: false, - }, - { - name: "should not be equal when failed jobs history limits differ", - createB: func(a batchv1.CronJob) batchv1.CronJob { - b := *a.DeepCopy() - 
b.Spec.FailedJobsHistoryLimit = ptr.To(int32(100)) - return b - }, - shouldBeEqual: false, - }, - { - name: "should not be equal when schedules differ", - createB: func(a batchv1.CronJob) batchv1.CronJob { - b := *a.DeepCopy() - b.Spec.Schedule = "1 * * * *" - return b - }, - shouldBeEqual: false, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - if test.shouldBeEqual { - assert.True(t, AreCronJobsEqual(a, test.createB(a))) - } else { - assert.False(t, AreCronJobsEqual(a, test.createB(a))) - } - }) - } -} - -func TestAreResouceRequirementsEqual(t *testing.T) { - r := corev1.ResourceRequirements{ - Limits: corev1.ResourceList{ - corev1.ResourceMemory: resource.MustParse("1G"), - corev1.ResourceCPU: resource.MustParse("500m"), - }, - - Requests: corev1.ResourceList{ - corev1.ResourceMemory: resource.MustParse("500M"), // 50% of the limit - corev1.ResourceCPU: resource.MustParse("50m"), // 10% of the limit - }, - } - - assert.True(t, AreResouceRequirementsEqual(r, r)) - assert.True(t, AreResouceRequirementsEqual(r, corev1.ResourceRequirements{ - Limits: corev1.ResourceList{ - corev1.ResourceMemory: resource.MustParse("1G"), - corev1.ResourceCPU: resource.MustParse("0.5"), // used instead of 500m - }, - Requests: corev1.ResourceList{ - corev1.ResourceMemory: resource.MustParse("500M"), // 50% of the limit - corev1.ResourceCPU: resource.MustParse("50m"), // 10% of the limit - }, - })) -} - -func TestAreEnvVarsEqual(t *testing.T) { - a := []corev1.EnvVar{ - {Name: "a", Value: "2"}, - {Name: "a1", Value: "3"}, - } - - b := []corev1.EnvVar{ - {Name: "a", Value: "2"}, - {Name: "a1", Value: "3"}, - } - assert.True(t, AreEnvVarsEqual(a, b)) -} - -func TestAreEnvVarsEqual_DifferentOrder(t *testing.T) { - a := []corev1.EnvVar{ - {Name: "a", Value: "2"}, - {Name: "a1", Value: "3"}, - } - - b := []corev1.EnvVar{ - {Name: "a1", Value: "3"}, - {Name: "a", Value: "2"}, - } - assert.True(t, AreEnvVarsEqual(a, b)) -} diff --git 
a/pkg/utils/k8s/private_registries.go b/pkg/utils/k8s/private_registries.go index f13cdcdb5..54043a446 100644 --- a/pkg/utils/k8s/private_registries.go +++ b/pkg/utils/k8s/private_registries.go @@ -112,25 +112,18 @@ func ReconcilePrivateRegistriesSecret(ctx context.Context, kubeClient client.Cli } mergedSecretName := MergedSecretName(m) - mergedSecret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: mergedSecretName, - Namespace: m.Namespace, - }, - } - - _, err = CreateOrUpdate(ctx, kubeClient, mergedSecret, m, prLogger, func() error { - mergedSecret.Labels = map[string]string{ + obj := &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: mergedSecretName, Namespace: m.Namespace}} + if _, err := CreateOrUpdate(ctx, kubeClient, obj, m, prLogger, func() error { + obj.Labels = map[string]string{ "app.kubernetes.io/managed-by": "mondoo-operator", "mondoo_cr": m.Name, } - mergedSecret.Type = corev1.SecretTypeDockerConfigJson - mergedSecret.Data = map[string][]byte{ + obj.Type = corev1.SecretTypeDockerConfigJson + obj.Data = map[string][]byte{ ".dockerconfigjson": mergedConfig, } return nil - }) - if err != nil { + }); err != nil { return "", fmt.Errorf("failed to create/update merged secret: %w", err) } diff --git a/pkg/utils/k8s/update.go b/pkg/utils/k8s/update.go deleted file mode 100644 index d0a3b5231..000000000 --- a/pkg/utils/k8s/update.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright Mondoo, Inc. 2026 -// SPDX-License-Identifier: BUSL-1.1 - -package k8s - -import ( - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" -) - -// UpdateService updates a service such that it matches a desired state. The function does not -// replace all fields but only a set of fields that we are interested at. 
-func UpdateService(current *corev1.Service, desired corev1.Service) { - current.Spec.Ports = desired.Spec.Ports - current.Spec.Selector = desired.Spec.Selector - current.Spec.Type = desired.Spec.Type - current.SetOwnerReferences(desired.GetOwnerReferences()) -} - -// UpdateDeployment updates a deployment such that it matches a desired state. The function does -// not replace all fields but only a set of fields that we are interested at. -func UpdateDeployment(current *appsv1.Deployment, desired appsv1.Deployment) { - current.Spec = desired.Spec - current.SetOwnerReferences(desired.GetOwnerReferences()) -} diff --git a/pkg/utils/k8s/update_fields.go b/pkg/utils/k8s/update_fields.go new file mode 100644 index 000000000..66d0d3aad --- /dev/null +++ b/pkg/utils/k8s/update_fields.go @@ -0,0 +1,76 @@ +// Copyright Mondoo, Inc. 2026 +// SPDX-License-Identifier: BUSL-1.1 + +package k8s + +import ( + appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" +) + +// UpdateCronJobFields copies managed fields from desired to obj, +// preserving server-set defaults on unmanaged fields like +// Suspend, Completions, Parallelism, DNSPolicy, SchedulerName, etc. 
+func UpdateCronJobFields(obj, desired *batchv1.CronJob) { + obj.Labels = desired.Labels + obj.Annotations = desired.Annotations + obj.Spec.Schedule = desired.Spec.Schedule + obj.Spec.ConcurrencyPolicy = desired.Spec.ConcurrencyPolicy + obj.Spec.SuccessfulJobsHistoryLimit = desired.Spec.SuccessfulJobsHistoryLimit + obj.Spec.FailedJobsHistoryLimit = desired.Spec.FailedJobsHistoryLimit + + obj.Spec.JobTemplate.Labels = desired.Spec.JobTemplate.Labels + obj.Spec.JobTemplate.Annotations = desired.Spec.JobTemplate.Annotations + obj.Spec.JobTemplate.Spec.BackoffLimit = desired.Spec.JobTemplate.Spec.BackoffLimit + + obj.Spec.JobTemplate.Spec.Template.Labels = desired.Spec.JobTemplate.Spec.Template.Labels + obj.Spec.JobTemplate.Spec.Template.Annotations = desired.Spec.JobTemplate.Spec.Template.Annotations + + ps := &obj.Spec.JobTemplate.Spec.Template.Spec + dps := &desired.Spec.JobTemplate.Spec.Template.Spec + ps.RestartPolicy = dps.RestartPolicy + ps.NodeName = dps.NodeName + ps.Tolerations = dps.Tolerations + ps.ServiceAccountName = dps.ServiceAccountName + ps.AutomountServiceAccountToken = dps.AutomountServiceAccountToken + ps.PriorityClassName = dps.PriorityClassName + ps.InitContainers = dps.InitContainers + ps.Containers = dps.Containers + ps.Volumes = dps.Volumes +} + +// UpdateDeploymentFields copies managed fields from desired to obj, +// preserving server-set defaults on unmanaged fields like +// RevisionHistoryLimit, ProgressDeadlineSeconds, Strategy, DNSPolicy, etc. 
+func UpdateDeploymentFields(obj, desired *appsv1.Deployment) { + obj.Labels = desired.Labels + obj.Spec.Replicas = desired.Spec.Replicas + obj.Spec.Selector = desired.Spec.Selector + obj.Spec.Template.Labels = desired.Spec.Template.Labels + obj.Spec.Template.Annotations = desired.Spec.Template.Annotations + + ps := &obj.Spec.Template.Spec + dps := &desired.Spec.Template.Spec + ps.ServiceAccountName = dps.ServiceAccountName + ps.Containers = dps.Containers + ps.Volumes = dps.Volumes +} + +// UpdateDaemonSetFields copies managed fields from desired to obj, +// preserving server-set defaults on unmanaged fields like +// UpdateStrategy, RevisionHistoryLimit, DNSPolicy, etc. +func UpdateDaemonSetFields(obj, desired *appsv1.DaemonSet) { + obj.Labels = desired.Labels + obj.Annotations = desired.Annotations + obj.Spec.Selector = desired.Spec.Selector + obj.Spec.Template.Labels = desired.Spec.Template.Labels + obj.Spec.Template.Annotations = desired.Spec.Template.Annotations + + ps := &obj.Spec.Template.Spec + dps := &desired.Spec.Template.Spec + ps.PriorityClassName = dps.PriorityClassName + ps.AutomountServiceAccountToken = dps.AutomountServiceAccountToken + ps.Tolerations = dps.Tolerations + ps.Containers = dps.Containers + ps.Volumes = dps.Volumes +} diff --git a/pkg/utils/k8s/update_test.go b/pkg/utils/k8s/update_test.go deleted file mode 100644 index 43950c253..000000000 --- a/pkg/utils/k8s/update_test.go +++ /dev/null @@ -1,268 +0,0 @@ -// Copyright Mondoo, Inc. 
2026
-// SPDX-License-Identifier: BUSL-1.1
-
-package k8s
-
-import (
-	"fmt"
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-	appsv1 "k8s.io/api/apps/v1"
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/util/intstr"
-	"k8s.io/client-go/kubernetes/scheme"
-	"k8s.io/utils/ptr"
-	ctrl "sigs.k8s.io/controller-runtime"
-)
-
-func TestUpdateService(t *testing.T) {
-	current := corev1.Service{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      "name",
-			Namespace: "ns",
-		},
-		Spec: corev1.ServiceSpec{
-			Ports: []corev1.ServicePort{
-				{
-					Port:       443,
-					Protocol:   corev1.ProtocolTCP,
-					TargetPort: intstr.FromInt(443),
-				},
-			},
-			Selector: map[string]string{"label": "value"},
-			Type:     corev1.ServiceTypeClusterIP,
-		},
-	}
-
-	tests := []struct {
-		name       string
-		desired    corev1.Service
-		validation func(*testing.T, corev1.Service, corev1.Service)
-	}{
-		{
-			name: "should update ports",
-			desired: func() corev1.Service {
-				s := *current.DeepCopy()
-				s.Spec.Ports = append(s.Spec.Ports, s.Spec.Ports[0])
-				return s
-			}(),
-			validation: func(t *testing.T, a, b corev1.Service) {
-				assert.Equal(t, a.Spec.Ports, b.Spec.Ports)
-			},
-		},
-		{
-			name: "should update selector",
-			desired: func() corev1.Service {
-				s := *current.DeepCopy()
-				s.Spec.Selector["key"] = "value"
-				return s
-			}(),
-			validation: func(t *testing.T, a, b corev1.Service) {
-				assert.Equal(t, a.Spec.Selector, b.Spec.Selector)
-			},
-		},
-		{
-			name: "should update type",
-			desired: func() corev1.Service {
-				s := *current.DeepCopy()
-				s.Spec.Type = corev1.ServiceTypeLoadBalancer
-				return s
-			}(),
-			validation: func(t *testing.T, a, b corev1.Service) {
-				assert.Equal(t, a.Spec.Type, b.Spec.Type)
-			},
-		},
-		{
-			name: "should update owner references",
-			desired: func() corev1.Service {
-				s := current.DeepCopy()
-				assert.NoError(t, ctrl.SetControllerReference(&current, s, scheme.Scheme))
-				return *s
-			}(),
-			validation: func(t *testing.T, a, b corev1.Service) {
-				assert.Equal(t, 
a.GetOwnerReferences(), b.GetOwnerReferences()) - }, - }, - { - name: "should not update labels", - desired: func() corev1.Service { - s := *current.DeepCopy() - metav1.SetMetaDataLabel(&s.ObjectMeta, "key", "value") - return s - }(), - validation: func(t *testing.T, a, b corev1.Service) { - assert.Equal(t, current.Labels, a.Labels) - }, - }, - { - name: "should not update annotations", - desired: func() corev1.Service { - s := *current.DeepCopy() - metav1.SetMetaDataAnnotation(&s.ObjectMeta, "key", "value") - return s - }(), - validation: func(t *testing.T, a, b corev1.Service) { - assert.Equal(t, current.Annotations, a.Annotations) - }, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - c := current.DeepCopy() - UpdateService(c, test.desired) - test.validation(t, *c, test.desired) - }) - } -} - -func TestUpdateDeployment(t *testing.T) { - labels := map[string]string{"label": "value"} - current := appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "deployment", - Namespace: "ns", - Labels: labels, - }, - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: labels, - }, - Replicas: ptr.To(int32(1)), - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: labels, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{{ - Image: "test-image:latest", - Name: "mondoo-client", - Command: []string{"mondoo", "serve", "--api", "--config", "/etc/opt/mondoo/mondoo.yml"}, - Resources: DefaultCnspecResources, - ReadinessProbe: &corev1.Probe{ - ProbeHandler: corev1.ProbeHandler{ - HTTPGet: &corev1.HTTPGetAction{ - Path: "/Health/Check", - Port: intstr.FromInt(443), - }, - }, - InitialDelaySeconds: 5, - PeriodSeconds: 300, - TimeoutSeconds: 5, - }, - StartupProbe: &corev1.Probe{ - ProbeHandler: corev1.ProbeHandler{ - HTTPGet: &corev1.HTTPGetAction{ - Path: "/Health/Check", - Port: intstr.FromInt(443), - }, - }, - InitialDelaySeconds: 5, - PeriodSeconds: 5, - FailureThreshold: 5, - 
},
-						VolumeMounts: []corev1.VolumeMount{
-							{
-								Name:      "config",
-								ReadOnly:  true,
-								MountPath: "/etc/opt/",
-							},
-						},
-						Ports: []corev1.ContainerPort{
-							{ContainerPort: 443, Protocol: corev1.ProtocolTCP},
-						},
-						Env: []corev1.EnvVar{
-							{Name: "DEBUG", Value: "false"},
-							{Name: "MONDOO_PROCFS", Value: "on"},
-							{Name: "PORT", Value: fmt.Sprintf("%d", 443)},
-						},
-					}},
-					ServiceAccountName: "service-account",
-					Volumes: []corev1.Volume{
-						{
-							Name: "config",
-							VolumeSource: corev1.VolumeSource{
-								Projected: &corev1.ProjectedVolumeSource{
-									Sources: []corev1.VolumeProjection{
-										{
-											Secret: &corev1.SecretProjection{
-												LocalObjectReference: corev1.LocalObjectReference{
-													Name: "secret",
-												},
-												Items: []corev1.KeyToPath{{
-													Key:  "config",
-													Path: "mondoo/mondoo.yml",
-												}},
-											},
-										},
-									},
-								},
-							},
-						},
-					},
-				},
-			},
-		},
-	}
-
-	tests := []struct {
-		name       string
-		desired    appsv1.Deployment
-		validation func(*testing.T, appsv1.Deployment, appsv1.Deployment)
-	}{
-		{
-			name: "should update spec",
-			desired: func() appsv1.Deployment {
-				d := *current.DeepCopy()
-				d.Spec = appsv1.DeploymentSpec{}
-				return d
-			}(),
-			validation: func(t *testing.T, a, b appsv1.Deployment) {
-				assert.Equal(t, a.Spec, b.Spec)
-			},
-		},
-		{
-			name: "should update owner references",
-			desired: func() appsv1.Deployment {
-				d := current.DeepCopy()
-				assert.NoError(t, ctrl.SetControllerReference(&current, d, scheme.Scheme))
-				return *d
-			}(),
-			validation: func(t *testing.T, a, b appsv1.Deployment) {
-				assert.Equal(t, a.GetOwnerReferences(), b.GetOwnerReferences())
-			},
-		},
-		{
-			name: "should not update labels",
-			desired: func() appsv1.Deployment {
-				s := *current.DeepCopy()
-				metav1.SetMetaDataLabel(&s.ObjectMeta, "key", "value")
-				return s
-			}(),
-			validation: func(t *testing.T, a, b appsv1.Deployment) {
-				assert.Equal(t, current.Labels, a.Labels)
-			},
-		},
-		{
-			name: "should not update annotations",
-			desired: func() appsv1.Deployment {
-				s := *current.DeepCopy()
-				
metav1.SetMetaDataAnnotation(&s.ObjectMeta, "key", "value") - return s - }(), - validation: func(t *testing.T, a, b appsv1.Deployment) { - assert.Equal(t, current.Annotations, a.Annotations) - }, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - c := current.DeepCopy() - UpdateDeployment(c, test.desired) - test.validation(t, *c, test.desired) - }) - } -} diff --git a/tests/framework/utils/audit_config.go b/tests/framework/utils/audit_config.go index 9e890eef8..75c7f32a2 100644 --- a/tests/framework/utils/audit_config.go +++ b/tests/framework/utils/audit_config.go @@ -40,6 +40,10 @@ func DefaultAuditConfigMinimal(ns string, workloads, containers, nodes bool) mon startScan := now.Add(time.Minute).Add(time.Second * 15) schedule := fmt.Sprintf("%d * * * *", startScan.Minute()) auditConfig := mondoov2.MondooAuditConfig{ + TypeMeta: v1.TypeMeta{ + APIVersion: "k8s.mondoo.com/v1alpha2", + Kind: "MondooAuditConfig", + }, ObjectMeta: v1.ObjectMeta{ Name: "mondoo-client", Namespace: ns, @@ -77,6 +81,10 @@ func DefaultAuditConfigMinimal(ns string, workloads, containers, nodes bool) mon // make sure a tests passes (e.g. setting the correct secret name). func DefaultAuditConfig(ns string, workloads, containers, nodes bool) mondoov2.MondooAuditConfig { return mondoov2.MondooAuditConfig{ + TypeMeta: v1.TypeMeta{ + APIVersion: "k8s.mondoo.com/v1alpha2", + Kind: "MondooAuditConfig", + }, ObjectMeta: v1.ObjectMeta{ Name: "mondoo-client", Namespace: ns,