diff --git a/tests/integration/audit_config_base_suite.go b/tests/integration/audit_config_base_suite.go index 4b966c3ad..45f58e79c 100644 --- a/tests/integration/audit_config_base_suite.go +++ b/tests/integration/audit_config_base_suite.go @@ -4,12 +4,8 @@ package integration import ( - "bytes" "context" - "crypto/tls" "fmt" - "net/http" - "os/exec" "strings" "time" @@ -17,14 +13,10 @@ import ( "github.com/stretchr/testify/suite" "go.uber.org/zap" - webhooksv1 "k8s.io/api/admissionregistration/v1" - appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" - "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" mondoov2 "go.mondoo.com/mondoo-operator/api/v1alpha2" @@ -45,13 +37,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log" ) -const ( - webhookLocalPort = 18443 - maxRetriesWebhookConnect = 120 - maxRetriesProcessGone = 5 - maxRetriesCreate = 5 -) - type AuditConfigBaseSuite struct { suite.Suite ctx context.Context @@ -245,10 +230,10 @@ func (s *AuditConfigBaseSuite) testMondooAuditConfigKubernetesResources(auditCon func (s *AuditConfigBaseSuite) testMondooAuditConfigContainers(auditConfig mondoov2.MondooAuditConfig) { nginxLabel := "app.kubernetes.io/name=nginx" - _, err := s.testCluster.K8sHelper.Kubectl("run", "-n", "default", "nginx", "--image", "nginx", "-l", nginxLabel) + _, err := s.testCluster.K8sHelper.Kubectl("run", "-n", "default", "nginx", "--image", "ghcr.io/nginx/nginx-unprivileged", "-l", nginxLabel) s.Require().NoError(err, "Failed to create nginx pod.") redisLabel := "app.kubernetes.io/name=redis" - _, err = s.testCluster.K8sHelper.Kubectl("run", "-n", "default", "redis", "--image", "redis", "-l", redisLabel) + _, err = s.testCluster.K8sHelper.Kubectl("run", "-n", "default", "redis", "--image", "quay.io/opstree/redis", "-l", redisLabel) s.Require().NoError(err, "Failed to create redis pod.") s.True(s.testCluster.K8sHelper.IsPodReady(nginxLabel, "default"), "nginx pod is not ready") @@ -446,297 +431,6 @@ func (s *AuditConfigBaseSuite) testMondooAuditConfigNodesCronjobs(auditConfig mo s.Equal("ACTIVE", status) } -func (s *AuditConfigBaseSuite) testMondooAuditConfigNodesDaemonSets(auditConfig mondoov2.MondooAuditConfig) { - s.auditConfig = auditConfig - - // Disable container image resolution to be able to run the k8s resources scan CronJob with a local image. 
- cleanup := s.disableContainerImageResolution() - defer cleanup() - - zap.S().Info("Create an audit config that enables only nodes scanning.") - s.NoErrorf( - s.testCluster.K8sHelper.Clientset.Create(s.ctx, &auditConfig), - "Failed to create Mondoo audit config.") - - s.Require().True(s.testCluster.K8sHelper.WaitUntilMondooClientSecretExists(s.ctx, s.auditConfig.Namespace), "Mondoo SA not created") - - zap.S().Info("Verify the nodes scanning daemonset is created.") - - nodeList := &corev1.NodeList{} - s.NoError(s.testCluster.K8sHelper.Clientset.List(s.ctx, nodeList)) - - // Verify DaemonSet is created - ds := &appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{Name: nodes.DaemonSetName(auditConfig.Name), Namespace: auditConfig.Namespace}} - err := s.testCluster.K8sHelper.ExecuteWithRetries(func() (bool, error) { - if err := s.testCluster.K8sHelper.Clientset.Get(s.ctx, client.ObjectKeyFromObject(ds), ds); err != nil { - return false, nil - } - return true, nil - }) - s.NoError(err, "DaemonSet was not created.") - - // Verify the garbage collect cron job - gcCronJobs := &batchv1.CronJobList{} - gcCronJobLabels := nodes.GarbageCollectCronJobLabels(auditConfig) - - // List only the CronJobs in the namespace of the MondooAuditConfig and only the ones that exactly match our labels. - gcListOpts := &client.ListOptions{Namespace: auditConfig.Namespace, LabelSelector: labels.SelectorFromSet(gcCronJobLabels)} - - // Verify the amount of CronJobs created is 1 - err = s.testCluster.K8sHelper.ExecuteWithRetries(func() (bool, error) { - s.NoError(s.testCluster.K8sHelper.Clientset.List(s.ctx, gcCronJobs, gcListOpts)) - if len(gcCronJobs.Items) == 1 { - return true, nil - } - return false, nil - }) - s.NoErrorf( - err, - "The amount of garbage collect CronJobs is not 1 expected: 1; actual: %d", len(gcCronJobs.Items)) - - err = s.testCluster.K8sHelper.CheckForReconciledOperatorVersion(&auditConfig, version.Version) - s.NoErrorf(err, "Couldn't find expected version in MondooAuditConfig.Status.ReconciledByOperatorVersion") - - // Sleep for a while since the scan happens only in about 1min since the deployment has started. - time.Sleep(40 * time.Second) - - // Verify nodes are sent upstream and have scores. - nodes := &corev1.NodeList{} - s.NoError(s.testCluster.K8sHelper.Clientset.List(s.ctx, nodes)) - - nodeNames := make([]string, 0, len(nodes.Items)) - for _, node := range nodes.Items { - nodeNames = append(nodeNames, node.Name) - } - - // Retry until we see the assets in the space. We have no other way of checking whether the scan happened, - // since the deployment provides us with no feedback. - err = s.testCluster.K8sHelper.ExecuteWithRetries(func() (bool, error) { - assets, err := s.spaceClient.ListAssetsWithScores(s.ctx) - if err != nil { - return false, err - } - if len(assets) == len(nodes.Items) { - for _, asset := range assets { - if asset.Grade == "U" { - return false, nil - } - } - return true, nil - } - return false, nil - }) - s.Require().NoError(err) - - // The number of assets from upstream is limited by paganiation. - // In case we have more than 100 workloads, we need to call this multiple times, with different page numbers. 
- assets, err := s.spaceClient.ListAssetsWithScores(s.ctx) - s.NoError(err, "Failed to list assets") - assetNames := utils.AssetNames(assets) - - s.ElementsMatch(assetNames, nodeNames, "Node names do not match") - s.AssetsNotUnscored(assets) - - status, err := s.integration.GetStatus(s.ctx) - s.NoError(err, "Failed to get status") - s.Equal("ACTIVE", status) - - // Verify that the node scanning daemonset isn't constantly updating - s.NoError(s.testCluster.K8sHelper.Clientset.Get(s.ctx, client.ObjectKeyFromObject(ds), ds)) - s.Less(ds.Generation, int64(10)) -} - -func (s *AuditConfigBaseSuite) testMondooAuditConfigAdmission(auditConfig mondoov2.MondooAuditConfig) { - // Disable imageResolution for the webhook image to be runnable. - // Otherwise, mondoo-operator will try to resolve the locally-built mondoo-operator container - // image, and fail because we haven't pushed this image publicly. - cleanup := s.disableContainerImageResolution() - defer cleanup() - s.verifyAdmissionWorking(auditConfig) -} - -func (s *AuditConfigBaseSuite) verifyAdmissionWorking(auditConfig mondoov2.MondooAuditConfig) { - s.auditConfig = auditConfig - // Generate certificates manually - caCert, err := s.manuallyCreateCertificates() - - // Don't bother with further webhook tests if we couldnt' save the certificates - s.Require().NoErrorf(err, "Error while generating/saving certificates for webhook service") - - // Enable webhook - zap.S().Info("Create an audit config that enables only admission control.") - s.NoErrorf( - s.testCluster.K8sHelper.Clientset.Create(s.ctx, &auditConfig), - "Failed to create Mondoo audit config.") - - s.Require().True(s.testCluster.K8sHelper.WaitUntilMondooClientSecretExists(s.ctx, s.auditConfig.Namespace), "Mondoo SA not created") - - // Wait for Ready Pod - zap.S().Info("Waiting for webhook Pod to become ready.") - webhookLabelsString := s.getWebhookLabelsString() - s.Truef( - s.testCluster.K8sHelper.IsPodReady(webhookLabelsString, auditConfig.Namespace), - "Mondoo webhook Pod is not in a Ready state.") - zap.S().Info("Webhook Pod is ready.") - - // Verify scan API deployment and service - s.validateScanApiDeployment(auditConfig) - - // Check number of Pods depending on mode - webhookListOpts, err := s.getWebhookListOps() - s.NoError(err) - deployments := &appsv1.DeploymentList{} - s.Require().NoError(s.testCluster.K8sHelper.Clientset.List(s.ctx, deployments, webhookListOpts)) - s.Require().Lenf(deployments.Items, 1, "Deployments count for webhook should be precisely one") - numReplicas := 1 - if auditConfig.Spec.Admission.Mode == mondoov2.Enforcing { - numReplicas = 2 - } - if auditConfig.Spec.Admission.Replicas != nil { - numReplicas = int(*auditConfig.Spec.Admission.Replicas) - } - failMessage := fmt.Sprintf("Pods count for webhook should be precisely %d because of mode and replicas", numReplicas) - s.Equalf(numReplicas, int(*deployments.Items[0].Spec.Replicas), failMessage) - - s.verifyWebhookAndStart(webhookListOpts, caCert) - - err = s.testCluster.K8sHelper.CheckForDegradedCondition(&auditConfig, mondoov2.AdmissionDegraded, corev1.ConditionFalse, "") - s.NoErrorf(err, "Admission shouldn't be in degraded state") - - err = s.testCluster.K8sHelper.CheckForReconciledOperatorVersion(&auditConfig, version.Version) - s.NoErrorf(err, "Couldn't find expected version in MondooAuditConfig.Status.ReconciledByOperatorVersion") - - zap.S().Info("Waiting for Webhook to accept connections (max 120s).") - err = s.checkWebhookAvailability() - s.NoErrorf(err, "Couldn't access Webhook via 
port-forward") - zap.S().Info("Webhook should be working by now.") - s.checkDeployments(&auditConfig) - - status, err := s.integration.GetStatus(s.ctx) - s.NoError(err, "Failed to get status") - s.Equal("ACTIVE", status) -} - -func (s *AuditConfigBaseSuite) testMondooAuditConfigAdmissionScaleDownScanApi(auditConfig mondoov2.MondooAuditConfig) { - // Disable imageResolution for the webhook image to be runnable. - // Otherwise, mondoo-operator will try to resolve the locally-built mondoo-operator container - // image, and fail because we haven't pushed this image publicly. - cleanup := s.disableContainerImageResolution() - defer cleanup() - - // first verify admission is working - s.verifyAdmissionWorking(auditConfig) - - // now check what happens when it is degraded - listOpts, err := utils.LabelSelectorListOptions(utils.LabelsToLabelSelector(scanapi.DeploymentLabels(auditConfig))) - s.NoError(err) - listOpts.Namespace = auditConfig.Namespace - - podList := &corev1.PodList{} - err = s.testCluster.K8sHelper.Clientset.List(s.ctx, podList, listOpts) - s.NoErrorf(err, "Scan API Pod should be present") - - err = s.testCluster.K8sHelper.Clientset.Delete(s.ctx, &podList.Items[0], &client.DeleteOptions{}) - s.NoErrorf(err, "Scan API Pod could not be deleted") - - err = s.testCluster.K8sHelper.WaitForResourceDeletion(&podList.Items[0]) - s.NoErrorf(err, "Scan API Pod did not get deleted") - - zap.S().Info("MondooAuditConfig condition should be updated to degraded.") - err = s.testCluster.K8sHelper.CheckForDegradedCondition(&auditConfig, mondoov2.AdmissionDegraded, corev1.ConditionTrue, "") - s.NoErrorf(err, "Admission should be in degraded state") - - // try to change deployment => should fail - deployments := &appsv1.DeploymentList{} - webhookListOpts, err := s.getWebhookListOps() - s.NoError(err) - s.NoError(s.testCluster.K8sHelper.Clientset.List(s.ctx, deployments, webhookListOpts)) - - s.Equalf(1, len(deployments.Items), "Deployments count for webhook should be precisely one") - - deployments.Items[0].Labels["testLabel"] = "testValue" - - s.Errorf( - s.testCluster.K8sHelper.Clientset.Update(s.ctx, &deployments.Items[0]), - "Expected failed updated of Deployment because Scan API is unreachable") -} - -func (s *AuditConfigBaseSuite) testMondooAuditConfigAdmissionMissingSA(auditConfig mondoov2.MondooAuditConfig) { - s.auditConfig = auditConfig - // Disable imageResolution for the webhook image to be runnable. - // Otherwise, mondoo-operator will try to resolve the locally-built mondoo-operator container - // image, and fail because we haven't pushed this image publicly. - operatorConfig := &mondoov2.MondooOperatorConfig{ - ObjectMeta: metav1.ObjectMeta{ - Name: mondoov2.MondooOperatorConfigName, - }, - Spec: mondoov2.MondooOperatorConfigSpec{ - SkipContainerResolution: true, - }, - } - s.Require().NoErrorf( - s.testCluster.K8sHelper.Clientset.Create(s.ctx, operatorConfig), "Failed to create MondooOperatorConfig") - - // Enable webhook - zap.S().Info("Create an audit config that enables only admission control.") - s.NoErrorf( - s.testCluster.K8sHelper.Clientset.Create(s.ctx, &auditConfig), - "Failed to create Mondoo audit config.") - - s.Require().True(s.testCluster.K8sHelper.WaitUntilMondooClientSecretExists(s.ctx, s.auditConfig.Namespace), "Mondoo SA not created") - - // Pod should not start, because of missing service account - - // do not wait until IsPodReady timeout, pod will not be present - // something like eventually from ginko would be nice, first iteration just with a sleep. 
-	// just a grace period
-	time.Sleep(10 * time.Second)
-	listOpts, err := utils.LabelSelectorListOptions(utils.LabelsToLabelSelector(scanapi.DeploymentLabels(auditConfig)))
-	s.NoError(err)
-	listOpts.Namespace = auditConfig.Namespace
-	podList := &corev1.PodList{}
-
-	err = s.testCluster.K8sHelper.Clientset.List(s.ctx, podList, listOpts)
-	s.NoErrorf(err, "Couldn't list scan API pod.")
-	s.Equalf(0, len(podList.Items), "No ScanAPI Pod should be present")
-
-	err = s.testCluster.K8sHelper.ExecuteWithRetries(func() (bool, error) {
-		// Check for the ScanAPI Deployment to be present.
-		deployments := &appsv1.DeploymentList{}
-		if err := s.testCluster.K8sHelper.Clientset.List(s.ctx, deployments, listOpts); err != nil {
-			return false, nil
-		}
-
-		return len(deployments.Items) == 1, nil
-	})
-	s.NoErrorf(err, "Deployments count for ScanAPI should be precisely one")
-
-	err = s.testCluster.K8sHelper.ExecuteWithRetries(func() (bool, error) {
-		// Condition of MondooAuditConfig should be updated
-		foundMondooAuditConfig, err := s.testCluster.K8sHelper.GetMondooAuditConfigFromCluster(auditConfig.Name, auditConfig.Namespace)
-		if err != nil {
-			zap.S().Errorf("Failed to get mondoo audit config: %s", err.Error())
-			return false, err
-		}
-		condition, err := s.testCluster.K8sHelper.GetMondooAuditConfigConditionByType(foundMondooAuditConfig, mondoov2.ScanAPIDegraded)
-		if err != nil {
-			zap.S().Errorf("Failed to get condition: %s", err.Error())
-			return false, nil // The condition might not exist yet. This doesn't mean we should stop trying.
-		}
-		zap.S().Infof("Condition message: %s", condition.Message)
-		if strings.Contains(condition.Message, "error looking up service account") {
-			return true, nil
-		}
-		return false, nil
-	})
-
-	s.NoErrorf(err, "Couldn't find condition message about missing service account")
-
-	// The SA is missing, but the actual reconcile loop gets finished. The SA is outside of the operator's scope.
-	err = s.testCluster.K8sHelper.CheckForReconciledOperatorVersion(&auditConfig, version.Version)
-	s.NoErrorf(err, "Couldn't find expected version in MondooAuditConfig.Status.ReconciledByOperatorVersion")
-}
-
 func (s *AuditConfigBaseSuite) testMondooAuditConfigAllDisabled(auditConfig mondoov2.MondooAuditConfig) {
 	s.auditConfig = auditConfig
 	// Disable imageResolution for the webhook image to be runnable.
@@ -767,56 +461,6 @@ func (s *AuditConfigBaseSuite) testMondooAuditConfigAllDisabled(auditConfig mond
 	s.Equal("ACTIVE", status)
 }
 
-func (s *AuditConfigBaseSuite) testUpgradePreviousReleaseToLatest(auditConfig mondoov2.MondooAuditConfig) {
-	s.auditConfig = auditConfig
-
-	serviceDNSNames := []string{
-		// DNS names will take the form of ServiceName.ServiceNamespace.svc and .svc.cluster.local
-		fmt.Sprintf("%s-webhook-service.%s.svc", auditConfig.Name, auditConfig.Namespace),
-		fmt.Sprintf("%s-webhook-service.%s.svc.cluster.local", auditConfig.Name, auditConfig.Namespace),
-	}
-	secretName := mondooadmission.GetTLSCertificatesSecretName(auditConfig.Name)
-	_, err := s.testCluster.GenerateServiceCerts(&auditConfig, secretName, serviceDNSNames)
-
-	// Don't bother with further webhook tests if we couldn't save the certificates
-	s.Require().NoErrorf(err, "Error while generating/saving certificates for webhook service")
-
-	// Disable imageResolution for the webhook image to be runnable.
-	// Otherwise, mondoo-operator will try to resolve the locally-built mondoo-operator container
-	// image, and fail because we haven't pushed this image publicly.
- cleanup := s.disableContainerImageResolution() - defer cleanup() - - s.NoErrorf( - s.testCluster.K8sHelper.Clientset.Create(s.ctx, &auditConfig), - "Failed to create Mondoo audit config.") - - s.Require().True(s.testCluster.K8sHelper.WaitUntilMondooClientSecretExists(s.ctx, s.auditConfig.Namespace), "Mondoo SA not created") - - // Verify scan API deployment and service - s.validateScanApiDeployment(auditConfig) - - err = s.testCluster.K8sHelper.CheckForDegradedCondition(&auditConfig, mondoov2.AdmissionDegraded, corev1.ConditionFalse, "") - s.Require().NoErrorf(err, "Admission shouldn't be in degraded state") - - err = s.testCluster.K8sHelper.CheckForDegradedCondition(&auditConfig, mondoov2.NodeScanningDegraded, corev1.ConditionFalse, "") - s.Require().NoErrorf(err, "Node scanning shouldn't be in degraded state") - - err = s.testCluster.K8sHelper.CheckForDegradedCondition(&auditConfig, mondoov2.K8sResourcesScanningDegraded, corev1.ConditionFalse, "") - s.Require().NoErrorf(err, "k8s resource scanning shouldn't be in degraded state") - - // everything is fine, now upgrade to current branch/release - - branchInstaller := installer.NewMondooInstaller(installer.NewDefaultSettings().SetToken(s.integration.Token()), s.T) - err = branchInstaller.InstallOperator() - s.NoErrorf(err, "Failed updating the latest operator release to this branch") - - s.validateScanApiDeployment(auditConfig) - - err = s.testCluster.K8sHelper.CheckForReconciledOperatorVersion(&auditConfig, version.Version) - s.NoErrorf(err, "Couldn't find release version in MondooAuditConfig.Status.ReconciledByOperatorVersion") -} - func (s *AuditConfigBaseSuite) validateScanApiDeployment(auditConfig mondoov2.MondooAuditConfig) { scanApiLabelsString := utils.LabelsToLabelSelector(scanapi.DeploymentLabels(auditConfig)) zap.S().Info("Waiting for scan API Pod to become ready.") @@ -872,356 +516,6 @@ func (s *AuditConfigBaseSuite) disableContainerImageResolution() func() { } } -func (s *AuditConfigBaseSuite) getPassingDeployment() *appsv1.Deployment { - labels := map[string]string{ - "admission-result": "pass", - } - return &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "passing-deployment", - Namespace: "mondoo-operator", - Labels: labels, - }, - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: labels, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: labels, - }, - Spec: corev1.PodSpec{ - AutomountServiceAccountToken: ptr.To(false), - Containers: []corev1.Container{ - { - Name: "ubuntu", - Image: "ubuntu:20.04", - Command: []string{"/bin/sh", "-c"}, - Args: []string{"exit 0"}, - ImagePullPolicy: corev1.PullAlways, - Resources: corev1.ResourceRequirements{ - Limits: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("100m"), - corev1.ResourceMemory: resource.MustParse("100Mi"), - }, - Requests: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("100m"), - corev1.ResourceMemory: resource.MustParse("100Mi"), - }, - }, - SecurityContext: &corev1.SecurityContext{ - Capabilities: &corev1.Capabilities{ - Drop: []corev1.Capability{"NET_RAW"}, - }, - RunAsNonRoot: ptr.To(true), - RunAsUser: ptr.To(int64(1000)), - ReadOnlyRootFilesystem: ptr.To(true), - AllowPrivilegeEscalation: ptr.To(false), - Privileged: ptr.To(false), - }, - ReadinessProbe: &corev1.Probe{ - ProbeHandler: corev1.ProbeHandler{ - Exec: &corev1.ExecAction{ - Command: []string{"/bin/sh", "-c", "exit 0"}, - }, - }, - }, - LivenessProbe: &corev1.Probe{ - ProbeHandler: 
corev1.ProbeHandler{ - Exec: &corev1.ExecAction{ - Command: []string{"/bin/sh", "-c", "exit 0"}, - }, - }, - }, - }, - }, - }, - }, - }, - } -} - -func (s *AuditConfigBaseSuite) getFailingDeployment() *appsv1.Deployment { - labels := map[string]string{ - "admission-result": "fail", - } - deployment := s.getPassingDeployment().DeepCopy() - deployment.Name = "failing-deployment" - deployment.Labels = labels - deployment.Spec.Template.Spec.Containers[0].SecurityContext = &corev1.SecurityContext{ - Privileged: ptr.To(true), - } - return deployment -} - -func (s *AuditConfigBaseSuite) checkDeployments(auditConfig *mondoov2.MondooAuditConfig) { - passingDeployment := s.getPassingDeployment() - failingDeployment := s.getFailingDeployment() - - // retry because sometimes we see these errors, although all previous checks reached the endpoint: - // Internal error occurred: failed calling webhook "policy.k8s.mondoo.com": - // failed to call webhook: - // Post "https://mondoo-client-webhook-service.mondoo-operator.svc:443/validate-k8s-mondoo-com?timeout=10s": EOF - zap.S().Infof("Create a Deployment which should pass. (max. %d retries)", maxRetriesCreate) - var err error - for i := 0; i < maxRetriesCreate; i++ { - err = s.testCluster.K8sHelper.Clientset.Create(s.ctx, passingDeployment) - if err == nil { - break - } - time.Sleep(2 * time.Second) - } - s.NoErrorf(err, "Failed to create Deployment which should pass.") - - time.Sleep(5 * time.Second) - cicdProject, err := s.integration.GetCiCdProject(s.ctx) - s.Require().NoErrorf(err, "Failed to get CICD project") - - assets, err := cicdProject.ListAssets(s.ctx) - s.Require().NoErrorf(err, "Failed to list CICD assets") - - assetNames := utils.CiCdJobNames(assets) - s.Contains(assetNames, fmt.Sprintf("%s/%s", passingDeployment.Namespace, passingDeployment.Name)) - s.CiCdJobNotUnscored(assets) - - zap.S().Info("Create a Deployment which should be denied in enforcing mode.") - err = s.testCluster.K8sHelper.Clientset.Create(s.ctx, failingDeployment) - - if auditConfig.Spec.Admission.Mode == mondoov2.Enforcing { - s.Errorf(err, "Created Deployment which should have been denied.") - } else { - s.NoErrorf(err, "Failed creating a Deployment in permissive mode.") - } - - assets, err = cicdProject.ListAssets(s.ctx) - s.Require().NoErrorf(err, "Failed to list CICD assets") - - assetNames = utils.CiCdJobNames(assets) - s.Contains(assetNames, fmt.Sprintf("%s/%s", failingDeployment.Namespace, failingDeployment.Name)) - s.CiCdJobNotUnscored(assets) - - s.NoErrorf(s.testCluster.K8sHelper.DeleteResourceIfExists(passingDeployment), "Failed to delete passingDeployment") - s.NoErrorf(s.testCluster.K8sHelper.DeleteResourceIfExists(failingDeployment), "Failed to delete failingDeployment") - s.NoErrorf(s.testCluster.K8sHelper.WaitForResourceDeletion(passingDeployment), "Error waiting for deleteion of passingDeployment") - s.NoErrorf(s.testCluster.K8sHelper.WaitForResourceDeletion(failingDeployment), "Error waiting for deleteion of failingDeployment") -} - -func (s *AuditConfigBaseSuite) getWebhookLabelsString() string { - webhookDeploymentLabels := mondooadmission.WebhookDeploymentLabels() - - keyValuesWithEquals := []string{} - for key, val := range webhookDeploymentLabels { - keyValuesWithEquals = append(keyValuesWithEquals, key+"="+val) - } - webhookLabelsString := strings.Join(keyValuesWithEquals, ",") - return webhookLabelsString -} - -func (s *AuditConfigBaseSuite) getWebhookListOps() (*client.ListOptions, error) { - webhookListOpts, err := 
utils.LabelSelectorListOptions(s.getWebhookLabelsString()) - if err != nil { - return webhookListOpts, err - } - webhookListOpts.Namespace = s.auditConfig.Namespace - return webhookListOpts, nil -} - -func (s *AuditConfigBaseSuite) manuallyCreateCertificates() (*bytes.Buffer, error) { - serviceDNSNames := []string{ - // DNS names will take the form of ServiceName.ServiceNamespace.svc and .svc.cluster.local - fmt.Sprintf("%s-webhook-service.%s.svc", s.auditConfig.Name, s.auditConfig.Namespace), - fmt.Sprintf("%s-webhook-service.%s.svc.cluster.local", s.auditConfig.Name, s.auditConfig.Namespace), - } - secretName := mondooadmission.GetTLSCertificatesSecretName(s.auditConfig.Name) - return s.testCluster.GenerateServiceCerts(&s.auditConfig, secretName, serviceDNSNames) -} - -// verifyWebhookAndStart Checks the ValidatingWebhookConfiguration, adds the CA data and waits for webhook to start working -func (s *AuditConfigBaseSuite) verifyWebhookAndStart(webhookListOpts *client.ListOptions, caCert *bytes.Buffer) { - vwc := &webhooksv1.ValidatingWebhookConfiguration{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s-%s-mondoo", s.auditConfig.Namespace, s.auditConfig.Name), - }, - } - s.NoErrorf(s.testCluster.K8sHelper.ExecuteWithRetries(func() (bool, error) { - if err := s.testCluster.K8sHelper.Clientset.Get(s.ctx, client.ObjectKeyFromObject(vwc), vwc); err == nil { - return true, nil - } - return false, nil - }), "Failed to retrieve ValidatingWebhookConfiguration") - - if s.auditConfig.Spec.Admission.Mode == mondoov2.Enforcing { - s.Equalf(*vwc.Webhooks[0].FailurePolicy, webhooksv1.Fail, "Webhook failurePolicy should be 'Fail' because of enforcing mode") - } else { - s.Equalf(*vwc.Webhooks[0].FailurePolicy, webhooksv1.Ignore, "Webhook failurePolicy should be 'Ignore' because of permissive mode") - } - - if *vwc.Webhooks[0].FailurePolicy == webhooksv1.Fail { - - deployments := &appsv1.DeploymentList{} - err := s.testCluster.K8sHelper.ExecuteWithRetries(func() (bool, error) { - if err := s.testCluster.K8sHelper.Clientset.List(s.ctx, deployments, webhookListOpts); err == nil { - if len(deployments.Items) == 1 { - return true, nil - } - } - return false, nil - }) - s.NoError(err) - s.Equalf(1, len(deployments.Items), "Deployments count for webhook should be precisely one") - - deployments.Items[0].Labels["testLabel"] = "testValue" - - s.Errorf( - s.testCluster.K8sHelper.Clientset.Update(s.ctx, &deployments.Items[0]), - "Expected failed updated of Deployment because certificate setup is incomplete") - - } - - for i := range vwc.Webhooks { - vwc.Webhooks[i].ClientConfig.CABundle = caCert.Bytes() - } - - // Set the failure policy to ignore always so we can ensure that we can restart the webhook. After the webhook has restarted, - // we can rollback to the original value. This workaround is needed to enforce that the webhook has successfully reloaded the - // CA secret which we set after it is created. If we do not force it to restart, there is no reliable way of knowing when it - // has the correct CA data mounted and the tests become flaky. - currentFailurePolicy := *vwc.Webhooks[0].FailurePolicy - *vwc.Webhooks[0].FailurePolicy = webhooksv1.Ignore - - zap.S().Info("Update the webhook with the CA data.") - s.NoErrorf(s.testCluster.K8sHelper.Clientset.Update(s.ctx, vwc), "Failed to add CA data to Webhook") - - // Restart the scan API pods to ensure the cert secret is reloaded. 
- webhookLabels := mondooadmission.WebhookDeploymentLabels() - - webhookPods := &corev1.PodList{} - s.NoError(s.testCluster.K8sHelper.Clientset.List(s.ctx, webhookPods, webhookListOpts), "Failed to list webhook pods") - - zap.S().Info("Restart the webhook pods such that it is certain the CA secret has been reloaded.") - for _, p := range webhookPods.Items { - s.NoError(s.testCluster.K8sHelper.Clientset.Delete(s.ctx, &p), "Failed to delete webhook pod") - } - - time.Sleep(2 * time.Second) - - s.Truef( - s.testCluster.K8sHelper.IsPodReady(utils.LabelsToLabelSelector(webhookLabels), s.auditConfig.Namespace), - "Mondoo webhook Pod is not in a Ready state.") - zap.S().Info("Webhook Pod is ready.") - - s.NoErrorf( - s.testCluster.K8sHelper.Clientset.Get(s.ctx, client.ObjectKeyFromObject(vwc), vwc), - "Failed to retrieve ValidatingWebhookConfiguration") - *vwc.Webhooks[0].FailurePolicy = currentFailurePolicy - s.NoErrorf(s.testCluster.K8sHelper.Clientset.Update(s.ctx, vwc), "Failed to add CA data to Webhook") - - // Sometime the Pod restart takes longer than 1 second, so we wait for the endpoints to be ready - // when accessing the endpoints later on, we came across such errors: - // ... pod mondoo-client-webhook-manager-6c5ccc449d-d7zn9 and container webhook. Container is in state ContainerCreating - endpoints := &corev1.Endpoints{ // nolint:staticcheck - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s-webhook-service", s.auditConfig.Name), - Namespace: s.auditConfig.Namespace, - }, - Subsets: []corev1.EndpointSubset{}, // nolint:staticcheck - } - zap.S().Info("Getting endpoints for webhook.") - for i := 0; i < maxRetriesCreate; i++ { - s.NoErrorf( - s.testCluster.K8sHelper.Clientset.Get(s.ctx, client.ObjectKeyFromObject(endpoints), endpoints), - "Failed to retrieve endpoints for webhook") - if len(endpoints.Subsets) > 0 { - zap.S().Info("endpoints Addresses: ", endpoints.Subsets[0].Addresses) - zap.S().Info("endpoints NotReadyAddresses: ", endpoints.Subsets[0].NotReadyAddresses) - break - } - zap.S().Debug("Endpoints for webhook are not ready yet. 
Retrying...") - time.Sleep(2 * time.Second) - } - - zap.S().Info("Wait for webhook to start working.") -} - -func (s *AuditConfigBaseSuite) checkWebhookAvailability() error { - webhookService := mondooadmission.WebhookService(s.auditConfig.Namespace, s.auditConfig) - // there is this package https://pkg.go.dev/k8s.io/client-go/tools/portforward - // But it seems this is a bit complicated in combination with minikube - // because of that we use kubectl directly - cmd := s.createPortForwardCmd(webhookService) - err := cmd.Start() - if err != nil { - return fmt.Errorf("couldn't start port-forward: %w", err) - } - // kubectl port-forwarding does not return but will run until interrupted - // We have to get rid of the port-forward at the end, because we need to create a new one for each test - defer func() { - err := s.stopCmd(cmd) - if err != nil { - zap.S().Errorf("couldn't stop port-forward: %v\n", err) - } - }() - zap.S().Info("Created port-forward via kubectl for webhook with pid: ", cmd.Process.Pid) - - webhookUrl := fmt.Sprintf("https://127.0.0.1:%d/readyz", webhookLocalPort) - customTransport := http.DefaultTransport.(*http.Transport).Clone() - customTransport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} //nolint:gosec - client := &http.Client{Transport: customTransport} - client.Timeout = 500 * time.Millisecond - var resp *http.Response - var webhookErr error - for i := 0; i < maxRetriesWebhookConnect; i++ { - time.Sleep(2 * time.Second) - resp, webhookErr = client.Post(webhookUrl, "application/json", strings.NewReader("{}")) - if webhookErr == nil { - zap.S().Infof("Webhook is available: %s", resp.Status) - s.NoError(resp.Body.Close()) - return nil - } else { - zap.S().Debug("Webhook is not available yet: ", webhookErr) - // this error will not recover itself over time, we have to re-connect - if strings.HasSuffix(webhookErr.Error(), "connection refused") { - zap.S().Debug("Trying to restart port-forward") - err := s.stopCmd(cmd) - if err != nil { - zap.S().Errorf("couldn't stop port-forward: %v\n", err) - continue - } - cmd = s.createPortForwardCmd(webhookService) - err = cmd.Start() - if err != nil { - zap.S().Errorf("couldn't start port-forward: %v\n", err) - } else { - zap.S().Info("Created port-forward via kubectl for webhook with pid: ", cmd.Process.Pid) - } - } - } - } - return fmt.Errorf("webhook not available: %w", webhookErr) -} - -func (s *AuditConfigBaseSuite) stopCmd(cmd *exec.Cmd) error { - zap.S().Debug("Trying to stop port-forward with pid: ", cmd.Process.Pid) - err := cmd.Process.Kill() - if err != nil { - return fmt.Errorf("couldn't kill kubectl port-forward: %w", err) - } - _, err = cmd.Process.Wait() - return err -} - -func (s *AuditConfigBaseSuite) createPortForwardCmd(webhookService *corev1.Service) *exec.Cmd { - kubectlArgs := []string{ - "-n", - webhookService.Namespace, - "port-forward", - "svc/" + webhookService.Name, - fmt.Sprintf("%d:%d", webhookLocalPort, webhookService.Spec.Ports[0].Port), - } - - return exec.Command("kubectl", kubectlArgs...) // #nosec G204 -} - var ( defaultK8sPolicyMrns = []string{ "//policy.api.mondoo.app/policies/mondoo-kubernetes-best-practices", diff --git a/tests/integration/audit_config_namespace_test.go b/tests/integration/audit_config_namespace_test.go deleted file mode 100644 index 2d9c614a8..000000000 --- a/tests/integration/audit_config_namespace_test.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright (c) Mondoo, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package integration - -import ( - "testing" - - "github.com/stretchr/testify/suite" - "go.mondoo.com/mondoo-operator/api/v1alpha2" - "go.mondoo.com/mondoo-operator/tests/framework/utils" - "go.uber.org/zap" - - "sigs.k8s.io/controller-runtime/pkg/client" - - corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" -) - -type AuditConfigCustomNamespaceSuite struct { - AuditConfigBaseSuite - objsToCleanup []client.Object - ns *corev1.Namespace - sa *corev1.ServiceAccount - webhookServiceAccount *corev1.ServiceAccount -} - -func (s *AuditConfigCustomNamespaceSuite) SetupSuite() { - s.AuditConfigBaseSuite.SetupSuite() - s.testCluster.Settings.SuiteName = "AuditConfigCustomNamespaceSuite" - - s.ns = &corev1.Namespace{} - s.ns.Name = "some-namespace" - s.Require().NoErrorf(s.testCluster.K8sHelper.Clientset.Create(s.ctx, s.ns), "Failed to create namespace.") - s.objsToCleanup = append(s.objsToCleanup, s.ns) - zap.S().Info("Created test namespace.") - - s.Require().NoErrorf(s.testCluster.CreateClientSecret(s.ns.Name), "Failed to create client secret.") - zap.S().Infof("Created client secret in namespace %q.", s.ns.Name) - - s.webhookServiceAccount = &corev1.ServiceAccount{} - s.webhookServiceAccount.Name = "webhook-sa" - s.webhookServiceAccount.Namespace = s.ns.Name - s.Require().NoErrorf(s.testCluster.K8sHelper.Clientset.Create(s.ctx, s.webhookServiceAccount), "Failed to create webhook ServiceAccount") - s.objsToCleanup = append(s.objsToCleanup, s.webhookServiceAccount) - zap.S().Infof("Created webhook ServiceAccount %q in namespace %q.", s.webhookServiceAccount.Name, s.webhookServiceAccount.Namespace) - - s.sa = &corev1.ServiceAccount{} - s.sa.Name = "mondoo-sa" - s.sa.Namespace = s.ns.Name - s.Require().NoErrorf(s.testCluster.K8sHelper.Clientset.Create(s.ctx, s.sa), "Failed to create service account.") - s.objsToCleanup = append(s.objsToCleanup, s.sa) - zap.S().Infof("Created service account %q in namespace %q.", s.sa.Name, s.ns.Name) - - clusterRoleBinding := &rbacv1.ClusterRoleBinding{} - clusterRoleBinding.Name = "mondoo-operator-k8s-resources-scanning2" - clusterRoleBinding.RoleRef.APIGroup = rbacv1.GroupName - clusterRoleBinding.RoleRef.Kind = "ClusterRole" - clusterRoleBinding.RoleRef.Name = "mondoo-operator-k8s-resources-scanning" - - subject := rbacv1.Subject{Kind: rbacv1.ServiceAccountKind, Name: s.sa.Name, Namespace: s.sa.Namespace} - clusterRoleBinding.Subjects = append(clusterRoleBinding.Subjects, subject) - s.Require().NoErrorf( - s.testCluster.K8sHelper.Clientset.Create(s.ctx, clusterRoleBinding), "Failed to create cluster role binding.") - s.objsToCleanup = append(s.objsToCleanup, clusterRoleBinding) - zap.S().Infof("Created cluster role binding %q.", clusterRoleBinding.Name) -} - -func (s *AuditConfigCustomNamespaceSuite) AfterTest(suiteName, testName string) { - if s.testCluster != nil { - s.testCluster.GatherAllMondooLogs(testName, s.ns.Name) // Gather logs from the custom namespace too. 
-	}
-	s.AuditConfigBaseSuite.AfterTest(suiteName, testName)
-}
-
-func (s *AuditConfigCustomNamespaceSuite) TearDownSuite() {
-	for _, o := range s.objsToCleanup {
-		s.NoError(s.testCluster.K8sHelper.DeleteResourceIfExists(o))
-	}
-	s.AuditConfigBaseSuite.TearDownSuite()
-}
-
-func (s *AuditConfigCustomNamespaceSuite) TestReconcile_KubernetesResources() {
-	auditConfig := utils.DefaultAuditConfigMinimal(s.ns.Name, true, false, false, false)
-	auditConfig.Spec.Scanner.ServiceAccountName = s.sa.Name
-	s.testMondooAuditConfigKubernetesResources(auditConfig)
-}
-
-func (s *AuditConfigCustomNamespaceSuite) TestReconcile_Containers() {
-	auditConfig := utils.DefaultAuditConfigMinimal(s.ns.Name, false, true, false, false)
-	auditConfig.Spec.Scanner.ServiceAccountName = s.sa.Name
-
-	// Ignore the operator namespace and the scanner namespace because we cannot scan a local image
-	// Ignore kube-system to speed up the containers test
-	auditConfig.Spec.Filtering.Namespaces.Exclude = []string{s.ns.Name, s.testCluster.Settings.Namespace, "kube-system"}
-	s.testMondooAuditConfigContainers(auditConfig)
-}
-
-func (s *AuditConfigCustomNamespaceSuite) TestReconcile_Nodes_CronJobs() {
-	auditConfig := utils.DefaultAuditConfigMinimal(s.ns.Name, false, false, true, false)
-	auditConfig.Spec.Scanner.ServiceAccountName = s.sa.Name
-	s.testMondooAuditConfigNodesCronjobs(auditConfig)
-}
-
-func (s *AuditConfigCustomNamespaceSuite) TestReconcile_Nodes_DaemonSet() {
-	auditConfig := utils.DefaultAuditConfigMinimal(s.ns.Name, false, false, true, false)
-	auditConfig.Spec.Nodes.Style = v1alpha2.NodeScanStyle_Deployment // TODO: Change to DaemonSet (no effect on reconcile logic)
-	auditConfig.Spec.Nodes.IntervalTimer = 1
-	auditConfig.Spec.Scanner.ServiceAccountName = s.sa.Name
-	s.testMondooAuditConfigNodesDaemonSets(auditConfig)
-}
-
-func (s *AuditConfigCustomNamespaceSuite) TestReconcile_Admission() {
-	auditConfig := utils.DefaultAuditConfigMinimal(s.ns.Name, false, false, false, true)
-	auditConfig.Spec.Scanner.ServiceAccountName = s.sa.Name
-	auditConfig.Spec.Admission.ServiceAccountName = s.webhookServiceAccount.Name
-	s.testMondooAuditConfigAdmission(auditConfig)
-}
-
-func (s *AuditConfigCustomNamespaceSuite) TestReconcile_AdmissionMissingSA() {
-	auditConfig := utils.DefaultAuditConfigMinimal(s.ns.Name, false, false, false, true)
-	auditConfig.Spec.Scanner.ServiceAccountName = "missing-serviceaccount"
-	auditConfig.Spec.Admission.ServiceAccountName = s.webhookServiceAccount.Name
-	s.testMondooAuditConfigAdmissionMissingSA(auditConfig)
-}
-
-func TestAuditConfigCustomNamespaceSuite(t *testing.T) {
-	s := new(AuditConfigCustomNamespaceSuite)
-	defer func(s *AuditConfigCustomNamespaceSuite) {
-		HandlePanics(recover(), func() {
-			if err := s.testCluster.UninstallOperator(); err != nil {
-				zap.S().Errorf("Failed to uninstall Mondoo operator. %v", err)
-			}
-			if s.spaceClient != nil {
-				if err := s.spaceClient.Delete(s.ctx); err != nil {
-					zap.S().Errorf("Failed to delete Mondoo space. %v", err)
-				}
-			}
-		}, s.T)
-	}(s)
-	suite.Run(t, s)
-}
diff --git a/tests/integration/audit_config_oom_test.go b/tests/integration/audit_config_oom_test.go
deleted file mode 100644
index c2d12cc33..000000000
--- a/tests/integration/audit_config_oom_test.go
+++ /dev/null
@@ -1,379 +0,0 @@
-// Copyright (c) Mondoo, Inc.
-// SPDX-License-Identifier: BUSL-1.1 - -package integration - -import ( - "fmt" - "testing" - "time" - - "github.com/stretchr/testify/suite" - mondoov2 "go.mondoo.com/mondoo-operator/api/v1alpha2" - "go.mondoo.com/mondoo-operator/controllers/nodes" - "go.mondoo.com/mondoo-operator/pkg/utils/mondoo" - "go.mondoo.com/mondoo-operator/tests/framework/utils" - "go.uber.org/zap" - appsv1 "k8s.io/api/apps/v1" - batchv1 "k8s.io/api/batch/v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -type AuditConfigOOMSuite struct { - AuditConfigBaseSuite -} - -func (s *AuditConfigOOMSuite) SetupSuite() { - s.AuditConfigBaseSuite.SetupSuite() - s.testCluster.Settings.SuiteName = "AuditConfigOOMSuite" -} - -func (s *AuditConfigOOMSuite) TestOOMControllerReporting() { - auditConfig := utils.DefaultAuditConfigMinimal(s.testCluster.Settings.Namespace, false, false, false, false) - s.auditConfig = auditConfig - - // Disable container image resolution to be able to run the k8s resources scan CronJob with a local image. - cleanup := s.disableContainerImageResolution() - defer cleanup() - - zap.S().Info("Create an audit config that enables nothing.") - s.NoErrorf( - s.testCluster.K8sHelper.Clientset.Create(s.ctx, &auditConfig), - "Failed to create Mondoo audit config.") - - s.Require().True(s.testCluster.K8sHelper.WaitUntilMondooClientSecretExists(s.ctx, s.auditConfig.Namespace), "Mondoo SA not created") - - listOpts := &client.ListOptions{ - Namespace: auditConfig.Namespace, - LabelSelector: labels.SelectorFromSet(map[string]string{ - "app.kubernetes.io/name": "mondoo-operator", - }), - } - - err := s.testCluster.K8sHelper.UpdateDeploymentWithRetries(s.ctx, listOpts, func(dep *appsv1.Deployment) { - dep.Spec.Template.Spec.Containers[0].Resources.Requests = corev1.ResourceList{} - dep.Spec.Template.Spec.Containers[0].Resources.Limits = corev1.ResourceList{ - corev1.ResourceMemory: resource.MustParse("15Mi"), // this should be low enough to trigger an OOMkilled - } - }) - s.Require().NoErrorf(err, "Failed to reduce memory limit") - - // This will take some time, because: - // a new replicaset should be created - // the first Pod tries to start and gets killed - // on the 2nd start we should get an OOMkilled status update - err = s.testCluster.K8sHelper.CheckForDegradedCondition(&auditConfig, mondoov2.MondooOperatorDegraded, corev1.ConditionTrue, "OOM") - s.Require().NoError(err, "Failed to find degraded condition") - - foundMondooAuditConfig, err := s.testCluster.K8sHelper.GetMondooAuditConfigFromCluster(auditConfig.Name, auditConfig.Namespace) - s.NoError(err, "Failed to find MondooAuditConfig") - cond := mondoo.FindMondooAuditConditions(foundMondooAuditConfig.Status.Conditions, mondoov2.MondooOperatorDegraded) - s.Require().NotNil(cond) - s.Containsf(cond.Message, "OOM", "Failed to find OOMKilled message in degraded condition") - s.Len(cond.AffectedPods, 1, "Failed to find only one pod in degraded condition") - - // Give the integration a chance to update - err = s.testCluster.K8sHelper.ExecuteWithRetries(func() (bool, error) { - status, err := s.integration.GetStatus(s.ctx) - if err != nil { - return false, err - } - return status == "ERROR", nil - }) - s.NoErrorf(err, "Failed to check for ERROR status") - - deployments := &appsv1.DeploymentList{} - s.NoError(s.testCluster.K8sHelper.Clientset.List(s.ctx, deployments, listOpts)) - s.Equalf(1, 
len(deployments.Items), "mondoo-operator deployment not found") - - zap.S().Info("Increasing memory limit to get controller running again.") - err = s.testCluster.K8sHelper.UpdateDeploymentWithRetries(s.ctx, listOpts, func(dep *appsv1.Deployment) { - dep.Spec.Template.Spec.Containers[0].Resources.Limits = corev1.ResourceList{ - corev1.ResourceMemory: resource.MustParse("100Mi"), // this should be low enough to trigger an OOMkilled - } - }) - s.Require().NoErrorf(err, "Failed to reduce memory limit") - - err = s.testCluster.K8sHelper.CheckForDegradedCondition(&auditConfig, mondoov2.MondooOperatorDegraded, corev1.ConditionFalse, "") - s.Require().NoError(err, "Failed to find degraded condition") - foundMondooAuditConfig, err = s.testCluster.K8sHelper.GetMondooAuditConfigFromCluster(auditConfig.Name, auditConfig.Namespace) - s.NoError(err, "Failed to find MondooAuditConfig") - cond = mondoo.FindMondooAuditConditions(foundMondooAuditConfig.Status.Conditions, mondoov2.MondooOperatorDegraded) - s.Require().NotNil(cond) - s.NotContains(cond.Message, "OOM", "Found OOMKilled message in condition") - s.Len(cond.AffectedPods, 0, "Found a pod in condition") - - err = s.testCluster.K8sHelper.ExecuteWithRetries(func() (bool, error) { - status, err := s.integration.GetStatus(s.ctx) - if err != nil { - return false, err - } - return status == "ACTIVE", nil - }) - s.NoErrorf(err, "Failed to check for ACTIVE status") -} - -func (s *AuditConfigOOMSuite) TestOOMScanAPI() { - auditConfig := utils.DefaultAuditConfigMinimal(s.testCluster.Settings.Namespace, true, false, false, false) - s.auditConfig = auditConfig - - auditConfig.Spec.Scanner.Resources.Limits = corev1.ResourceList{ - corev1.ResourceMemory: resource.MustParse("10Mi"), // this should be low enough to trigger an OOMkilled - } - - // Move CronJob into the future to avoid interference with tests. - cronStart := time.Now().Add(30 * time.Minute) - auditConfig.Spec.KubernetesResources.Schedule = fmt.Sprintf("%d * * * *", cronStart.Minute()) - - // Disable container image resolution to be able to run the k8s resources scan CronJob with a local image. - cleanup := s.disableContainerImageResolution() - defer cleanup() - - zap.S().Info("Create an audit config that enables only workloads scanning. 
(with reduced memory limit)") - s.NoErrorf( - s.testCluster.K8sHelper.Clientset.Create(s.ctx, &auditConfig), - "Failed to create Mondoo audit config.") - - s.Require().True(s.testCluster.K8sHelper.WaitUntilMondooClientSecretExists(s.ctx, s.auditConfig.Namespace), "Mondoo SA not created") - - // This will take some time, because: - // reconcile needs to happen - err := s.testCluster.K8sHelper.CheckForDegradedCondition(&auditConfig, mondoov2.ScanAPIDegraded, corev1.ConditionTrue, "OOM") - s.Require().NoError(err, "Failed to find degraded condition") - - foundMondooAuditConfig, err := s.testCluster.K8sHelper.GetMondooAuditConfigFromCluster(auditConfig.Name, auditConfig.Namespace) - s.Require().NoError(err) - - cond := mondoo.FindMondooAuditConditions(foundMondooAuditConfig.Status.Conditions, mondoov2.ScanAPIDegraded) - s.Require().NoError(err, "Failed to find degraded condition") - s.Require().NotNil(cond) - s.Containsf(cond.Message, "OOM", "Failed to find OOMKilled message in degraded condition") - s.Len(cond.AffectedPods, 1, "Failed to find only one pod in degraded condition") - - // Give the integration a chance to update - time.Sleep(2 * time.Second) - - status, err := s.integration.GetStatus(s.ctx) - s.NoError(err, "Failed to get status") - s.Equal("ERROR", status) - - err = s.testCluster.K8sHelper.UpdateAuditConfigWithRetries(auditConfig.Name, auditConfig.Namespace, func(config *mondoov2.MondooAuditConfig) { - config.Spec.Scanner.Resources.Limits = corev1.ResourceList{ - corev1.ResourceMemory: resource.MustParse("200Mi"), // this should be enough to get the ScanAPI running again - } - }) - s.Require().NoError(err) - - err = s.testCluster.K8sHelper.CheckForDegradedCondition(&auditConfig, mondoov2.ScanAPIDegraded, corev1.ConditionFalse, "") - s.Require().NoError(err, "Failed to find degraded condition") - foundMondooAuditConfig, err = s.testCluster.K8sHelper.GetMondooAuditConfigFromCluster(auditConfig.Name, auditConfig.Namespace) - s.NoError(err, "Failed to find MondooAuditConfig") - - cond = mondoo.FindMondooAuditConditions(foundMondooAuditConfig.Status.Conditions, mondoov2.ScanAPIDegraded) - s.Require().NotNil(cond) - s.NotContains(cond.Message, "OOM", "Found OOMKilled message in condition") - s.Len(cond.AffectedPods, 0, "Found a pod in condition") - - // Give the integration a chance to update - time.Sleep(2 * time.Second) - - status, err = s.integration.GetStatus(s.ctx) - s.NoError(err, "Failed to get status") - s.Equal("ACTIVE", status) -} - -func (s *AuditConfigOOMSuite) TestOOMNodeScan_CronJob() { - auditConfig := utils.DefaultAuditConfigMinimal(s.testCluster.Settings.Namespace, false, false, true, false) - s.auditConfig = auditConfig - - auditConfig.Spec.Nodes.Resources.Limits = corev1.ResourceList{ - corev1.ResourceMemory: resource.MustParse("10Mi"), // this should be low enough to trigger an OOMkilled - } - - // Disable container image resolution to be able to run the k8s resources scan CronJob with a local image. - cleanup := s.disableContainerImageResolution() - defer cleanup() - - zap.S().Info("Create an audit config that enables only nodes scanning. 
(with reduced memory limit)") - s.NoErrorf( - s.testCluster.K8sHelper.Clientset.Create(s.ctx, &auditConfig), - "Failed to create Mondoo audit config.") - - s.Require().True(s.testCluster.K8sHelper.WaitUntilMondooClientSecretExists(s.ctx, s.auditConfig.Namespace), "Mondoo SA not created") - - cronJobs := &batchv1.CronJobList{} - cronJobLabels := nodes.NodeScanningLabels(auditConfig) - - // List only the CronJobs in the namespace of the MondooAuditConfig and only the ones that exactly match our labels. - listOpts := &client.ListOptions{Namespace: auditConfig.Namespace, LabelSelector: labels.SelectorFromSet(cronJobLabels)} - - nodeList := &corev1.NodeList{} - s.NoError(s.testCluster.K8sHelper.Clientset.List(s.ctx, nodeList)) - - // Verify the amount of CronJobs created is equal to the amount of nodes - err := s.testCluster.K8sHelper.ExecuteWithRetries(func() (bool, error) { - s.NoError(s.testCluster.K8sHelper.Clientset.List(s.ctx, cronJobs, listOpts)) - if len(nodeList.Items) == len(cronJobs.Items) { - return true, nil - } - return false, nil - }) - s.NoErrorf( - err, - "The amount of node scanning CronJobs is not equal to the amount of cluster nodes. expected: %d; actual: %d", - len(nodeList.Items), len(cronJobs.Items)) - - // This will take some time, because: - // reconcile needs to happen - // a new replicaset should be created - // the first Pod tries to start and gets killed - // on the 2nd start we should get an OOMkilled status update - err = s.testCluster.K8sHelper.CheckForDegradedCondition(&auditConfig, mondoov2.NodeScanningDegraded, corev1.ConditionTrue, "OOM") - s.Require().NoError(err, "Failed to find degraded condition") - - foundMondooAuditConfig, err := s.testCluster.K8sHelper.GetMondooAuditConfigFromCluster(auditConfig.Name, auditConfig.Namespace) - s.NoError(err, "Failed to find MondooAuditConfig") - cond := mondoo.FindMondooAuditConditions(foundMondooAuditConfig.Status.Conditions, mondoov2.NodeScanningDegraded) - s.Require().NotNil(cond) - s.Containsf(cond.Message, "OOM", "Failed to find OOMKilled message in degraded condition") - s.Len(cond.AffectedPods, 1, "Failed to find only one pod in degraded condition") - - // Give the integration a chance to update - time.Sleep(2 * time.Second) - - status, err := s.integration.GetStatus(s.ctx) - s.NoError(err, "Failed to get status") - s.Equal("ERROR", status) - - zap.S().Info("Increasing memory limit to get node Scans running again.") - err = s.testCluster.K8sHelper.UpdateAuditConfigWithRetries(auditConfig.Name, auditConfig.Namespace, func(config *mondoov2.MondooAuditConfig) { - config.Spec.Nodes.Resources.Limits = corev1.ResourceList{ - corev1.ResourceMemory: resource.MustParse("200Mi"), // this should be enough to get the ScanAPI running again - } - foundMondooAuditConfig.Spec.Nodes.Schedule = "*/1 * * * *" - }) - s.Require().NoError(err) - - // Wait for the next run of the CronJob - time.Sleep(30 * time.Second) - - err = s.testCluster.K8sHelper.CheckForDegradedCondition(&auditConfig, mondoov2.NodeScanningDegraded, corev1.ConditionFalse, "") - s.Require().NoError(err, "Failed to find degraded condition") - foundMondooAuditConfig, err = s.testCluster.K8sHelper.GetMondooAuditConfigFromCluster(auditConfig.Name, auditConfig.Namespace) - s.NoError(err, "Failed to find MondooAuditConfig") - cond = mondoo.FindMondooAuditConditions(foundMondooAuditConfig.Status.Conditions, mondoov2.ScanAPIDegraded) - s.Require().NotNil(cond) - s.NotContains(cond.Message, "OOM", "Found OOMKilled message in condition") - s.Len(cond.AffectedPods, 0, "Found 
a pod in condition") - - // Give the integration a chance to update - time.Sleep(2 * time.Second) - - status, err = s.integration.GetStatus(s.ctx) - s.NoError(err, "Failed to get status") - s.Equal("ACTIVE", status) -} - -func (s *AuditConfigOOMSuite) TestOOMNodeScan_DaemonSet() { - auditConfig := utils.DefaultAuditConfigMinimal(s.testCluster.Settings.Namespace, false, false, true, false) - auditConfig.Spec.Nodes.Style = mondoov2.NodeScanStyle_Deployment // TODO: Change to DaemonSet (no effect on reconsile logic) - s.auditConfig = auditConfig - - auditConfig.Spec.Nodes.Resources.Limits = corev1.ResourceList{ - corev1.ResourceMemory: resource.MustParse("200Mi"), - } - - // Disable container image resolution to be able to run the k8s resources scan CronJob with a local image. - cleanup := s.disableContainerImageResolution() - defer cleanup() - - zap.S().Info("Create an audit config that enables only nodes scanning.") - s.NoErrorf( - s.testCluster.K8sHelper.Clientset.Create(s.ctx, &auditConfig), - "Failed to create Mondoo audit config.") - - s.Require().True(s.testCluster.K8sHelper.WaitUntilMondooClientSecretExists(s.ctx, s.auditConfig.Namespace), "Mondoo SA not created") - - ds := &appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{Name: nodes.DaemonSetName(auditConfig.Name), Namespace: auditConfig.Namespace}} - - // Verify that DaemonSet was created - err := s.testCluster.K8sHelper.ExecuteWithRetries(func() (bool, error) { - if err := s.testCluster.K8sHelper.Clientset.Get(s.ctx, client.ObjectKeyFromObject(ds), ds); err != nil { - return false, nil - } - return true, nil - }) - s.NoError(err, "DaemonSet was not created.") - - // Give the integration a chance to update - time.Sleep(20 * time.Second) - - status, err := s.integration.GetStatus(s.ctx) - s.NoError(err, "Failed to get status") - s.Equal("ACTIVE", status) - - zap.S().Info("Decreasing memory limit to get node Scans running again.") - err = s.testCluster.K8sHelper.UpdateAuditConfigWithRetries(auditConfig.Name, auditConfig.Namespace, func(config *mondoov2.MondooAuditConfig) { - config.Spec.Nodes.Resources.Limits = corev1.ResourceList{ - corev1.ResourceMemory: resource.MustParse("10Mi"), // this should be low enough to trigger an OOMkilled - } - }) - s.Require().NoError(err) - - // Give the integration a chance to update - time.Sleep(20 * time.Second) - - status, err = s.integration.GetStatus(s.ctx) - s.NoError(err, "Failed to get status") - s.Equal("ERROR", status) - - zap.S().Info("Increasing memory limit to get node Scans running again.") - err = s.testCluster.K8sHelper.UpdateAuditConfigWithRetries(auditConfig.Name, auditConfig.Namespace, func(config *mondoov2.MondooAuditConfig) { - config.Spec.Nodes.Resources.Limits = corev1.ResourceList{ - corev1.ResourceMemory: resource.MustParse("200Mi"), // this should be enough to get the ScanAPI running again - } - }) - s.Require().NoError(err) - - // Wait for the next run of the CronJob - time.Sleep(30 * time.Second) - - err = s.testCluster.K8sHelper.CheckForDegradedCondition(&auditConfig, mondoov2.NodeScanningDegraded, corev1.ConditionFalse, "") - s.Require().NoError(err, "Failed to find degraded condition") - foundMondooAuditConfig, err := s.testCluster.K8sHelper.GetMondooAuditConfigFromCluster(auditConfig.Name, auditConfig.Namespace) - s.NoError(err, "Failed to find MondooAuditConfig") - cond := mondoo.FindMondooAuditConditions(foundMondooAuditConfig.Status.Conditions, mondoov2.ScanAPIDegraded) - s.Require().NotNil(cond) - s.NotContains(cond.Message, "OOM", "Found OOMKilled message in 
condition") - - s.Len(cond.AffectedPods, 0, "Found a pod in condition") - - status, err = s.integration.GetStatus(s.ctx) - s.NoError(err, "Failed to get status") - s.Equal("ACTIVE", status) -} - -func (s *AuditConfigOOMSuite) TearDownSuite() { - s.AuditConfigBaseSuite.TearDownSuite() -} - -func TestAuditConfigOOMSuite(t *testing.T) { - s := new(AuditConfigOOMSuite) - defer func(s *AuditConfigOOMSuite) { - HandlePanics(recover(), func() { - if err := s.testCluster.UninstallOperator(); err != nil { - zap.S().Errorf("Failed to uninstall Mondoo operator. %v", err) - } - if s.spaceClient != nil { - if err := s.spaceClient.Delete(s.ctx); err != nil { - zap.S().Errorf("Failed to delete Mondoo space. %v", err) - } - } - }, s.T) - }(s) - suite.Run(t, s) -} diff --git a/tests/integration/audit_config_test.go b/tests/integration/audit_config_test.go index a41797463..d998cb036 100644 --- a/tests/integration/audit_config_test.go +++ b/tests/integration/audit_config_test.go @@ -7,10 +7,8 @@ import ( "testing" "github.com/stretchr/testify/suite" - "go.mondoo.com/mondoo-operator/api/v1alpha2" "go.mondoo.com/mondoo-operator/tests/framework/utils" "go.uber.org/zap" - "k8s.io/utils/ptr" ) type AuditConfigSuite struct { @@ -46,32 +44,6 @@ func (s *AuditConfigSuite) TestReconcile_Nodes_CronJobs() { s.testMondooAuditConfigNodesCronjobs(auditConfig) } -func (s *AuditConfigSuite) TestReconcile_Nodes_DaemonSet() { - auditConfig := utils.DefaultAuditConfigMinimal(s.testCluster.Settings.Namespace, false, false, true, false) - auditConfig.Spec.Nodes.Style = v1alpha2.NodeScanStyle_DaemonSet - auditConfig.Spec.Nodes.IntervalTimer = 1 - s.testMondooAuditConfigNodesDaemonSets(auditConfig) -} - -func (s *AuditConfigSuite) TestReconcile_AdmissionPermissive() { - auditConfig := utils.DefaultAuditConfigMinimal(s.testCluster.Settings.Namespace, false, false, false, true) - s.testMondooAuditConfigAdmission(auditConfig) -} - -func (s *AuditConfigSuite) TestReconcile_AdmissionEnforcing() { - auditConfig := utils.DefaultAuditConfigMinimal(s.testCluster.Settings.Namespace, false, false, false, true) - auditConfig.Spec.Admission.Mode = v1alpha2.Enforcing - s.testMondooAuditConfigAdmission(auditConfig) -} - -func (s *AuditConfigSuite) TestReconcile_AdmissionEnforcingScaleDownScanApi() { - auditConfig := utils.DefaultAuditConfigMinimal(s.testCluster.Settings.Namespace, false, false, false, true) - auditConfig.Spec.Admission.Mode = v1alpha2.Enforcing - auditConfig.Spec.Admission.Replicas = ptr.To(int32(1)) - auditConfig.Spec.Scanner.Replicas = ptr.To(int32(1)) - s.testMondooAuditConfigAdmissionScaleDownScanApi(auditConfig) -} - func (s *AuditConfigSuite) TearDownSuite() { s.AuditConfigBaseSuite.TearDownSuite() } diff --git a/tests/integration/audit_config_upgrade_test.go b/tests/integration/audit_config_upgrade_test.go deleted file mode 100644 index 92523b34f..000000000 --- a/tests/integration/audit_config_upgrade_test.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright (c) Mondoo, Inc. 
-// SPDX-License-Identifier: BUSL-1.1
-
-package integration
-
-import (
-	"testing"
-
-	"github.com/stretchr/testify/suite"
-	"go.mondoo.com/mondoo-operator/tests/framework/installer"
-	"go.mondoo.com/mondoo-operator/tests/framework/utils"
-	"go.uber.org/zap"
-)
-
-type AuditConfigUpgradeSuite struct {
-	AuditConfigBaseSuite
-}
-
-func (s *AuditConfigUpgradeSuite) SetupSuite() {
-	s.AuditConfigBaseSuite.SetupSuite()
-	s.testCluster.Settings.SuiteName = "AuditConfigUpgradeSuite"
-}
-
-func (s *AuditConfigUpgradeSuite) AfterTest(suiteName, testName string) {
-	if s.testCluster != nil {
-		s.testCluster.GatherAllMondooLogs(testName, installer.MondooNamespace)
-	}
-}
-
-func (s *AuditConfigUpgradeSuite) TearDownSuite() {
-	s.NoError(s.testCluster.UninstallOperator())
-	s.NoError(s.spaceClient.Delete(s.ctx))
-}
-
-func (s *AuditConfigUpgradeSuite) TestUpgradePreviousReleaseToLatest() {
-	auditConfig := utils.DefaultAuditConfigMinimal(s.testCluster.Settings.Namespace, true, false, true, false)
-	s.testUpgradePreviousReleaseToLatest(auditConfig)
-}
-
-func TestAuditConfigUpgradeSuite(t *testing.T) {
-	s := new(AuditConfigUpgradeSuite)
-	s.installRelease = true
-	defer func(s *AuditConfigUpgradeSuite) {
-		HandlePanics(recover(), func() {
-			if err := s.testCluster.UninstallOperator(); err != nil {
-				zap.S().Errorf("Failed to uninstall Mondoo operator. %v", err)
-			}
-			if s.spaceClient != nil {
-				if err := s.spaceClient.Delete(s.ctx); err != nil {
-					zap.S().Errorf("Failed to delete Mondoo space. %v", err)
-				}
-			}
-		}, s.T)
-	}(s)
-	suite.Run(t, s)
-}