Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions .github/actions/spelling/expect.txt
Original file line number Diff line number Diff line change
@@ -1,8 +1,9 @@
AAD
AADSTS
acr
Acr
artifactory
artifactregistry
auditconfig
bak
bitnami
containerregistry
Expand All @@ -28,7 +29,6 @@ psat
rolearn
selfsigned
servicemonitor
servicemonitors
spiffe
SResources
SVIDs
Expand Down
33 changes: 25 additions & 8 deletions controllers/container_image/deployment_handler.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,14 +29,8 @@ type DeploymentHandler struct {
}

func (n *DeploymentHandler) Reconcile(ctx context.Context) (ctrl.Result, error) {
// TODO: remove in next version
// Delete the old container scanning cronjob if it exists
if err := k8s.DeleteIfExists(ctx,
n.KubeClient,
&batchv1.CronJob{ObjectMeta: metav1.ObjectMeta{
Name: OldCronJobName(n.Mondoo.Name),
Namespace: n.Mondoo.Namespace,
}}); err != nil {
// Clean up CronJobs with stale names (from old naming schemes)
if err := n.cleanupStaleCronJobs(ctx); err != nil {
return ctrl.Result{}, err
Comment thread
mondoo-code-review[bot] marked this conversation as resolved.
}

Expand Down Expand Up @@ -235,3 +229,26 @@ func (n *DeploymentHandler) cleanupWIFServiceAccount(ctx context.Context) error
}
return nil
}

// cleanupStaleCronJobs deletes any container-scan CronJob in this
// MondooAuditConfig's namespace whose name does not match the current naming
// scheme. Candidates are found by label selection (CronJobLabels), so only
// CronJobs created by this operator for this CR are considered.
func (n *DeploymentHandler) cleanupStaleCronJobs(ctx context.Context) error {
	var jobList batchv1.CronJobList
	opts := &client.ListOptions{
		Namespace:     n.Mondoo.Namespace,
		LabelSelector: labels.SelectorFromSet(CronJobLabels(*n.Mondoo)),
	}
	if err := n.KubeClient.List(ctx, &jobList, opts); err != nil {
		return err
	}

	current := CronJobName(n.Mondoo.Name)
	for i := range jobList.Items {
		cj := &jobList.Items[i]
		if cj.Name == current {
			// This CronJob uses the current naming scheme; keep it.
			continue
		}
		logger.Info("Deleting stale container scan CronJob", "name", cj.Name)
		if err := k8s.DeleteIfExists(ctx, n.KubeClient, cj); err != nil {
			return err
		}
	}
	return nil
}
178 changes: 2 additions & 176 deletions controllers/container_image/resources.go
Original file line number Diff line number Diff line change
Expand Up @@ -11,15 +11,13 @@ import (
// That's the mod k8s relies on https://github.com/kubernetes/kubernetes/blob/master/go.mod#L63

"go.mondoo.com/mondoo-operator/api/v1alpha2"
k8s_scan "go.mondoo.com/mondoo-operator/controllers/k8s_scan"
"go.mondoo.com/mondoo-operator/pkg/constants"
"go.mondoo.com/mondoo-operator/pkg/feature_flags"
"go.mondoo.com/mondoo-operator/pkg/utils/k8s"
mondoo "go.mondoo.com/mondoo-operator/pkg/utils/mondoo"
"go.mondoo.com/mql/v13/providers-sdk/v1/inventory"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/ptr"
"sigs.k8s.io/yaml"
Expand Down Expand Up @@ -193,7 +191,7 @@ func CronJob(image, integrationMrn, clusterUid, privateRegistrySecretName string
)

// Add init container for registry credential generation
podSpec.InitContainers = append(podSpec.InitContainers, registryWIFInitContainer(wif))
podSpec.InitContainers = append(podSpec.InitContainers, k8s.RegistryWIFInitContainer(wif))

// AKS Workload Identity webhook requires this label on the pod template only.
// Copy labels so we don't mutate the CronJob/Job metadata.
Expand Down Expand Up @@ -232,7 +230,7 @@ func OldCronJobName(prefix string) string {
}

func CronJobName(prefix string) string {
return fmt.Sprintf("%s%s", prefix, CronJobNameSuffix)
return k8s.CronJobName("container-scan", prefix)
}

func ConfigMap(integrationMRN, clusterUID string, m v1alpha2.MondooAuditConfig, cfg v1alpha2.MondooOperatorConfig) (*corev1.ConfigMap, error) {
Expand Down Expand Up @@ -380,175 +378,3 @@ func validateContainerRegistryWIF(wif *v1alpha2.WorkloadIdentityConfig) error {

return nil
}

// registryWIFInitContainer creates an init container that generates docker config credentials
// using cloud-native Workload Identity Federation.
//
// The container runs a provider-specific CLI (gcloud, aws, or az) to obtain a
// short-lived registry token and writes a docker config.json to
// /etc/opt/mondoo/docker (the "docker-config" volume mount), where the scan
// container presumably reads it — verify against the CronJob pod spec.
// No static registry secrets are involved; authentication comes from the pod's
// workload identity (WIF/IRSA/Azure workload identity).
func registryWIFInitContainer(wif *v1alpha2.WorkloadIdentityConfig) corev1.Container {
	var image, shell, script string
	var env []corev1.EnvVar

	// Common retry wrapper for transient failures.
	// Prepended to every provider script: retries a command up to 3 times
	// with a 5s pause, under `set -euo pipefail` strict mode.
	retryWrapper := `set -euo pipefail
# Retry wrapper for transient failures
retry() {
local max_attempts=3
local delay=5
local attempt=1
while [ $attempt -le $max_attempts ]; do
if "$@"; then
return 0
fi
echo "Attempt $attempt failed, retrying in ${delay}s..."
sleep $delay
attempt=$((attempt + 1))
done
echo "All $max_attempts attempts failed"
return 1
}
`

	// Select CLI image, shell, login script, and env per cloud provider.
	switch wif.Provider {
	case v1alpha2.CloudProviderGKE:
		// GKE: exchange the pod's WIF identity for an OAuth2 access token and
		// register it for every Artifact Registry endpoint plus legacy GCR hosts.
		image = k8s_scan.GCloudSDKImage
		shell = "/bin/bash"
		script = retryWrapper + `
# Use WIF identity to get an access token for Artifact Registry / GCR
TOKEN=$(retry gcloud auth print-access-token)
AUTH=$(echo -n "oauth2accesstoken:${TOKEN}" | base64 -w0)

# All GCP regions and multi-region locations that host Artifact Registry.
# Docker config requires exact hostname matches, so we enumerate them all.
AR_LOCATIONS="
africa-south1 asia-east1 asia-east2 asia-northeast1 asia-northeast2 asia-northeast3
asia-south1 asia-south2 asia-southeast1 asia-southeast2
australia-southeast1 australia-southeast2
europe-central2 europe-north1 europe-southwest1 europe-west1 europe-west2
europe-west3 europe-west4 europe-west6 europe-west8 europe-west9 europe-west10 europe-west12
me-central1 me-central2 me-west1
northamerica-northeast1 northamerica-northeast2
southamerica-east1 southamerica-west1
us-central1 us-east1 us-east4 us-east5 us-south1 us-west1 us-west2 us-west3 us-west4
asia europe us
"

AUTHS=""
add_auth() {
[ -n "$AUTHS" ] && AUTHS="${AUTHS},"
AUTHS="${AUTHS}\"$1\":{\"auth\":\"${AUTH}\"}"
}

for loc in $AR_LOCATIONS; do
add_auth "${loc}-docker.pkg.dev"
done

# Legacy GCR endpoints
for host in gcr.io us.gcr.io eu.gcr.io asia.gcr.io; do
add_auth "$host"
done

cat > /etc/opt/mondoo/docker/config.json <<DOCKEREOF
{"auths":{${AUTHS}}}
DOCKEREOF
echo "Docker config generated for $(echo "$AUTHS" | tr ',' '\n' | wc -l) registry endpoints"
`
		// HOME=/tmp so gcloud can write its config on a read-only root fs.
		env = []corev1.EnvVar{
			{Name: "HOME", Value: "/tmp"},
		}

	case v1alpha2.CloudProviderEKS:
		// EKS: use IRSA to fetch an ECR login password; the registry host is
		// derived from the account ID embedded in the configured role ARN.
		image = k8s_scan.AWSCLIImage
		shell = "/bin/bash"
		script = retryWrapper + `
# Use IRSA identity to get ECR login password
PASSWORD=$(retry aws ecr get-login-password --region "$AWS_REGION")

# Derive registry URL from role ARN account ID and region
ACCOUNT_ID=$(echo "$ROLE_ARN" | cut -d: -f5)
REGISTRY="${ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com"

# Write docker config
AUTH=$(echo -n "AWS:${PASSWORD}" | base64 -w0)
cat > /etc/opt/mondoo/docker/config.json <<DOCKEREOF
{
"auths": {
"${REGISTRY}": { "auth": "${AUTH}" }
}
}
DOCKEREOF
echo "Docker config generated for ECR registry: ${REGISTRY}"
`
		env = []corev1.EnvVar{
			{Name: "HOME", Value: "/tmp"},
			{Name: "AWS_REGION", Value: wif.EKS.Region},
			{Name: "ROLE_ARN", Value: wif.EKS.RoleARN},
		}

	case v1alpha2.CloudProviderAKS:
		// AKS: log in with the federated token injected by the Azure workload
		// identity webhook, then exchange it for an ACR access token.
		image = k8s_scan.AzureCLIImage
		shell = "/bin/bash"
		script = retryWrapper + `
# Azure WIF webhook injects AZURE_CLIENT_ID, AZURE_TENANT_ID, AZURE_FEDERATED_TOKEN_FILE
retry az login --federated-token "$(cat "$AZURE_FEDERATED_TOKEN_FILE")" \
--service-principal \
-u "$AZURE_CLIENT_ID" \
-t "$AZURE_TENANT_ID"

# Get ACR access token
TOKEN=$(retry az acr login --name "$ACR_LOGIN_SERVER" --expose-token --output tsv --query accessToken)

# Write docker config
AUTH=$(echo -n "00000000-0000-0000-0000-000000000000:${TOKEN}" | base64 -w0)
cat > /etc/opt/mondoo/docker/config.json <<DOCKEREOF
{
"auths": {
"${ACR_LOGIN_SERVER}": { "auth": "${AUTH}" }
}
}
DOCKEREOF
echo "Docker config generated for ACR: ${ACR_LOGIN_SERVER}"
`
		env = []corev1.EnvVar{
			{Name: "HOME", Value: "/tmp"},
			{Name: "ACR_LOGIN_SERVER", Value: wif.AKS.LoginServer},
		}

	default:
		// Unknown provider: fail fast inside the pod with a clear error.
		// NOTE(review): validateContainerRegistryWIF presumably rejects this
		// case before a CronJob is ever created — verify.
		image = "busybox:1.36"
		shell = "/bin/sh"
		script = `echo "ERROR: Unknown workload identity provider"; exit 1`
		env = []corev1.EnvVar{}
	}

	return corev1.Container{
		Name:            "generate-registry-creds",
		Image:           image,
		ImagePullPolicy: corev1.PullIfNotPresent,
		Command:         []string{shell, "-c", script},
		Env:             env,
		// "docker-config" is shared with the scan container; "temp" gives the
		// CLI tools a writable HOME on an otherwise read-only root filesystem.
		VolumeMounts: []corev1.VolumeMount{
			{Name: "docker-config", MountPath: "/etc/opt/mondoo/docker"},
			{Name: "temp", MountPath: "/tmp"},
		},
		Resources: corev1.ResourceRequirements{
			Requests: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("50m"),
				corev1.ResourceMemory: resource.MustParse("64Mi"),
			},
			Limits: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("200m"),
				corev1.ResourceMemory: resource.MustParse("256Mi"),
			},
		},
		TerminationMessagePath:   "/dev/termination-log",
		TerminationMessagePolicy: corev1.TerminationMessageReadFile,
		// Locked-down security context: non-root, read-only root fs, no
		// privilege escalation, all capabilities dropped.
		SecurityContext: &corev1.SecurityContext{
			AllowPrivilegeEscalation: ptr.To(false),
			ReadOnlyRootFilesystem:   ptr.To(true),
			RunAsNonRoot:             ptr.To(true),
			RunAsUser:                ptr.To(int64(101)),
			Capabilities: &corev1.Capabilities{
				Drop: []corev1.Capability{"ALL"},
			},
		},
	}
}
52 changes: 44 additions & 8 deletions controllers/k8s_scan/deployment_handler.go
Original file line number Diff line number Diff line change
Expand Up @@ -141,6 +141,11 @@ type DeploymentHandler struct {
}

func (n *DeploymentHandler) Reconcile(ctx context.Context) (ctrl.Result, error) {
// Clean up CronJobs with stale names (from old naming schemes)
if err := n.cleanupStaleCronJobs(ctx); err != nil {
return ctrl.Result{}, err
Comment thread
mondoo-code-review[bot] marked this conversation as resolved.
}

hasExternalClusters := len(n.Mondoo.Spec.KubernetesResources.ExternalClusters) > 0

if !n.Mondoo.Spec.KubernetesResources.Enable {
Expand Down Expand Up @@ -399,10 +404,7 @@ func (n *DeploymentHandler) cleanupOrphanedExternalClusterResources(ctx context.
cronJobs := &batchv1.CronJobList{}
listOpts := &client.ListOptions{
Namespace: n.Mondoo.Namespace,
LabelSelector: labels.SelectorFromSet(map[string]string{
"app": "mondoo-k8s-scan",
"mondoo_cr": n.Mondoo.Name,
}),
LabelSelector: labels.SelectorFromSet(CronJobLabels(*n.Mondoo)),
}
if err := n.KubeClient.List(ctx, cronJobs, listOpts); err != nil {
return err
Expand Down Expand Up @@ -568,10 +570,7 @@ func (n *DeploymentHandler) garbageCollectIfNeeded(ctx context.Context, clusterU
cronJobs := &batchv1.CronJobList{}
listOpts := &client.ListOptions{
Namespace: n.Mondoo.Namespace,
LabelSelector: labels.SelectorFromSet(map[string]string{
"app": "mondoo-k8s-scan",
"mondoo_cr": n.Mondoo.Name,
}),
LabelSelector: labels.SelectorFromSet(CronJobLabels(*n.Mondoo)),
}
if err := n.KubeClient.List(ctx, cronJobs, listOpts); err != nil {
logger.Error(err, "Failed to list CronJobs for garbage collection")
Expand Down Expand Up @@ -718,3 +717,40 @@ func (n *DeploymentHandler) syncWIFServiceAccount(ctx context.Context, cluster v

return nil
}

// cleanupStaleCronJobs removes CronJobs from old naming schemes by label selection.
// CronJobs belonging to removed external clusters are skipped here — cleanupOrphanedExternalClusterResources
// handles those so it can also clean up associated ConfigMaps, ServiceAccounts, and Secrets.
func (n *DeploymentHandler) cleanupStaleCronJobs(ctx context.Context) error {
cronJobs := &batchv1.CronJobList{}
listOpts := &client.ListOptions{
Namespace: n.Mondoo.Namespace,
LabelSelector: labels.SelectorFromSet(CronJobLabels(*n.Mondoo)),
}
if err := n.KubeClient.List(ctx, cronJobs, listOpts); err != nil {
return err
}

expected := map[string]bool{
CronJobName(n.Mondoo.Name): true,
}
configuredClusters := make(map[string]bool)
for _, cluster := range n.Mondoo.Spec.KubernetesResources.ExternalClusters {
expected[ExternalClusterCronJobName(n.Mondoo.Name, cluster.Name)] = true
configuredClusters[cluster.Name] = true
}

for i := range cronJobs.Items {
if expected[cronJobs.Items[i].Name] {
continue
}
if clusterName, ok := cronJobs.Items[i].Labels["cluster_name"]; ok && !configuredClusters[clusterName] {
Comment thread
mondoo-code-review[bot] marked this conversation as resolved.
continue
}
logger.Info("Deleting stale k8s scan CronJob", "name", cronJobs.Items[i].Name)
if err := k8s.DeleteIfExists(ctx, n.KubeClient, &cronJobs.Items[i]); err != nil {
return err
}
}
return nil
}
8 changes: 4 additions & 4 deletions controllers/k8s_scan/deployment_handler_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -1530,11 +1530,11 @@ func TestExternalClusterNaming(t *testing.T) {
t.Run(tt.prefix+"-"+tt.clusterName, func(t *testing.T) {
// Test CronJob name
cronJobName := ExternalClusterCronJobName(tt.prefix, tt.clusterName)
if !strings.HasPrefix(cronJobName, tt.prefix) {
t.Errorf("CronJob name should start with prefix %q, got %q", tt.prefix, cronJobName)
if !strings.HasPrefix(cronJobName, "mondoo-k8s-scan-") {
t.Errorf("CronJob name should start with %q, got %q", "mondoo-k8s-scan-", cronJobName)
}
if !strings.HasSuffix(cronJobName, tt.clusterName) {
t.Errorf("CronJob name should end with cluster name %q, got %q", tt.clusterName, cronJobName)
if !strings.Contains(cronJobName, tt.clusterName) && len(cronJobName) < 52 {
t.Errorf("CronJob name should contain cluster name %q when not truncated, got %q", tt.clusterName, cronJobName)
}

// Test ConfigMap name
Expand Down
Loading
Loading