diff --git a/Makefile b/Makefile
index 46148fc46..08132ad1a 100644
--- a/Makefile
+++ b/Makefile
@@ -54,6 +54,9 @@ helm-unittest:
test-e2e: controller-image audit-scanner-image policy-server-image
$(GO_BUILD_ENV) go test ./e2e/ -v
+.PHONY: test-all
+test-all: test helm-unittest test-e2e
+
.PHONY: fmt-go
fmt-go:
$(GO_BUILD_ENV) go fmt ./...
diff --git a/api/policies/v1/factories.go b/api/policies/v1/factories.go
index 1aff49213..743da4212 100644
--- a/api/policies/v1/factories.go
+++ b/api/policies/v1/factories.go
@@ -487,6 +487,9 @@ type PolicyServerBuilder struct {
requests corev1.ResourceList
sigstoreTrustConfigMap string
namespacedPoliciesCapabilities []string
+ webhookPort *int32
+ readinessProbePort *int32
+ metricsPort *int32
}
func NewPolicyServerFactory() *PolicyServerBuilder {
@@ -535,6 +538,21 @@ func (f *PolicyServerBuilder) WithNamespacedPoliciesCapabilities(capabilities []
return f
}
+func (f *PolicyServerBuilder) WithWebhookPort(port int32) *PolicyServerBuilder {
+ f.webhookPort = &port
+ return f
+}
+
+func (f *PolicyServerBuilder) WithReadinessProbePort(port int32) *PolicyServerBuilder {
+ f.readinessProbePort = &port
+ return f
+}
+
+func (f *PolicyServerBuilder) WithMetricsPort(port int32) *PolicyServerBuilder {
+ f.metricsPort = &port
+ return f
+}
+
func (f *PolicyServerBuilder) Build() *PolicyServer {
policyServer := PolicyServer{
ObjectMeta: metav1.ObjectMeta{
@@ -559,6 +577,9 @@ func (f *PolicyServerBuilder) Build() *PolicyServer {
Requests: f.requests,
SigstoreTrustConfig: f.sigstoreTrustConfigMap,
NamespacedPoliciesCapabilities: f.namespacedPoliciesCapabilities,
+ WebhookPort: f.webhookPort,
+ ReadinessProbePort: f.readinessProbePort,
+ MetricsPort: f.metricsPort,
Env: []corev1.EnvVar{
{
Name: "KUBEWARDEN_LOG_LEVEL",
diff --git a/api/policies/v1/policyserver_types.go b/api/policies/v1/policyserver_types.go
index 9f51172e8..41925ccfa 100644
--- a/api/policies/v1/policyserver_types.go
+++ b/api/policies/v1/policyserver_types.go
@@ -150,6 +150,44 @@ type PolicyServerSpec struct {
// - Specific capability paths (e.g. "oci/v1/verify", "net/v1/dns_lookup_host")
// +optional
NamespacedPoliciesCapabilities []string `json:"namespacedPoliciesCapabilities,omitempty"`
+
+ // Port where the policy server listens for incoming webhook requests.
+ // When unset, defaults to 8443. This is the port the Kubernetes API server
+ // reaches when evaluating admission requests.
+ // +optional
+ // +kubebuilder:validation:Minimum=1
+ // +kubebuilder:validation:Maximum=65535
+ WebhookPort *int32 `json:"webhookPort,omitempty"`
+
+ // Port used by the policy server to expose the readiness probe endpoint.
+ // When unset, defaults to 8081.
+ // +optional
+ // +kubebuilder:validation:Minimum=1
+ // +kubebuilder:validation:Maximum=65535
+ ReadinessProbePort *int32 `json:"readinessProbePort,omitempty"`
+
+ // Port exposed by the metrics Service for this policy server.
+ // When unset, defaults to the controller-wide default
+ // (KUBEWARDEN_POLICY_SERVER_SERVICES_METRICS_PORT env var, or 8080).
+ // Only relevant when metrics are enabled.
+ //
+ // Use this field to customize which port Prometheus scrapes for this
+ // PolicyServer's metrics Service (e.g. to match naming conventions or
+ // avoid Service-level port collisions).
+ //
+ // NOTE: this field controls only the Service Port (the externally visible
+ // scrape port). The Service TargetPort — the port the pod actually listens
+ // on — is always the controller-wide default and is not affected by this
+ // field. This is intentional: when the OpenTelemetry sidecar mode is
+ // enabled, each pod gets its own injected sidecar, but the pod-side
+ // Prometheus listener port is determined by controller-wide/injection
+ // configuration, not per PolicyServer. Therefore, changing this field does
+ // not change the pod listener port and will not resolve pod-port conflicts
+ // such as those caused by hostNetwork.
+ // +optional
+ // +kubebuilder:validation:Minimum=1
+ // +kubebuilder:validation:Maximum=65535
+ MetricsPort *int32 `json:"metricsPort,omitempty"`
}
type ReconciliationTransitionReason string
@@ -222,6 +260,34 @@ func (ps *PolicyServer) AppLabel() string {
return "kubewarden-" + ps.NameWithPrefix()
}
+// EffectiveWebhookPort returns the port the policy server listens on for
+// admission webhook requests, using the CRD field when set or the default constant.
+func (ps *PolicyServer) EffectiveWebhookPort() int32 {
+ if ps.Spec.WebhookPort != nil {
+ return *ps.Spec.WebhookPort
+ }
+ return constants.PolicyServerListenPort
+}
+
+// EffectiveReadinessProbePort returns the port used for the readiness probe,
+// using the CRD field when set or the default constant.
+func (ps *PolicyServer) EffectiveReadinessProbePort() int32 {
+ if ps.Spec.ReadinessProbePort != nil {
+ return *ps.Spec.ReadinessProbePort
+ }
+ return constants.PolicyServerReadinessProbePort
+}
+
+// EffectiveMetricsPort returns the port used to expose the metrics endpoint.
+// It returns the CRD-level override when set, otherwise it falls back to defaultPort
+// (which is typically the controller-wide default configured via environment variable).
+func (ps *PolicyServer) EffectiveMetricsPort(defaultPort int32) int32 {
+ if ps.Spec.MetricsPort != nil {
+ return *ps.Spec.MetricsPort
+ }
+ return defaultPort
+}
+
// CommonLabels returns the common labels to be used with the resources
// associated to a Policy Server. The labels defined follow
// Kubernetes guidelines: https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/#labels
diff --git a/api/policies/v1/policyserver_webhook.go b/api/policies/v1/policyserver_webhook.go
index 0b7cb63e2..184120bb4 100644
--- a/api/policies/v1/policyserver_webhook.go
+++ b/api/policies/v1/policyserver_webhook.go
@@ -175,6 +175,7 @@ func (v *policyServerValidator) validate(ctx context.Context, policyServer *Poli
allErrs = append(allErrs, validateLimitsAndRequests(policyServer.Spec.Limits, policyServer.Spec.Requests)...)
allErrs = append(allErrs, validateNamespacedPoliciesCapabilities(policyServer.Spec.NamespacedPoliciesCapabilities)...)
+ allErrs = append(allErrs, v.validatePorts(policyServer)...)
if len(allErrs) == 0 {
return nil
@@ -274,7 +275,6 @@ func validateNamespacedPoliciesCapabilities(capabilities []string) field.ErrorLi
allErrs = append(allErrs, err)
}
}
-
return allErrs
}
@@ -327,3 +327,24 @@ func validateSingleCapability(pattern string, path *field.Path) *field.Error {
fmt.Sprintf("%q is not a complete capability path; use %q to allow all capabilities under it",
pattern, pattern+"/*"))
}
+
+// validatePorts checks that the port fields in the PolicyServer spec do not
+// conflict with each other. Only pod-side ports (webhookPort, readinessProbePort)
+// are validated against each other. spec.metricsPort is a Service-layer-only
+// setting and cannot conflict with pod-side ports.
+func (v *policyServerValidator) validatePorts(policyServer *PolicyServer) field.ErrorList {
+ var allErrs field.ErrorList
+
+ webhookPort := policyServer.EffectiveWebhookPort()
+ readinessPort := policyServer.EffectiveReadinessProbePort()
+
+ if webhookPort == readinessPort {
+ allErrs = append(allErrs, field.Invalid(
+ field.NewPath("spec").Child("readinessProbePort"),
+ readinessPort,
+ fmt.Sprintf("readinessProbePort must differ from webhookPort (%d)", webhookPort),
+ ))
+ }
+
+ return allErrs
+}
diff --git a/api/policies/v1/policyserver_webhook_test.go b/api/policies/v1/policyserver_webhook_test.go
index 26374c1ad..ec5816f86 100644
--- a/api/policies/v1/policyserver_webhook_test.go
+++ b/api/policies/v1/policyserver_webhook_test.go
@@ -412,3 +412,76 @@ func TestPolicyServerValidateNamespacedPoliciesCapabilities(t *testing.T) {
})
}
}
+
+func TestValidatePorts(t *testing.T) {
+ tests := []struct {
+ name string
+ webhookPort *int32
+ readiness *int32
+ metrics *int32
+ errContains string
+ }{
+ {
+ name: "all defaults, no conflict",
+ errContains: "",
+ },
+ {
+ name: "webhookPort equals readinessProbePort",
+ webhookPort: ptr.To[int32](8081),
+ readiness: ptr.To[int32](8081),
+ errContains: "readinessProbePort must differ from webhookPort",
+ },
+ {
+ name: "webhookPort and readinessProbePort distinct custom values",
+ webhookPort: ptr.To[int32](9443),
+ readiness: ptr.To[int32](9081),
+ errContains: "",
+ },
+ {
+ // metricsPort is a Service-layer-only setting and cannot conflict
+ // with pod-side ports (webhookPort, readinessProbePort).
+ name: "metricsPort equal to webhookPort is allowed",
+ webhookPort: ptr.To[int32](8080),
+ metrics: ptr.To[int32](8080),
+ errContains: "",
+ },
+ {
+ // metricsPort is a Service-layer-only setting and cannot conflict
+ // with pod-side ports (webhookPort, readinessProbePort).
+ name: "metricsPort equal to readinessProbePort is allowed",
+ readiness: ptr.To[int32](9000),
+ metrics: ptr.To[int32](9000),
+ errContains: "",
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ k8sClient := fake.NewClientBuilder().Build()
+ builder := NewPolicyServerFactory()
+ if test.webhookPort != nil {
+ builder = builder.WithWebhookPort(*test.webhookPort)
+ }
+ if test.readiness != nil {
+ builder = builder.WithReadinessProbePort(*test.readiness)
+ }
+ if test.metrics != nil {
+ builder = builder.WithMetricsPort(*test.metrics)
+ }
+ policyServer := builder.Build()
+
+ validator := policyServerValidator{
+ deploymentsNamespace: "default",
+ k8sClient: k8sClient,
+ logger: logr.Discard(),
+ }
+ err := validator.validate(t.Context(), policyServer)
+
+ if test.errContains != "" {
+ require.ErrorContains(t, err, test.errContains)
+ } else {
+ require.NoError(t, err)
+ }
+ })
+ }
+}
diff --git a/api/policies/v1/zz_generated.deepcopy.go b/api/policies/v1/zz_generated.deepcopy.go
index 121639f0f..c710a3825 100644
--- a/api/policies/v1/zz_generated.deepcopy.go
+++ b/api/policies/v1/zz_generated.deepcopy.go
@@ -725,6 +725,21 @@ func (in *PolicyServerBuilder) DeepCopyInto(out *PolicyServerBuilder) {
*out = make([]string, len(*in))
copy(*out, *in)
}
+ if in.webhookPort != nil {
+ in, out := &in.webhookPort, &out.webhookPort
+ *out = new(int32)
+ **out = **in
+ }
+ if in.readinessProbePort != nil {
+ in, out := &in.readinessProbePort, &out.readinessProbePort
+ *out = new(int32)
+ **out = **in
+ }
+ if in.metricsPort != nil {
+ in, out := &in.metricsPort, &out.metricsPort
+ *out = new(int32)
+ **out = **in
+ }
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyServerBuilder.
@@ -870,6 +885,21 @@ func (in *PolicyServerSpec) DeepCopyInto(out *PolicyServerSpec) {
*out = make([]string, len(*in))
copy(*out, *in)
}
+ if in.WebhookPort != nil {
+ in, out := &in.WebhookPort, &out.WebhookPort
+ *out = new(int32)
+ **out = **in
+ }
+ if in.ReadinessProbePort != nil {
+ in, out := &in.ReadinessProbePort, &out.ReadinessProbePort
+ *out = new(int32)
+ **out = **in
+ }
+ if in.MetricsPort != nil {
+ in, out := &in.MetricsPort, &out.MetricsPort
+ *out = new(int32)
+ **out = **in
+ }
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyServerSpec.
diff --git a/charts/kubewarden-controller/templates/NOTES.txt b/charts/kubewarden-controller/templates/NOTES.txt
index 17391bfeb..9d9824a64 100644
--- a/charts/kubewarden-controller/templates/NOTES.txt
+++ b/charts/kubewarden-controller/templates/NOTES.txt
@@ -11,3 +11,11 @@ If you'd like to support us, we'd love to hear from you as one of our adopters.
Adopters can be public or private.
Learn how to add your organization as a Kubewarden adopter by checking out the ADOPTERS.md file here: https://github.com/kubewarden/community/blob/main/ADOPTERS.md
+
+{{ if .Values.hostNetwork }}
+⚠️ WARNING ⚠️
+Host Network is enabled. Ensure you set appropriate podAntiAffinity rules to prevent host port conflicts between controller replicas on the same node.
+{{ if eq .Values.telemetry.mode "sidecar" }}
+Telemetry sidecar mode (telemetry.mode=sidecar) is incompatible with host network. This chart rejects that configuration: rendering/installation will fail when hostNetwork=true and telemetry.mode=sidecar while telemetry metrics or tracing are enabled. Use telemetry.mode=custom with a remote collector instead, or disable hostNetwork.
+{{- end }}
+{{- end }}
diff --git a/charts/kubewarden-controller/templates/_helpers.tpl b/charts/kubewarden-controller/templates/_helpers.tpl
index 0ce33913c..8044d3f42 100644
--- a/charts/kubewarden-controller/templates/_helpers.tpl
+++ b/charts/kubewarden-controller/templates/_helpers.tpl
@@ -181,3 +181,31 @@ are configured.
- {{ .Values.auditScanner.reportCRDsKind }}
{{- end -}}
{{- end -}}
+
+{{/*
+Compute the effective affinity for the controller deployment.
+Uses the controller-specific affinity if set, otherwise falls back to
+global.affinity for backward compatibility.
+
+NOTE: When hostNetwork is enabled, users are responsible for setting
+appropriate podAntiAffinity rules to prevent host-port conflicts between
+controller replicas on the same node.
+*/}}
+{{- define "kubewarden-controller.effectiveAffinity" -}}
+{{- if .Values.affinity -}}
+ {{- toYaml .Values.affinity -}}
+{{- else if .Values.global.affinity -}}
+ {{- toYaml .Values.global.affinity -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Validate that hostNetwork and telemetry sidecar mode are not both enabled.
+They are incompatible because multiple OTel sidecars on the same node would
+cause port conflicts in host-network mode.
+*/}}
+{{- define "kubewarden-controller.validateHostNetworkSidecar" -}}
+{{- if and .Values.hostNetwork (eq .Values.telemetry.mode "sidecar") (or .Values.telemetry.metrics .Values.telemetry.tracing) -}}
+{{- fail "hostNetwork and telemetry.mode=sidecar are incompatible: OpenTelemetry sidecar injection causes port conflicts in host-network mode. Use telemetry.mode=custom with a remote collector instead." -}}
+{{- end -}}
+{{- end -}}
diff --git a/charts/kubewarden-controller/templates/deployment.yaml b/charts/kubewarden-controller/templates/deployment.yaml
index 12309f176..4445b11a0 100644
--- a/charts/kubewarden-controller/templates/deployment.yaml
+++ b/charts/kubewarden-controller/templates/deployment.yaml
@@ -1,3 +1,4 @@
+{{- include "kubewarden-controller.validateHostNetworkSidecar" . -}}
apiVersion: apps/v1
kind: Deployment
metadata:
@@ -19,7 +20,8 @@ spec:
{{ . | quote }}: {{ get $.Values.podAnnotations . | quote}}
{{- end }}
{{- if or .Values.telemetry.metrics .Values.telemetry.tracing }}
- {{- if eq .Values.telemetry.mode "sidecar" }}
+ {{- /* OTel sidecar annotation is incompatible with hostNetwork due to port conflicts */ -}}
+ {{- if and (eq .Values.telemetry.mode "sidecar") (not .Values.hostNetwork) }}
"sidecar.opentelemetry.io/inject": "true"
{{- end }}
{{- end }}
@@ -32,8 +34,9 @@ spec:
{{- include "imagePullSecrets" .Values.imagePullSecrets | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "kubewarden-controller.serviceAccountName" . }}
- {{- if .Values.global.affinity }}
- affinity: {{ .Values.global.affinity | toYaml | nindent 8 }}
+ {{- $effectiveAffinity := include "kubewarden-controller.effectiveAffinity" . }}
+ {{- if $effectiveAffinity }}
+ affinity: {{ $effectiveAffinity | nindent 8 }}
{{- end }}
{{- if .Values.global.tolerations }}
tolerations: {{ .Values.global.tolerations | toYaml | nindent 8 }}
@@ -45,12 +48,24 @@ spec:
{{- if .Values.global.priorityClassName }}
priorityClassName: {{ .Values.global.priorityClassName | toYaml | nindent 8 }}
{{- end }}
+ {{- if .Values.hostNetwork }}
+ hostNetwork: true
+ dnsPolicy: ClusterFirstWithHostNet
+ {{- end }}
containers:
- name: controller
args:
- --leader-elect
- --deployments-namespace={{ .Release.Namespace }}
- --webhook-service-name={{ include "kubewarden-controller.fullname" . }}-webhook-service
+ - --webhook-server-port={{ .Values.ports.webhook }}
+ - --health-probe-bind-address=:{{ .Values.ports.healthProbe }}
+ {{- if .Values.telemetry.metrics}}
+ - --metrics-bind-address=:{{ .Values.ports.metrics }}
+ {{- else }}
# Port "0" disables the controller-runtime metrics port binding
+ - --metrics-bind-address=0
+ {{- end}}
{{- if .Values.alwaysAcceptAdmissionReviewsOnDeploymentsNamespace }}
- --always-accept-admission-reviews-on-deployments-namespace
{{- end }}
@@ -62,8 +77,12 @@ spec:
{{- if $imagePullSecretNames }}
- --image-pull-secrets={{ $imagePullSecretNames }}
{{- end }}
+ {{- if .Values.hostNetwork }}
+ - --host-network
+ {{- end }}
{{- if or .Values.telemetry.metrics .Values.telemetry.tracing }}
- {{- if eq .Values.telemetry.mode "sidecar" }}
+ {{- /* OTel sidecar is incompatible with hostNetwork due to port conflicts */ -}}
+ {{- if and (eq .Values.telemetry.mode "sidecar") (not .Values.hostNetwork) }}
- --enable-otel-sidecar
{{- end }}
{{- if .Values.telemetry.metrics }}
@@ -117,13 +136,13 @@ spec:
livenessProbe:
httpGet:
path: /healthz
- port: 8081
+ port: {{ .Values.ports.healthProbe }}
initialDelaySeconds: 15
periodSeconds: 20
readinessProbe:
httpGet:
path: /readyz
- port: 8081
+ port: {{ .Values.ports.healthProbe }}
initialDelaySeconds: 5
periodSeconds: 10
{{- if and .Values.resources .Values.resources.controller }}
@@ -154,7 +173,7 @@ spec:
readOnly: true
{{- end }}
ports:
- - containerPort: 9443
+ - containerPort: {{ .Values.ports.webhook }}
name: webhook-server
protocol: TCP
volumes:
diff --git a/charts/kubewarden-controller/templates/service.yaml b/charts/kubewarden-controller/templates/service.yaml
index 68e515f68..8682f73de 100644
--- a/charts/kubewarden-controller/templates/service.yaml
+++ b/charts/kubewarden-controller/templates/service.yaml
@@ -1,3 +1,4 @@
+{{- if .Values.telemetry.metrics }}
---
apiVersion: v1
kind: Service
@@ -10,16 +11,12 @@ metadata:
{{- include "kubewarden-controller.annotations" . | nindent 4 }}
spec:
ports:
- {{- if .Values.telemetry.metrics }}
- name: metrics
port: 8080
- targetPort: 8080
- {{- end}}
- - name: https
- port: 8443
- targetPort: https
+ targetPort: {{ .Values.ports.metrics }}
selector:
{{- include "kubewarden-controller.selectorLabels" . | nindent 4 }}
+{{- end}}
---
apiVersion: v1
kind: Service
@@ -33,6 +30,6 @@ metadata:
spec:
ports:
- port: 443
- targetPort: 9443
+ targetPort: {{ .Values.ports.webhook }}
selector:
{{- include "kubewarden-controller.selectorLabels" . | nindent 4 }}
diff --git a/charts/kubewarden-controller/tests/host_network_test.yaml b/charts/kubewarden-controller/tests/host_network_test.yaml
new file mode 100644
index 000000000..0ba218f67
--- /dev/null
+++ b/charts/kubewarden-controller/tests/host_network_test.yaml
@@ -0,0 +1,165 @@
+suite: host-network configuration
+templates:
+ - deployment.yaml
+tests:
+ - it: "should not set hostNetwork and should not pass --host-network flag by default"
+ asserts:
+ - notExists:
+ path: spec.template.spec.hostNetwork
+ - notExists:
+ path: spec.template.spec.dnsPolicy
+ - notContains:
+ path: spec.template.spec.containers[0].args
+ content: "--host-network"
+ any: true
+
+ - it: "should set hostNetwork, dnsPolicy and pass --host-network flag when hostNetwork is true"
+ set:
+ hostNetwork: true
+ asserts:
+ - equal:
+ path: spec.template.spec.hostNetwork
+ value: true
+ - equal:
+ path: spec.template.spec.dnsPolicy
+ value: ClusterFirstWithHostNet
+ - contains:
+ path: spec.template.spec.containers[0].args
+ content: "--host-network"
+
+ - it: "should not set affinity when hostNetwork is true and no affinity is configured"
+ set:
+ hostNetwork: true
+ asserts:
+ - notExists:
+ path: spec.template.spec.affinity
+
+ - it: "should not set affinity when hostNetwork is false and no affinity is set"
+ asserts:
+ - notExists:
+ path: spec.template.spec.affinity
+
+ - it: "should use controller-specific affinity when set"
+ set:
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchLabels:
+ app.kubernetes.io/name: kubewarden-controller
+ topologyKey: kubernetes.io/hostname
+ asserts:
+ - equal:
+ path: spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[0].topologyKey
+ value: kubernetes.io/hostname
+ - equal:
+ path: spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[0].labelSelector.matchLabels["app.kubernetes.io/name"]
+ value: kubewarden-controller
+
+ - it: "should fall back to global.affinity when controller-specific affinity is not set"
+ set:
+ global.affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: kubernetes.io/os
+ operator: In
+ values:
+ - linux
+ asserts:
+ - equal:
+ path: spec.template.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].key
+ value: kubernetes.io/os
+
+ - it: "should use custom ports when overridden in values"
+ set:
+ ports.webhook: 10443
+ ports.healthProbe: 10081
+ asserts:
+ - equal:
+ path: spec.template.spec.containers[0].ports[0].containerPort
+ value: 10443
+ - equal:
+ path: spec.template.spec.containers[0].livenessProbe.httpGet.port
+ value: 10081
+ - equal:
+ path: spec.template.spec.containers[0].readinessProbe.httpGet.port
+ value: 10081
+ - contains:
+ path: spec.template.spec.containers[0].args
+ content:
+ --webhook-server-port=10443
+ - contains:
+ path: spec.template.spec.containers[0].args
+ content:
+ --health-probe-bind-address=:10081
+ - contains:
+ path: spec.template.spec.containers[0].args
+ content:
+ --metrics-bind-address=0
+
+ - it: "should fail when hostNetwork is true and telemetry sidecar mode is configured"
+ set:
+ hostNetwork: true
+ telemetry:
+ mode: "sidecar"
+ metrics: true
+ tracing: true
+ asserts:
+ - failedTemplate:
+ errorMessage: "hostNetwork and telemetry.mode=sidecar are incompatible: OpenTelemetry sidecar injection causes port conflicts in host-network mode. Use telemetry.mode=custom with a remote collector instead."
+
+ - it: "should not affect custom telemetry mode when hostNetwork is true"
+ set:
+ hostNetwork: true
+ telemetry:
+ mode: "custom"
+ metrics: true
+ tracing: true
+ custom:
+ endpoint: "https://remote-collector:4317"
+ insecure: true
+ asserts:
+ - equal:
+ path: spec.template.spec.hostNetwork
+ value: true
+ - contains:
+ path: spec.template.spec.containers[0].args
+ content: "--host-network"
+ - notContains:
+ path: spec.template.spec.containers[0].args
+ content: "--enable-otel-sidecar"
+ - contains:
+ path: spec.template.spec.containers[0].args
+ content: "--enable-metrics"
+ - contains:
+ path: spec.template.spec.containers[0].args
+ content: "--enable-tracing"
+ - contains:
+ path: spec.template.spec.containers[0].env
+ content:
+ name: OTEL_EXPORTER_OTLP_ENDPOINT
+ value: "https://remote-collector:4317"
+
+ - it: "should enable otel sidecar when hostNetwork is not set and sidecar telemetry is configured"
+ set:
+ telemetry:
+ mode: "sidecar"
+ metrics: true
+ asserts:
+ - notExists:
+ path: spec.template.spec.hostNetwork
+ - notContains:
+ path: spec.template.spec.containers[0].args
+ content: "--host-network"
+ - contains:
+ path: spec.template.spec.containers[0].args
+ content: "--enable-otel-sidecar"
+ - contains:
+ path: spec.template.spec.containers[0].args
+ content: "--enable-metrics"
+ - isSubset:
+ path: spec.template.metadata.annotations
+ content:
+ "sidecar.opentelemetry.io/inject": "true"
diff --git a/charts/kubewarden-controller/tests/service_ports_test.yaml b/charts/kubewarden-controller/tests/service_ports_test.yaml
new file mode 100644
index 000000000..e05eb18a5
--- /dev/null
+++ b/charts/kubewarden-controller/tests/service_ports_test.yaml
@@ -0,0 +1,68 @@
+suite: service ports configuration
+templates:
+ - service.yaml
+tests:
+ - it: "should use default ports in services"
+ set:
+ telemetry.metrics: true
+ asserts:
+ # Webhook service (documentIndex 1)
+ - equal:
+ path: spec.ports[0].port
+ value: 443
+ documentIndex: 1
+ - equal:
+ path: spec.ports[0].targetPort
+ value: 9443
+ documentIndex: 1
+ # Metrics service (documentIndex 0) - single metrics port
+ - equal:
+ path: spec.ports[0].name
+ value: metrics
+ documentIndex: 0
+ - equal:
+ path: spec.ports[0].port
+ value: 8080
+ documentIndex: 0
+ - equal:
+ path: spec.ports[0].targetPort
+ value: 8088
+ documentIndex: 0
+
+ - it: "should use custom webhook port in webhook service"
+ set:
+ ports.webhook: 10443
+ telemetry.metrics: true
+ asserts:
+ # Webhook service
+ - equal:
+ path: spec.ports[0].targetPort
+ value: 10443
+ documentIndex: 1
+
+ - it: "should use custom metrics port in metrics service"
+ set:
+ ports.metrics: 10088
+ telemetry.metrics: true
+ asserts:
+ # Metrics service - metrics port
+ - equal:
+ path: spec.ports[0].targetPort
+ value: 10088
+ documentIndex: 0
+
+ - it: "should not render metrics service when telemetry is disabled"
+ set:
+ telemetry.metrics: false
+ asserts:
+ # Only the webhook service is rendered
+ - hasDocuments:
+ count: 1
+ - equal:
+ path: spec.ports[0].port
+ value: 443
+ documentIndex: 0
+ - equal:
+ path: spec.ports[0].targetPort
+ value: 9443
+ documentIndex: 0
diff --git a/charts/kubewarden-controller/values.schema.json b/charts/kubewarden-controller/values.schema.json
index 054d9bd5f..fdb61aa81 100644
--- a/charts/kubewarden-controller/values.schema.json
+++ b/charts/kubewarden-controller/values.schema.json
@@ -11,6 +11,9 @@
"additionalLabels": {
"type": "object"
},
+ "affinity": {
+ "type": "object"
+ },
"alwaysAcceptAdmissionReviewsOnDeploymentsNamespace": {
"type": "boolean"
},
@@ -133,6 +136,9 @@
}
}
},
+ "hostNetwork": {
+ "type": "boolean"
+ },
"image": {
"type": "object",
"properties": {
@@ -234,6 +240,20 @@
}
}
},
+ "ports": {
+ "type": "object",
+ "properties": {
+ "healthProbe": {
+ "type": "integer"
+ },
+ "metrics": {
+ "type": "integer"
+ },
+ "webhook": {
+ "type": "integer"
+ }
+ }
+ },
"preDeleteHook": {
"type": "object",
"properties": {
diff --git a/charts/kubewarden-controller/values.yaml b/charts/kubewarden-controller/values.yaml
index e297b31fa..4989ce34d 100644
--- a/charts/kubewarden-controller/values.yaml
+++ b/charts/kubewarden-controller/values.yaml
@@ -151,6 +151,42 @@ preDeleteHook:
# evaluations that could interfere with the Kubewarden stack running in the
# admission controller namespace.
alwaysAcceptAdmissionReviewsOnDeploymentsNamespace: true
+# affinity configures affinity rules for the controller pod.
+# This takes precedence over global.affinity when set.
+# When hostNetwork is enabled, users should set appropriate podAntiAffinity
+# rules here to prevent host-port conflicts between controller replicas.
+# affinity:
+# podAntiAffinity:
+# requiredDuringSchedulingIgnoredDuringExecution:
+# - labelSelector:
+# matchLabels:
+# app.kubernetes.io/name: kubewarden-controller
+# topologyKey: kubernetes.io/hostname
+affinity: {}
+# hostNetwork enables host network mode for the controller pod and all
+# PolicyServer pods managed by this controller.
+# WARNING: enabling this increases the attack surface by exposing webhook
+# endpoints on the host network and giving pods visibility of all node network
+# interfaces. Use only when the Kubernetes API server cannot reach pod-network
+# webhook endpoints (e.g. clusters using a non-VPC CNI with NAT).
+# WARNING: When hostNetwork is enabled, users are responsible for setting
+# appropriate podAntiAffinity rules to prevent host-port conflicts between
+# controller replicas on the same node.
+# NOTE: hostNetwork mode is incompatible with telemetry sidecar mode
+# (telemetry.mode=sidecar). This combination is rejected; sidecar injection
+# is not automatically disabled. Use telemetry.mode=custom with a remote
+# collector instead.
+hostNetwork: false
+# ports configures the network ports used by the controller pod.
+# These are especially important when hostNetwork is enabled, because each port
+# is bound on the host network interface and must not clash with other workloads.
+ports:
+ # Port the controller webhook server listens on.
+ webhook: 9443
+ # Port for the controller liveness and readiness probes.
+ healthProbe: 8081
+ # Port for the controller Prometheus metrics endpoint.
+ metrics: 8088
# Verbosity of logging. Can be one of 'debug', 'info', 'error'.
logLevel: info
# open-telemetry options
diff --git a/charts/kubewarden-crds/templates/policies.kubewarden.io_policyservers.yaml b/charts/kubewarden-crds/templates/policies.kubewarden.io_policyservers.yaml
index d81067277..2ace927e9 100644
--- a/charts/kubewarden-crds/templates/policies.kubewarden.io_policyservers.yaml
+++ b/charts/kubewarden-crds/templates/policies.kubewarden.io_policyservers.yaml
@@ -1164,6 +1164,30 @@ spec:
eviction. The value can be an absolute number or a percentage. Only one of
MinAvailable or Max MaxUnavailable can be set.
x-kubernetes-int-or-string: true
+ metricsPort:
+ description: |-
+ Port exposed by the metrics Service for this policy server.
+ When unset, defaults to the controller-wide default
+ (KUBEWARDEN_POLICY_SERVER_SERVICES_METRICS_PORT env var, or 8080).
+ Only relevant when metrics are enabled.
+
+ Use this field to customize which port Prometheus scrapes for this
+ PolicyServer's metrics Service (e.g. to match naming conventions or
+ avoid Service-level port collisions).
+
+ NOTE: this field controls only the Service Port (the externally visible
+ scrape port). The Service TargetPort — the port the pod actually listens
+ on — is always the controller-wide default and is not affected by this
+ field. This is intentional: when the OpenTelemetry sidecar mode is
+ enabled, each pod gets its own injected sidecar, but the pod-side
+ Prometheus listener port is determined by controller-wide/injection
+ configuration, not per PolicyServer. Therefore, changing this field does
+ not change the pod listener port and will not resolve pod-port conflicts
+ such as those caused by hostNetwork.
+ format: int32
+ maximum: 65535
+ minimum: 1
+ type: integer
minAvailable:
anyOf:
- type: integer
@@ -1195,6 +1219,14 @@ spec:
Note: If the referenced PriorityClass is deleted, existing pods
remain unchanged, but new pods that reference it cannot be created.
type: string
+ readinessProbePort:
+ description: |-
+ Port used by the policy server to expose the readiness probe endpoint.
+ When unset, defaults to 8081.
+ format: int32
+ maximum: 65535
+ minimum: 1
+ type: integer
replicas:
description: Replicas is the number of desired replicas.
format: int32
@@ -1723,6 +1755,15 @@ spec:
configuration. The configuration must be under a key named
verification-config in the ConfigMap.
type: string
+ webhookPort:
+ description: |-
+ Port where the policy server listens for incoming webhook requests.
+ When unset, defaults to 8443. This is the port the Kubernetes API server
+ reaches when evaluating admission requests.
+ format: int32
+ maximum: 65535
+ minimum: 1
+ type: integer
required:
- image
- replicas
diff --git a/charts/kubewarden-defaults/templates/_helpers.tpl b/charts/kubewarden-defaults/templates/_helpers.tpl
index fc0aea2e9..f6cd48b7b 100644
--- a/charts/kubewarden-defaults/templates/_helpers.tpl
+++ b/charts/kubewarden-defaults/templates/_helpers.tpl
@@ -84,3 +84,15 @@ Fail
Ignore
{{- end -}}
{{- end -}}
+
+{{/*
+Effective affinity for the default PolicyServer.
+Uses policyServer.affinity if set, otherwise falls back to global.affinity.
+*/}}
+{{- define "kubewarden-defaults.effectiveAffinity" -}}
+{{- if .Values.policyServer.affinity -}}
+ {{- toYaml .Values.policyServer.affinity -}}
+{{- else if .Values.global.affinity -}}
+ {{- toYaml .Values.global.affinity -}}
+{{- end -}}
+{{- end -}}
diff --git a/charts/kubewarden-defaults/templates/policyserver-default.yaml b/charts/kubewarden-defaults/templates/policyserver-default.yaml
index a989224e8..bb1256dc9 100644
--- a/charts/kubewarden-defaults/templates/policyserver-default.yaml
+++ b/charts/kubewarden-defaults/templates/policyserver-default.yaml
@@ -20,8 +20,9 @@ spec:
{{- if .Values.policyServer.maxUnavailable }}
maxUnavailable: {{ .Values.policyServer.maxUnavailable }}
{{- end }}
- {{- if .Values.global.affinity }}
- affinity: {{ .Values.global.affinity | toYaml | nindent 4 }}
+ {{- $affinity := include "kubewarden-defaults.effectiveAffinity" . -}}
+ {{- if $affinity }}
+ affinity: {{ $affinity | nindent 4 }}
{{- end }}
{{- if .Values.global.tolerations }}
tolerations: {{ .Values.global.tolerations | toYaml | nindent 4 }}
@@ -83,4 +84,13 @@ spec:
{{- if .Values.policyServer.securityContexts }}
securityContexts: {{ toYaml .Values.policyServer.securityContexts | nindent 4 }}
{{- end }}
+ {{- if .Values.policyServer.webhookPort }}
+ webhookPort: {{ .Values.policyServer.webhookPort }}
+ {{- end }}
+ {{- if .Values.policyServer.readinessProbePort }}
+ readinessProbePort: {{ .Values.policyServer.readinessProbePort }}
+ {{- end }}
+ {{- if .Values.policyServer.metricsPort }}
+ metricsPort: {{ .Values.policyServer.metricsPort }}
+ {{- end }}
{{- end }}
diff --git a/charts/kubewarden-defaults/tests/host_network_test.yaml b/charts/kubewarden-defaults/tests/host_network_test.yaml
new file mode 100644
index 000000000..d6a465e9f
--- /dev/null
+++ b/charts/kubewarden-defaults/tests/host_network_test.yaml
@@ -0,0 +1,66 @@
+suite: host-network port configuration for default PolicyServer
+templates:
+ - policyserver-default.yaml
+tests:
+ - it: "should not set port fields by default (unset values are omitted)"
+ asserts:
+ - notExists:
+ path: spec.webhookPort
+ - notExists:
+ path: spec.readinessProbePort
+ - notExists:
+ path: spec.metricsPort
+
+ - it: "should set all port fields when all are overridden"
+ set:
+ policyServer.webhookPort: 9443
+ policyServer.readinessProbePort: 9081
+ policyServer.metricsPort: 9080
+ asserts:
+ - equal:
+ path: spec.webhookPort
+ value: 9443
+ - equal:
+ path: spec.readinessProbePort
+ value: 9081
+ - equal:
+ path: spec.metricsPort
+ value: 9080
+
+ - it: "should not set affinity by default"
+ asserts:
+ - notExists:
+ path: spec.affinity
+
+ - it: "should use policyServer.affinity when set"
+ set:
+ policyServer.affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchLabels:
+ kubewarden/policy-server: default
+ topologyKey: kubernetes.io/hostname
+ asserts:
+ - equal:
+ path: spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[0].topologyKey
+ value: kubernetes.io/hostname
+ - equal:
+ path: spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[0].labelSelector.matchLabels["kubewarden/policy-server"]
+ value: default
+
+ - it: "should fall back to global.affinity when policyServer.affinity is not set"
+ set:
+ global.affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: kubernetes.io/os
+ operator: In
+ values:
+ - linux
+ asserts:
+ - equal:
+ path: spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].key
+ value: kubernetes.io/os
diff --git a/charts/kubewarden-defaults/values.schema.json b/charts/kubewarden-defaults/values.schema.json
index 7d01ae11c..97f8ccd9b 100644
--- a/charts/kubewarden-defaults/values.schema.json
+++ b/charts/kubewarden-defaults/values.schema.json
@@ -13,12 +13,31 @@
"type": "string",
"minLength": 1
},
+ "webhookPort": {
+ "type": "integer",
+ "minimum": 1,
+ "maximum": 65535,
+ "description": "Port for the PolicyServer webhook listener. When omitted, the controller uses the default (8443)."
+ },
+ "readinessProbePort": {
+ "type": "integer",
+ "minimum": 1,
+ "maximum": 65535,
+ "description": "Port for the PolicyServer readiness probe. When omitted, the controller uses the default (8081)."
+ },
+ "metricsPort": {
+ "type": "integer",
+ "minimum": 1,
+ "maximum": 65535,
+ "description": "Port for the PolicyServer metrics endpoint. When omitted, the controller uses the default (8080)."
+ },
+ "affinity": {
+ "type": "object",
+ "description": "Affinity rules for the default PolicyServer. Takes precedence over global.affinity."
+ },
"namespacedPoliciesCapabilities": {
"description": "Host capability paths allowed for namespaced policies on this PolicyServer. Catch obvious errors",
- "type": [
- "array",
- "null"
- ],
+ "type": ["array", "null"],
"items": {
"type": "string",
"minLength": 1,
@@ -47,14 +66,10 @@
{
"oneOf": [
{
- "required": [
- "minAvailable"
- ]
+ "required": ["minAvailable"]
},
{
- "required": [
- "maxUnavailable"
- ]
+ "required": ["maxUnavailable"]
}
]
},
@@ -62,14 +77,10 @@
"not": {
"allOf": [
{
- "required": [
- "minAvailable"
- ]
+ "required": ["minAvailable"]
},
{
- "required": [
- "maxUnavailable"
- ]
+ "required": ["maxUnavailable"]
}
]
}
diff --git a/charts/kubewarden-defaults/values.yaml b/charts/kubewarden-defaults/values.yaml
index 04022cb5c..54397b980 100644
--- a/charts/kubewarden-defaults/values.yaml
+++ b/charts/kubewarden-defaults/values.yaml
@@ -196,6 +196,32 @@ policyServer:
# limits and requests, see https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
limits: {}
requests: {}
+ # Port configuration for the policy server.
+ # These ports are used for the webhook, readiness probe, and metrics endpoints.
+ # Important when hostNetwork is enabled or when running multiple PolicyServers
+ # on the same node, as they must not conflict.
+ #
+ # Default values (if not specified):
+ # - webhookPort: 8443
+ # - readinessProbePort: 8081
+ # - metricsPort: controller-wide default (env var
+ # KUBEWARDEN_POLICY_SERVER_SERVICES_METRICS_PORT, falling back to 8080)
+ #
+ # webhookPort: 9443
+ # readinessProbePort: 9081
+ # metricsPort: 9080
+ # affinity configures affinity rules for the default PolicyServer.
+ # This takes precedence over global.affinity when set.
+ # When hostNetwork is enabled, users should set appropriate podAntiAffinity
+ # rules here to prevent host-port conflicts between PolicyServer replicas.
+ # affinity:
+ # podAntiAffinity:
+ # requiredDuringSchedulingIgnoredDuringExecution:
+ # - labelSelector:
+ # matchLabels:
+ # kubewarden/policy-server: default
+ # topologyKey: kubernetes.io/hostname
+ affinity: {}
crdVersion: "policies.kubewarden.io/v1"
recommendedPolicies:
enabled: False
diff --git a/cmd/controller/main.go b/cmd/controller/main.go
index 1e11a809a..7509b14ce 100644
--- a/cmd/controller/main.go
+++ b/cmd/controller/main.go
@@ -23,6 +23,7 @@ import (
"fmt"
"os"
"path/filepath"
+ "strconv"
"strings"
// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
@@ -62,6 +63,11 @@ import (
//+kubebuilder:rbac:groups=authentication.k8s.io,resources=tokenreviews,verbs=create
//+kubebuilder:rbac:groups=authorization.k8s.io,resources=subjectaccessreviews,verbs=create
+const (
+ minAllowedPort = 1
+ maxAllowedPort = 65535
+)
+
//nolint:gochecknoglobals // Following the kubebuilder pattern
var (
scheme = runtime.NewScheme()
@@ -74,6 +80,7 @@ type ManagerOptions struct {
EnableMutualTLS bool
MetricsAddr string
ProbeAddr string
+ WebhookServerPort int
}
type Configuration struct {
@@ -82,6 +89,11 @@ type Configuration struct {
FeatureGateAdmissionWebhookMatchConditions bool
WebhookServiceName string
ImagePullSecrets []corev1.LocalObjectReference
+ // HostNetwork enables host network mode for PolicyServer deployments.
+ // WARNING: enabling this increases the attack surface. Use only when
+ // the Kubernetes API server cannot reach pod-network webhook endpoints
+ // (e.g. clusters using a non-VPC CNI with NAT).
+ HostNetwork bool
}
func init() {
@@ -91,7 +103,7 @@ func init() {
//+kubebuilder:scaffold:scheme
}
-//nolint:funlen // Avoid splitting the main function in multiple functions to avoid changing the retcode logic for metrics shutdown
+//nolint:funlen,gocognit // Avoid splitting the main function into multiple functions to avoid changing the retcode logic for metrics shutdown
func main() {
retcode := 0
defer func() { os.Exit(retcode) }()
@@ -105,8 +117,9 @@ func main() {
var openTelemetryCertificateSecret string
var imagePullSecretsFlag string
- flag.StringVar(&mgrOpts.MetricsAddr, "metrics-bind-address", ":8088", "The address the metric endpoint binds to.")
+ flag.StringVar(&mgrOpts.MetricsAddr, "metrics-bind-address", ":8088", "The address the controller-runtime metric endpoint binds to.")
flag.StringVar(&mgrOpts.ProbeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.")
+ flag.IntVar(&mgrOpts.WebhookServerPort, "webhook-server-port", 9443, "The port the webhook server listens on.")
flag.BoolVar(&mgrOpts.EnableLeaderElection, "leader-elect", false,
"Enable leader election for controller manager. "+
"Enabling this will ensure there is only one active controller manager.")
@@ -135,6 +148,13 @@ func main() {
"image-pull-secrets",
"",
"Comma-separated list of Secret names to use as imagePullSecrets on every policy-server Deployment. The secrets must exist in the deployments namespace.")
+ flag.BoolVar(&config.HostNetwork,
+ "host-network",
+ false,
+ "Enable host network mode for all PolicyServer deployments. "+
+ "WARNING: enabling this increases the attack surface by exposing webhook endpoints "+
+ "on the host network and giving pods visibility of all node network interfaces. "+
+ "Use only when the Kubernetes API server cannot reach pod-network webhook endpoints.")
opts := zap.Options{}
opts.BindFlags(flag.CommandLine)
@@ -143,6 +163,57 @@ func main() {
config.ImagePullSecrets = parseImagePullSecrets(imagePullSecretsFlag)
ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts)))
+ // Validate --webhook-server-port range.
+ if int64(mgrOpts.WebhookServerPort) < minAllowedPort || int64(mgrOpts.WebhookServerPort) > maxAllowedPort {
+ setupLog.Error(
+ errors.New("port must be between 1 and 65535"),
+ "invalid webhook server port",
+ "flag", "--webhook-server-port",
+ "value", mgrOpts.WebhookServerPort,
+ "min", minAllowedPort,
+ "max", maxAllowedPort,
+ )
+ retcode = 1
+ return
+ }
+
+ // HostNetwork mode is incompatible with OTel sidecar injection because
+ // multiple sidecars on the same node would cause port conflicts.
+ if config.HostNetwork && enableOtelSidecar {
+ setupLog.Error(
+ errors.New("--host-network and --enable-otel-sidecar are mutually exclusive"),
+ "incompatible flags: OpenTelemetry sidecar injection is not supported in host-network mode due to port conflicts; use a remote collector instead",
+ )
+ retcode = 1
+ return
+ }
+
+ // Read the global default metrics port for PolicyServer services from the
+ // environment variable, falling back to the hardcoded constant.
+ policyServerMetricsPort := int32(constants.PolicyServerMetricsPort)
+ if envPort := os.Getenv(constants.PolicyServerMetricsPortEnvVar); envPort != "" {
+ parsed, err := strconv.ParseInt(envPort, 10, 32)
+ if err != nil {
+ setupLog.Error(err, "cannot parse env var as integer port",
+ "envVar", constants.PolicyServerMetricsPortEnvVar, "value", envPort)
+ retcode = 1
+ return
+ }
+ if parsed < minAllowedPort || parsed > maxAllowedPort {
+ setupLog.Error(
+ errors.New("port must be between 1 and 65535"),
+ "invalid env var port value",
+ "envVar", constants.PolicyServerMetricsPortEnvVar,
+ "value", envPort,
+ "min", minAllowedPort,
+ "max", maxAllowedPort,
+ )
+ retcode = 1
+ return
+ }
+ policyServerMetricsPort = int32(parsed)
+ }
+
if enableMetrics {
shutdown, err := metrics.New()
if err != nil {
@@ -189,6 +260,7 @@ func main() {
mgrOpts.DeploymentsNamespace,
config,
otelConfiguration,
+ policyServerMetricsPort,
); err != nil {
setupLog.Error(err, "unable to create controllers")
retcode = 1
@@ -275,6 +347,7 @@ func setupManager(mgrOpts ManagerOptions) (ctrl.Manager, error) {
},
WebhookServer: webhook.NewServer(webhook.Options{
ClientCAName: clientCAName,
+ Port: mgrOpts.WebhookServerPort,
}),
}
@@ -299,6 +372,7 @@ func setupReconcilers(mgr ctrl.Manager,
deploymentsNamespace string,
config Configuration,
otelConfiguration controller.TelemetryConfiguration,
+ policyServerMetricsPort int32,
) error {
if err := (&controller.PolicyServerReconciler{
Client: mgr.GetClient(),
@@ -309,6 +383,8 @@ func setupReconcilers(mgr ctrl.Manager,
TelemetryConfiguration: otelConfiguration,
ClientCAConfigMapName: config.ClientCAConfigMapName,
ImagePullSecrets: config.ImagePullSecrets,
+ HostNetwork: config.HostNetwork,
+ PolicyServerMetricsPort: policyServerMetricsPort,
}).SetupWithManager(mgr); err != nil {
return errors.Join(errors.New("unable to create PolicyServer controller"), err)
}
diff --git a/docs/crds/CRD-docs-for-docs-repo.adoc b/docs/crds/CRD-docs-for-docs-repo.adoc
index e082e0ec4..93319160d 100644
--- a/docs/crds/CRD-docs-for-docs-repo.adoc
+++ b/docs/crds/CRD-docs-for-docs-repo.adoc
@@ -941,6 +941,38 @@ Supported wildcard patterns: +
- "category/version/*": allow all capabilities of a specific version (e.g. "oci/v1/*") +
- Specific capability paths (e.g. "oci/v1/verify", "net/v1/dns_lookup_host") + | | Optional: \{} +
+| *`webhookPort`* __integer__ | Port where the policy server listens for incoming webhook requests. +
+When unset, defaults to 8443. This is the port the Kubernetes API server +
+reaches when evaluating admission requests. + | | Maximum: 65535 +
+Minimum: 1 +
+Optional: \{} +
+
+| *`readinessProbePort`* __integer__ | Port used by the policy server to expose the readiness probe endpoint. +
+When unset, defaults to 8081. + | | Maximum: 65535 +
+Minimum: 1 +
+Optional: \{} +
+
+| *`metricsPort`* __integer__ | Port exposed by the metrics Service for this policy server. +
+When unset, defaults to the controller-wide default +
+(KUBEWARDEN_POLICY_SERVER_SERVICES_METRICS_PORT env var, or 8080). +
+Only relevant when metrics are enabled. +
+
+Use this field to customize which port Prometheus scrapes for this +
+PolicyServer's metrics Service (e.g. to match naming conventions or +
+avoid Service-level port collisions). +
+
+NOTE: this field controls only the Service Port (the externally visible +
+scrape port). The Service TargetPort — the port the pod actually listens +
+on — is always the controller-wide default and is not affected by this +
+field. This is intentional: when the OpenTelemetry sidecar mode is +
+enabled, each pod gets its own injected sidecar, but the pod-side +
+Prometheus listener port is determined by controller-wide/injection +
+configuration, not per PolicyServer. Therefore, changing this field does +
+not change the pod listener port and will not resolve pod-port conflicts +
+such as those caused by hostNetwork. + | | Maximum: 65535 +
+Minimum: 1 +
+Optional: \{} +
+
|===
diff --git a/docs/crds/CRD-docs-for-docs-repo.md b/docs/crds/CRD-docs-for-docs-repo.md
index 8fb8eeaaf..5d94f829c 100644
--- a/docs/crds/CRD-docs-for-docs-repo.md
+++ b/docs/crds/CRD-docs-for-docs-repo.md
@@ -539,6 +539,9 @@ _Appears in:_
| `tolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#toleration-v1-core) array_ | Tolerations describe the policy server pod's tolerations. It can be
used to ensure that the policy server pod is not scheduled onto a
node with a taint. | | |
| `priorityClassName` _string_ | PriorityClassName is the name of the PriorityClass to be used for the
policy server pods. Useful to schedule policy server pods with higher
priority to ensure their availability over other cluster workload
resources.
Note: If the referenced PriorityClass is deleted, existing pods
remain unchanged, but new pods that reference it cannot be created. | | Optional: \{\}
|
| `namespacedPoliciesCapabilities` _string array_ | NamespacedPoliciesCapabilities lists host capability API calls allowed
for namespaced policies running on this PolicyServer. When not set,
no host capabilities are granted to namespaced policies.
Supported wildcard patterns:
- "*": allow all host capabilities
- "category/*": allow all capabilities in a category (e.g. "oci/*")
- "category/version/*": allow all capabilities of a specific version (e.g. "oci/v1/*")
- Specific capability paths (e.g. "oci/v1/verify", "net/v1/dns_lookup_host") | | Optional: \{\}
|
+| `webhookPort` _integer_ | Port where the policy server listens for incoming webhook requests.
When unset, defaults to 8443. This is the port the Kubernetes API server
reaches when evaluating admission requests. | | Maximum: 65535
Minimum: 1
Optional: \{\}
|
+| `readinessProbePort` _integer_ | Port used by the policy server to expose the readiness probe endpoint.
When unset, defaults to 8081. | | Maximum: 65535
Minimum: 1
Optional: \{\}
|
+| `metricsPort` _integer_ | Port exposed by the metrics Service for this policy server.
When unset, defaults to the controller-wide default
(KUBEWARDEN_POLICY_SERVER_SERVICES_METRICS_PORT env var, or 8080).
Only relevant when metrics are enabled.
Use this field to customize which port Prometheus scrapes for this
PolicyServer's metrics Service (e.g. to match naming conventions or
avoid Service-level port collisions).
NOTE: this field controls only the Service Port (the externally visible
scrape port). The Service TargetPort — the port the pod actually listens
on — is always the controller-wide default and is not affected by this
field. This is intentional: when the OpenTelemetry sidecar mode is
enabled, each pod gets its own injected sidecar, but the pod-side
Prometheus listener port is determined by controller-wide/injection
configuration, not per PolicyServer. Therefore, changing this field does
not change the pod listener port and will not resolve pod-port conflicts
such as those caused by hostNetwork. | | Maximum: 65535
Minimum: 1
Optional: \{\}
|
diff --git a/internal/controller/policyserver_controller.go b/internal/controller/policyserver_controller.go
index 1cd592aa9..1870655ec 100644
--- a/internal/controller/policyserver_controller.go
+++ b/internal/controller/policyserver_controller.go
@@ -67,6 +67,15 @@ type PolicyServerReconciler struct {
// policy-server Deployment, sourced from the controller's own
// --image-pull-secrets flag.
ImagePullSecrets []corev1.LocalObjectReference
+ // HostNetwork enables host network mode on PolicyServer Deployments.
+ // When true, the controller also sets dnsPolicy to ClusterFirstWithHostNet
+ // so that in-cluster DNS resolution keeps working.
+ HostNetwork bool
+ // PolicyServerMetricsPort is the global default metrics port for PolicyServer
+ // Service objects. It is populated from the KUBEWARDEN_POLICY_SERVER_SERVICES_METRICS_PORT
+ // environment variable at startup, falling back to constants.PolicyServerMetricsPort.
+ // A per-PolicyServer CRD field (spec.metricsPort) always takes priority.
+ PolicyServerMetricsPort int32
}
// TelemetryConfiguration is a struct that contains the configuration for the
diff --git a/internal/controller/policyserver_controller_deployment.go b/internal/controller/policyserver_controller_deployment.go
index 946e12261..15ae08aaa 100644
--- a/internal/controller/policyserver_controller_deployment.go
+++ b/internal/controller/policyserver_controller_deployment.go
@@ -137,15 +137,16 @@ func (r *PolicyServerReconciler) updatePolicyServerDeployment(ctx context.Contex
templateAnnotations,
podSecurityContext,
r.ImagePullSecrets,
+ r.HostNetwork,
)
r.adaptDeploymentForMetricsAndTracingConfiguration(policyServerDeployment, templateAnnotations)
r.adaptDeploymentSettingsForPolicyServer(policyServerDeployment, policyServer)
- if err := r.configureMutualTLS(ctx, policyServerDeployment); err != nil {
- return fmt.Errorf("failed to configure mutual TLS: %w", err)
+ if mtlsErr := r.configureMutualTLS(ctx, policyServerDeployment); mtlsErr != nil {
+ return fmt.Errorf("failed to configure mutual TLS: %w", mtlsErr)
}
- if err := controllerutil.SetOwnerReference(policyServer, policyServerDeployment, r.Client.Scheme()); err != nil {
- return errors.Join(errors.New("failed to set policy server deployment owner reference"), err)
+ if ownerErr := controllerutil.SetOwnerReference(policyServer, policyServerDeployment, r.Client.Scheme()); ownerErr != nil {
+ return errors.Join(errors.New("failed to set policy server deployment owner reference"), ownerErr)
}
return nil
@@ -201,7 +202,9 @@ func (r *PolicyServerReconciler) adaptDeploymentForMetricsAndTracingConfiguratio
// If the otel sidecar is enabled, we need to inject the sidecar in the
// policy server deployment. The exporter will communicate with the sidecar
// using the localhost address.
- if (r.MetricsEnabled || r.TracingEnabled) && r.OtelSidecarEnabled {
+ // NOTE: OTel sidecar injection is skipped when HostNetwork is enabled because
+ // multiple sidecars on the same node would cause port conflicts.
+ if (r.MetricsEnabled || r.TracingEnabled) && r.OtelSidecarEnabled && !r.HostNetwork {
templateAnnotations[constants.OptelInjectAnnotation] = "true"
envvar := corev1.EnvVar{Name: "OTEL_EXPORTER_OTLP_ENDPOINT", Value: "http://localhost:4317"}
if index := envVarsContainVariable(admissionContainer.Env, "OTEL_EXPORTER_OTLP_ENDPOINT"); index >= 0 {
@@ -416,6 +419,7 @@ func buildPolicyServerDeploymentSpec(
templateAnnotations map[string]string,
podSecurityContext *corev1.PodSecurityContext,
imagePullSecrets []corev1.LocalObjectReference,
+ hostNetwork bool,
) appsv1.DeploymentSpec {
templateLabels := map[string]string{
//nolint:staticcheck // this label will remove soon when policy lifecycle is revisited
@@ -427,6 +431,54 @@ func buildPolicyServerDeploymentSpec(
templateLabels[key] = value
}
+ podSpec := corev1.PodSpec{
+ SecurityContext: podSecurityContext,
+ Containers: []corev1.Container{admissionContainer},
+ ImagePullSecrets: imagePullSecrets,
+ ServiceAccountName: policyServer.Spec.ServiceAccountName,
+ Tolerations: policyServer.Spec.Tolerations,
+ Affinity: &policyServer.Spec.Affinity,
+ PriorityClassName: policyServer.Spec.PriorityClassName,
+ Volumes: []corev1.Volume{
+ {
+ Name: policyStoreVolume,
+ VolumeSource: corev1.VolumeSource{
+ EmptyDir: &corev1.EmptyDirVolumeSource{},
+ },
+ },
+ {
+ Name: certsVolumeName,
+ VolumeSource: corev1.VolumeSource{
+ Secret: &corev1.SecretVolumeSource{
+ SecretName: policyServer.NameWithPrefix(),
+ },
+ },
+ },
+ {
+ Name: policiesVolumeName,
+ VolumeSource: corev1.VolumeSource{
+ ConfigMap: &corev1.ConfigMapVolumeSource{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: policyServer.NameWithPrefix(),
+ },
+ Items: []corev1.KeyToPath{
+ {
+ Key: constants.PolicyServerConfigPoliciesEntry,
+ Path: policiesFilename,
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+
+ podSpec.HostNetwork = hostNetwork
+ podSpec.DNSPolicy = corev1.DNSClusterFirst
+ if hostNetwork {
+ podSpec.DNSPolicy = corev1.DNSClusterFirstWithHostNet
+ }
+
return appsv1.DeploymentSpec{
Replicas: &policyServer.Spec.Replicas,
Selector: &metav1.LabelSelector{
@@ -443,47 +495,7 @@ func buildPolicyServerDeploymentSpec(
Labels: templateLabels,
Annotations: templateAnnotations,
},
- Spec: corev1.PodSpec{
- SecurityContext: podSecurityContext,
- Containers: []corev1.Container{admissionContainer},
- ImagePullSecrets: imagePullSecrets,
- ServiceAccountName: policyServer.Spec.ServiceAccountName,
- Tolerations: policyServer.Spec.Tolerations,
- Affinity: &policyServer.Spec.Affinity,
- PriorityClassName: policyServer.Spec.PriorityClassName,
- Volumes: []corev1.Volume{
- {
- Name: policyStoreVolume,
- VolumeSource: corev1.VolumeSource{
- EmptyDir: &corev1.EmptyDirVolumeSource{},
- },
- },
- {
- Name: certsVolumeName,
- VolumeSource: corev1.VolumeSource{
- Secret: &corev1.SecretVolumeSource{
- SecretName: policyServer.NameWithPrefix(),
- },
- },
- },
- {
- Name: policiesVolumeName,
- VolumeSource: corev1.VolumeSource{
- ConfigMap: &corev1.ConfigMapVolumeSource{
- LocalObjectReference: corev1.LocalObjectReference{
- Name: policyServer.NameWithPrefix(),
- },
- Items: []corev1.KeyToPath{
- {
- Key: constants.PolicyServerConfigPoliciesEntry,
- Path: policiesFilename,
- },
- },
- },
- },
- },
- },
- },
+ Spec: podSpec,
},
}
}
@@ -640,11 +652,11 @@ func getPolicyServerContainer(policyServer *policiesv1.PolicyServer) corev1.Cont
},
{
Name: "KUBEWARDEN_PORT",
- Value: strconv.Itoa(constants.PolicyServerListenPort),
+ Value: strconv.Itoa(int(policyServer.EffectiveWebhookPort())),
},
{
Name: "KUBEWARDEN_READINESS_PROBE_PORT",
- Value: strconv.Itoa(constants.PolicyServerReadinessProbePort),
+ Value: strconv.Itoa(int(policyServer.EffectiveReadinessProbePort())),
},
{
Name: "KUBEWARDEN_POLICIES_DOWNLOAD_DIR",
@@ -663,7 +675,7 @@ func getPolicyServerContainer(policyServer *policiesv1.PolicyServer) corev1.Cont
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Path: constants.PolicyServerReadinessProbe,
- Port: intstr.FromInt(constants.PolicyServerReadinessProbePort),
+ Port: intstr.FromInt32(policyServer.EffectiveReadinessProbePort()),
Scheme: corev1.URISchemeHTTP,
},
},
diff --git a/internal/controller/policyserver_controller_service.go b/internal/controller/policyserver_controller_service.go
index 1212aa3ce..c2fb3f43d 100644
--- a/internal/controller/policyserver_controller_service.go
+++ b/internal/controller/policyserver_controller_service.go
@@ -4,8 +4,6 @@ import (
"context"
"errors"
"fmt"
- "os"
- "strconv"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -16,23 +14,6 @@ import (
"github.com/kubewarden/kubewarden-controller/internal/constants"
)
-// This is the port where the Policy Server service will be exposing metrics. Can be overridden
-// by an environment variable KUBEWARDEN_POLICY_SERVER_SERVICES_METRICS_PORT.
-func getMetricsPort() int32 {
- metricsPort := int32(constants.PolicyServerMetricsPort)
- envMetricsPort := os.Getenv(constants.PolicyServerMetricsPortEnvVar)
- if envMetricsPort != "" {
- var err error
- metricsPortInt32, err := strconv.ParseInt(envMetricsPort, 10, 32)
- if err != nil {
- fmt.Fprintf(os.Stderr, "port %s provided in %s envvar cannot be parsed as integer: %v. Aborting.\n", envMetricsPort, constants.PolicyServerMetricsPortEnvVar, err)
- os.Exit(1)
- }
- metricsPort = int32(metricsPortInt32)
- }
- return metricsPort
-}
-
func (r *PolicyServerReconciler) reconcilePolicyServerService(ctx context.Context, policyServer *policiesv1.PolicyServer) error {
svc := corev1.Service{
ObjectMeta: metav1.ObjectMeta{
@@ -67,7 +48,7 @@ func (r *PolicyServerReconciler) updateService(svc *corev1.Service, policyServer
{
Name: "policy-server",
Port: constants.PolicyServerServicePort,
- TargetPort: intstr.FromInt(constants.PolicyServerListenPort),
+ TargetPort: intstr.FromInt32(policyServer.EffectiveWebhookPort()),
Protocol: corev1.ProtocolTCP,
},
},
@@ -80,9 +61,17 @@ func (r *PolicyServerReconciler) updateService(svc *corev1.Service, policyServer
svc.Spec.Ports = append(
svc.Spec.Ports,
corev1.ServicePort{
- Name: "metrics",
- Port: getMetricsPort(),
- Protocol: corev1.ProtocolTCP,
+ Name: "metrics",
+ // spec.metricsPort customizes the Service Port (the port Prometheus
+ // scrapes externally). It does not affect the pod-side port.
+ Port: policyServer.EffectiveMetricsPort(r.PolicyServerMetricsPort),
+ // TargetPort is intentionally fixed at the controller-wide default and
+ // does NOT follow spec.metricsPort. When the OpenTelemetry sidecar mode
+ // is enabled, the injected sidecar (one per pod) always exports
+ // Prometheus metrics on this fixed cluster-wide port. Routing traffic
+ // to a different pod port would break sidecar-mode metrics scraping.
+ TargetPort: intstr.FromInt32(r.PolicyServerMetricsPort),
+ Protocol: corev1.ProtocolTCP,
},
)
}
diff --git a/internal/controller/policyserver_controller_service_test.go b/internal/controller/policyserver_controller_service_test.go
new file mode 100644
index 000000000..242cc96de
--- /dev/null
+++ b/internal/controller/policyserver_controller_service_test.go
@@ -0,0 +1,180 @@
+/*
+Copyright 2026.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controller
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/util/intstr"
+ "sigs.k8s.io/controller-runtime/pkg/client/fake"
+
+ policiesv1 "github.com/kubewarden/kubewarden-controller/api/policies/v1"
+ "github.com/kubewarden/kubewarden-controller/internal/constants"
+)
+
+// newTestScheme returns a runtime.Scheme with the PolicyServer types
+// registered, which is needed by updateService (SetOwnerReference).
+func newTestScheme() *runtime.Scheme {
+ s := runtime.NewScheme()
+ _ = policiesv1.AddToScheme(s)
+ _ = corev1.AddToScheme(s)
+ return s
+}
+
+// newPolicyServer builds a minimal PolicyServer for unit tests.
+// It sets the GVK so SetOwnerReference can resolve the owner.
+func newPolicyServer(name string, metricsPort *int32) *policiesv1.PolicyServer {
+ ps := &policiesv1.PolicyServer{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ UID: "test-uid",
+ },
+ Spec: policiesv1.PolicyServerSpec{
+ MetricsPort: metricsPort,
+ },
+ }
+ ps.SetGroupVersionKind(policiesv1.GroupVersion.WithKind("PolicyServer"))
+ return ps
+}
+
+func int32Ptr(v int32) *int32 { return &v }
+
+// findServicePort returns the ServicePort with the given name, or nil.
+func findServicePort(ports []corev1.ServicePort, name string) *corev1.ServicePort {
+ for i := range ports {
+ if ports[i].Name == name {
+ return &ports[i]
+ }
+ }
+ return nil
+}
+
+// TestUpdateServiceMetricsPortPriorityChain validates the 3-tier priority
+// chain for the metrics Service Port:
+//
+// CRD field (spec.metricsPort) > env var (PolicyServerMetricsPort) > constant (8080)
+//
+// It also verifies that the Service TargetPort is always fixed at the
+// controller-wide PolicyServerMetricsPort regardless of any CRD override.
+// This is intentional: when the OpenTelemetry sidecar mode is enabled, each
+// pod gets its own injected sidecar, but the pod-side Prometheus listener
+// port is set by controller-wide/injection configuration — per-PolicyServer
+// pod-side overrides are not possible without reconfiguring that injection.
+//
+// It also verifies that when MetricsEnabled is false, no metrics port appears
+// on the Service.
+func TestUpdateServiceMetricsPortPriorityChain(t *testing.T) {
+ tests := []struct {
+ name string
+ metricsEnabled bool
+ policyServerMetricPort int32 // simulates env var / constant
+ crdMetricsPort *int32
+ expectMetricsPort bool
+ expectedPort int32 // Service Port
+ expectedTargetPort int32 // Service TargetPort
+ }{
+ {
+ name: "env var only (no CRD override)",
+ metricsEnabled: true,
+ policyServerMetricPort: 9090,
+ crdMetricsPort: nil,
+ expectMetricsPort: true,
+ expectedPort: 9090,
+ expectedTargetPort: 9090,
+ },
+ {
+ name: "CRD overrides env var",
+ metricsEnabled: true,
+ policyServerMetricPort: 9090,
+ crdMetricsPort: int32Ptr(9091),
+ expectMetricsPort: true,
+ expectedPort: 9091,
+ expectedTargetPort: 9090,
+ },
+ {
+ name: "constant default (8080)",
+ metricsEnabled: true,
+ policyServerMetricPort: constants.PolicyServerMetricsPort,
+ crdMetricsPort: nil,
+ expectMetricsPort: true,
+ expectedPort: constants.PolicyServerMetricsPort,
+ expectedTargetPort: constants.PolicyServerMetricsPort,
+ },
+ {
+ name: "metrics disabled — no metrics port on service",
+ metricsEnabled: false,
+ policyServerMetricPort: 9090,
+ crdMetricsPort: nil,
+ expectMetricsPort: false,
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ scheme := newTestScheme()
+ fakeClient := fake.NewClientBuilder().WithScheme(scheme).Build()
+
+ reconciler := &PolicyServerReconciler{
+ Client: fakeClient,
+ TelemetryConfiguration: TelemetryConfiguration{
+ MetricsEnabled: tc.metricsEnabled,
+ },
+ PolicyServerMetricsPort: tc.policyServerMetricPort,
+ DeploymentsNamespace: "kubewarden",
+ Scheme: scheme,
+ }
+
+ ps := newPolicyServer("test-ps", tc.crdMetricsPort)
+ svc := &corev1.Service{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: ps.NameWithPrefix(),
+ Namespace: "kubewarden",
+ },
+ }
+
+ err := reconciler.updateService(svc, ps)
+ require.NoError(t, err)
+
+ // The "policy-server" port must always be present
+ webhookPort := findServicePort(svc.Spec.Ports, "policy-server")
+ require.NotNil(t, webhookPort, "policy-server port must always exist")
+ assert.Equal(t, int32(constants.PolicyServerServicePort), webhookPort.Port)
+
+ metricsPort := findServicePort(svc.Spec.Ports, "metrics")
+
+ if !tc.expectMetricsPort {
+ assert.Nil(t, metricsPort, "metrics port should not be present when metrics are disabled")
+ assert.Len(t, svc.Spec.Ports, 1, "only the policy-server port should exist")
+ return
+ }
+
+ require.NotNil(t, metricsPort, "metrics port must exist when metrics are enabled")
+ assert.Equal(t, tc.expectedPort, metricsPort.Port,
+ "Service Port should respect CRD > env var > constant priority")
+ assert.Equal(t, intstr.FromInt32(tc.expectedTargetPort), metricsPort.TargetPort,
+ "Service TargetPort must always equal the global PolicyServerMetricsPort (fixed regardless of spec.metricsPort, to avoid breaking OTel sidecar mode)")
+ assert.Equal(t, corev1.ProtocolTCP, metricsPort.Protocol)
+ assert.Len(t, svc.Spec.Ports, 2, "both policy-server and metrics ports should exist")
+ })
+ }
+}
diff --git a/internal/controller/policyserver_controller_test.go b/internal/controller/policyserver_controller_test.go
index e76591f57..f02912e53 100644
--- a/internal/controller/policyserver_controller_test.go
+++ b/internal/controller/policyserver_controller_test.go
@@ -22,6 +22,7 @@ import (
"errors"
"fmt"
"path/filepath"
+ "strconv"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -176,6 +177,8 @@ var _ = Describe("PolicyServer controller", func() {
By("checking the deployment spec")
Expect(deployment.Spec.Template.Spec).To(MatchFields(IgnoreExtras, Fields{
"Tolerations": BeEmpty(),
+ "HostNetwork": BeFalse(),
+ "DNSPolicy": Not(Equal(corev1.DNSClusterFirstWithHostNet)),
"SecurityContext": PointTo(MatchFields(IgnoreExtras, Fields{
"SELinuxOptions": BeNil(),
"WindowsOptions": BeNil(),
@@ -711,6 +714,69 @@ var _ = Describe("PolicyServer controller", func() {
}, timeout, pollInterval).Should(Succeed())
})
+ It("should use all custom ports", func() {
+ policyServer := policiesv1.NewPolicyServerFactory().
+ WithName(policyServerName).
+ WithWebhookPort(9443).
+ WithReadinessProbePort(9081).
+ WithMetricsPort(9080).
+ Build()
+ createPolicyServerAndWaitForItsService(ctx, policyServer)
+
+ By("checking all custom port env vars in the deployment")
+ Eventually(func() error {
+ deployment, err := getTestPolicyServerDeployment(ctx, policyServerName)
+ if err != nil {
+ return err
+ }
+ container := deployment.Spec.Template.Spec.Containers[0]
+ envMap := make(map[string]string)
+ for _, env := range container.Env {
+ envMap[env.Name] = env.Value
+ }
+
+ Expect(envMap).To(HaveKeyWithValue("KUBEWARDEN_PORT", strconv.Itoa(9443)))
+ Expect(envMap).To(HaveKeyWithValue("KUBEWARDEN_READINESS_PROBE_PORT", strconv.Itoa(9081)))
+ Expect(envMap).To(HaveKeyWithValue(constants.PolicyServerEnableMetricsEnvVar, "true"))
+
+ By("checking readiness probe port")
+ Expect(container.ReadinessProbe).ToNot(BeNil())
+ Expect(container.ReadinessProbe.HTTPGet).ToNot(BeNil())
+ Expect(container.ReadinessProbe.HTTPGet.Port).To(Equal(intstr.FromInt32(9081)))
+
+ return nil
+ }, timeout, pollInterval).Should(Succeed())
+
+ By("checking the service targetPort matches custom webhook port")
+ Eventually(func() error {
+ service, err := getTestPolicyServerService(ctx, policyServerName)
+ if err != nil {
+ return err
+ }
+ Expect(service.Spec.Ports).To(ContainElement(MatchFields(IgnoreExtras, Fields{
+ "Name": Equal("policy-server"),
+ "Port": Equal(int32(constants.PolicyServerServicePort)),
+ "TargetPort": Equal(intstr.FromInt32(9443)),
+ })))
+ return nil
+ }, timeout, pollInterval).Should(Succeed())
+
+ By("checking the service metrics port matches custom metrics port")
+ Eventually(func() error {
+ service, err := getTestPolicyServerService(ctx, policyServerName)
+ if err != nil {
+ return err
+ }
+ Expect(service.Spec.Ports).To(ContainElement(MatchFields(IgnoreExtras, Fields{
+ "Name": Equal("metrics"),
+ "Port": Equal(int32(9080)),
+ "TargetPort": Equal(intstr.FromInt32(constants.PolicyServerMetricsPort)),
+ "Protocol": Equal(corev1.ProtocolTCP),
+ })))
+ return nil
+ }, timeout, pollInterval).Should(Succeed())
+ })
+
It("should create deployment with owner reference", func() {
policyServer := policiesv1.NewPolicyServerFactory().WithName(policyServerName).Build()
createPolicyServerAndWaitForItsService(ctx, policyServer)
diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go
index 564583873..aaf1e61b7 100644
--- a/internal/controller/suite_test.go
+++ b/internal/controller/suite_test.go
@@ -18,6 +18,7 @@ package controller
import (
"context"
+ "os"
"path/filepath"
"testing"
"time"
@@ -34,6 +35,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/envtest"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
+ metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
policiesv1 "github.com/kubewarden/kubewarden-controller/api/policies/v1"
"github.com/kubewarden/kubewarden-controller/internal/certs"
@@ -66,8 +68,25 @@ var _ = SynchronizedBeforeSuite(func() []byte {
var ctx context.Context
ctx, cancel := context.WithCancel(context.TODO())
+ // Copy Kubewarden CRD files to a temp directory so envtest can load
+ // them. This is necessary to ensure that the latest CRD versions are available
+ // in the tests.
+ crdSourceDir := filepath.Join("..", "..", "charts", "kubewarden-crds", "templates")
+ crdTempDir, err := os.MkdirTemp("", "kubewarden-crds-*")
+ Expect(err).NotTo(HaveOccurred())
+ DeferCleanup(func() { os.RemoveAll(crdTempDir) })
+
+ crdFiles, err := filepath.Glob(filepath.Join(crdSourceDir, "policies.kubewarden.io_*.yaml"))
+ Expect(err).NotTo(HaveOccurred())
+ Expect(crdFiles).NotTo(BeEmpty(), "no Kubewarden CRD files found in %s", crdSourceDir)
+ for _, src := range crdFiles {
+ data, readErr := os.ReadFile(src)
+ Expect(readErr).NotTo(HaveOccurred())
+ Expect(os.WriteFile(filepath.Join(crdTempDir, filepath.Base(src)), data, 0o600)).To(Succeed())
+ }
+
testEnv := &envtest.Environment{
- CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")},
+ CRDDirectoryPaths: []string{crdTempDir},
ErrorIfCRDPathMissing: true,
}
@@ -86,6 +105,9 @@ var _ = SynchronizedBeforeSuite(func() []byte {
k8sManager, err := ctrl.NewManager(restConfig, ctrl.Options{
Scheme: scheme.Scheme,
+ Metrics: metricsserver.Options{
+ BindAddress: "0",
+ },
})
Expect(err).ToNot(HaveOccurred())
@@ -122,11 +144,15 @@ var _ = SynchronizedBeforeSuite(func() []byte {
Expect(err).ToNot(HaveOccurred())
err = (&PolicyServerReconciler{
- Client: k8sManager.GetClient(),
- Scheme: k8sManager.GetScheme(),
- DeploymentsNamespace: deploymentsNamespace,
- ClientCAConfigMapName: clientCAConfigMapName,
- ImagePullSecrets: []corev1.LocalObjectReference{{Name: reconcilerImagePullSecret}},
+ Client: k8sManager.GetClient(),
+ Scheme: k8sManager.GetScheme(),
+ DeploymentsNamespace: deploymentsNamespace,
+ ClientCAConfigMapName: clientCAConfigMapName,
+ ImagePullSecrets: []corev1.LocalObjectReference{{Name: reconcilerImagePullSecret}},
+ PolicyServerMetricsPort: constants.PolicyServerMetricsPort,
+ TelemetryConfiguration: TelemetryConfiguration{
+ MetricsEnabled: true,
+ },
}).SetupWithManager(k8sManager)
Expect(err).ToNot(HaveOccurred())