diff --git a/.changelog/4468.txt b/.changelog/4468.txt new file mode 100644 index 0000000000..2e28cf4656 --- /dev/null +++ b/.changelog/4468.txt @@ -0,0 +1,3 @@ +```release-note:bug +control-plane: Fixed bug in TerminatingGateway controller workflow for handling AdminPartition enabled cluster ACL policies for associated TerminatingGateway services ([NET-12039](https://hashicorp.atlassian.net/browse/NET-12039)). +``` diff --git a/.changelog/4470.txt b/.changelog/4470.txt new file mode 100644 index 0000000000..5d952e4a1e --- /dev/null +++ b/.changelog/4470.txt @@ -0,0 +1,3 @@ +```release-note:feature +control-plane: Add available Zone Kubernetes Topology Metadata for NodePort Service when Syncing Kubernetes Services. +``` \ No newline at end of file diff --git a/.changelog/4471.txt b/.changelog/4471.txt new file mode 100644 index 0000000000..1c1868e4b3 --- /dev/null +++ b/.changelog/4471.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +helm: Add support for `server.snapshotAgent.extraVolumes` and `server.snapshotAgent.extraEnvironmentVars` so privileged credentials can be configured for the snapshot agent. +``` \ No newline at end of file diff --git a/.changelog/4478.txt b/.changelog/4478.txt new file mode 100644 index 0000000000..be69e0ce7b --- /dev/null +++ b/.changelog/4478.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +security: Support running Consul under Pod Security Admissions (PSA) restricted mode. +``` diff --git a/acceptance/framework/consul/helm_cluster.go b/acceptance/framework/consul/helm_cluster.go index 54032b2978..f1ba9258a8 100644 --- a/acceptance/framework/consul/helm_cluster.go +++ b/acceptance/framework/consul/helm_cluster.go @@ -481,9 +481,17 @@ func (h *HelmCluster) CreatePortForwardTunnel(t *testing.T, remotePort int, rele releaseName = release[0] } serverPod := fmt.Sprintf("%s-consul-server-0", releaseName) + if releaseName == "" { + serverPod = "consul-server-0" + } return portforward.CreateTunnelToResourcePort(t, serverPod, remotePort, h.helmOptions.KubectlOptions, h.logger) } +// For instances when namespace is being manually set by the test and needs to be overridden. +func (h *HelmCluster) SetNamespace(ns string) { + h.helmOptions.KubectlOptions.Namespace = ns +} + func (h *HelmCluster) SetupConsulClient(t *testing.T, secure bool, release ...string) (client *api.Client, configAddress string) { t.Helper() @@ -514,10 +522,17 @@ func (h *HelmCluster) SetupConsulClient(t *testing.T, secure bool, release ...st // and will try to read the replication token from the federation secret. // In secondary servers, we don't create a bootstrap token since ACLs are only bootstrapped in the primary. // Instead, we provide a replication token that serves the role of the bootstrap token. 
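Note: the SetNamespace override and the empty-release-name fallbacks added in this file exist so tests that install the chart outside the framework can still borrow its Consul client plumbing. A minimal usage sketch, mirroring the call sites in the OpenShift tenancy test later in this diff (the empty release name is what selects the unprefixed pod and secret names):

```go
// Hypothetical test body reusing the framework against an externally installed chart.
consulCluster := consul.NewHelmCluster(t, map[string]string{}, ctx, cfg, "")
consulCluster.SetNamespace("consul") // override the namespace chosen by the framework
consulClient, _ := consulCluster.SetupConsulClient(t, true /* secure */)
```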
- aclSecret, err := h.kubernetesClient.CoreV1().Secrets(namespace).Get(context.Background(), releaseName+"-consul-bootstrap-acl-token", metav1.GetOptions{}) + aclSecretName := releaseName + "-consul-bootstrap-acl-token" + if releaseName == "" { + aclSecretName = "consul-bootstrap-acl-token" + } + aclSecret, err := h.kubernetesClient.CoreV1().Secrets(namespace).Get(context.Background(), aclSecretName, metav1.GetOptions{}) if err != nil && errors.IsNotFound(err) { - federationSecret := fmt.Sprintf("%s-consul-federation", releaseName) - aclSecret, err = h.kubernetesClient.CoreV1().Secrets(namespace).Get(context.Background(), federationSecret, metav1.GetOptions{}) + federationSecretName := fmt.Sprintf("%s-consul-federation", releaseName) + if releaseName == "" { + federationSecretName = "consul-federation" + } + aclSecret, err = h.kubernetesClient.CoreV1().Secrets(namespace).Get(context.Background(), federationSecretName, metav1.GetOptions{}) require.NoError(r, err) config.Token = string(aclSecret.Data["replicationToken"]) } else if err == nil { diff --git a/acceptance/framework/k8s/deploy.go b/acceptance/framework/k8s/deploy.go index 2db7224690..e9fe027c68 100644 --- a/acceptance/framework/k8s/deploy.go +++ b/acceptance/framework/k8s/deploy.go @@ -11,13 +11,14 @@ import ( "time" "github.com/gruntwork-io/terratest/modules/k8s" - "github.com/hashicorp/consul-k8s/acceptance/framework/helpers" - "github.com/hashicorp/consul-k8s/acceptance/framework/logger" "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/stretchr/testify/require" v1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" "k8s.io/apimachinery/pkg/util/yaml" + + "github.com/hashicorp/consul-k8s/acceptance/framework/helpers" + "github.com/hashicorp/consul-k8s/acceptance/framework/logger" ) // Deploy creates a Kubernetes deployment by applying configuration stored at filepath, @@ -141,7 +142,7 @@ func CheckStaticServerConnectionMultipleFailureMessages(t *testing.T, options *k require.Contains(r, output, expectedOutput) } else { require.Error(r, err) - require.Condition(r, func() bool { + require.Conditionf(r, func() bool { exists := false for _, msg := range failureMessages { if strings.Contains(output, msg) { @@ -149,7 +150,7 @@ func CheckStaticServerConnectionMultipleFailureMessages(t *testing.T, options *k } } return exists - }) + }, "expected failure messages %q but got %q", failureMessages, output) } }) } diff --git a/acceptance/tests/api-gateway/api_gateway_external_servers_test.go b/acceptance/tests/api-gateway/api_gateway_external_servers_test.go index 43755f5b40..a06309a18a 100644 --- a/acceptance/tests/api-gateway/api_gateway_external_servers_test.go +++ b/acceptance/tests/api-gateway/api_gateway_external_servers_test.go @@ -8,15 +8,16 @@ import ( "fmt" "testing" - "github.com/hashicorp/consul-k8s/acceptance/framework/consul" - "github.com/hashicorp/consul-k8s/acceptance/framework/helpers" - "github.com/hashicorp/consul-k8s/acceptance/framework/k8s" - "github.com/hashicorp/consul-k8s/acceptance/framework/logger" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/stretchr/testify/require" "k8s.io/apimachinery/pkg/types" gwv1beta1 "sigs.k8s.io/gateway-api/apis/v1beta1" + + "github.com/hashicorp/consul-k8s/acceptance/framework/consul" + "github.com/hashicorp/consul-k8s/acceptance/framework/helpers" + "github.com/hashicorp/consul-k8s/acceptance/framework/k8s" + "github.com/hashicorp/consul-k8s/acceptance/framework/logger" ) // TestAPIGateway_ExternalServers tests that connect 
works when using external servers. diff --git a/acceptance/tests/openshift/basic_openshift_test.go b/acceptance/tests/openshift/basic_openshift_test.go index ab39d9b3e6..7b399216de 100644 --- a/acceptance/tests/openshift/basic_openshift_test.go +++ b/acceptance/tests/openshift/basic_openshift_test.go @@ -27,69 +27,14 @@ import ( // Test that api gateway basic functionality works in a default installation and a secure installation. func TestOpenshift_Basic(t *testing.T) { cfg := suite.Config() - - cmd := exec.Command("helm", "repo", "add", "hashicorp", "https://helm.releases.hashicorp.com") - output, err := cmd.CombinedOutput() - require.NoErrorf(t, err, "failed to add hashicorp helm repo: %s", string(output)) - - // FUTURE for some reason NewHelmCluster creates a consul server pod that runs as root which - // isn't allowed in OpenShift. In order to test OpenShift properly, we have to call helm and k8s - // directly to bypass. Ideally we would just fix the framework that is running the pod as root. - cmd = exec.Command("kubectl", "create", "namespace", "consul") - output, err = cmd.CombinedOutput() - helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() { - cmd = exec.Command("kubectl", "delete", "namespace", "consul") - output, err = cmd.CombinedOutput() - assert.NoErrorf(t, err, "failed to delete namespace: %s", string(output)) - }) - - require.NoErrorf(t, err, "failed to add hashicorp helm repo: %s", string(output)) - - cmd = exec.Command("kubectl", "create", "secret", "generic", - "consul-ent-license", - "--namespace", "consul", - `--from-literal=key=`+cfg.EnterpriseLicense) - output, err = cmd.CombinedOutput() - require.NoErrorf(t, err, "failed to add consul enterprise license: %s", string(output)) - - helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() { - cmd = exec.Command("kubectl", "delete", "secret", "consul-ent-license") - output, err = cmd.CombinedOutput() - assert.NoErrorf(t, err, "failed to delete secret: %s", string(output)) - }) - - chartPath := "../../../charts/consul" - cmd = exec.Command("helm", "upgrade", "--install", "consul", chartPath, - "--namespace", "consul", - "--set", "global.name=consul", - "--set", "connectInject.enabled=true", - "--set", "connectInject.transparentProxy.defaultEnabled=false", - "--set", "connectInject.apiGateway.managedGatewayClass.mapPrivilegedContainerPorts=8000", - "--set", "global.acls.manageSystemACLs=true", - "--set", "global.tls.enabled=true", - "--set", "global.tls.enableAutoEncrypt=true", - "--set", "global.openshift.enabled=true", - "--set", "global.image="+cfg.ConsulImage, - "--set", "global.imageK8S="+cfg.ConsulK8SImage, - "--set", "global.imageConsulDataplane="+cfg.ConsulDataplaneImage, - "--set", "global.enterpriseLicense.secretName=consul-ent-license", - "--set", "global.enterpriseLicense.secretKey=key", - ) - output, err = cmd.CombinedOutput() - helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() { - cmd := exec.Command("helm", "uninstall", "consul", "--namespace", "consul") - output, err := cmd.CombinedOutput() - require.NoErrorf(t, err, "failed to uninstall consul: %s", string(output)) - }) - - require.NoErrorf(t, err, "failed to install consul: %s", string(output)) + newOpenshiftCluster(t, cfg, true, false) // this is normally called by the environment, but because we have to bypass we have to call it explicitly logf.SetLogger(logr.New(nil)) logger.Log(t, "creating resources for OpenShift test") - cmd = exec.Command("kubectl", "apply", "-f", "../fixtures/cases/openshift/basic") - output, 
err = cmd.CombinedOutput() + cmd := exec.Command("kubectl", "apply", "-f", "../fixtures/cases/openshift/basic") + output, err := cmd.CombinedOutput() helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() { cmd := exec.Command("kubectl", "delete", "-f", "../fixtures/cases/openshift/basic") output, err := cmd.CombinedOutput() diff --git a/acceptance/tests/openshift/openshift_test_runner.go b/acceptance/tests/openshift/openshift_test_runner.go new file mode 100644 index 0000000000..3b8159e841 --- /dev/null +++ b/acceptance/tests/openshift/openshift_test_runner.go @@ -0,0 +1,72 @@ +package openshift + +import ( + "github.com/hashicorp/consul-k8s/acceptance/framework/config" + "github.com/hashicorp/consul-k8s/acceptance/framework/helpers" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "os/exec" + "strconv" + "testing" +) + +func newOpenshiftCluster(t *testing.T, cfg *config.TestConfig, secure, namespaceMirroring bool) { + cmd := exec.Command("helm", "repo", "add", "hashicorp", "https://helm.releases.hashicorp.com") + output, err := cmd.CombinedOutput() + require.NoErrorf(t, err, "failed to add hashicorp helm repo: %s", string(output)) + + // FUTURE for some reason NewHelmCluster creates a consul server pod that runs as root which + // isn't allowed in OpenShift. In order to test OpenShift properly, we have to call helm and k8s + // directly to bypass. Ideally we would just fix the framework that is running the pod as root. + cmd = exec.Command("kubectl", "create", "namespace", "consul") + output, err = cmd.CombinedOutput() + helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() { + cmd = exec.Command("kubectl", "delete", "namespace", "consul") + output, err = cmd.CombinedOutput() + assert.NoErrorf(t, err, "failed to delete namespace: %s", string(output)) + }) + + require.NoErrorf(t, err, "failed to add hashicorp helm repo: %s", string(output)) + + cmd = exec.Command("kubectl", "create", "secret", "generic", + "consul-ent-license", + "--namespace", "consul", + `--from-literal=key=`+cfg.EnterpriseLicense) + output, err = cmd.CombinedOutput() + require.NoErrorf(t, err, "failed to add consul enterprise license: %s", string(output)) + + helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() { + cmd = exec.Command("kubectl", "delete", "secret", "consul-ent-license", "--namespace", "consul") + output, err = cmd.CombinedOutput() + assert.NoErrorf(t, err, "failed to delete secret: %s", string(output)) + }) + + chartPath := "../../../charts/consul" + cmd = exec.Command("helm", "upgrade", "--install", "consul", chartPath, + "--namespace", "consul", + "--set", "global.name=consul", + "--set", "connectInject.enabled=true", + "--set", "connectInject.transparentProxy.defaultEnabled=false", + "--set", "connectInject.apiGateway.managedGatewayClass.mapPrivilegedContainerPorts=8000", + "--set", "global.acls.manageSystemACLs="+strconv.FormatBool(secure), + "--set", "global.tls.enabled="+strconv.FormatBool(secure), + "--set", "global.tls.enableAutoEncrypt="+strconv.FormatBool(secure), + "--set", "global.enableConsulNamespaces="+strconv.FormatBool(namespaceMirroring), + "--set", "global.consulNamespaces.mirroringK8S="+strconv.FormatBool(namespaceMirroring), + "--set", "global.openshift.enabled=true", + "--set", "global.image="+cfg.ConsulImage, + "--set", "global.imageK8S="+cfg.ConsulK8SImage, + "--set", "global.imageConsulDataplane="+cfg.ConsulDataplaneImage, + "--set", "global.enterpriseLicense.secretName=consul-ent-license", + "--set", 
"global.enterpriseLicense.secretKey=key", + ) + + output, err = cmd.CombinedOutput() + helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() { + cmd := exec.Command("helm", "uninstall", "consul", "--namespace", "consul") + output, err := cmd.CombinedOutput() + require.NoErrorf(t, err, "failed to uninstall consul: %s", string(output)) + }) + + require.NoErrorf(t, err, "failed to install consul: %s", string(output)) +} diff --git a/acceptance/tests/openshift/tenancy_openshift_test.go b/acceptance/tests/openshift/tenancy_openshift_test.go new file mode 100644 index 0000000000..bc263046f9 --- /dev/null +++ b/acceptance/tests/openshift/tenancy_openshift_test.go @@ -0,0 +1,470 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package openshift + +import ( + "context" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/base64" + "encoding/pem" + "fmt" + terratestk8s "github.com/gruntwork-io/terratest/modules/k8s" + "github.com/hashicorp/consul-k8s/acceptance/framework/config" + "github.com/hashicorp/consul-k8s/acceptance/framework/consul" + "github.com/hashicorp/consul-k8s/acceptance/framework/environment" + "github.com/hashicorp/consul-k8s/acceptance/framework/k8s" + "github.com/hashicorp/consul-k8s/control-plane/api/v1alpha1" + "github.com/stretchr/testify/assert" + "math/big" + "os/exec" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/hashicorp/consul/api" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "path" + "testing" + "time" + + "github.com/hashicorp/consul/sdk/testutil/retry" + "github.com/stretchr/testify/require" + "k8s.io/apimachinery/pkg/types" + gwv1beta1 "sigs.k8s.io/gateway-api/apis/v1beta1" + + "github.com/hashicorp/consul-k8s/acceptance/framework/helpers" + "github.com/hashicorp/consul-k8s/acceptance/framework/logger" +) + +var ( + gatewayGroup = gwv1beta1.Group(gwv1beta1.GroupVersion.Group) + consulGroup = gwv1beta1.Group(v1alpha1.GroupVersion.Group) + gatewayKind = gwv1beta1.Kind("Gateway") + serviceKind = gwv1beta1.Kind("Service") + secretKind = gwv1beta1.Kind("Secret") + meshServiceKind = gwv1beta1.Kind("MeshService") + httpRouteKind = gwv1beta1.Kind("HTTPRoute") + tcpRouteKind = gwv1beta1.Kind("TCPRoute") + gatewayClassControllerName = "consul.hashicorp.com/gateway-controller" +) + +// This is a light copy of api_gateway_tenancy_test with modifications to run on openshift. 
+func TestOpenshift_APIGateway_Tenancy(t *testing.T) { + cases := []struct { + secure bool + namespaceMirroring bool + }{ + { + secure: false, + namespaceMirroring: false, + }, + { + secure: true, + namespaceMirroring: false, + }, + { + secure: false, + namespaceMirroring: true, + }, + { + secure: true, + namespaceMirroring: true, + }, + } + for _, c := range cases { + name := fmt.Sprintf("secure: %t, namespaces: %t", c.secure, c.namespaceMirroring) + t.Run(name, func(t *testing.T) { + cfg := suite.Config() + ctx := suite.Environment().DefaultContext(t) + + if !cfg.EnableEnterprise && c.namespaceMirroring { + t.Skipf("skipping this test because -enable-enterprise is not set") + } + + newOpenshiftCluster(t, cfg, c.secure, c.namespaceMirroring) + + serviceNamespace, serviceK8SOptions := createNamespace(t, ctx, cfg) + certificateNamespace, certificateK8SOptions := createNamespace(t, ctx, cfg) + gatewayNamespace, gatewayK8SOptions := createNamespace(t, ctx, cfg) + routeNamespace, routeK8SOptions := createNamespace(t, ctx, cfg) + + logger.Logf(t, "creating target server in %s namespace", serviceNamespace) + k8s.DeployKustomize(t, serviceK8SOptions, cfg.NoCleanupOnFailure, cfg.NoCleanup, cfg.DebugDirectory, "../fixtures/cases/static-server-inject") + + logger.Logf(t, "creating certificate resources in %s namespace", certificateNamespace) + applyFixture(t, cfg, certificateK8SOptions, "cases/api-gateways/certificate") + + logger.Logf(t, "creating gateway in %s namespace", gatewayNamespace) + applyFixture(t, cfg, gatewayK8SOptions, "cases/api-gateways/gateway") + + logger.Logf(t, "creating route resources in %s namespace", routeNamespace) + applyFixture(t, cfg, routeK8SOptions, "cases/api-gateways/httproute") + + // patch certificate with data + logger.Log(t, "patching certificate with generated data") + certificate := generateCertificate(t, nil, "gateway.test.local") + k8s.RunKubectl(t, certificateK8SOptions, "patch", "secret", "certificate", "-p", fmt.Sprintf(`{"data":{"tls.crt":"%s","tls.key":"%s"}}`, base64.StdEncoding.EncodeToString(certificate.CertPEM), base64.StdEncoding.EncodeToString(certificate.PrivateKeyPEM)), "--type=merge") + + // patch the resources to reference each other + logger.Log(t, "patching gateway to certificate") + k8s.RunKubectl(t, gatewayK8SOptions, "patch", "gateway", "gateway", "-p", fmt.Sprintf(`{"spec":{"listeners":[{"protocol":"HTTPS","port":8082,"name":"https","tls":{"certificateRefs":[{"name":"certificate","namespace":"%s"}]},"allowedRoutes":{"namespaces":{"from":"All"}}}]}}`, certificateNamespace), "--type=merge") + + logger.Log(t, "patching route to target server") + k8s.RunKubectl(t, routeK8SOptions, "patch", "httproute", "route", "-p", fmt.Sprintf(`{"spec":{"rules":[{"backendRefs":[{"name":"static-server","namespace":"%s","port":80}]}]}}`, serviceNamespace), "--type=merge") + + logger.Log(t, "patching route to gateway") + k8s.RunKubectl(t, routeK8SOptions, "patch", "httproute", "route", "-p", fmt.Sprintf(`{"spec":{"parentRefs":[{"name":"gateway","namespace":"%s"}]}}`, gatewayNamespace), "--type=merge") + + // Grab a kubernetes and consul client so that we can verify binding + // behavior prior to issuing requests through the gateway. 
+ k8sClient := ctx.ControllerRuntimeClient(t) + + consulCluster := consul.NewHelmCluster(t, map[string]string{}, ctx, cfg, "") + consulCluster.SetNamespace("consul") + consulClient, _ := consulCluster.SetupConsulClient(t, c.secure) + + retryCheck(t, 120, func(r *retry.R) { + var gateway gwv1beta1.Gateway + err := k8sClient.Get(context.Background(), types.NamespacedName{Name: "gateway", Namespace: gatewayNamespace}, &gateway) + require.NoError(r, err) + + // check our statuses + checkStatusCondition(r, gateway.Status.Conditions, trueCondition("Accepted", "Accepted")) + checkStatusCondition(r, gateway.Status.Conditions, falseCondition("Programmed", "Pending")) + // we expect a sync error here since dropping the listener means the gateway is now invalid + checkStatusCondition(r, gateway.Status.Conditions, falseCondition("Synced", "SyncError")) + + require.Len(r, gateway.Status.Listeners, 1) + require.EqualValues(r, 1, gateway.Status.Listeners[0].AttachedRoutes) + checkStatusCondition(r, gateway.Status.Listeners[0].Conditions, trueCondition("Accepted", "Accepted")) + checkStatusCondition(r, gateway.Status.Listeners[0].Conditions, falseCondition("Conflicted", "NoConflicts")) + checkStatusCondition(r, gateway.Status.Listeners[0].Conditions, falseCondition("ResolvedRefs", "RefNotPermitted")) + }) + + // since the sync operation should fail above, check that we don't have the entry in Consul. + checkConsulNotExists(t, consulClient, api.APIGateway, "gateway", namespaceForConsul(c.namespaceMirroring, gatewayNamespace)) + + // route failure + retryCheck(t, 60, func(r *retry.R) { + var httproute gwv1beta1.HTTPRoute + err := k8sClient.Get(context.Background(), types.NamespacedName{Name: "route", Namespace: routeNamespace}, &httproute) + require.NoError(r, err) + + require.Len(r, httproute.Status.Parents, 1) + require.EqualValues(r, gatewayClassControllerName, httproute.Status.Parents[0].ControllerName) + require.EqualValues(r, "gateway", httproute.Status.Parents[0].ParentRef.Name) + require.NotNil(r, httproute.Status.Parents[0].ParentRef.Namespace) + require.EqualValues(r, gatewayNamespace, *httproute.Status.Parents[0].ParentRef.Namespace) + checkStatusCondition(r, httproute.Status.Parents[0].Conditions, trueCondition("Accepted", "Accepted")) + checkStatusCondition(r, httproute.Status.Parents[0].Conditions, falseCondition("ResolvedRefs", "RefNotPermitted")) + }) + + // we only sync validly referenced certificates over, so check to make sure it is not created. 
+ checkConsulNotExists(t, consulClient, api.FileSystemCertificate, "certificate", namespaceForConsul(c.namespaceMirroring, certificateNamespace)) + + // now create reference grants + createReferenceGrant(t, cfg, k8sClient, "gateway-certificate", gatewayNamespace, certificateNamespace) + createReferenceGrant(t, cfg, k8sClient, "route-service", routeNamespace, serviceNamespace) + + // gateway updated with references allowed + retryCheck(t, 60, func(r *retry.R) { + var gateway gwv1beta1.Gateway + err := k8sClient.Get(context.Background(), types.NamespacedName{Name: "gateway", Namespace: gatewayNamespace}, &gateway) + require.NoError(r, err) + + // check our statuses + checkStatusCondition(r, gateway.Status.Conditions, trueCondition("Accepted", "Accepted")) + checkStatusCondition(r, gateway.Status.Conditions, trueCondition("Programmed", "Programmed")) + checkStatusCondition(r, gateway.Status.Conditions, trueCondition("Synced", "Synced")) + require.Len(r, gateway.Status.Listeners, 1) + require.EqualValues(r, 1, gateway.Status.Listeners[0].AttachedRoutes) + checkStatusCondition(r, gateway.Status.Listeners[0].Conditions, trueCondition("Accepted", "Accepted")) + checkStatusCondition(r, gateway.Status.Listeners[0].Conditions, falseCondition("Conflicted", "NoConflicts")) + checkStatusCondition(r, gateway.Status.Listeners[0].Conditions, trueCondition("ResolvedRefs", "ResolvedRefs")) + }) + + // check the Consul gateway is updated, with the listener. + retryCheck(t, 30, func(r *retry.R) { + entry, _, err := consulClient.ConfigEntries().Get(api.APIGateway, "gateway", &api.QueryOptions{ + Namespace: namespaceForConsul(c.namespaceMirroring, gatewayNamespace), + }) + require.NoError(r, err) + gateway := entry.(*api.APIGatewayConfigEntry) + + require.EqualValues(r, "gateway", gateway.Meta["k8s-name"]) + require.EqualValues(r, gatewayNamespace, gateway.Meta["k8s-namespace"]) + require.Len(r, gateway.Listeners, 1) + checkConsulStatusCondition(t, gateway.Status.Conditions, trueConsulCondition("Accepted", "Accepted")) + checkConsulStatusCondition(t, gateway.Status.Conditions, trueConsulCondition("ResolvedRefs", "ResolvedRefs")) + }) + + // route updated with gateway and services allowed + retryCheck(t, 30, func(r *retry.R) { + var httproute gwv1beta1.HTTPRoute + err := k8sClient.Get(context.Background(), types.NamespacedName{Name: "route", Namespace: routeNamespace}, &httproute) + require.NoError(r, err) + + require.Len(r, httproute.Status.Parents, 1) + require.EqualValues(r, gatewayClassControllerName, httproute.Status.Parents[0].ControllerName) + require.EqualValues(r, "gateway", httproute.Status.Parents[0].ParentRef.Name) + require.NotNil(r, httproute.Status.Parents[0].ParentRef.Namespace) + require.EqualValues(r, gatewayNamespace, *httproute.Status.Parents[0].ParentRef.Namespace) + checkStatusCondition(r, httproute.Status.Parents[0].Conditions, trueCondition("Accepted", "Accepted")) + checkStatusCondition(r, httproute.Status.Parents[0].Conditions, trueCondition("ResolvedRefs", "ResolvedRefs")) + }) + + // now check to make sure that the route is updated and valid + retryCheck(t, 30, func(r *retry.R) { + // since we're not bound, check to make sure that the route doesn't target the gateway in Consul. 
+ entry, _, err := consulClient.ConfigEntries().Get(api.HTTPRoute, "route", &api.QueryOptions{ + Namespace: namespaceForConsul(c.namespaceMirroring, routeNamespace), + }) + require.NoError(r, err) + route := entry.(*api.HTTPRouteConfigEntry) + + require.EqualValues(r, "route", route.Meta["k8s-name"]) + require.EqualValues(r, routeNamespace, route.Meta["k8s-namespace"]) + require.Len(r, route.Parents, 1) + }) + + // and check to make sure that the certificate exists + retryCheck(t, 30, func(r *retry.R) { + entry, _, err := consulClient.ConfigEntries().Get(api.FileSystemCertificate, "certificate", &api.QueryOptions{ + Namespace: namespaceForConsul(c.namespaceMirroring, certificateNamespace), + }) + require.NoError(r, err) + certificate := entry.(*api.FileSystemCertificateConfigEntry) + + require.EqualValues(r, "certificate", certificate.Meta["k8s-name"]) + require.EqualValues(r, certificateNamespace, certificate.Meta["k8s-namespace"]) + }) + }) + } +} + +func retryCheck(t *testing.T, count int, fn func(r *retry.R)) { + retryCheckWithWait(t, count, 2*time.Second, fn) +} + +func retryCheckWithWait(t *testing.T, count int, wait time.Duration, fn func(r *retry.R)) { + t.Helper() + + counter := &retry.Counter{Count: count, Wait: wait} + retry.RunWith(counter, t, fn) +} + +func createReferenceGrant(t *testing.T, cfg *config.TestConfig, client client.Client, name, from, to string) { + t.Helper() + + // we just create a reference grant for all combinations in the given namespaces + + require.NoError(t, client.Create(context.Background(), &gwv1beta1.ReferenceGrant{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: to, + }, + Spec: gwv1beta1.ReferenceGrantSpec{ + From: []gwv1beta1.ReferenceGrantFrom{{ + Group: gatewayGroup, + Kind: gatewayKind, + Namespace: gwv1beta1.Namespace(from), + }, { + Group: gatewayGroup, + Kind: httpRouteKind, + Namespace: gwv1beta1.Namespace(from), + }, { + Group: gatewayGroup, + Kind: tcpRouteKind, + Namespace: gwv1beta1.Namespace(from), + }}, + To: []gwv1beta1.ReferenceGrantTo{{ + Group: gatewayGroup, + Kind: gatewayKind, + }, { + Kind: serviceKind, + }, { + Group: consulGroup, + Kind: meshServiceKind, + }, { + Kind: secretKind, + }}, + }, + })) + + helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() { + cmd := exec.Command("kubectl", "delete", "referencegrant", name, "-n", to) + output, err := cmd.CombinedOutput() + assert.NoErrorf(t, err, "failed to delete resources: %s", string(output)) + }) +} + +func namespaceForConsul(namespaceMirroringEnabled bool, namespace string) string { + if namespaceMirroringEnabled { + return namespace + } + return "" +} + +func applyFixture(t *testing.T, cfg *config.TestConfig, k8sOptions *terratestk8s.KubectlOptions, fixture string) { + t.Helper() + + out, err := k8s.RunKubectlAndGetOutputE(t, k8sOptions, "apply", "-k", path.Join("../fixtures", fixture)) + require.NoError(t, err, out) + helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() { + fmt.Println(k8s.RunKubectlAndGetOutputE(t, k8sOptions, "delete", "-k", path.Join("../fixtures", fixture))) + }) +} + +func createNamespace(t *testing.T, ctx environment.TestContext, cfg *config.TestConfig) (string, *terratestk8s.KubectlOptions) { + t.Helper() + + namespace := helpers.RandomName() + + logger.Logf(t, "creating Kubernetes namespace %s", namespace) + k8s.RunKubectl(t, ctx.KubectlOptions(t), "create", "ns", namespace) + helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() { + cmd := exec.Command("kubectl", "delete", "namespace", namespace) + 
output, err := cmd.CombinedOutput() + assert.NoErrorf(t, err, "failed to delete resources: %s", string(output)) + //k8s.RunKubectl(t, ctx.KubectlOptions(t), "delete", "ns", namespace) + }) + + return namespace, &terratestk8s.KubectlOptions{ + ContextName: ctx.KubectlOptions(t).ContextName, + ConfigPath: ctx.KubectlOptions(t).ConfigPath, + Namespace: namespace, + } +} + +func checkStatusCondition(t require.TestingT, conditions []metav1.Condition, toCheck metav1.Condition) { + for _, c := range conditions { + if c.Type == toCheck.Type { + require.EqualValues(t, toCheck.Reason, c.Reason) + require.EqualValues(t, toCheck.Status, c.Status) + return + } + } + + t.Errorf("expected condition not found: %s", toCheck.Type) +} + +func trueCondition(conditionType, reason string) metav1.Condition { + return metav1.Condition{ + Type: conditionType, + Reason: reason, + Status: metav1.ConditionTrue, + } +} + +func falseCondition(conditionType, reason string) metav1.Condition { + return metav1.Condition{ + Type: conditionType, + Reason: reason, + Status: metav1.ConditionFalse, + } +} + +type certificateInfo struct { + Cert *x509.Certificate + PrivateKey *rsa.PrivateKey + CertPEM []byte + PrivateKeyPEM []byte +} + +func generateCertificate(t *testing.T, ca *certificateInfo, commonName string) *certificateInfo { + t.Helper() + + bits := 2048 + privateKey, err := rsa.GenerateKey(rand.Reader, bits) + require.NoError(t, err) + + usage := x509.KeyUsageDigitalSignature + if ca == nil { + usage = x509.KeyUsageCertSign + } + + expiration := time.Now().AddDate(10, 0, 0) + cert := &x509.Certificate{ + SerialNumber: big.NewInt(1), + Subject: pkix.Name{ + Organization: []string{"Testing, INC."}, + Country: []string{"US"}, + Province: []string{""}, + Locality: []string{"San Francisco"}, + StreetAddress: []string{"Fake Street"}, + PostalCode: []string{"11111"}, + CommonName: commonName, + }, + IsCA: ca == nil, + NotBefore: time.Now().Add(-10 * time.Minute), + NotAfter: expiration, + SubjectKeyId: []byte{1, 2, 3, 4, 6}, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth}, + KeyUsage: usage, + BasicConstraintsValid: true, + } + caCert := cert + if ca != nil { + caCert = ca.Cert + } + caPrivateKey := privateKey + if ca != nil { + caPrivateKey = ca.PrivateKey + } + data, err := x509.CreateCertificate(rand.Reader, cert, caCert, &privateKey.PublicKey, caPrivateKey) + require.NoError(t, err) + + certBytes := pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: data, + }) + + privateKeyBytes := pem.EncodeToMemory(&pem.Block{ + Type: "RSA PRIVATE KEY", + Bytes: x509.MarshalPKCS1PrivateKey(privateKey), + }) + + return &certificateInfo{ + Cert: cert, + CertPEM: certBytes, + PrivateKey: privateKey, + PrivateKeyPEM: privateKeyBytes, + } +} + +func checkConsulNotExists(t *testing.T, client *api.Client, kind, name string, namespace ...string) { + t.Helper() + + opts := &api.QueryOptions{} + if len(namespace) != 0 { + opts.Namespace = namespace[0] + } + + retryCheck(t, 60, func(r *retry.R) { + _, _, err := client.ConfigEntries().Get(kind, name, opts) + require.Error(r, err) + require.EqualError(r, err, fmt.Sprintf("Unexpected response code: 404 (Config entry not found for %q / %q)", kind, name)) + }) +} + +func checkConsulStatusCondition(t require.TestingT, conditions []api.Condition, toCheck api.Condition) { + for _, c := range conditions { + if c.Type == toCheck.Type { + require.EqualValues(t, toCheck.Reason, c.Reason) + require.EqualValues(t, toCheck.Status, c.Status) + return + } + } + 
+ t.Errorf("expected condition not found: %s", toCheck.Type) +} + +func trueConsulCondition(conditionType, reason string) api.Condition { + return api.Condition{ + Type: conditionType, + Reason: reason, + Status: "True", + } +} diff --git a/charts/consul/templates/mesh-gateway-deployment.yaml b/charts/consul/templates/mesh-gateway-deployment.yaml index c6c33966ee..1ff9c81e41 100644 --- a/charts/consul/templates/mesh-gateway-deployment.yaml +++ b/charts/consul/templates/mesh-gateway-deployment.yaml @@ -126,6 +126,15 @@ spec: {{- end }} initContainers: - name: mesh-gateway-init + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL image: {{ .Values.global.imageK8S }} {{ template "consul.imagePullPolicy" . }} env: @@ -188,6 +197,11 @@ spec: image: {{ .Values.global.imageConsulDataplane | quote }} {{ template "consul.imagePullPolicy" . }} securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault capabilities: {{ if not .Values.meshGateway.hostNetwork}} drop: diff --git a/charts/consul/templates/server-statefulset.yaml b/charts/consul/templates/server-statefulset.yaml index f8cb9b4def..208382f162 100644 --- a/charts/consul/templates/server-statefulset.yaml +++ b/charts/consul/templates/server-statefulset.yaml @@ -315,6 +315,22 @@ spec: {{- end }} {{- end }} {{- end }} + {{- range .Values.server.snapshotAgent.extraVolumes }} + - name: userconfig-snapshot-{{ .name }} + {{ .type }}: + {{- if (eq .type "configMap") }} + name: {{ .name }} + {{- else if (eq .type "secret") }} + secretName: {{ .name }} + {{- end }} + {{- with .items }} + items: + {{- range . }} + - key: {{.key}} + path: {{.path}} + {{- end }} + {{- end }} + {{- end }} {{- if .Values.server.priorityClassName }} priorityClassName: {{ .Values.server.priorityClassName | quote }} {{- end }} @@ -686,6 +702,7 @@ spec: value: /consul/license/{{ .Values.global.enterpriseLicense.secretKey }} {{- end }} {{- end }} + {{- include "consul.extraEnvironmentVars" .Values.server.snapshotAgent | nindent 12 }} command: - "/bin/sh" - "-ec" @@ -733,6 +750,11 @@ spec: mountPath: /consul/tls/ca readOnly: true {{- end }} + {{- range .Values.server.snapshotAgent.extraVolumes }} + - name: userconfig-snapshot-{{ .name }} + readOnly: true + mountPath: /consul/userconfig/{{ .name }} + {{- end }} {{- with .Values.server.snapshotAgent.resources }} resources: {{- toYaml . | nindent 12 }} diff --git a/charts/consul/values.yaml b/charts/consul/values.yaml index 13615e716c..71ecb7a0ea 100644 --- a/charts/consul/values.yaml +++ b/charts/consul/values.yaml @@ -1438,6 +1438,35 @@ server: # @type: string caCert: null + # A list of extra environment variables to set on the snapshot agent specifically + # This could be used to configure credentials that the rest of the + # stateful set would not need access to, like GOOGLE_APPLICATION_CREDENTIALS + # @type: map + extraEnvironmentVars: { } + + # A list of extra volumes to mount onto the snapshot agent. This + # is useful for bringing in extra data that only the snapshot agent needs access + # to. Like storage credentials. The value of this should be a list of objects. + # + # Example: + # + # ```yaml + # extraVolumes: + # - type: secret + # name: storage-credentials + # ``` + # + # Each object supports the following keys: + # + # - `type` - Type of the volume, must be one of "configMap" or "secret". Case sensitive. 
+ # + # - `name` - Name of the configMap or secret to be mounted. This also controls + # the path that it is mounted to. The volume will be mounted to `/consul/userconfig/`. + # + # The snapshot agent will not attempt to load any volumes passed in this stanza + # @type: array + extraVolumes: [ ] + # [Enterprise Only] Added in Consul 1.8, the audit object allow users to enable auditing # and configure a sink and filters for their audit logs. Please refer to # [audit logs](https://developer.hashicorp.com/consul/docs/enterprise/audit-logging) documentation diff --git a/control-plane/api-gateway/gatekeeper/dataplane.go b/control-plane/api-gateway/gatekeeper/dataplane.go index fb488509c1..d9b36b6e9e 100644 --- a/control-plane/api-gateway/gatekeeper/dataplane.go +++ b/control-plane/api-gateway/gatekeeper/dataplane.go @@ -10,6 +10,7 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/utils/ptr" + gwv1beta1 "sigs.k8s.io/gateway-api/apis/v1beta1" "github.com/hashicorp/consul-k8s/control-plane/api-gateway/common" "github.com/hashicorp/consul-k8s/control-plane/api/v1alpha1" @@ -27,7 +28,7 @@ const ( volumeNameForTLSCerts = "consul-gateway-tls-certificates" ) -func consulDataplaneContainer(metrics common.MetricsConfig, config common.HelmConfig, gcc v1alpha1.GatewayClassConfig, name, namespace string, mounts []corev1.VolumeMount) (corev1.Container, error) { +func consulDataplaneContainer(metrics common.MetricsConfig, config common.HelmConfig, gcc v1alpha1.GatewayClassConfig, gateway gwv1beta1.Gateway, mounts []corev1.VolumeMount) (corev1.Container, error) { // Extract the service account token's volume mount. var ( err error @@ -38,7 +39,7 @@ func consulDataplaneContainer(metrics common.MetricsConfig, config common.HelmCo bearerTokenFile = "/var/run/secrets/kubernetes.io/serviceaccount/token" } - args, err := getDataplaneArgs(metrics, namespace, config, bearerTokenFile, name) + args, err := getDataplaneArgs(metrics, gateway.Namespace, config, bearerTokenFile, gateway.Name) if err != nil { return corev1.Container{}, err } @@ -54,7 +55,7 @@ func consulDataplaneContainer(metrics common.MetricsConfig, config common.HelmCo } container := corev1.Container{ - Name: name, + Name: gateway.Name, Image: config.ImageDataplane, ImagePullPolicy: corev1.PullPolicy(config.GlobalImagePullPolicy), @@ -110,19 +111,33 @@ func consulDataplaneContainer(metrics common.MetricsConfig, config common.HelmCo container.Resources = *gcc.Spec.DeploymentSpec.Resources } - // If running in vanilla K8s, run as root to allow binding to privileged ports; - // otherwise, allow the user to be assigned by OpenShift. + // For backwards-compatibility, we allow privilege escalation if port mapping + // is disabled and the Gateway utilizes a privileged port (< 1024). + usingPrivilegedPorts := false + if gcc.Spec.MapPrivilegedContainerPorts == 0 { + for _, listener := range gateway.Spec.Listeners { + if listener.Port < 1024 { + usingPrivilegedPorts = true + break + } + } + } + container.SecurityContext = &corev1.SecurityContext{ - ReadOnlyRootFilesystem: ptr.To(true), + AllowPrivilegeEscalation: ptr.To(usingPrivilegedPorts), + ReadOnlyRootFilesystem: ptr.To(true), + RunAsNonRoot: ptr.To(true), + SeccompProfile: &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, + }, // Drop any Linux capabilities you'd get as root other than NET_BIND_SERVICE. + // NET_BIND_SERVICE is a requirement for consul-dataplane, even though we don't + // bind to privileged ports. 
Capabilities: &corev1.Capabilities{ Add: []corev1.Capability{netBindCapability}, Drop: []corev1.Capability{allCapabilities}, }, } - if !config.EnableOpenShift { - container.SecurityContext.RunAsUser = ptr.To(int64(0)) - } return container, nil } diff --git a/control-plane/api-gateway/gatekeeper/deployment.go b/control-plane/api-gateway/gatekeeper/deployment.go index 9519a42d74..1374d7eef9 100644 --- a/control-plane/api-gateway/gatekeeper/deployment.go +++ b/control-plane/api-gateway/gatekeeper/deployment.go @@ -109,7 +109,7 @@ func (g *Gatekeeper) deployment(gateway gwv1beta1.Gateway, gcc v1alpha1.GatewayC volumes, mounts := volumesAndMounts(gateway) - container, err := consulDataplaneContainer(metrics, config, gcc, gateway.Name, gateway.Namespace, mounts) + container, err := consulDataplaneContainer(metrics, config, gcc, gateway, mounts) if err != nil { return nil, err } diff --git a/control-plane/api-gateway/gatekeeper/init.go b/control-plane/api-gateway/gatekeeper/init.go index e8a17dc8ea..7e17f1c1fc 100644 --- a/control-plane/api-gateway/gatekeeper/init.go +++ b/control-plane/api-gateway/gatekeeper/init.go @@ -213,6 +213,9 @@ func (g *Gatekeeper) initContainer(config common.HelmConfig, name, namespace str }, AllowPrivilegeEscalation: ptr.To(false), ReadOnlyRootFilesystem: ptr.To(true), + SeccompProfile: &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, + }, } return container, nil diff --git a/control-plane/catalog/to-consul/resource.go b/control-plane/catalog/to-consul/resource.go index 9e48305a30..dd42b6d2f5 100644 --- a/control-plane/catalog/to-consul/resource.go +++ b/control-plane/catalog/to-consul/resource.go @@ -662,7 +662,7 @@ func (t *ServiceResource) generateRegistrations(key string) { r.Service = &rs r.Service.ID = serviceID(r.Service.Service, endpointAddr) r.Service.Address = address.Address - + r.Service.Meta = updateServiceMeta(baseService.Meta, endpoint) t.consulMap[key] = append(t.consulMap[key], &r) // Only consider the first address that matches. In some cases // there will be multiple addresses like when using AWS CNI. @@ -683,7 +683,7 @@ func (t *ServiceResource) generateRegistrations(key string) { r.Service = &rs r.Service.ID = serviceID(r.Service.Service, endpointAddr) r.Service.Address = address.Address - + r.Service.Meta = updateServiceMeta(baseService.Meta, endpoint) t.consulMap[key] = append(t.consulMap[key], &r) // Only consider the first address that matches. In some cases // there will be multiple addresses like when using AWS CNI. 
@@ -778,23 +778,7 @@ func (t *ServiceResource) registerServiceInstance( r.Service.ID = serviceID(r.Service.Service, addr) r.Service.Address = addr r.Service.Port = epPort - r.Service.Meta = make(map[string]string) - // Deepcopy baseService.Meta into r.Service.Meta as baseService is shared - // between all nodes of a service - for k, v := range baseService.Meta { - r.Service.Meta[k] = v - } - if endpoint.TargetRef != nil { - r.Service.Meta[ConsulK8SRefValue] = endpoint.TargetRef.Name - r.Service.Meta[ConsulK8SRefKind] = endpoint.TargetRef.Kind - } - if endpoint.NodeName != nil { - r.Service.Meta[ConsulK8SNodeName] = *endpoint.NodeName - } - if endpoint.Zone != nil { - r.Service.Meta[ConsulK8STopologyZone] = *endpoint.Zone - } - + r.Service.Meta = updateServiceMeta(baseService.Meta, endpoint) r.Check = &consulapi.AgentCheck{ CheckID: consulHealthCheckID(endpointSlice.Namespace, serviceID(r.Service.Service, addr)), Name: consulKubernetesCheckName, @@ -1110,3 +1094,25 @@ func getServiceWeight(weight string) (int, error) { return weightI, nil } + +// deepcopy baseService.Meta into r.Service.Meta as baseService is shared between all nodes of a service. +// update service meta with k8s topology info. +func updateServiceMeta(baseServiceMeta map[string]string, endpoint discoveryv1.Endpoint) map[string]string { + + serviceMeta := make(map[string]string) + + for k, v := range baseServiceMeta { + serviceMeta[k] = v + } + if endpoint.TargetRef != nil { + serviceMeta[ConsulK8SRefValue] = endpoint.TargetRef.Name + serviceMeta[ConsulK8SRefKind] = endpoint.TargetRef.Kind + } + if endpoint.NodeName != nil { + serviceMeta[ConsulK8SNodeName] = *endpoint.NodeName + } + if endpoint.Zone != nil { + serviceMeta[ConsulK8STopologyZone] = *endpoint.Zone + } + return serviceMeta +} diff --git a/control-plane/catalog/to-consul/resource_test.go b/control-plane/catalog/to-consul/resource_test.go index 4951d1b107..22bc037a6b 100644 --- a/control-plane/catalog/to-consul/resource_test.go +++ b/control-plane/catalog/to-consul/resource_test.go @@ -817,6 +817,7 @@ func TestServiceResource_lbRegisterEndpoints(t *testing.T) { require.Equal(r, "8.8.8.8", actual[0].Service.Address) require.Equal(r, 8080, actual[0].Service.Port) require.Equal(r, "k8s-sync", actual[0].Node) + require.Equal(r, "us-west-2a", actual[0].Service.Meta[ConsulK8STopologyZone]) }) } @@ -859,6 +860,9 @@ func TestServiceResource_nodePort(t *testing.T) { require.Equal(r, "3.4.5.6", actual[2].Service.Address) require.Equal(r, 30000, actual[2].Service.Port) require.Equal(r, "k8s-sync", actual[2].Node) + require.Equal(r, "us-west-2a", actual[0].Service.Meta[ConsulK8STopologyZone]) + require.Equal(r, "us-west-2b", actual[1].Service.Meta[ConsulK8STopologyZone]) + require.Equal(r, "us-west-2c", actual[2].Service.Meta[ConsulK8STopologyZone]) require.NotEqual(r, actual[0].Service.ID, actual[1].Service.ID) require.NotEqual(r, actual[0].Service.ID, actual[2].Service.ID) require.NotEqual(r, actual[1].Service.ID, actual[2].Service.ID) @@ -905,6 +909,9 @@ func TestServiceResource_nodePortPrefix(t *testing.T) { require.Equal(r, "3.4.5.6", actual[2].Service.Address) require.Equal(r, 30000, actual[2].Service.Port) require.Equal(r, "k8s-sync", actual[2].Node) + require.Equal(r, "us-west-2a", actual[0].Service.Meta[ConsulK8STopologyZone]) + require.Equal(r, "us-west-2b", actual[1].Service.Meta[ConsulK8STopologyZone]) + require.Equal(r, "us-west-2c", actual[2].Service.Meta[ConsulK8STopologyZone]) require.NotEqual(r, actual[0].Service.ID, actual[1].Service.ID) require.NotEqual(r, 
actual[0].Service.ID, actual[2].Service.ID) require.NotEqual(r, actual[1].Service.ID, actual[2].Service.ID) @@ -1020,6 +1027,9 @@ func TestServiceResource_nodePortAnnotatedPort(t *testing.T) { require.Equal(r, "3.4.5.6", actual[2].Service.Address) require.Equal(r, 30001, actual[2].Service.Port) require.Equal(r, "k8s-sync", actual[2].Node) + require.Equal(r, "us-west-2a", actual[0].Service.Meta[ConsulK8STopologyZone]) + require.Equal(r, "us-west-2b", actual[1].Service.Meta[ConsulK8STopologyZone]) + require.Equal(r, "us-west-2c", actual[2].Service.Meta[ConsulK8STopologyZone]) require.NotEqual(r, actual[0].Service.ID, actual[1].Service.ID) require.NotEqual(r, actual[0].Service.ID, actual[2].Service.ID) require.NotEqual(r, actual[1].Service.ID, actual[2].Service.ID) @@ -1070,6 +1080,9 @@ func TestServiceResource_nodePortUnnamedPort(t *testing.T) { require.Equal(r, "3.4.5.6", actual[2].Service.Address) require.Equal(r, 30000, actual[2].Service.Port) require.Equal(r, "k8s-sync", actual[2].Node) + require.Equal(r, "us-west-2a", actual[0].Service.Meta[ConsulK8STopologyZone]) + require.Equal(r, "us-west-2b", actual[1].Service.Meta[ConsulK8STopologyZone]) + require.Equal(r, "us-west-2c", actual[2].Service.Meta[ConsulK8STopologyZone]) require.NotEqual(r, actual[0].Service.ID, actual[1].Service.ID) require.NotEqual(r, actual[0].Service.ID, actual[2].Service.ID) require.NotEqual(r, actual[1].Service.ID, actual[2].Service.ID) @@ -1116,6 +1129,9 @@ func TestServiceResource_nodePort_internalOnlySync(t *testing.T) { require.Equal(r, "6.7.8.9", actual[2].Service.Address) require.Equal(r, 30000, actual[2].Service.Port) require.Equal(r, "k8s-sync", actual[2].Node) + require.Equal(r, "us-west-2a", actual[0].Service.Meta[ConsulK8STopologyZone]) + require.Equal(r, "us-west-2b", actual[1].Service.Meta[ConsulK8STopologyZone]) + require.Equal(r, "us-west-2c", actual[2].Service.Meta[ConsulK8STopologyZone]) require.NotEqual(r, actual[0].Service.ID, actual[1].Service.ID) require.NotEqual(r, actual[0].Service.ID, actual[2].Service.ID) require.NotEqual(r, actual[1].Service.ID, actual[2].Service.ID) @@ -1170,6 +1186,9 @@ func TestServiceResource_nodePort_externalFirstSync(t *testing.T) { require.Equal(r, "3.4.5.6", actual[2].Service.Address) require.Equal(r, 30000, actual[2].Service.Port) require.Equal(r, "k8s-sync", actual[2].Node) + require.Equal(r, "us-west-2a", actual[0].Service.Meta[ConsulK8STopologyZone]) + require.Equal(r, "us-west-2b", actual[1].Service.Meta[ConsulK8STopologyZone]) + require.Equal(r, "us-west-2c", actual[2].Service.Meta[ConsulK8STopologyZone]) require.NotEqual(r, actual[0].Service.ID, actual[1].Service.ID) require.NotEqual(r, actual[0].Service.ID, actual[2].Service.ID) require.NotEqual(r, actual[1].Service.ID, actual[2].Service.ID) @@ -1303,6 +1322,9 @@ func TestServiceResource_clusterIPPrefix(t *testing.T) { require.Equal(r, "prefixfoo", actual[2].Service.Service) require.Equal(r, "3.3.3.3", actual[2].Service.Address) require.Equal(r, 8080, actual[2].Service.Port) + require.Equal(r, "us-west-2a", actual[0].Service.Meta[ConsulK8STopologyZone]) + require.Equal(r, "us-west-2b", actual[1].Service.Meta[ConsulK8STopologyZone]) + require.Equal(r, "us-west-2c", actual[2].Service.Meta[ConsulK8STopologyZone]) require.NotEqual(r, actual[0].Service.ID, actual[1].Service.ID) require.NotEqual(r, actual[0].Service.ID, actual[2].Service.ID) require.NotEqual(r, actual[1].Service.ID, actual[2].Service.ID) @@ -1348,6 +1370,9 @@ func TestServiceResource_clusterIPAnnotatedPortName(t *testing.T) { require.Equal(r, 
"foo", actual[2].Service.Service) require.Equal(r, "3.3.3.3", actual[2].Service.Address) require.Equal(r, 2000, actual[2].Service.Port) + require.Equal(r, "us-west-2a", actual[0].Service.Meta[ConsulK8STopologyZone]) + require.Equal(r, "us-west-2b", actual[1].Service.Meta[ConsulK8STopologyZone]) + require.Equal(r, "us-west-2c", actual[2].Service.Meta[ConsulK8STopologyZone]) require.NotEqual(r, actual[0].Service.ID, actual[1].Service.ID) require.NotEqual(r, actual[0].Service.ID, actual[2].Service.ID) require.NotEqual(r, actual[1].Service.ID, actual[2].Service.ID) @@ -1393,6 +1418,9 @@ func TestServiceResource_clusterIPAnnotatedPortNumber(t *testing.T) { require.Equal(r, "foo", actual[2].Service.Service) require.Equal(r, "3.3.3.3", actual[2].Service.Address) require.Equal(r, 4141, actual[2].Service.Port) + require.Equal(r, "us-west-2a", actual[0].Service.Meta[ConsulK8STopologyZone]) + require.Equal(r, "us-west-2b", actual[1].Service.Meta[ConsulK8STopologyZone]) + require.Equal(r, "us-west-2c", actual[2].Service.Meta[ConsulK8STopologyZone]) require.NotEqual(r, actual[0].Service.ID, actual[1].Service.ID) require.NotEqual(r, actual[0].Service.ID, actual[2].Service.ID) require.NotEqual(r, actual[1].Service.ID, actual[2].Service.ID) @@ -1440,6 +1468,9 @@ func TestServiceResource_clusterIPUnnamedPorts(t *testing.T) { require.Equal(r, "foo", actual[2].Service.Service) require.Equal(r, "3.3.3.3", actual[2].Service.Address) require.Equal(r, 8080, actual[2].Service.Port) + require.Equal(r, "us-west-2a", actual[0].Service.Meta[ConsulK8STopologyZone]) + require.Equal(r, "us-west-2b", actual[1].Service.Meta[ConsulK8STopologyZone]) + require.Equal(r, "us-west-2c", actual[2].Service.Meta[ConsulK8STopologyZone]) require.NotEqual(r, actual[0].Service.ID, actual[1].Service.ID) require.NotEqual(r, actual[0].Service.ID, actual[2].Service.ID) require.NotEqual(r, actual[1].Service.ID, actual[2].Service.ID) @@ -1516,6 +1547,9 @@ func TestServiceResource_clusterIPAllNamespaces(t *testing.T) { require.Equal(r, "foo", actual[2].Service.Service) require.Equal(r, "3.3.3.3", actual[2].Service.Address) require.Equal(r, 8080, actual[2].Service.Port) + require.Equal(r, "us-west-2a", actual[0].Service.Meta[ConsulK8STopologyZone]) + require.Equal(r, "us-west-2b", actual[1].Service.Meta[ConsulK8STopologyZone]) + require.Equal(r, "us-west-2c", actual[2].Service.Meta[ConsulK8STopologyZone]) require.NotEqual(r, actual[0].Service.ID, actual[1].Service.ID) require.NotEqual(r, actual[0].Service.ID, actual[2].Service.ID) require.NotEqual(r, actual[1].Service.ID, actual[2].Service.ID) @@ -1564,6 +1598,9 @@ func TestServiceResource_clusterIPTargetPortNamed(t *testing.T) { require.Equal(r, "foo", actual[2].Service.Service) require.Equal(r, "3.3.3.3", actual[2].Service.Address) require.Equal(r, 2000, actual[2].Service.Port) + require.Equal(r, "us-west-2a", actual[0].Service.Meta[ConsulK8STopologyZone]) + require.Equal(r, "us-west-2b", actual[1].Service.Meta[ConsulK8STopologyZone]) + require.Equal(r, "us-west-2c", actual[2].Service.Meta[ConsulK8STopologyZone]) require.NotEqual(r, actual[0].Service.ID, actual[1].Service.ID) require.NotEqual(r, actual[0].Service.ID, actual[2].Service.ID) require.NotEqual(r, actual[1].Service.ID, actual[2].Service.ID) diff --git a/control-plane/connect-inject/webhook/consul_dataplane_sidecar.go b/control-plane/connect-inject/webhook/consul_dataplane_sidecar.go index a18c15b999..890b0b27d0 100644 --- a/control-plane/connect-inject/webhook/consul_dataplane_sidecar.go +++ 
b/control-plane/connect-inject/webhook/consul_dataplane_sidecar.go @@ -10,12 +10,13 @@ import ( "strings" "github.com/google/shlex" - "github.com/hashicorp/consul-k8s/control-plane/connect-inject/common" - "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/utils/ptr" + + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/common" + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" ) const ( @@ -264,10 +265,14 @@ func (w *MeshWebhook) consulDataplaneSidecar(namespace corev1.Namespace, pod cor RunAsGroup: ptr.To(group), RunAsNonRoot: ptr.To(true), AllowPrivilegeEscalation: ptr.To(false), + SeccompProfile: &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, + }, // consul-dataplane requires the NET_BIND_SERVICE capability regardless of binding port #. // See https://developer.hashicorp.com/consul/docs/connect/dataplane#technical-constraints Capabilities: &corev1.Capabilities{ - Add: []corev1.Capability{"NET_BIND_SERVICE"}, + Add: []corev1.Capability{"NET_BIND_SERVICE"}, + Drop: []corev1.Capability{"ALL"}, }, ReadOnlyRootFilesystem: ptr.To(true), } diff --git a/control-plane/connect-inject/webhook/consul_dataplane_sidecar_test.go b/control-plane/connect-inject/webhook/consul_dataplane_sidecar_test.go index 9edd91f9e2..b2988565e5 100644 --- a/control-plane/connect-inject/webhook/consul_dataplane_sidecar_test.go +++ b/control-plane/connect-inject/webhook/consul_dataplane_sidecar_test.go @@ -808,7 +808,11 @@ func TestHandlerConsulDataplaneSidecar_withSecurityContext(t *testing.T) { ReadOnlyRootFilesystem: ptr.To(true), AllowPrivilegeEscalation: ptr.To(false), Capabilities: &corev1.Capabilities{ - Add: []corev1.Capability{"NET_BIND_SERVICE"}, + Add: []corev1.Capability{"NET_BIND_SERVICE"}, + Drop: []corev1.Capability{"ALL"}, + }, + SeccompProfile: &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, }, }, }, @@ -822,7 +826,11 @@ func TestHandlerConsulDataplaneSidecar_withSecurityContext(t *testing.T) { ReadOnlyRootFilesystem: ptr.To(true), AllowPrivilegeEscalation: ptr.To(false), Capabilities: &corev1.Capabilities{ - Add: []corev1.Capability{"NET_BIND_SERVICE"}, + Add: []corev1.Capability{"NET_BIND_SERVICE"}, + Drop: []corev1.Capability{"ALL"}, + }, + SeccompProfile: &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, }, }, }, @@ -836,7 +844,11 @@ func TestHandlerConsulDataplaneSidecar_withSecurityContext(t *testing.T) { ReadOnlyRootFilesystem: ptr.To(true), AllowPrivilegeEscalation: ptr.To(false), Capabilities: &corev1.Capabilities{ - Add: []corev1.Capability{"NET_BIND_SERVICE"}, + Add: []corev1.Capability{"NET_BIND_SERVICE"}, + Drop: []corev1.Capability{"ALL"}, + }, + SeccompProfile: &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, }, }, }, @@ -850,7 +862,11 @@ func TestHandlerConsulDataplaneSidecar_withSecurityContext(t *testing.T) { ReadOnlyRootFilesystem: ptr.To(true), AllowPrivilegeEscalation: ptr.To(false), Capabilities: &corev1.Capabilities{ - Add: []corev1.Capability{"NET_BIND_SERVICE"}, + Add: []corev1.Capability{"NET_BIND_SERVICE"}, + Drop: []corev1.Capability{"ALL"}, + }, + SeccompProfile: &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, }, }, }, diff --git a/control-plane/connect-inject/webhook/container_init.go b/control-plane/connect-inject/webhook/container_init.go index 6c357967c9..3fe4366289 100644 --- 
a/control-plane/connect-inject/webhook/container_init.go +++ b/control-plane/connect-inject/webhook/container_init.go @@ -289,6 +289,19 @@ func (w *MeshWebhook) containerInit(namespace corev1.Namespace, pod corev1.Pod, }, } } + } else { + container.SecurityContext = &corev1.SecurityContext{ + AllowPrivilegeEscalation: ptr.To(false), + Capabilities: &corev1.Capabilities{ + Add: []corev1.Capability{}, + Drop: []corev1.Capability{"ALL"}, + }, + ReadOnlyRootFilesystem: ptr.To(true), + RunAsNonRoot: ptr.To(true), + SeccompProfile: &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, + }, + } } return container, nil diff --git a/control-plane/connect-inject/webhook/container_init_test.go b/control-plane/connect-inject/webhook/container_init_test.go index 00aac4a8fc..3df1c8b70d 100644 --- a/control-plane/connect-inject/webhook/container_init_test.go +++ b/control-plane/connect-inject/webhook/container_init_test.go @@ -9,14 +9,15 @@ import ( "testing" "time" - "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" - "github.com/hashicorp/consul-k8s/control-plane/consul" - "github.com/hashicorp/consul-k8s/control-plane/namespaces" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/ptr" + + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" + "github.com/hashicorp/consul-k8s/control-plane/consul" + "github.com/hashicorp/consul-k8s/control-plane/namespaces" ) const k8sNamespace = "k8snamespace" @@ -328,6 +329,20 @@ func TestHandlerContainerInit_transparentProxy(t *testing.T) { ReadOnlyRootFilesystem: ptr.To(true), AllowPrivilegeEscalation: ptr.To(false), } + } else { + // When tproxy disabled + expectedSecurityContext = &corev1.SecurityContext{ + AllowPrivilegeEscalation: ptr.To(false), + Capabilities: &corev1.Capabilities{ + Add: []corev1.Capability{}, + Drop: []corev1.Capability{"ALL"}, + }, + ReadOnlyRootFilesystem: ptr.To(true), + RunAsNonRoot: ptr.To(true), + SeccompProfile: &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, + }, + } } ns := corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ diff --git a/control-plane/controllers/configentries/configentry_controller.go b/control-plane/controllers/configentries/configentry_controller.go index dc68aea619..886ed3f435 100644 --- a/control-plane/controllers/configentries/configentry_controller.go +++ b/control-plane/controllers/configentries/configentry_controller.go @@ -90,6 +90,10 @@ type ConfigEntryController struct { // `k8s-default` namespace. NSMirroringPrefix string + // ConsulPartition indicates the Consul Admin Partition name the controller is + // operating in. Adds this value as metadata on managed resources. + ConsulPartition string + // CrossNSACLPolicy is the name of the ACL policy to attach to // any created Consul namespaces to allow cross namespace service discovery. // Only necessary if ACLs are enabled. 
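The new ConsulPartition field is consumed by the terminating-gateway controller below through adminPartition(), which wraps it in defaultIfEmpty. That helper is not part of this diff; the sketch below shows the presumed behavior, assuming it falls back to Consul's built-in partition name:

```go
// Presumed implementation only — not a verbatim copy of the existing helper.
func defaultIfEmpty(s string) string {
	if s == "" {
		return "default"
	}
	return s
}
```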
diff --git a/control-plane/controllers/configentries/terminatinggateway_controller.go b/control-plane/controllers/configentries/terminatinggateway_controller.go index ec329bd17c..2eb18c0848 100644 --- a/control-plane/controllers/configentries/terminatinggateway_controller.go +++ b/control-plane/controllers/configentries/terminatinggateway_controller.go @@ -36,6 +36,7 @@ type TerminatingGatewayController struct { FinalizerPatcher NamespacesEnabled bool + PartitionsEnabled bool Log logr.Logger Scheme *runtime.Scheme @@ -49,33 +50,49 @@ func init() { type templateArgs struct { Namespace string + Partition string ServiceName string EnableNamespaces bool + EnablePartitions bool } var ( servicePolicyTpl *template.Template servicePolicyRulesTpl = ` +{{- if .EnablePartitions }} +partition "{{.Partition}}" { +{{- end }} {{- if .EnableNamespaces }} -namespace "{{.Namespace}}" { + namespace "{{.Namespace}}" { {{- end }} - service "{{.ServiceName}}" { - policy = "write" - } + service "{{.ServiceName}}" { + policy = "write" + intention = "read" + } {{- if .EnableNamespaces }} + } +{{- end }} +{{- if .EnablePartitions }} } {{- end }} ` wildcardPolicyTpl *template.Template wildcardPolicyRulesTpl = ` +{{- if .EnablePartitions }} +partition "{{.Partition}}" { +{{- end }} {{- if .EnableNamespaces }} -namespace "{{.Namespace}}" { + namespace "{{.Namespace}}" { {{- end }} - service_prefix "" { - policy = "write" - } + service_prefix "" { + policy = "write" + intention = "read" + } {{- if .EnableNamespaces }} + } +{{- end }} +{{- if .EnablePartitions }} } {{- end }} ` @@ -104,7 +121,7 @@ func (r *TerminatingGatewayController) Reconcile(ctx context.Context, req ctrl.R } if enabled { - err := r.updateACls(log, termGW) + err = r.updateACls(log, termGW) if err != nil { log.Error(err, "error updating terminating-gateway roles") r.UpdateStatusFailedToSetACLs(ctx, termGW, err) @@ -165,13 +182,17 @@ func (r *TerminatingGatewayController) aclsEnabled() (bool, error) { return state.Token != "", nil } +func (r *TerminatingGatewayController) adminPartition() string { + return defaultIfEmpty(r.ConfigEntryController.ConsulPartition) +} + func (r *TerminatingGatewayController) updateACls(log logr.Logger, termGW *consulv1alpha1.TerminatingGateway) error { - client, err := consul.NewClientFromConnMgr(r.ConfigEntryController.ConsulClientConfig, r.ConfigEntryController.ConsulServerConnMgr) + connMgrClient, err := consul.NewClientFromConnMgr(r.ConfigEntryController.ConsulClientConfig, r.ConfigEntryController.ConsulServerConnMgr) if err != nil { return err } - roles, _, err := client.ACL().RoleList(nil) + roles, _, err := connMgrClient.ACL().RoleList(nil) if err != nil { return err } @@ -189,7 +210,7 @@ func (r *TerminatingGatewayController) updateACls(log logr.Logger, termGW *consu return errors.New("terminating gateway role not found") } - terminatingGatewayRole, _, err := client.ACL().RoleRead(terminatingGatewayRoleID, nil) + terminatingGatewayRole, _, err := connMgrClient.ACL().RoleRead(terminatingGatewayRoleID, nil) if err != nil { return err } @@ -214,7 +235,7 @@ func (r *TerminatingGatewayController) updateACls(log logr.Logger, termGW *consu } if termGW.ObjectMeta.DeletionTimestamp.IsZero() { - termGWPoliciesToKeep, termGWPoliciesToRemove, err = r.handleModificationForPolicies(log, client, existingTermGWPolicies, termGW.Spec.Services) + termGWPoliciesToKeep, termGWPoliciesToRemove, err = r.handleModificationForPolicies(log, connMgrClient, existingTermGWPolicies, termGW.Spec.Services) if err != nil { return err } @@ -225,12 
+246,12 @@ func (r *TerminatingGatewayController) updateACls(log logr.Logger, termGW *consu termGWPoliciesToKeep = append(termGWPoliciesToKeep, terminatingGatewayPolicy) terminatingGatewayRole.Policies = termGWPoliciesToKeep - _, _, err = client.ACL().RoleUpdate(terminatingGatewayRole, nil) + _, _, err = connMgrClient.ACL().RoleUpdate(terminatingGatewayRole, nil) if err != nil { return err } - err = r.conditionallyDeletePolicies(log, client, termGWPoliciesToRemove, termGW.Name) + err = r.conditionallyDeletePolicies(log, connMgrClient, termGWPoliciesToRemove, termGW.Name) if err != nil { return err } @@ -253,18 +274,29 @@ func (r *TerminatingGatewayController) handleModificationForPolicies(log logr.Lo termGWPoliciesToKeepNames := mapset.NewSet[string]() for _, service := range services { + log.Info("Checking for existing policies", "policy", servicePolicyName(service.Name, defaultIfEmpty(service.Namespace))) existingPolicy, _, err := client.ACL().PolicyReadByName(servicePolicyName(service.Name, defaultIfEmpty(service.Namespace)), &capi.QueryOptions{}) if err != nil { log.Error(err, "error reading policy") return nil, nil, err } + if existingPolicy != nil { + log.Info("Found for existing policies", "policy", existingPolicy.Name, "ID", existingPolicy.ID) + } else { + log.Info("Did not find for existing policies", "policy", servicePolicyName(service.Name, defaultIfEmpty(service.Namespace))) + } if existingPolicy == nil { policyTemplate := getPolicyTemplateFor(service.Name) + policyNamespace := defaultIfEmpty(service.Namespace) + policyAdminPartition := r.adminPartition() + log.Info("Templating new ACL Policy", "Service", service.Name, "Namespace", policyNamespace, "Partition", policyAdminPartition) var data bytes.Buffer if err := policyTemplate.Execute(&data, templateArgs{ EnableNamespaces: r.NamespacesEnabled, - Namespace: defaultIfEmpty(service.Namespace), + EnablePartitions: r.PartitionsEnabled, + Namespace: policyNamespace, + Partition: policyAdminPartition, ServiceName: service.Name, }); err != nil { // just panic if we can't compile the simple template @@ -277,7 +309,10 @@ func (r *TerminatingGatewayController) handleModificationForPolicies(log logr.Lo Rules: data.String(), }, nil) if err != nil { + log.Error(err, "error creating policy") return nil, nil, err + } else { + log.Info("Created new ACL Policy", "Service", service.Name, "Namespace", policyNamespace, "Partition", policyAdminPartition) } } diff --git a/control-plane/subcommand/inject-connect/v1controllers.go b/control-plane/subcommand/inject-connect/v1controllers.go index dd6b3be739..e5c3ae2b06 100644 --- a/control-plane/subcommand/inject-connect/v1controllers.go +++ b/control-plane/subcommand/inject-connect/v1controllers.go @@ -187,6 +187,7 @@ func (c *Command) configureControllers(ctx context.Context, mgr manager.Manager, ConsulDestinationNamespace: c.flagConsulDestinationNamespace, EnableNSMirroring: c.flagEnableK8SNSMirroring, NSMirroringPrefix: c.flagK8SNSMirroringPrefix, + ConsulPartition: c.consul.Partition, CrossNSACLPolicy: c.flagCrossNamespaceACLPolicy, } if err := (&controllers.ServiceDefaultsController{ @@ -275,6 +276,7 @@ func (c *Command) configureControllers(ctx context.Context, mgr manager.Manager, Client: mgr.GetClient(), Log: ctrl.Log.WithName("controller").WithName(apicommon.TerminatingGateway), NamespacesEnabled: c.flagEnableNamespaces, + PartitionsEnabled: c.flagEnablePartitions, Scheme: mgr.GetScheme(), }).SetupWithManager(ctx, mgr); err != nil { setupLog.Error(err, "unable to create controller", 
"controller", apicommon.TerminatingGateway)