diff --git a/rootfs/etc/nginx/lua/balancer.lua b/rootfs/etc/nginx/lua/balancer.lua index 00104c89d7..df48568247 100644 --- a/rootfs/etc/nginx/lua/balancer.lua +++ b/rootfs/etc/nginx/lua/balancer.lua @@ -72,6 +72,8 @@ local function get_implementation(backend) return implementation end +-- used to get the IP address of the upstream to set as the +-- backends endpoint to route to local function resolve_external_names(original_backend) local backend = util.deepcopy(original_backend) local endpoints = {} @@ -181,6 +183,7 @@ local function sync_backends() backends_last_synced_at = raw_backends_last_synced_at end +-- logic used to pick up if request should be routed to an alternative backend local function route_to_alternative_balancer(balancer) if balancer.is_affinitized(balancer) then -- If request is already affinitized to a primary balancer, keep the primary balancer. @@ -218,7 +221,6 @@ local function route_to_alternative_balancer(balancer) "of backend: ", tostring(backend_name)) return false end - local target_header = util.replace_special_char(traffic_shaping_policy.header, "-", "_") local header = ngx.var["http_" .. 
target_header] @@ -278,14 +280,15 @@ local function get_balancer() local backend_name = ngx.var.proxy_upstream_name local balancer = balancers[backend_name] + if not balancer then return nil end - if route_to_alternative_balancer(balancer) then + --we should not overwrite balancer when it is the default backend + if route_to_alternative_balancer(balancer) and not balancer.is_default_backend then local alternative_backend_name = balancer.alternative_backends[1] ngx.var.proxy_alternative_upstream_name = alternative_backend_name - balancer = balancers[alternative_backend_name] end @@ -318,6 +321,7 @@ end function _M.rewrite() local balancer = get_balancer() + if not balancer then ngx.status = ngx.HTTP_SERVICE_UNAVAILABLE return ngx.exit(ngx.status) @@ -344,6 +348,7 @@ function _M.balance() ngx_balancer.set_more_tries(1) local ok, err = ngx_balancer.set_current_peer(peer) + if not ok then ngx.log(ngx.ERR, "error while setting current upstream peer ", peer, ": ", err) @@ -363,6 +368,16 @@ function _M.log() balancer:after_balance() end +--this is used to check if we are routing to the +--default backend for specific error codes so that we do not overwrite it with +--alternative routes +--https://github.com/kubernetes/ingress-nginx/issues/9944 +function _M.is_default_backend() + if ngx.ctx.balancer then + ngx.ctx.balancer.is_default_backend = true + end +end + setmetatable(_M, {__index = { get_implementation = get_implementation, sync_backend = sync_backend, diff --git a/rootfs/etc/nginx/template/nginx.tmpl b/rootfs/etc/nginx/template/nginx.tmpl index d58be28800..8c69b89dcf 100644 --- a/rootfs/etc/nginx/template/nginx.tmpl +++ b/rootfs/etc/nginx/template/nginx.tmpl @@ -931,6 +931,10 @@ stream { rewrite (.*) / break; proxy_pass http://upstream_balancer; + access_by_lua_block { + balancer.is_default_backend() + } + log_by_lua_block { {{ if $enableMetrics }} monitor.call() diff --git a/test/e2e/DEFAULT_BACKEND_IMAGE b/test/e2e/DEFAULT_BACKEND_IMAGE new file mode 100644 index 
0000000000..f3dc5f4a9c --- /dev/null +++ b/test/e2e/DEFAULT_BACKEND_IMAGE @@ -0,0 +1 @@ +registry.k8s.io/ingress-nginx/nginx-errors:v20230505@sha256:3600dcd1bbd0d05959bb01af4b272714e94d22d24a64e91838e7183c80e53f7f diff --git a/test/e2e/annotations/canary.go b/test/e2e/annotations/canary.go index ea733dbf43..80b7efc766 100644 --- a/test/e2e/annotations/canary.go +++ b/test/e2e/annotations/canary.go @@ -86,6 +86,54 @@ var _ = framework.DescribeAnnotation("canary-*", func() { NotContains(canaryService) }) + // issue: https://github.com/kubernetes/ingress-nginx/issues/9944 + // canary routing should not overwrite custom errors + ginkgo.It("should respond with a 401 status from the custom errors backend when canary responds with a 401", + func() { + host := "foo" + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/custom-http-errors": "401", + "nginx.ingress.kubernetes.io/default-backend": framework.DefaultBackendService, + } + + f.NewDefaultBackendDeployment() + + f.EnsureIngress(framework.NewSingleIngress( + host, + "/", + host, + f.Namespace, + framework.HTTPBunService, + 80, + annotations, + )) + + f.WaitForNginxServer(host, func(server string) bool { + return strings.Contains(server, "server_name foo") + }) + + f.EnsureIngress(framework.NewSingleIngress( + canaryService, + "/", + host, + f.Namespace, + canaryService, + 80, + map[string]string{ + "nginx.ingress.kubernetes.io/canary": "true", + "nginx.ingress.kubernetes.io/canary-by-header": "CanaryByHeader", + }, + )) + + f.HTTPTestClient(). + GET("/status/401"). + WithHeader("Host", host). + Expect(). + Status(http.StatusUnauthorized). + Body(). 
+ Contains("401") + }) + ginkgo.It("should return 404 status for requests to the canary if no matching ingress is found", func() { host := fooHost diff --git a/test/e2e/framework/deployment.go b/test/e2e/framework/deployment.go index 08a5353b24..9f4face103 100644 --- a/test/e2e/framework/deployment.go +++ b/test/e2e/framework/deployment.go @@ -31,20 +31,30 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" ) -// EchoService name of the deployment for the echo app -const EchoService = "echo" +const ( + // EchoService name of the deployment for the echo app + EchoService = "echo" -// SlowEchoService name of the deployment for the echo app -const SlowEchoService = "slow-echo" + // SlowEchoService name of the deployment for the echo app + SlowEchoService = "slow-echo" -// HTTPBunService name of the deployment for the httpbun app -const HTTPBunService = "httpbun" + // HTTPBunService name of the deployment for the httpbun app + HTTPBunService = "httpbun" -// NipService name of external service using nip.io -const NIPService = "external-nip" + // NIPService name of external service using nip.io + NIPService = "external-nip" -// HTTPBunImage is the default image that is used to deploy HTTPBun with the framwork -var HTTPBunImage = os.Getenv("HTTPBUN_IMAGE") + // DefaultBackendService name of default backend deployment + DefaultBackendService = "default-backend" +) + +var ( + // HTTPBunImage is the default image that is used to deploy HTTPBun with the framework + HTTPBunImage = os.Getenv("HTTPBUN_IMAGE") + // DefaultBackendImage is the default image that is used to deploy custom + // default backend with the framework + DefaultBackendImage = os.Getenv("DEFAULT_BACKEND_IMAGE") +) // EchoImage is the default image to be used by the echo service const EchoImage = "registry.k8s.io/ingress-nginx/e2e-test-echo@sha256:4938d1d91a2b7d19454460a8c1b010b89f6ff92d2987fd889ac3e8fc3b70d91a" //#nosec G101 @@ -56,8 +66,16 @@ type deploymentOptions struct { name string namespace string image 
string + port int32 replicas int + command []string + args []string + env []corev1.EnvVar + volumeMounts []corev1.VolumeMount + volumes []corev1.Volume svcAnnotations map[string]string + setProbe bool + probe *corev1.HTTPGetAction } // WithDeploymentNamespace allows configuring the deployment's namespace @@ -103,6 +121,13 @@ func WithImage(i string) func(*deploymentOptions) { } } +// WithProbeHandler used to set probe on deployment +func WithProbeHandler(p *corev1.HTTPGetAction) func(*deploymentOptions) { + return func(o *deploymentOptions) { + o.probe = p + } +} + // NewEchoDeployment creates a new single replica deployment of the echo server image in a particular namespace func (f *Framework) NewEchoDeployment(opts ...func(*deploymentOptions)) { options := &deploymentOptions{ @@ -158,6 +183,108 @@ func (f *Framework) NewEchoDeployment(opts ...func(*deploymentOptions)) { assert.Nil(ginkgo.GinkgoT(), err, "waiting for endpoints to become ready") } +// NewDefaultBackendDeployment creates a new single replica deployment of the +// custom errors backend in a particular namespace +func (f *Framework) NewDefaultBackendDeployment(opts ...func(*deploymentOptions)) { + options := &deploymentOptions{ + namespace: f.Namespace, + name: DefaultBackendService, + replicas: 1, + image: DefaultBackendImage, + } + for _, o := range opts { + o(options) + } + + f.EnsureConfigMap(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: options.name, + Namespace: options.namespace, + }, + Data: map[string]string{ + "404": `404`, + "401": `401`, + "503": `503`, + }, + }) + + f.EnsureDeployment(newDeployment( + options.name, + options.namespace, + options.image, + 8080, + int32(options.replicas), + nil, nil, nil, + []corev1.VolumeMount{ + { + Name: options.name, + MountPath: "/www", + }, + }, + []corev1.Volume{ + { + Name: options.name, + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: 
options.name, + }, + Items: []corev1.KeyToPath{ + { + Key: "404", + Path: "404.html", + }, + { + Key: "401", + Path: "401.html", + }, + { + Key: "503", + Path: "503.html", + }, + }, + }, + }, + }, + }, + false, + WithProbeHandler(&corev1.HTTPGetAction{ + Port: intstr.FromString("http"), + Path: "/healthz", + }), + )) + + f.EnsureService(&corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: options.name, + Namespace: options.namespace, + Annotations: options.svcAnnotations, + }, + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + { + Name: "http", + Port: 80, + TargetPort: intstr.FromInt(8080), + Protocol: corev1.ProtocolTCP, + }, + }, + Selector: map[string]string{ + "app": options.name, + }, + }, + }) + + err := WaitForEndpoints( + f.KubeClientSet, + DefaultTimeout, + options.name, + options.namespace, + options.replicas, + ) + assert.Nil(ginkgo.GinkgoT(), err, "waiting for endpoints to become ready") +} + // BuildNipHost used to generate a nip host for DNS resolving func BuildNIPHost(ip string) string { return fmt.Sprintf("%s.nip.io", ip) @@ -261,7 +388,7 @@ func (f *Framework) NewHttpbunDeployment(opts ...func(*deploymentOptions)) strin e, err := f.KubeClientSet. CoreV1(). Endpoints(f.Namespace). 
- Get(context.TODO(), HTTPBunService, metav1.GetOptions{}) + Get(context.TODO(), options.name, metav1.GetOptions{}) assert.Nil(ginkgo.GinkgoT(), err, "failed to get httpbun endpoint") return e.Subsets[0].Addresses[0].IP @@ -486,9 +613,38 @@ func (f *Framework) NewGRPCBinDeployment() { assert.Nil(ginkgo.GinkgoT(), err, "waiting for endpoints to become ready") } -func newDeployment(name, namespace, image string, port int32, replicas int32, command []string, args []string, env []corev1.EnvVar, - volumeMounts []corev1.VolumeMount, volumes []corev1.Volume, setProbe bool, +func newDeployment( + name, namespace, image string, + port int32, replicas int32, + command, args []string, + env []corev1.EnvVar, + volumeMounts []corev1.VolumeMount, + volumes []corev1.Volume, + setProbe bool, + opts ...func(*deploymentOptions), ) *appsv1.Deployment { + // TODO: we should move to using options to configure deployments to have less + // logic here + o := &deploymentOptions{ + name: name, + namespace: namespace, + image: image, + port: port, + command: command, + args: args, + env: env, + volumeMounts: volumeMounts, + volumes: volumes, + setProbe: setProbe, + probe: &corev1.HTTPGetAction{ + Port: intstr.FromString("http"), + Path: "/", + }, + } + for _, opt := range opts { + opt(o) + } + probe := &corev1.Probe{ InitialDelaySeconds: 2, PeriodSeconds: 1, @@ -496,48 +652,46 @@ func newDeployment(name, namespace, image string, port int32, replicas int32, co TimeoutSeconds: 2, FailureThreshold: 6, ProbeHandler: corev1.ProbeHandler{ - HTTPGet: &corev1.HTTPGetAction{ - Port: intstr.FromString("http"), - Path: "/", - }, + HTTPGet: o.probe, }, } d := &appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, + Name: o.name, + Namespace: o.namespace, }, Spec: appsv1.DeploymentSpec{ Replicas: NewInt32(replicas), Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{ - "app": name, + "app": o.name, }, }, Template: corev1.PodTemplateSpec{ ObjectMeta: 
metav1.ObjectMeta{ Labels: map[string]string{ - "app": name, + "app": o.name, }, }, Spec: corev1.PodSpec{ TerminationGracePeriodSeconds: NewInt64(0), Containers: []corev1.Container{ { - Name: name, + Name: o.name, Image: image, - Env: []corev1.EnvVar{}, + Env: o.env, + Args: o.args, Ports: []corev1.ContainerPort{ { Name: "http", - ContainerPort: port, + ContainerPort: o.port, }, }, - VolumeMounts: volumeMounts, + VolumeMounts: o.volumeMounts, }, }, - Volumes: volumes, + Volumes: o.volumes, }, }, }, diff --git a/test/e2e/run-e2e-suite.sh b/test/e2e/run-e2e-suite.sh index 9333ee61f9..580fd2d51c 100755 --- a/test/e2e/run-e2e-suite.sh +++ b/test/e2e/run-e2e-suite.sh @@ -52,6 +52,7 @@ fi BASEDIR=$(dirname "$0") NGINX_BASE_IMAGE=$(cat $BASEDIR/../../NGINX_BASE) HTTPBUN_IMAGE=$(cat $BASEDIR/HTTPBUN_IMAGE) +DEFAULT_BACKEND_IMAGE=$(cat $BASEDIR/DEFAULT_BACKEND_IMAGE) echo -e "${BGREEN}Granting permissions to ingress-nginx e2e service account...${NC}" kubectl create serviceaccount ingress-nginx-e2e || true @@ -83,6 +84,7 @@ kubectl run --rm \ --env="E2E_CHECK_LEAKS=${E2E_CHECK_LEAKS}" \ --env="NGINX_BASE_IMAGE=${NGINX_BASE_IMAGE}" \ --env="HTTPBUN_IMAGE=${HTTPBUN_IMAGE}" \ + --env="DEFAULT_BACKEND_IMAGE=${DEFAULT_BACKEND_IMAGE}" \ --overrides='{ "apiVersion": "v1", "spec":{"serviceAccountName": "ingress-nginx-e2e"}}' \ e2e --image=nginx-ingress-controller:e2e