Skip to content

Commit 3eca4f0

Browse files
committed
fix: node access to underlay pod failing when applying network policy (#5256)
* fix: node access to an underlay pod failed when a network policy was applied. Signed-off-by: clyi <clyi@alauda.io>
1 parent bb0c59a commit 3eca4f0

File tree

3 files changed

+213
-2
lines changed

3 files changed

+213
-2
lines changed

pkg/controller/node.go

Lines changed: 20 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -703,9 +703,28 @@ func (c *Controller) retryDelDupChassis(attempts, sleep int, f func(node *v1.Nod
703703
func (c *Controller) fetchPodsOnNode(nodeName string, pods []*v1.Pod) ([]string, error) {
704704
ports := make([]string, 0, len(pods))
705705
for _, pod := range pods {
706-
if !isPodAlive(pod) || pod.Spec.HostNetwork || pod.Spec.NodeName != nodeName || pod.Annotations[util.LogicalRouterAnnotation] != c.config.ClusterRouter {
706+
if !isPodAlive(pod) || pod.Spec.HostNetwork || pod.Spec.NodeName != nodeName {
707707
continue
708708
}
709+
710+
if pod.Annotations[util.LogicalRouterAnnotation] != c.config.ClusterRouter {
711+
subnetName := pod.Annotations[util.LogicalSwitchAnnotation]
712+
if subnetName == "" {
713+
klog.V(4).Infof("Pod %s/%s is not on cluster router and has no logical switch annotation, skipping for VLAN check.", pod.Namespace, pod.Name)
714+
continue
715+
}
716+
717+
subnet, err := c.subnetsLister.Get(subnetName)
718+
if err != nil {
719+
klog.Errorf("failed to get subnet %s: %v", subnetName, err)
720+
return nil, err
721+
}
722+
723+
if subnet.Spec.Vlan == "" {
724+
continue
725+
}
726+
}
727+
709728
podName := c.getNameByPod(pod)
710729

711730
podNets, err := c.getPodKubeovnNets(pod)

test/e2e/framework/iproute/iproute.go

Lines changed: 39 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@ import (
77
"reflect"
88
"slices"
99
"strings"
10+
"time"
1011

1112
"github.com/kubeovn/kube-ovn/test/e2e/framework/docker"
1213
)
@@ -136,6 +137,44 @@ func AddressShow(device string, execFunc ExecFunc) ([]Link, error) {
136137
return links, nil
137138
}
138139

140+
func AddressDel(device, addr string, execFunc ExecFunc) error {
141+
e := execer{fn: execFunc}
142+
if err := e.exec("ip a del"+devArg(device)+" "+addr, nil); err != nil {
143+
return err
144+
}
145+
return nil
146+
}
147+
148+
func AddressDelCheckExist(device, addr string, execFunc ExecFunc) error {
149+
found := false
150+
for range 10 {
151+
showLinks, err := AddressShow(device, execFunc)
152+
if err != nil {
153+
return err
154+
}
155+
for _, link := range showLinks {
156+
for _, a := range link.AddrInfo {
157+
cidr := fmt.Sprintf("%s/%d", a.Local, a.PrefixLen)
158+
if cidr == addr {
159+
found = true
160+
break
161+
}
162+
}
163+
if found {
164+
break
165+
}
166+
}
167+
if found {
168+
break
169+
}
170+
time.Sleep(time.Second)
171+
}
172+
if !found {
173+
return fmt.Errorf("address %s not found on %s after waiting", addr, device)
174+
}
175+
return AddressDel(device, addr, execFunc)
176+
}
177+
139178
func RouteShow(table, device string, execFunc ExecFunc) ([]Route, error) {
140179
e := execer{fn: execFunc}
141180
var args string

test/e2e/kube-ovn/underlay/underlay.go

Lines changed: 154 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,9 @@ import (
1212

1313
dockernetwork "github.com/docker/docker/api/types/network"
1414
corev1 "k8s.io/api/core/v1"
15+
netv1 "k8s.io/api/networking/v1"
16+
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
17+
"k8s.io/apimachinery/pkg/util/intstr"
1518
clientset "k8s.io/client-go/kubernetes"
1619
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
1720
e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
@@ -106,7 +109,7 @@ var _ = framework.SerialDescribe("[group:underlay]", func() {
106109
var itFn func(bool)
107110
var cs clientset.Interface
108111
var nodeNames []string
109-
var clusterName, providerNetworkName, vlanName, subnetName, podName, namespaceName string
112+
var clusterName, providerNetworkName, vlanName, subnetName, podName, namespaceName, netpolName string
110113
var vpcName string
111114
var u2oPodNameUnderlay, u2oOverlaySubnetName, u2oPodNameOverlay, u2oOverlaySubnetNameCustomVPC, u2oPodOverlayCustomVPC string
112115
var linkMap map[string]*iproute.Link
@@ -118,6 +121,7 @@ var _ = framework.SerialDescribe("[group:underlay]", func() {
118121
var vlanClient *framework.VlanClient
119122
var providerNetworkClient *framework.ProviderNetworkClient
120123
var dockerNetwork *dockernetwork.Inspect
124+
var netpolClient *framework.NetworkPolicyClient
121125
var containerID string
122126

123127
ginkgo.BeforeEach(func() {
@@ -128,6 +132,7 @@ var _ = framework.SerialDescribe("[group:underlay]", func() {
128132
vpcClient = f.VpcClient()
129133
vlanClient = f.VlanClient()
130134
providerNetworkClient = f.ProviderNetworkClient()
135+
netpolClient = f.NetworkPolicyClient()
131136
namespaceName = f.Namespace.Name
132137
podName = "pod-" + framework.RandomSuffix()
133138
u2oPodNameOverlay = "pod-" + framework.RandomSuffix()
@@ -139,6 +144,7 @@ var _ = framework.SerialDescribe("[group:underlay]", func() {
139144
vlanName = "vlan-" + framework.RandomSuffix()
140145
providerNetworkName = "pn-" + framework.RandomSuffix()
141146
vpcName = "vpc-" + framework.RandomSuffix()
147+
netpolName = "netpol-" + framework.RandomSuffix()
142148
containerID = ""
143149

144150
if skip {
@@ -325,6 +331,9 @@ var _ = framework.SerialDescribe("[group:underlay]", func() {
325331
framework.ExpectNoError(err)
326332
}
327333

334+
ginkgo.By("Deleting network policy " + netpolName)
335+
netpolClient.DeleteSync(netpolName)
336+
328337
ginkgo.By("Deleting pod " + podName)
329338
podClient.DeleteSync(podName)
330339

@@ -448,6 +457,150 @@ var _ = framework.SerialDescribe("[group:underlay]", func() {
448457
framework.ExpectEqual(links[0].Mtu, docker.MTU)
449458
})
450459

460+
// Regression test for kube-ovn#5256: a node must still be able to reach an
// underlay pod after a deny-all ingress NetworkPolicy is applied to it
// (readiness/liveness probes from the kubelet must keep working).
framework.ConformanceIt("should be able to access underlay pod from node after applying network policy", func() {
	// Provision the underlay: provider network, vlan and a subnet bound to
	// the docker bridge network backing the kind cluster.
	ginkgo.By("Creating provider network " + providerNetworkName)
	pn := makeProviderNetwork(providerNetworkName, false, linkMap)
	_ = providerNetworkClient.CreateSync(pn)

	ginkgo.By("Getting docker network " + dockerNetworkName)
	network, err := docker.NetworkInspect(dockerNetworkName)
	framework.ExpectNoError(err, "getting docker network "+dockerNetworkName)

	ginkgo.By("Creating vlan " + vlanName)
	_ = vlanClient.Create(framework.MakeVlan(vlanName, providerNetworkName, 0))

	ginkgo.By("Creating subnet " + subnetName)
	var cidrV4, cidrV6, gatewayV4, gatewayV6 string
	for _, ipamConfig := range dockerNetwork.IPAM.Config {
		switch util.CheckProtocol(ipamConfig.Subnet) {
		case apiv1.ProtocolIPv4:
			if f.HasIPv4() {
				cidrV4, gatewayV4 = ipamConfig.Subnet, ipamConfig.Gateway
			}
		case apiv1.ProtocolIPv6:
			if f.HasIPv6() {
				cidrV6, gatewayV6 = ipamConfig.Subnet, ipamConfig.Gateway
			}
		}
	}
	cidr := make([]string, 0, 2)
	gateway := make([]string, 0, 2)
	if f.HasIPv4() {
		cidr = append(cidr, cidrV4)
		gateway = append(gateway, gatewayV4)
	}
	if f.HasIPv6() {
		cidr = append(cidr, cidrV6)
		gateway = append(gateway, gatewayV6)
	}
	// Reserve the addresses already held by the kind node containers so the
	// subnet IPAM never hands them out to pods.
	excludeIPs := make([]string, 0, len(network.Containers)*2)
	for _, ctr := range network.Containers {
		if ctr.IPv4Address != "" && f.HasIPv4() {
			excludeIPs = append(excludeIPs, strings.Split(ctr.IPv4Address, "/")[0])
		}
		if ctr.IPv6Address != "" && f.HasIPv6() {
			excludeIPs = append(excludeIPs, strings.Split(ctr.IPv6Address, "/")[0])
		}
	}
	underlaySubnet := framework.MakeSubnet(subnetName, vlanName, strings.Join(cidr, ","), strings.Join(gateway, ","), "", "", excludeIPs, nil, []string{namespaceName})
	_ = subnetClient.CreateSync(underlaySubnet)

	// Open the host firewall so forwarded traffic between node addresses and
	// the underlay is not dropped before reaching the pod.
	ginkgo.By("Getting nodes")
	nodes, err := e2enode.GetReadySchedulableNodes(context.Background(), cs)
	framework.ExpectNoError(err)
	framework.ExpectNotEmpty(nodes.Items)

	for _, node := range nodes.Items {
		for _, address := range node.Status.Addresses {
			if address.Type != corev1.NodeInternalIP {
				continue
			}
			nodeIP := address.Address
			// Insert an ACCEPT rule for each direction (-s first, then -d),
			// picking iptables or ip6tables by the address family.
			if util.CheckProtocol(nodeIP) == apiv1.ProtocolIPv4 {
				for _, direction := range []string{"-s", "-d"} {
					cmd := exec.Command("sudo", "iptables", "-t", "filter", "-I", "FORWARD", direction, nodeIP, "-j", "ACCEPT")
					if output, err := cmd.CombinedOutput(); err != nil {
						framework.Logf("failed to add iptables rule: %v, output: %s", err, string(output))
					}
				}
			} else {
				for _, direction := range []string{"-s", "-d"} {
					cmd := exec.Command("sudo", "ip6tables", "-t", "filter", "-I", "FORWARD", direction, nodeIP, "-j", "ACCEPT")
					if output, err := cmd.CombinedOutput(); err != nil {
						framework.Logf("failed to add ip6tables rule: %v, output: %s", err, string(output))
					}
				}
			}
		}
	}

	// Remove the node containers' own addresses from the provider bridge so
	// node-to-pod traffic actually takes the underlay path.
	kindNodes, _ := kind.ListNodes(clusterName, "") // NOTE(review): list error is intentionally best-effort here
	for _, node := range kindNodes {
		for _, ctr := range network.Containers {
			if ctr.Name != node.Name() {
				continue
			}
			if ctr.IPv4Address != "" && f.HasIPv4() {
				framework.ExpectNoError(iproute.AddressDelCheckExist("br-"+providerNetworkName, ctr.IPv4Address, node.Exec))
			}
			if ctr.IPv6Address != "" && f.HasIPv6() {
				framework.ExpectNoError(iproute.AddressDelCheckExist("br-"+providerNetworkName, ctr.IPv6Address, node.Exec))
			}
		}
	}

	// Deny all ingress to the pod via an empty Ingress rule list; kubelet
	// probes from the node must still succeed for CreateSync to pass.
	ginkgo.By("Creating network policy " + netpolName)
	netpol := &netv1.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{
			Name:      netpolName,
			Namespace: namespaceName,
		},
		Spec: netv1.NetworkPolicySpec{
			PodSelector: metav1.LabelSelector{
				MatchLabels: map[string]string{"app": "agnhost"},
			},
			Ingress:     []netv1.NetworkPolicyIngressRule{},
			PolicyTypes: []netv1.PolicyType{netv1.PolicyTypeIngress},
		},
	}
	_ = netpolClient.Create(netpol)

	ginkgo.By("Creating pod " + podName)
	labels := map[string]string{"app": "agnhost"}
	args := []string{"netexec", "--http-port", strconv.Itoa(80)}
	pod := framework.MakePrivilegedPod(namespaceName, podName, labels, nil, framework.AgnhostImage, nil, args)
	// Both probes are node-originated HTTP GETs against the pod — the exact
	// traffic the referenced fix is supposed to keep working.
	newHTTPProbe := func() *corev1.Probe {
		return &corev1.Probe{
			ProbeHandler: corev1.ProbeHandler{
				HTTPGet: &corev1.HTTPGetAction{
					Port: intstr.FromInt32(80),
				},
			},
		}
	}
	pod.Spec.Containers[0].ReadinessProbe = newHTTPProbe()
	pod.Spec.Containers[0].LivenessProbe = newHTTPProbe()

	_ = podClient.CreateSync(pod)
})
603+
451604
framework.ConformanceIt("should be able to detect duplicate address", func() {
452605
f.SkipVersionPriorTo(1, 9, "Duplicate address detection was introduced in v1.9")
453606
if !f.HasIPv4() {

0 commit comments

Comments (0)