From f01576300b491856e23c9b3b9505336917c9c048 Mon Sep 17 00:00:00 2001 From: zbb88888 Date: Thu, 18 Dec 2025 11:13:39 +0800 Subject: [PATCH 1/2] Add tests for VIP finalizer handling and subnet status updates - Introduced a new test to verify that the subnet status is correctly updated when a VIP is created and deleted, ensuring that finalizers are properly handled. - Added checks for both IPv4 and IPv6 protocols, including dual stack scenarios, to confirm that available and using IP counts and ranges are updated as expected. - Enhanced the existing VIP creation test to wait for the finalizer to be added before proceeding with subnet status verification. - Updated sleep durations to ensure sufficient time for status updates after VIP operations. Signed-off-by: zbb88888 --- makefiles/e2e.mk | 20 +- pkg/controller/controller.go | 8 +- pkg/controller/ovn_dnat.go | 55 +- pkg/controller/ovn_eip.go | 171 ++- pkg/controller/ovn_fip.go | 53 +- pkg/controller/ovn_snat.go | 53 +- pkg/controller/pod.go | 3 + pkg/controller/subnet.go | 4 +- pkg/controller/vip.go | 140 ++- pkg/controller/vpc_nat_gw_eip.go | 123 +- test/e2e/framework/vpc-nat-gateway.go | 6 + test/e2e/iptables-eip-qos/e2e_test.go | 989 ++++++++++++++++ test/e2e/iptables-vpc-nat-gw/e2e_test.go | 1324 +++++++++------------- test/e2e/vip/e2e_test.go | 192 +++- 14 files changed, 2148 insertions(+), 993 deletions(-) create mode 100644 test/e2e/iptables-eip-qos/e2e_test.go diff --git a/makefiles/e2e.mk b/makefiles/e2e.mk index 92db5979fd4..1b73e9f321b 100644 --- a/makefiles/e2e.mk +++ b/makefiles/e2e.mk @@ -205,8 +205,8 @@ vpc-egress-gateway-e2e: ginkgo $(GINKGO_OUTPUT_OPT) $(GINKGO_PARALLEL_OPT) --randomize-all -v --timeout=30m \ --focus=CNI:Kube-OVN ./test/e2e/vpc-egress-gateway/vpc-egress-gateway.test -- $(TEST_BIN_ARGS) -.PHONY: iptables-vpc-nat-gw-conformance-e2e -iptables-vpc-nat-gw-conformance-e2e: +.PHONY: iptables-eip-conformance-e2e +iptables-eip-conformance-e2e: ginkgo build $(E2E_BUILD_FLAGS) ./test/e2e/iptables-vpc-nat-gw E2E_BRANCH=$(E2E_BRANCH) \ E2E_IP_FAMILY=$(E2E_IP_FAMILY) \ @@ -214,6 +214,22 @@ iptables-vpc-nat-gw-conformance-e2e: ginkgo $(GINKGO_OUTPUT_OPT) $(GINKGO_PARALLEL_OPT) --randomize-all -v \ --focus=CNI:Kube-OVN ./test/e2e/iptables-vpc-nat-gw/iptables-vpc-nat-gw.test -- $(TEST_BIN_ARGS) +.PHONY: iptables-eip-qos-conformance-e2e +iptables-eip-qos-conformance-e2e: + ginkgo build $(E2E_BUILD_FLAGS) ./test/e2e/iptables-eip-qos + E2E_BRANCH=$(E2E_BRANCH) \ + E2E_IP_FAMILY=$(E2E_IP_FAMILY) \ + E2E_NETWORK_MODE=$(E2E_NETWORK_MODE) \ + ginkgo $(GINKGO_OUTPUT_OPT) --randomize-all -v \ + --focus=CNI:Kube-OVN ./test/e2e/iptables-eip-qos/iptables-eip-qos.test -- $(TEST_BIN_ARGS) + +.PHONY: iptables-vpc-nat-gw-conformance-e2e +iptables-vpc-nat-gw-conformance-e2e: + $(MAKE) iptables-eip-conformance-e2e + $(MAKE) iptables-eip-qos-conformance-e2e + + + .PHONY: ovn-vpc-nat-gw-conformance-e2e ovn-vpc-nat-gw-conformance-e2e: ginkgo build $(E2E_BUILD_FLAGS) ./test/e2e/ovn-vpc-nat-gw diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go index d13e8a72a9c..984a659ea93 100644 --- a/pkg/controller/controller.go +++ b/pkg/controller/controller.go @@ -159,7 +159,7 @@ type Controller struct { addIptablesEipQueue workqueue.TypedRateLimitingInterface[string] updateIptablesEipQueue workqueue.TypedRateLimitingInterface[string] resetIptablesEipQueue workqueue.TypedRateLimitingInterface[string] - delIptablesEipQueue workqueue.TypedRateLimitingInterface[string] + delIptablesEipQueue 
workqueue.TypedRateLimitingInterface[*kubeovnv1.IptablesEIP] iptablesFipsLister kubeovnlister.IptablesFIPRuleLister iptablesFipSynced cache.InformerSynced @@ -184,7 +184,7 @@ type Controller struct { addOvnEipQueue workqueue.TypedRateLimitingInterface[string] updateOvnEipQueue workqueue.TypedRateLimitingInterface[string] resetOvnEipQueue workqueue.TypedRateLimitingInterface[string] - delOvnEipQueue workqueue.TypedRateLimitingInterface[string] + delOvnEipQueue workqueue.TypedRateLimitingInterface[*kubeovnv1.OvnEip] ovnFipsLister kubeovnlister.OvnFipLister ovnFipSynced cache.InformerSynced @@ -472,7 +472,7 @@ func Run(ctx context.Context, config *Configuration) { addIptablesEipQueue: newTypedRateLimitingQueue("AddIptablesEip", custCrdRateLimiter), updateIptablesEipQueue: newTypedRateLimitingQueue("UpdateIptablesEip", custCrdRateLimiter), resetIptablesEipQueue: newTypedRateLimitingQueue("ResetIptablesEip", custCrdRateLimiter), - delIptablesEipQueue: newTypedRateLimitingQueue("DeleteIptablesEip", custCrdRateLimiter), + delIptablesEipQueue: newTypedRateLimitingQueue[*kubeovnv1.IptablesEIP]("DeleteIptablesEip", nil), iptablesFipsLister: iptablesFipInformer.Lister(), iptablesFipSynced: iptablesFipInformer.Informer().HasSynced, @@ -563,7 +563,7 @@ func Run(ctx context.Context, config *Configuration) { addOvnEipQueue: newTypedRateLimitingQueue("AddOvnEip", custCrdRateLimiter), updateOvnEipQueue: newTypedRateLimitingQueue("UpdateOvnEip", custCrdRateLimiter), resetOvnEipQueue: newTypedRateLimitingQueue("ResetOvnEip", custCrdRateLimiter), - delOvnEipQueue: newTypedRateLimitingQueue("DeleteOvnEip", custCrdRateLimiter), + delOvnEipQueue: newTypedRateLimitingQueue[*kubeovnv1.OvnEip]("DeleteOvnEip", nil), ovnFipsLister: ovnFipInformer.Lister(), ovnFipSynced: ovnFipInformer.Informer().HasSynced, diff --git a/pkg/controller/ovn_dnat.go b/pkg/controller/ovn_dnat.go index 1647d98bc48..14bf14fb804 100644 --- a/pkg/controller/ovn_dnat.go +++ b/pkg/controller/ovn_dnat.go @@ -34,8 +34,9 @@ func (c *Controller) enqueueUpdateOvnDnatRule(oldObj, newObj any) { // avoid delete twice return } - klog.Infof("enqueue del ovn dnat %s", key) - c.delOvnDnatRuleQueue.Add(key) + // DNAT with finalizer should be handled in updateOvnDnatRuleQueue + klog.Infof("enqueue update (deleting) ovn dnat %s", key) + c.updateOvnDnatRuleQueue.Add(key) return } oldDnat := oldObj.(*kubeovnv1.OvnDnatRule) @@ -297,6 +298,48 @@ func (c *Controller) handleUpdateOvnDnatRule(key string) error { klog.Error(err) return err } + + // Handle deletion first (for DNATs with finalizers) + if !cachedDnat.DeletionTimestamp.IsZero() { + klog.Infof("handle deleting ovn dnat %s", key) + if cachedDnat.Status.Vpc == "" { + // Already cleaned, just remove finalizer + if err = c.handleDelOvnDnatFinalizer(cachedDnat); err != nil { + klog.Errorf("failed to remove finalizer for ovn dnat %s, %v", cachedDnat.Name, err) + return err + } + return nil + } + + // ovn delete dnat + if cachedDnat.Status.V4Eip != "" && cachedDnat.Status.ExternalPort != "" { + if err = c.DelDnatRule(cachedDnat.Status.Vpc, cachedDnat.Name, + cachedDnat.Status.V4Eip, cachedDnat.Status.ExternalPort); err != nil { + klog.Errorf("failed to delete v4 dnat %s, %v", key, err) + return err + } + } + if cachedDnat.Status.V6Eip != "" && cachedDnat.Status.ExternalPort != "" { + if err = c.DelDnatRule(cachedDnat.Status.Vpc, cachedDnat.Name, + cachedDnat.Status.V6Eip, cachedDnat.Status.ExternalPort); err != nil { + klog.Errorf("failed to delete v6 dnat %s, %v", key, err) + return err + } + } + + // Remove 
finalizer + if err = c.handleDelOvnDnatFinalizer(cachedDnat); err != nil { + klog.Errorf("failed to remove finalizer for ovn dnat %s, %v", cachedDnat.Name, err) + return err + } + + // Reset eip + if cachedDnat.Spec.OvnEip != "" { + c.resetOvnEipQueue.Add(cachedDnat.Spec.OvnEip) + } + return nil + } + if !cachedDnat.Status.Ready { // create dnat only in add process, just check to error out here klog.Infof("wait ovn dnat %s to be ready only in the handle add process", cachedDnat.Name) @@ -617,6 +660,7 @@ func (c *Controller) handleAddOvnDnatFinalizer(cachedDnat *kubeovnv1.OvnDnatRule err error ) + controllerutil.RemoveFinalizer(newDnat, util.DepreciatedFinalizerName) controllerutil.AddFinalizer(newDnat, util.KubeOVNControllerFinalizer) if patch, err = util.GenerateMergePatchPayload(cachedDnat, newDnat); err != nil { klog.Errorf("failed to generate patch payload for ovn dnat '%s', %v", cachedDnat.Name, err) @@ -655,5 +699,12 @@ func (c *Controller) handleDelOvnDnatFinalizer(cachedDnat *kubeovnv1.OvnDnatRule klog.Errorf("failed to remove finalizer from ovn dnat '%s', %v", cachedDnat.Name, err) return err } + + // Trigger associated EIP to recheck if it can be deleted now + if cachedDnat.Spec.OvnEip != "" { + klog.Infof("triggering eip %s update after dnat %s deletion", cachedDnat.Spec.OvnEip, cachedDnat.Name) + c.updateOvnEipQueue.Add(cachedDnat.Spec.OvnEip) + } + return nil } diff --git a/pkg/controller/ovn_eip.go b/pkg/controller/ovn_eip.go index 388ba3d5b77..1e23ca1def0 100644 --- a/pkg/controller/ovn_eip.go +++ b/pkg/controller/ovn_eip.go @@ -35,8 +35,9 @@ func (c *Controller) enqueueUpdateOvnEip(oldObj, newObj any) { // avoid delete eip twice return } - klog.Infof("enqueue del ovn eip %s", key) - c.delOvnEipQueue.Add(key) + // EIP with finalizer should be handled in updateOvnEipQueue + klog.Infof("enqueue update (deleting) ovn eip %s", key) + c.updateOvnEipQueue.Add(key) return } oldEip := oldObj.(*kubeovnv1.OvnEip) @@ -72,7 +73,7 @@ func (c *Controller) enqueueDelOvnEip(obj any) { key := cache.MetaObjectToName(eip).String() klog.Infof("enqueue del ovn eip %s", key) - c.delOvnEipQueue.Add(key) + c.delOvnEipQueue.Add(eip.DeepCopy()) } func (c *Controller) handleAddOvnEip(key string) error { @@ -89,6 +90,7 @@ func (c *Controller) handleAddOvnEip(key string) error { return nil } klog.Infof("handle add ovn eip %s", cachedEip.Name) + var v4ip, v6ip, mac, subnetName string subnetName = cachedEip.Spec.ExternalSubnet if subnetName == "" { @@ -143,11 +145,10 @@ func (c *Controller) handleAddOvnEip(key string) error { return err } } - if err = c.handleAddOvnEipFinalizer(cachedEip); err != nil { - klog.Errorf("failed to add finalizer for ovn eip, %v", err) - return err - } - c.updateSubnetStatusQueue.Add(subnetName) + + // Trigger subnet status update after all operations complete + // At this point: IPAM allocated, OvnEip CR created with labels+status+finalizer + c.updateSubnetStatusQueue.Add(subnet.Name) return nil } @@ -160,6 +161,55 @@ func (c *Controller) handleUpdateOvnEip(key string) error { klog.Error(err) return err } + + // Handle deletion first + if !cachedEip.DeletionTimestamp.IsZero() { + klog.Infof("handle deleting ovn eip %s", key) + + // Check if EIP is still being used by any NAT rules (FIP/DNAT/SNAT) BEFORE cleanup + // Only proceed with cleanup and finalizer removal when no NAT rules are using it + nat, err := c.getOvnEipNat(cachedEip.Spec.V4Ip) + if err != nil { + klog.Errorf("failed to get ovn eip %s nat rules, %v", key, err) + return err + } + if nat != "" { + err := 
fmt.Errorf("ovn eip %s is still being used by NAT rules: %s, waiting for them to be deleted", key, nat) + klog.Error(err) + return err + } + + // Clean up resources before removing finalizer + if cachedEip.Spec.Type == util.OvnEipTypeLSP { + if err := c.OVNNbClient.DeleteLogicalSwitchPort(cachedEip.Name); err != nil { + klog.Errorf("failed to delete lsp %s, %v", cachedEip.Name, err) + return err + } + } + if cachedEip.Spec.Type == util.OvnEipTypeLRP { + if err := c.OVNNbClient.DeleteLogicalRouterPort(cachedEip.Name); err != nil { + klog.Errorf("failed to delete lrp %s, %v", cachedEip.Name, err) + return err + } + } + + // Release IP from IPAM before removing finalizer + c.ipam.ReleaseAddressByPod(cachedEip.Name, cachedEip.Spec.ExternalSubnet) + + // Now remove finalizer, which will trigger subnet status update + if err = c.handleDelOvnEipFinalizer(cachedEip); err != nil { + klog.Errorf("failed to handle remove ovn eip finalizer , %v", err) + return err + } + return nil + } + + // Always ensure finalizer is added regardless of Status + if err = c.handleAddOrUpdateOvnEipFinalizer(cachedEip); err != nil { + klog.Errorf("failed to handle add or update finalizer for ovn eip %s: %v", key, err) + return err + } + if !cachedEip.Status.Ready { // create eip only in add process, just check to error out here klog.Infof("wait ovn eip %s to be ready only in the handle add process", cachedEip.Name) @@ -216,17 +266,12 @@ func (c *Controller) handleResetOvnEip(key string) error { return nil } -func (c *Controller) handleDelOvnEip(key string) error { - klog.Infof("handle del ovn eip %s", key) - eip, err := c.ovnEipsLister.Get(key) - if err != nil { - if k8serrors.IsNotFound(err) { - return nil - } - klog.Error(err) - return err - } +func (c *Controller) handleDelOvnEip(eip *kubeovnv1.OvnEip) error { + // This handles deletion of EIPs without finalizers (race condition or direct deletion) + // EIPs with finalizers are handled in handleUpdateOvnEip + klog.Infof("handle del ovn eip %s (without finalizer)", eip.Name) + // Clean up resources if they still exist if eip.Spec.Type == util.OvnEipTypeLSP { if err := c.OVNNbClient.DeleteLogicalSwitchPort(eip.Name); err != nil { klog.Errorf("failed to delete lsp %s, %v", eip.Name, err) @@ -240,12 +285,14 @@ func (c *Controller) handleDelOvnEip(key string) error { } } - if err = c.handleDelOvnEipFinalizer(eip); err != nil { - klog.Errorf("failed to handle remove ovn eip finalizer , %v", err) - return err - } + // Release IP from IPAM c.ipam.ReleaseAddressByPod(eip.Name, eip.Spec.ExternalSubnet) - c.updateSubnetStatusQueue.Add(eip.Spec.ExternalSubnet) + + // Ensure subnet status is updated + if eip.Spec.ExternalSubnet != "" { + c.updateSubnetStatusQueue.Add(eip.Spec.ExternalSubnet) + } + return nil } @@ -253,9 +300,11 @@ func (c *Controller) createOrUpdateOvnEipCR(key, subnet, v4ip, v6ip, mac, usageT cachedEip, err := c.ovnEipsLister.Get(key) if err != nil { if k8serrors.IsNotFound(err) { + // Create CR with finalizer, labels and status all at once _, err := c.config.KubeOvnClient.KubeovnV1().OvnEips().Create(context.Background(), &kubeovnv1.OvnEip{ ObjectMeta: metav1.ObjectMeta{ - Name: key, + Name: key, + Finalizers: []string{util.KubeOVNControllerFinalizer}, Labels: map[string]string{ util.SubnetNameLabel: subnet, util.OvnEipTypeLabel: usageType, @@ -270,6 +319,14 @@ func (c *Controller) createOrUpdateOvnEipCR(key, subnet, v4ip, v6ip, mac, usageT MacAddress: mac, Type: usageType, }, + Status: kubeovnv1.OvnEipStatus{ + V4Ip: v4ip, + V6Ip: v6ip, + MacAddress: mac, + 
Type: usageType, + Nat: "", + Ready: false, + }, }, metav1.CreateOptions{}) if err != nil { err := fmt.Errorf("failed to create crd ovn eip '%s', %w", key, err) @@ -284,8 +341,17 @@ func (c *Controller) createOrUpdateOvnEipCR(key, subnet, v4ip, v6ip, mac, usageT } } else { ovnEip := cachedEip.DeepCopy() - needUpdate := false + // Ensure labels are set correctly before any update + if ovnEip.Labels == nil { + ovnEip.Labels = make(map[string]string) + } + ovnEip.Labels[util.SubnetNameLabel] = subnet + ovnEip.Labels[util.OvnEipTypeLabel] = usageType + ovnEip.Labels[util.EipV4IpLabel] = v4ip + ovnEip.Labels[util.EipV6IpLabel] = v6ip + + needUpdate := false if mac != "" && ovnEip.Spec.MacAddress != mac { ovnEip.Spec.MacAddress = mac needUpdate = true @@ -303,6 +369,7 @@ func (c *Controller) createOrUpdateOvnEipCR(key, subnet, v4ip, v6ip, mac, usageT needUpdate = true } if needUpdate { + // Update with labels and spec in one call if _, err := c.config.KubeOvnClient.KubeovnV1().OvnEips().Update(context.Background(), ovnEip, metav1.UpdateOptions{}); err != nil { errMsg := fmt.Errorf("failed to update ovn eip '%s', %w", key, err) klog.Error(errMsg) @@ -341,42 +408,10 @@ func (c *Controller) createOrUpdateOvnEipCR(key, subnet, v4ip, v6ip, mac, usageT return err } } - - var needUpdateLabel bool - var op string - if len(ovnEip.Labels) == 0 { - op = "add" - ovnEip.Labels = map[string]string{ - util.SubnetNameLabel: subnet, - util.OvnEipTypeLabel: usageType, - util.EipV4IpLabel: v4ip, - util.EipV6IpLabel: v6ip, - } - needUpdateLabel = true - } - if ovnEip.Labels[util.SubnetNameLabel] != subnet { - op = "replace" - ovnEip.Labels[util.SubnetNameLabel] = subnet - ovnEip.Labels[util.EipV4IpLabel] = v4ip - ovnEip.Labels[util.EipV6IpLabel] = v6ip - needUpdateLabel = true - } - if ovnEip.Labels[util.OvnEipTypeLabel] != usageType { - op = "replace" - ovnEip.Labels[util.OvnEipTypeLabel] = usageType - needUpdateLabel = true - } - if needUpdateLabel { - patchPayloadTemplate := `[{ "op": "%s", "path": "/metadata/labels", "value": %s }]` - raw, _ := json.Marshal(ovnEip.Labels) - patchPayload := fmt.Sprintf(patchPayloadTemplate, op, raw) - if _, err := c.config.KubeOvnClient.KubeovnV1().OvnEips().Patch(context.Background(), ovnEip.Name, types.JSONPatchType, - []byte(patchPayload), metav1.PatchOptions{}); err != nil { - klog.Errorf("failed to patch label for ovn eip '%s', %v", ovnEip.Name, err) - return err - } - } } + // Trigger subnet status update after CR creation or update + time.Sleep(300 * time.Millisecond) + c.updateSubnetStatusQueue.Add(subnet) return nil } @@ -504,11 +539,12 @@ func (c *Controller) syncOvnEipFinalizer(cl client.Client) error { }) } -func (c *Controller) handleAddOvnEipFinalizer(cachedEip *kubeovnv1.OvnEip) error { - if !cachedEip.DeletionTimestamp.IsZero() || len(cachedEip.GetFinalizers()) != 0 { +func (c *Controller) handleAddOrUpdateOvnEipFinalizer(cachedEip *kubeovnv1.OvnEip) error { + if !cachedEip.DeletionTimestamp.IsZero() { return nil } newEip := cachedEip.DeepCopy() + controllerutil.RemoveFinalizer(newEip, util.DepreciatedFinalizerName) controllerutil.AddFinalizer(newEip, util.KubeOVNControllerFinalizer) patch, err := util.GenerateMergePatchPayload(cachedEip, newEip) if err != nil { @@ -523,6 +559,11 @@ func (c *Controller) handleAddOvnEipFinalizer(cachedEip *kubeovnv1.OvnEip) error klog.Errorf("failed to add finalizer for ovn eip '%s', %v", cachedEip.Name, err) return err } + + // Trigger subnet status update after finalizer is processed as a fallback + // This handles cases where 
finalizer was not added during creation + // AddFinalizer is idempotent, so this is safe even if finalizer already exists + c.updateSubnetStatusQueue.Add(cachedEip.Spec.ExternalSubnet) return nil } @@ -558,6 +599,12 @@ func (c *Controller) handleDelOvnEipFinalizer(cachedEip *kubeovnv1.OvnEip) error klog.Errorf("failed to remove finalizer from ovn eip '%s', %v", cachedEip.Name, err) return err } + + // Trigger subnet status update after finalizer is removed + // This ensures subnet status reflects the IP release + // Add delay to ensure API server completes the finalizer removal + time.Sleep(300 * time.Millisecond) + c.updateSubnetStatusQueue.Add(cachedEip.Spec.ExternalSubnet) return nil } diff --git a/pkg/controller/ovn_fip.go b/pkg/controller/ovn_fip.go index 7c47cc2998e..724f3767993 100644 --- a/pkg/controller/ovn_fip.go +++ b/pkg/controller/ovn_fip.go @@ -35,8 +35,9 @@ func (c *Controller) enqueueUpdateOvnFip(oldObj, newObj any) { // avoid delete twice return } - klog.Infof("enqueue del ovn fip %s", key) - c.delOvnFipQueue.Add(key) + // FIP with finalizer should be handled in updateOvnFipQueue + klog.Infof("enqueue update (deleting) ovn fip %s", key) + c.updateOvnFipQueue.Add(key) return } oldFip := oldObj.(*kubeovnv1.OvnFip) @@ -267,6 +268,46 @@ func (c *Controller) handleUpdateOvnFip(key string) error { klog.Error(err) return err } + + // Handle deletion first (for FIPs with finalizers) + if !cachedFip.DeletionTimestamp.IsZero() { + klog.Infof("handle deleting ovn fip %s", key) + if cachedFip.Status.Vpc == "" { + // Already cleaned, just remove finalizer + if err = c.handleDelOvnFipFinalizer(cachedFip); err != nil { + klog.Errorf("failed to remove finalizer for ovn fip %s, %v", cachedFip.Name, err) + return err + } + return nil + } + + // ovn delete fip nat + if cachedFip.Status.V4Eip != "" && cachedFip.Status.V4Ip != "" { + if err = c.OVNNbClient.DeleteNat(cachedFip.Status.Vpc, ovnnb.NATTypeDNATAndSNAT, cachedFip.Status.V4Eip, cachedFip.Status.V4Ip); err != nil { + klog.Errorf("failed to delete v4 fip %s, %v", key, err) + return err + } + } + if cachedFip.Status.V6Eip != "" && cachedFip.Status.V6Ip != "" { + if err = c.OVNNbClient.DeleteNat(cachedFip.Status.Vpc, ovnnb.NATTypeDNATAndSNAT, cachedFip.Status.V6Eip, cachedFip.Status.V6Ip); err != nil { + klog.Errorf("failed to delete v6 fip %s, %v", key, err) + return err + } + } + + // Remove finalizer + if err = c.handleDelOvnFipFinalizer(cachedFip); err != nil { + klog.Errorf("failed to remove finalizer for ovn fip %s, %v", cachedFip.Name, err) + return err + } + + // Reset eip + if cachedFip.Spec.OvnEip != "" { + c.resetOvnEipQueue.Add(cachedFip.Spec.OvnEip) + } + return nil + } + if !cachedFip.Status.Ready { // create fip only in add process, just check to error out here klog.Infof("wait ovn fip %s to be ready only in the handle add process", cachedFip.Name) @@ -542,6 +583,7 @@ func (c *Controller) handleAddOvnFipFinalizer(cachedFip *kubeovnv1.OvnFip) error return nil } newFip := cachedFip.DeepCopy() + controllerutil.RemoveFinalizer(newFip, util.DepreciatedFinalizerName) controllerutil.AddFinalizer(newFip, util.KubeOVNControllerFinalizer) patch, err := util.GenerateMergePatchPayload(cachedFip, newFip) if err != nil { @@ -580,5 +622,12 @@ func (c *Controller) handleDelOvnFipFinalizer(cachedFip *kubeovnv1.OvnFip) error klog.Errorf("failed to remove finalizer from ovn fip '%s', %v", cachedFip.Name, err) return err } + + // Trigger associated EIP to recheck if it can be deleted now + if cachedFip.Spec.OvnEip != "" { + 
klog.Infof("triggering eip %s update after fip %s deletion", cachedFip.Spec.OvnEip, cachedFip.Name) + c.updateOvnEipQueue.Add(cachedFip.Spec.OvnEip) + } + return nil } diff --git a/pkg/controller/ovn_snat.go b/pkg/controller/ovn_snat.go index 11de6beed10..e2e5df5b1cb 100644 --- a/pkg/controller/ovn_snat.go +++ b/pkg/controller/ovn_snat.go @@ -33,8 +33,9 @@ func (c *Controller) enqueueUpdateOvnSnatRule(oldObj, newObj any) { // avoid delete twice return } - klog.Infof("enqueue del ovn snat %s", key) - c.delOvnSnatRuleQueue.Add(key) + // SNAT with finalizer should be handled in updateOvnSnatRuleQueue + klog.Infof("enqueue update (deleting) ovn snat %s", key) + c.updateOvnSnatRuleQueue.Add(key) return } oldSnat := oldObj.(*kubeovnv1.OvnSnatRule) @@ -197,6 +198,46 @@ func (c *Controller) handleUpdateOvnSnatRule(key string) error { klog.Error(err) return err } + + // Handle deletion first (for SNATs with finalizers) + if !cachedSnat.DeletionTimestamp.IsZero() { + klog.Infof("handle deleting ovn snat %s", key) + if cachedSnat.Status.Vpc == "" { + // Already cleaned, just remove finalizer + if err = c.handleDelOvnSnatFinalizer(cachedSnat); err != nil { + klog.Errorf("failed to remove finalizer for ovn snat %s, %v", cachedSnat.Name, err) + return err + } + return nil + } + + // ovn delete snat + if cachedSnat.Status.V4Eip != "" && cachedSnat.Status.V4IpCidr != "" { + if err = c.OVNNbClient.DeleteNat(cachedSnat.Status.Vpc, ovnnb.NATTypeSNAT, cachedSnat.Status.V4Eip, cachedSnat.Status.V4IpCidr); err != nil { + klog.Errorf("failed to delete v4 snat %s, %v", key, err) + return err + } + } + if cachedSnat.Status.V6Eip != "" && cachedSnat.Status.V6IpCidr != "" { + if err = c.OVNNbClient.DeleteNat(cachedSnat.Status.Vpc, ovnnb.NATTypeSNAT, cachedSnat.Status.V6Eip, cachedSnat.Status.V6IpCidr); err != nil { + klog.Errorf("failed to delete v6 snat %s, %v", key, err) + return err + } + } + + // Remove finalizer + if err = c.handleDelOvnSnatFinalizer(cachedSnat); err != nil { + klog.Errorf("failed to remove finalizer for ovn snat %s, %v", cachedSnat.Name, err) + return err + } + + // Reset eip + if cachedSnat.Spec.OvnEip != "" { + c.resetOvnEipQueue.Add(cachedSnat.Spec.OvnEip) + } + return nil + } + if !cachedSnat.Status.Ready { klog.Infof("wait ovn snat %s to be ready only in the handle add process", cachedSnat.Name) return nil @@ -458,6 +499,7 @@ func (c *Controller) handleAddOvnSnatFinalizer(cachedSnat *kubeovnv1.OvnSnatRule return nil } newSnat := cachedSnat.DeepCopy() + controllerutil.RemoveFinalizer(newSnat, util.DepreciatedFinalizerName) controllerutil.AddFinalizer(newSnat, util.KubeOVNControllerFinalizer) patch, err := util.GenerateMergePatchPayload(cachedSnat, newSnat) if err != nil { @@ -495,5 +537,12 @@ func (c *Controller) handleDelOvnSnatFinalizer(cachedSnat *kubeovnv1.OvnSnatRule klog.Errorf("failed to remove finalizer from ovn snat '%s', %v", cachedSnat.Name, err) return err } + + // Trigger associated EIP to recheck if it can be deleted now + if cachedSnat.Spec.OvnEip != "" { + klog.Infof("triggering eip %s update after snat %s deletion", cachedSnat.Spec.OvnEip, cachedSnat.Name) + c.updateOvnEipQueue.Add(cachedSnat.Spec.OvnEip) + } + return nil } diff --git a/pkg/controller/pod.go b/pkg/controller/pod.go index 87ed37c1709..4e4452bc1fe 100644 --- a/pkg/controller/pod.go +++ b/pkg/controller/pod.go @@ -1204,6 +1204,9 @@ func (c *Controller) handleDeletePod(key string) (err error) { } // release ipam address after delete ip CR c.ipam.ReleaseAddressByNic(podKey, portName, podNet.Subnet.Name) + // 
Trigger subnet status update after IPAM release + // This is needed when IP CR is deleted without finalizer (race condition) + c.updateSubnetStatusQueue.Add(podNet.Subnet.Name) } } if pod.Annotations[util.VipAnnotation] != "" { diff --git a/pkg/controller/subnet.go b/pkg/controller/subnet.go index d6f17d8c10c..e6a7d64fa2c 100644 --- a/pkg/controller/subnet.go +++ b/pkg/controller/subnet.go @@ -53,7 +53,7 @@ func (c *Controller) enqueueDeleteSubnet(obj any) { } klog.V(3).Infof("enqueue delete subnet %s", subnet.Name) - c.deleteSubnetQueue.Add(subnet) + c.deleteSubnetQueue.Add(subnet.DeepCopy()) } func readyToRemoveFinalizer(subnet *kubeovnv1.Subnet) bool { @@ -275,6 +275,7 @@ func (c *Controller) syncSubnetFinalizer(cl client.Client) error { func (c *Controller) handleSubnetFinalizer(subnet *kubeovnv1.Subnet) (*kubeovnv1.Subnet, bool, error) { if subnet.DeletionTimestamp.IsZero() && !slices.Contains(subnet.GetFinalizers(), util.KubeOVNControllerFinalizer) { newSubnet := subnet.DeepCopy() + controllerutil.RemoveFinalizer(newSubnet, util.DepreciatedFinalizerName) controllerutil.AddFinalizer(newSubnet, util.KubeOVNControllerFinalizer) patch, err := util.GenerateMergePatchPayload(subnet, newSubnet) if err != nil { @@ -298,6 +299,7 @@ func (c *Controller) handleSubnetFinalizer(subnet *kubeovnv1.Subnet) (*kubeovnv1 u2oInterconnIP := subnet.Status.U2OInterconnectionIP if !subnet.DeletionTimestamp.IsZero() && (usingIPs == 0 || (usingIPs == 1 && u2oInterconnIP != "")) { newSubnet := subnet.DeepCopy() + controllerutil.RemoveFinalizer(newSubnet, util.DepreciatedFinalizerName) controllerutil.RemoveFinalizer(newSubnet, util.KubeOVNControllerFinalizer) patch, err := util.GenerateMergePatchPayload(subnet, newSubnet) if err != nil { diff --git a/pkg/controller/vip.go b/pkg/controller/vip.go index 722114975d1..901d830dac1 100644 --- a/pkg/controller/vip.go +++ b/pkg/controller/vip.go @@ -7,6 +7,7 @@ import ( "fmt" "slices" "strings" + "time" k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -63,7 +64,7 @@ func (c *Controller) enqueueDelVirtualIP(obj any) { key := cache.MetaObjectToName(vip).String() klog.Infof("enqueue del vip %s", key) - c.delVirtualIPQueue.Add(vip) + c.delVirtualIPQueue.Add(vip.DeepCopy()) } func (c *Controller) handleAddVirtualIP(key string) error { @@ -80,6 +81,7 @@ func (c *Controller) handleAddVirtualIP(key string) error { return nil } klog.V(3).Infof("handle add vip %s", key) + vip := cachedVip.DeepCopy() var sourceV4Ip, sourceV6Ip, v4ip, v6ip, mac, subnetName string subnetName = vip.Spec.Subnet @@ -167,6 +169,10 @@ func (c *Controller) handleAddVirtualIP(key string) error { klog.Error(err) return err } + + // Trigger subnet status update after all operations complete + // At this point: IPAM allocated, VIP CR created with labels+status+finalizer + c.updateSubnetStatusQueue.Add(subnetName) return nil } @@ -182,6 +188,31 @@ func (c *Controller) handleUpdateVirtualIP(key string) error { vip := cachedVip.DeepCopy() // should delete if !vip.DeletionTimestamp.IsZero() { + klog.Infof("handle deleting vip %s", vip.Name) + // Clean up resources before removing finalizer + if vip.Spec.Type != "" { + subnet, err := c.subnetsLister.Get(vip.Spec.Subnet) + if err != nil { + klog.Errorf("failed to get subnet %s: %v", vip.Spec.Subnet, err) + return err + } + portName := ovs.PodNameToPortName(vip.Name, vip.Spec.Namespace, subnet.Spec.Provider) + klog.Infof("delete vip lsp %s", portName) + if err := c.OVNNbClient.DeleteLogicalSwitchPort(portName); err != 
nil { + err = fmt.Errorf("failed to delete lsp %s: %w", vip.Name, err) + klog.Error(err) + return err + } + } + // delete virtual ports + if err := c.OVNNbClient.DeleteLogicalSwitchPort(vip.Name); err != nil { + klog.Errorf("delete virtual logical switch port %s from logical switch %s: %v", vip.Name, vip.Spec.Subnet, err) + return err + } + // Release IP from IPAM before removing finalizer + c.ipam.ReleaseAddressByPod(vip.Name, vip.Spec.Subnet) + + // Now remove finalizer, which will trigger subnet status update if err = c.handleDelVipFinalizer(key); err != nil { klog.Errorf("failed to handle vip finalizer %v", err) return err @@ -217,38 +248,26 @@ func (c *Controller) handleUpdateVirtualIP(key string) error { klog.Error(err) return err } - if err = c.handleAddVipFinalizer(key); err != nil { - klog.Errorf("failed to handle vip finalizer %v", err) - return err - } + } + // Always ensure finalizer is added regardless of Status + if err = c.handleAddOrUpdateVipFinalizer(key); err != nil { + klog.Errorf("failed to handle vip finalizer %v", err) + return err } return nil } func (c *Controller) handleDelVirtualIP(vip *kubeovnv1.Vip) error { - klog.Infof("handle delete vip %s", vip.Name) - // TODO:// clean vip in its parent port aap list - if vip.Spec.Type != "" { - subnet, err := c.subnetsLister.Get(vip.Spec.Subnet) - if err != nil { - klog.Errorf("failed to get subnet %s: %v", vip.Spec.Subnet, err) - return err - } - portName := ovs.PodNameToPortName(vip.Name, vip.Spec.Namespace, subnet.Spec.Provider) - klog.Infof("delete vip lsp %s", portName) - if err := c.OVNNbClient.DeleteLogicalSwitchPort(portName); err != nil { - err = fmt.Errorf("failed to delete lsp %s: %w", vip.Name, err) - klog.Error(err) - return err - } - } - // delete virtual ports - if err := c.OVNNbClient.DeleteLogicalSwitchPort(vip.Name); err != nil { - klog.Errorf("delete virtual logical switch port %s from logical switch %s: %v", vip.Name, vip.Spec.Subnet, err) - return err + // Cleanup is now handled in handleUpdateVirtualIP before finalizer removal + // This function is kept for compatibility with the delete queue + klog.V(3).Infof("vip %s cleanup already done in update handler", vip.Name) + + // For VIPs deleted without finalizer (race condition or direct deletion), + // we need to ensure subnet status is updated as a safety net. 
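The safety net works because the delete queue now carries the whole object rather than a namespace/name key: enqueueDelVirtualIP enqueues vip.DeepCopy(), so Spec.Subnet is still readable after the CR has vanished from the informer cache (the controller.go hunk makes the same change for delIptablesEipQueue and delOvnEipQueue). A minimal, self-contained sketch of the pattern, using a stand-in vip type rather than the project's actual wiring:

```go
package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

// vip is a stand-in for kubeovnv1.Vip, keeping only the field the delete
// handler still needs once the CR is gone from the cluster.
type vip struct {
	Name   string
	Subnet string
}

func (v *vip) deepCopy() *vip { c := *v; return &c }

func main() {
	// A typed queue whose items are *vip rather than string keys
	// (requires a recent client-go with generic workqueues).
	q := workqueue.NewTypedRateLimitingQueue(
		workqueue.DefaultTypedControllerRateLimiter[*vip]())

	// At delete time, enqueue a deep copy; cache objects must never be mutated.
	q.Add((&vip{Name: "vip-1", Subnet: "ext-net"}).deepCopy())

	// The worker can still read the subnet for the safety-net status update.
	v, _ := q.Get()
	fmt.Println("recompute status of subnet:", v.Subnet)
	q.Done(v)
}
```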
+ if vip.Spec.Subnet != "" { + c.updateSubnetStatusQueue.Add(vip.Spec.Subnet) } - c.ipam.ReleaseAddressByPod(vip.Name, vip.Spec.Subnet) - c.updateSubnetStatusQueue.Add(vip.Spec.Subnet) + return nil } @@ -348,14 +367,16 @@ func (c *Controller) createOrUpdateVipCR(key, ns, subnet, v4ip, v6ip, mac string vipCR, err := c.virtualIpsLister.Get(key) if err != nil { if k8serrors.IsNotFound(err) { + // Create CR with finalizer, labels and status all at once if _, err := c.config.KubeOvnClient.KubeovnV1().Vips().Create(context.Background(), &kubeovnv1.Vip{ ObjectMeta: metav1.ObjectMeta{ - Name: key, + Name: key, + Namespace: ns, + Finalizers: []string{util.KubeOVNControllerFinalizer}, Labels: map[string]string{ util.SubnetNameLabel: subnet, util.IPReservedLabel: "", }, - Namespace: ns, }, Spec: kubeovnv1.VipSpec{ Namespace: ns, @@ -364,6 +385,11 @@ func (c *Controller) createOrUpdateVipCR(key, ns, subnet, v4ip, v6ip, mac string V6ip: v6ip, MacAddress: mac, }, + Status: kubeovnv1.VipStatus{ + V4ip: v4ip, + V6ip: v6ip, + Mac: mac, + }, }, metav1.CreateOptions{}); err != nil { err := fmt.Errorf("failed to create crd vip '%s', %w", key, err) klog.Error(err) @@ -376,6 +402,14 @@ func (c *Controller) createOrUpdateVipCR(key, ns, subnet, v4ip, v6ip, mac string } } else { vip := vipCR.DeepCopy() + + // Ensure labels are set correctly + if vip.Labels == nil { + vip.Labels = make(map[string]string) + } + vip.Labels[util.SubnetNameLabel] = subnet + vip.Labels[util.IPReservedLabel] = "" + if vip.Status.Mac == "" && mac != "" || vip.Status.V4ip == "" && v4ip != "" || vip.Status.V6ip == "" && v6ip != "" { @@ -391,39 +425,16 @@ func (c *Controller) createOrUpdateVipCR(key, ns, subnet, v4ip, v6ip, mac string vip.Status.Mac = mac vip.Status.Type = vip.Spec.Type // TODO:// Ready = true as subnet.Status.Ready + // Update with labels, spec and status in one call if _, err := c.config.KubeOvnClient.KubeovnV1().Vips().Update(context.Background(), vip, metav1.UpdateOptions{}); err != nil { err := fmt.Errorf("failed to update vip '%s', %w", key, err) klog.Error(err) return err } } - var needUpdateLabel bool - var op string - if len(vip.Labels) == 0 { - op = "add" - vip.Labels = map[string]string{ - util.SubnetNameLabel: subnet, - util.IPReservedLabel: "", - } - needUpdateLabel = true - } - if _, ok := vip.Labels[util.SubnetNameLabel]; !ok { - op = "add" - vip.Labels[util.SubnetNameLabel] = subnet - vip.Labels[util.IPReservedLabel] = "" - needUpdateLabel = true - } - if needUpdateLabel { - patchPayloadTemplate := `[{ "op": "%s", "path": "/metadata/labels", "value": %s }]` - raw, _ := json.Marshal(vip.Labels) - patchPayload := fmt.Sprintf(patchPayloadTemplate, op, raw) - if _, err := c.config.KubeOvnClient.KubeovnV1().Vips().Patch(context.Background(), vip.Name, types.JSONPatchType, - []byte(patchPayload), metav1.PatchOptions{}); err != nil { - klog.Errorf("failed to patch label for vip '%s', %v", vip.Name, err) - return err - } - } } + // Trigger subnet status update after CR creation or update + time.Sleep(300 * time.Millisecond) c.updateSubnetStatusQueue.Add(subnet) return nil } @@ -460,7 +471,6 @@ func (c *Controller) podReuseVip(vipName, portName string, keepVIP bool) error { return err } c.ipam.ReleaseAddressByPod(vipName, vip.Spec.Subnet) - c.updateSubnetStatusQueue.Add(vip.Spec.Subnet) return nil } @@ -500,12 +510,11 @@ func (c *Controller) releaseVip(key string) error { if _, _, _, err = c.ipam.GetStaticAddress(key, vip.Name, vip.Status.V4ip, mac, vip.Spec.Subnet, false); err != nil { klog.Errorf("failed to recover 
IPAM from vip CR %s: %v", vip.Name, err) } - c.updateSubnetStatusQueue.Add(vip.Spec.Subnet) } return nil } -func (c *Controller) handleAddVipFinalizer(key string) error { +func (c *Controller) handleAddOrUpdateVipFinalizer(key string) error { cachedVip, err := c.virtualIpsLister.Get(key) if err != nil { if k8serrors.IsNotFound(err) { @@ -514,10 +523,11 @@ func (c *Controller) handleAddVipFinalizer(key string) error { klog.Error(err) return err } - if !cachedVip.DeletionTimestamp.IsZero() || len(cachedVip.GetFinalizers()) != 0 { + if !cachedVip.DeletionTimestamp.IsZero() { return nil } newVip := cachedVip.DeepCopy() + controllerutil.RemoveFinalizer(newVip, util.DepreciatedFinalizerName) controllerutil.AddFinalizer(newVip, util.KubeOVNControllerFinalizer) patch, err := util.GenerateMergePatchPayload(cachedVip, newVip) if err != nil { @@ -532,6 +542,11 @@ func (c *Controller) handleAddVipFinalizer(key string) error { klog.Errorf("failed to add finalizer for vip '%s', %v", cachedVip.Name, err) return err } + + // Trigger subnet status update after finalizer is processed as a fallback + // This handles cases where finalizer was not added during creation + // AddFinalizer is idempotent, so this is safe even if finalizer already exists + c.updateSubnetStatusQueue.Add(cachedVip.Spec.Subnet) return nil } @@ -548,6 +563,7 @@ func (c *Controller) handleDelVipFinalizer(key string) error { return nil } newVip := cachedVip.DeepCopy() + controllerutil.RemoveFinalizer(newVip, util.DepreciatedFinalizerName) controllerutil.RemoveFinalizer(newVip, util.KubeOVNControllerFinalizer) patch, err := util.GenerateMergePatchPayload(cachedVip, newVip) if err != nil { @@ -562,6 +578,12 @@ func (c *Controller) handleDelVipFinalizer(key string) error { klog.Errorf("failed to remove finalizer from vip '%s', %v", cachedVip.Name, err) return err } + + // Trigger subnet status update after finalizer is removed + // This ensures subnet status reflects the IP release + // Add delay to ensure API server completes the finalizer removal + time.Sleep(300 * time.Millisecond) + c.updateSubnetStatusQueue.Add(cachedVip.Spec.Subnet) return nil } diff --git a/pkg/controller/vpc_nat_gw_eip.go b/pkg/controller/vpc_nat_gw_eip.go index fb72e4a4a7c..8c51e1d3aab 100644 --- a/pkg/controller/vpc_nat_gw_eip.go +++ b/pkg/controller/vpc_nat_gw_eip.go @@ -38,13 +38,6 @@ func (c *Controller) enqueueUpdateIptablesEip(oldObj, newObj any) { klog.Infof("enqueue update iptables eip %s", key) c.updateIptablesEipQueue.Add(key) } - - // Trigger subnet status update when EIP CR is updated - // This ensures both CR labels and IPAM state are synced before status calculation - if oldEip.Status.IP != newEip.Status.IP || oldEip.Status.Ready != newEip.Status.Ready { - externalNetwork := util.GetExternalNetwork(newEip.Spec.ExternalSubnet) - c.updateSubnetStatusQueue.Add(externalNetwork) - } } func (c *Controller) enqueueDelIptablesEip(obj any) { @@ -66,12 +59,7 @@ func (c *Controller) enqueueDelIptablesEip(obj any) { key := cache.MetaObjectToName(eip).String() klog.Infof("enqueue del iptables eip %s", key) - c.delIptablesEipQueue.Add(key) - - // Trigger subnet status update when EIP is deleted - // This ensures subnet status reflects the IP release - externalNetwork := util.GetExternalNetwork(eip.Spec.ExternalSubnet) - c.updateSubnetStatusQueue.Add(externalNetwork) + c.delIptablesEipQueue.Add(eip.DeepCopy()) } func (c *Controller) handleAddIptablesEip(key string) error { @@ -166,6 +154,9 @@ func (c *Controller) handleAddIptablesEip(key string) error { return err } 
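This patch leans on c.updateSubnetStatusQueue.Add(...) as a cheap, repeatable trigger. That is safe to call often because client-go workqueues deduplicate identical keys while they are pending: any number of Adds for the same subnet collapses into a single status recomputation. A small demonstration of the coalescing, using plain client-go and no Kube-OVN types:

```go
package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	q := workqueue.NewTypedRateLimitingQueue(
		workqueue.DefaultTypedControllerRateLimiter[string]())

	// Five triggers for the same subnet while no worker has picked it up...
	for range 5 {
		q.Add("ovn-vpc-external-network")
	}

	// ...coalesce into one pending item: a single status recomputation.
	fmt.Println("pending:", q.Len()) // pending: 1
}
```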
+ // Trigger subnet status update after all operations complete + // At this point: IPAM allocated, IptablesEIP CR created with labels+status+finalizer + c.updateSubnetStatusQueue.Add(subnet.Name) return nil } @@ -225,6 +216,21 @@ func (c *Controller) handleUpdateIptablesEip(key string) error { if !cachedEip.DeletionTimestamp.IsZero() { klog.Infof("clean eip %q in pod", key) + + // Check if EIP is still being used by any NAT rules (FIP/DNAT/SNAT) + // Only remove finalizer when no NAT rules are using it + // Note: We query NAT rules directly instead of relying on cachedEip.Status.Nat + // to avoid cache staleness issues + nat, err := c.getIptablesEipNat(cachedEip.Spec.V4ip) + if err != nil { + klog.Errorf("failed to get eip %s nat rules, %v", key, err) + return err + } + if nat != "" { + klog.Infof("eip %s is still being used by NAT rules: %s, waiting for them to be deleted", key, nat) + return nil + } + if vpcNatEnabled == "true" { v4ipCidr, err := util.GetIPAddrWithMask(cachedEip.Status.IP, v4Cidr) if err != nil { @@ -243,11 +249,14 @@ func (c *Controller) handleUpdateIptablesEip(key string) error { return err } } + // Release IP from IPAM before removing finalizer + c.ipam.ReleaseAddressByPod(key, cachedEip.Spec.ExternalSubnet) + + // Now remove finalizer, which will trigger subnet status update if err = c.handleDelIptablesEipFinalizer(key); err != nil { klog.Errorf("failed to handle del finalizer for eip %s, %v", key, err) return err } - c.ipam.ReleaseAddressByPod(key, cachedEip.Spec.ExternalSubnet) return nil } @@ -344,8 +353,16 @@ func (c *Controller) handleUpdateIptablesEip(key string) error { return nil } -func (c *Controller) handleDelIptablesEip(key string) error { - klog.Infof("handle delete iptables eip %s", key) +func (c *Controller) handleDelIptablesEip(eip *kubeovnv1.IptablesEIP) error { + klog.Infof("handle delete iptables eip %s", eip.Name) + + // For IptablesEIPs deleted without finalizer (race condition or direct deletion), + // we need to ensure subnet status is updated as a safety net. 
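The deletion paths above all settle on the same teardown order: refuse to finalize while dependent NAT rules still reference the EIP, free data-plane and IPAM resources while the finalizer still pins the object, and only then drop the finalizer and enqueue a status recomputation. A condensed sketch of that ordering; natRulesUsing, releaseIPAM and removeFinalizer are hypothetical helpers standing in for the concrete calls in this file:

```go
// Sketch of the finalization ordering applied to iptables EIPs above.
// Helper names are hypothetical; only the ordering matters.
func (c *Controller) finalizeEIP(eip *kubeovnv1.IptablesEIP) error {
	// 1. A live FIP/DNAT/SNAT rule keeps the finalizer in place; the
	//    returned error requeues the EIP until the dependent is gone.
	nat, err := c.natRulesUsing(eip)
	if err != nil {
		return err
	}
	if nat != "" {
		return fmt.Errorf("eip %s still in use by %q", eip.Name, nat)
	}
	// 2. Free external state while the finalizer still pins the object.
	c.releaseIPAM(eip.Name, eip.Spec.ExternalSubnet)
	// 3. Drop the finalizer last, then let the subnet status catch up.
	if err := c.removeFinalizer(eip); err != nil {
		return err
	}
	c.updateSubnetStatusQueue.Add(util.GetExternalNetwork(eip.Spec.ExternalSubnet))
	return nil
}
```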
+ externalNetwork := util.GetExternalNetwork(eip.Spec.ExternalSubnet) + if externalNetwork != "" { + c.updateSubnetStatusQueue.Add(externalNetwork) + } + return nil } @@ -581,12 +598,13 @@ func (c *Controller) createOrUpdateEipCR(key, v4ip, v6ip, mac, natGwDp, qos, ext return err } } - externalNetwork := util.GetExternalNetwork(cachedEip.Spec.ExternalSubnet) if needCreate { klog.V(3).Infof("create eip cr %s", key) + // Create CR with finalizer, labels and status all at once _, err := c.config.KubeOvnClient.KubeovnV1().IptablesEIPs().Create(context.Background(), &kubeovnv1.IptablesEIP{ ObjectMeta: metav1.ObjectMeta{ - Name: key, + Name: key, + Finalizers: []string{util.KubeOVNControllerFinalizer}, Labels: map[string]string{ util.SubnetNameLabel: externalNet, util.EipV4IpLabel: v4ip, @@ -598,6 +616,14 @@ func (c *Controller) createOrUpdateEipCR(key, v4ip, v6ip, mac, natGwDp, qos, ext V6ip: v6ip, MacAddress: mac, NatGwDp: natGwDp, + QoSPolicy: qos, + }, + Status: kubeovnv1.IptablesEIPStatus{ + IP: v4ip, + Ready: true, + QoSPolicy: qos, + Nat: "", + Redo: "", }, }, metav1.CreateOptions{}) if err != nil { @@ -607,12 +633,25 @@ func (c *Controller) createOrUpdateEipCR(key, v4ip, v6ip, mac, natGwDp, qos, ext } } else { eip := cachedEip.DeepCopy() + + // Ensure labels are set correctly before any update + if eip.Labels == nil { + eip.Labels = make(map[string]string) + } + eip.Labels[util.SubnetNameLabel] = externalNet + eip.Labels[util.VpcNatGatewayNameLabel] = natGwDp + eip.Labels[util.EipV4IpLabel] = v4ip + if eip.Spec.QoSPolicy != "" { + eip.Labels[util.QoSLabel] = eip.Spec.QoSPolicy + } + if v4ip != "" { klog.V(3).Infof("update eip cr %s", key) eip.Spec.V4ip = v4ip eip.Spec.V6ip = v6ip eip.Spec.NatGwDp = natGwDp eip.Spec.MacAddress = mac + // Update with labels and spec in one call if _, err := c.config.KubeOvnClient.KubeovnV1().IptablesEIPs().Update(context.Background(), eip, metav1.UpdateOptions{}); err != nil { errMsg := fmt.Errorf("failed to update eip crd %s, %w", key, err) klog.Error(errMsg) @@ -639,38 +678,15 @@ func (c *Controller) createOrUpdateEipCR(key, v4ip, v6ip, mac, natGwDp, qos, ext return err } } - var needUpdateLabel bool - var op string - if len(eip.Labels) == 0 { - op = "add" - eip.Labels = map[string]string{ - util.SubnetNameLabel: externalNetwork, - util.VpcNatGatewayNameLabel: natGwDp, - util.EipV4IpLabel: v4ip, - } - needUpdateLabel = true - } else if eip.Labels[util.SubnetNameLabel] != externalNetwork { - op = "replace" - eip.Labels[util.SubnetNameLabel] = externalNetwork - eip.Labels[util.VpcNatGatewayNameLabel] = natGwDp - needUpdateLabel = true - } - if eip.Spec.QoSPolicy != "" && eip.Labels[util.QoSLabel] != eip.Spec.QoSPolicy { - eip.Labels[util.QoSLabel] = eip.Spec.QoSPolicy - needUpdateLabel = true - } - if needUpdateLabel { - if err := c.updateIptableLabels(eip.Name, op, "eip", eip.Labels); err != nil { - klog.Errorf("failed to update eip %s labels, %v", eip.Name, err) - return err - } - } if err = c.handleAddIptablesEipFinalizer(key); err != nil { - klog.Errorf("failed to handle add finalizer for eip, %v", err) + klog.Errorf("failed to handle add or update finalizer for eip, %v", err) return err } } + // Trigger subnet status update after all operations complete + time.Sleep(300 * time.Millisecond) + c.updateSubnetStatusQueue.Add(externalNet) return nil } @@ -694,10 +710,11 @@ func (c *Controller) handleAddIptablesEipFinalizer(key string) error { klog.Error(err) return err } - if !cachedIptablesEip.DeletionTimestamp.IsZero() || 
len(cachedIptablesEip.GetFinalizers()) != 0 { + if !cachedIptablesEip.DeletionTimestamp.IsZero() { return nil } newIptablesEip := cachedIptablesEip.DeepCopy() + controllerutil.RemoveFinalizer(newIptablesEip, util.DepreciatedFinalizerName) controllerutil.AddFinalizer(newIptablesEip, util.KubeOVNControllerFinalizer) patch, err := util.GenerateMergePatchPayload(cachedIptablesEip, newIptablesEip) if err != nil { @@ -712,6 +729,12 @@ func (c *Controller) handleAddIptablesEipFinalizer(key string) error { klog.Errorf("failed to add finalizer for iptables eip '%s', %v", cachedIptablesEip.Name, err) return err } + + // Trigger subnet status update after finalizer is processed as a fallback + // This handles cases where finalizer was not added during creation + // AddFinalizer is idempotent, so this is safe even if finalizer already exists + externalNetwork := util.GetExternalNetwork(cachedIptablesEip.Spec.ExternalSubnet) + c.updateSubnetStatusQueue.Add(externalNetwork) return nil } @@ -728,6 +751,7 @@ func (c *Controller) handleDelIptablesEipFinalizer(key string) error { return nil } newIptablesEip := cachedIptablesEip.DeepCopy() + controllerutil.RemoveFinalizer(newIptablesEip, util.DepreciatedFinalizerName) controllerutil.RemoveFinalizer(newIptablesEip, util.KubeOVNControllerFinalizer) patch, err := util.GenerateMergePatchPayload(cachedIptablesEip, newIptablesEip) if err != nil { @@ -742,6 +766,13 @@ func (c *Controller) handleDelIptablesEipFinalizer(key string) error { klog.Errorf("failed to remove finalizer from iptables eip '%s', %v", cachedIptablesEip.Name, err) return err } + + // Trigger subnet status update after finalizer is removed + // This ensures subnet status reflects the IP release + // Add delay to ensure API server completes the finalizer removal + time.Sleep(300 * time.Millisecond) + externalNetwork := util.GetExternalNetwork(cachedIptablesEip.Spec.ExternalSubnet) + c.updateSubnetStatusQueue.Add(externalNetwork) return nil } diff --git a/test/e2e/framework/vpc-nat-gateway.go b/test/e2e/framework/vpc-nat-gateway.go index 0b2a547e1d6..1607df81003 100644 --- a/test/e2e/framework/vpc-nat-gateway.go +++ b/test/e2e/framework/vpc-nat-gateway.go @@ -222,3 +222,9 @@ func MakeVpcNatGateway(name, vpc, subnet, lanIP, externalSubnet, qosPolicyName s vpcNatGw.Spec.QoSPolicy = qosPolicyName return vpcNatGw } + +func MakeVpcNatGatewayWithNoDefaultEIP(name, vpc, subnet, lanIP, externalSubnet, qosPolicyName string, noDefaultEIP bool) *apiv1.VpcNatGateway { + vpcNatGw := MakeVpcNatGateway(name, vpc, subnet, lanIP, externalSubnet, qosPolicyName) + vpcNatGw.Spec.NoDefaultEIP = noDefaultEIP + return vpcNatGw +} diff --git a/test/e2e/iptables-eip-qos/e2e_test.go b/test/e2e/iptables-eip-qos/e2e_test.go new file mode 100644 index 00000000000..b1cb5341106 --- /dev/null +++ b/test/e2e/iptables-eip-qos/e2e_test.go @@ -0,0 +1,989 @@ +package ovn_eip + +import ( + "context" + "errors" + "flag" + "fmt" + "strconv" + "strings" + "testing" + "time" + + dockernetwork "github.com/moby/moby/api/types/network" + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/klog/v2" + "k8s.io/kubernetes/test/e2e" + k8sframework "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/framework/config" + e2enode "k8s.io/kubernetes/test/e2e/framework/node" + + "github.com/onsi/ginkgo/v2" + + apiv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" + "github.com/kubeovn/kube-ovn/pkg/ovs" + 
"github.com/kubeovn/kube-ovn/pkg/util" + "github.com/kubeovn/kube-ovn/test/e2e/framework" + "github.com/kubeovn/kube-ovn/test/e2e/framework/docker" + "github.com/kubeovn/kube-ovn/test/e2e/framework/kind" +) + +const ( + dockerExtNet1Name = "kube-ovn-ext-net1" + dockerExtNet2Name = "kube-ovn-ext-net2" + vpcNatGWConfigMapName = "ovn-vpc-nat-gw-config" + vpcNatConfigName = "ovn-vpc-nat-config" + networkAttachDefName = "ovn-vpc-external-network" + externalSubnetProvider = "ovn-vpc-external-network.kube-system" +) + +const ( + iperf2Port = "20288" + skipIperf = false +) + +const ( + eipLimit = iota*5 + 10 + updatedEIPLimit + newEIPLimit + specificIPLimit + defaultNicLimit +) + +func setupNetworkAttachmentDefinition( + f *framework.Framework, + dockerExtNetNetwork *dockernetwork.Inspect, + attachNetClient *framework.NetworkAttachmentDefinitionClient, + subnetClient *framework.SubnetClient, + externalNetworkName string, + nicName string, + provider string, + dockerExtNetName string, +) { + ginkgo.GinkgoHelper() + + ginkgo.By("Getting docker network " + dockerExtNetName) + network, err := docker.NetworkInspect(dockerExtNetName) + framework.ExpectNoError(err, "getting docker network "+dockerExtNetName) + ginkgo.By("Getting or creating network attachment definition " + externalNetworkName) + attachConf := fmt.Sprintf(`{ + "cniVersion": "0.3.0", + "type": "macvlan", + "master": "%s", + "mode": "bridge", + "ipam": { + "type": "kube-ovn", + "server_socket": "/run/openvswitch/kube-ovn-daemon.sock", + "provider": "%s" + } + }`, nicName, provider) + + // Try to get existing NAD first using raw Kubernetes API to avoid ExpectNoError + nad, err := attachNetClient.NetworkAttachmentDefinitionInterface.Get(context.TODO(), externalNetworkName, metav1.GetOptions{}) + if err != nil && k8serrors.IsNotFound(err) { + // NAD doesn't exist, create it + attachNet := framework.MakeNetworkAttachmentDefinition(externalNetworkName, framework.KubeOvnNamespace, attachConf) + nad = attachNetClient.Create(attachNet) + } else { + framework.ExpectNoError(err, "getting network attachment definition "+externalNetworkName) + } + + ginkgo.By("Got network attachment definition " + nad.Name) + + ginkgo.By("Creating underlay macvlan subnet " + externalNetworkName) + var cidrV4, cidrV6, gatewayV4, gatewayV6 string + for _, config := range dockerExtNetNetwork.IPAM.Config { + switch util.CheckProtocol(config.Subnet.Addr().String()) { + case apiv1.ProtocolIPv4: + if f.HasIPv4() { + cidrV4 = config.Subnet.String() + gatewayV4 = config.Gateway.String() + } + case apiv1.ProtocolIPv6: + if f.HasIPv6() { + cidrV6 = config.Subnet.String() + gatewayV6 = config.Gateway.String() + } + } + } + cidr := make([]string, 0, 2) + gateway := make([]string, 0, 2) + if f.HasIPv4() { + cidr = append(cidr, cidrV4) + gateway = append(gateway, gatewayV4) + } + if f.HasIPv6() { + cidr = append(cidr, cidrV6) + gateway = append(gateway, gatewayV6) + } + excludeIPs := make([]string, 0, len(network.Containers)*2) + for _, container := range network.Containers { + if container.IPv4Address.IsValid() && f.HasIPv4() { + excludeIPs = append(excludeIPs, container.IPv4Address.Addr().String()) + } + if container.IPv6Address.IsValid() && f.HasIPv6() { + excludeIPs = append(excludeIPs, container.IPv6Address.Addr().String()) + } + } + + // Check if subnet already exists + _, err = subnetClient.SubnetInterface.Get(context.TODO(), externalNetworkName, metav1.GetOptions{}) + if err != nil && k8serrors.IsNotFound(err) { + // Subnet doesn't exist, create it + macvlanSubnet := 
framework.MakeSubnet(externalNetworkName, "", strings.Join(cidr, ","), strings.Join(gateway, ","), "", provider, excludeIPs, nil, nil) + _ = subnetClient.CreateSync(macvlanSubnet) + } else { + framework.ExpectNoError(err, "getting subnet "+externalNetworkName) + } +} + +func setupVpcNatGwTestEnvironment( + f *framework.Framework, + dockerExtNetNetwork *dockernetwork.Inspect, + attachNetClient *framework.NetworkAttachmentDefinitionClient, + subnetClient *framework.SubnetClient, + vpcClient *framework.VpcClient, + vpcNatGwClient *framework.VpcNatGatewayClient, + vpcName string, + overlaySubnetName string, + vpcNatGwName string, + natGwQosPolicy string, + overlaySubnetV4Cidr string, + overlaySubnetV4Gw string, + lanIP string, + dockerExtNetName string, + externalNetworkName string, + nicName string, + provider string, + skipNADSetup bool, +) { + ginkgo.GinkgoHelper() + + if !skipNADSetup { + setupNetworkAttachmentDefinition( + f, dockerExtNetNetwork, attachNetClient, + subnetClient, externalNetworkName, nicName, provider, dockerExtNetName) + } + + ginkgo.By("Getting config map " + vpcNatGWConfigMapName) + _, err := f.ClientSet.CoreV1().ConfigMaps(framework.KubeOvnNamespace).Get(context.Background(), vpcNatGWConfigMapName, metav1.GetOptions{}) + framework.ExpectNoError(err, "failed to get ConfigMap") + + ginkgo.By("Creating custom vpc " + vpcName) + vpc := framework.MakeVpc(vpcName, lanIP, false, false, nil) + _ = vpcClient.CreateSync(vpc) + + ginkgo.By("Creating custom overlay subnet " + overlaySubnetName) + overlaySubnet := framework.MakeSubnet(overlaySubnetName, "", overlaySubnetV4Cidr, overlaySubnetV4Gw, vpcName, "", nil, nil, nil) + _ = subnetClient.CreateSync(overlaySubnet) + + ginkgo.By("Creating custom vpc nat gw " + vpcNatGwName) + vpcNatGw := framework.MakeVpcNatGateway(vpcNatGwName, vpcName, overlaySubnetName, lanIP, externalNetworkName, natGwQosPolicy) + _ = vpcNatGwClient.CreateSync(vpcNatGw, f.ClientSet) +} + +type qosParams struct { + vpc1Name string + vpc2Name string + vpc1SubnetName string + vpc2SubnetName string + vpcNat1GwName string + vpcNat2GwName string + vpc1EIPName string + vpc2EIPName string + vpc1FIPName string + vpc2FIPName string + vpc1PodName string + vpc2PodName string + attachDefName string + subnetProvider string +} + +// waitForIptablesEIPReady waits for an IptablesEIP to be ready +func waitForIptablesEIPReady(eipClient *framework.IptablesEIPClient, eipName string, timeout time.Duration) *apiv1.IptablesEIP { + ginkgo.GinkgoHelper() + var eip *apiv1.IptablesEIP + deadline := time.Now().Add(timeout) + for time.Now().Before(deadline) { + eip = eipClient.Get(eipName) + if eip != nil && eip.Status.IP != "" && eip.Status.Ready { + framework.Logf("IptablesEIP %s is ready with IP: %s", eipName, eip.Status.IP) + return eip + } + time.Sleep(2 * time.Second) + } + framework.Failf("Timeout waiting for IptablesEIP %s to be ready", eipName) + return nil +} + +func iperf(f *framework.Framework, iperfClientPod *corev1.Pod, iperfServerEIP *apiv1.IptablesEIP) string { + ginkgo.GinkgoHelper() + + for i := range 20 { + command := fmt.Sprintf("iperf -e -p %s --reportstyle C -i 1 -c %s -t 10", iperf2Port, iperfServerEIP.Status.IP) + stdOutput, errOutput, err := framework.ExecShellInPod(context.Background(), f, iperfClientPod.Namespace, iperfClientPod.Name, command) + framework.Logf("output from exec on client pod %s (eip %s)\n", iperfClientPod.Name, iperfServerEIP.Name) + if stdOutput != "" && err == nil { + framework.Logf("output:\n%s", stdOutput) + return stdOutput + } + 
framework.Logf("exec %s failed err: %v, errOutput: %s, stdOutput: %s, retried %d times.", command, err, errOutput, stdOutput, i) + time.Sleep(6 * time.Second) + } + framework.ExpectNoError(errors.New("iperf failed")) + return "" +} + +func checkQos(f *framework.Framework, + vpc1Pod, vpc2Pod *corev1.Pod, vpc1EIP, vpc2EIP *apiv1.IptablesEIP, + limit int, expect bool, +) { + ginkgo.GinkgoHelper() + + if !skipIperf { + if expect { + output := iperf(f, vpc1Pod, vpc2EIP) + framework.ExpectTrue(validRateLimit(output, limit)) + output = iperf(f, vpc2Pod, vpc1EIP) + framework.ExpectTrue(validRateLimit(output, limit)) + } else { + output := iperf(f, vpc1Pod, vpc2EIP) + framework.ExpectFalse(validRateLimit(output, limit)) + output = iperf(f, vpc2Pod, vpc1EIP) + framework.ExpectFalse(validRateLimit(output, limit)) + } + } +} + +func getNicDefaultQoSPolicy(limit int) apiv1.QoSPolicyBandwidthLimitRules { + return apiv1.QoSPolicyBandwidthLimitRules{ + apiv1.QoSPolicyBandwidthLimitRule{ + Name: "net1-ingress", + Interface: "net1", + RateMax: strconv.Itoa(limit), + BurstMax: strconv.Itoa(limit), + Priority: 3, + Direction: apiv1.QoSDirectionIngress, + }, + apiv1.QoSPolicyBandwidthLimitRule{ + Name: "net1-egress", + Interface: "net1", + RateMax: strconv.Itoa(limit), + BurstMax: strconv.Itoa(limit), + Priority: 3, + Direction: apiv1.QoSDirectionEgress, + }, + } +} + +func getEIPQoSRule(limit int) apiv1.QoSPolicyBandwidthLimitRules { + return apiv1.QoSPolicyBandwidthLimitRules{ + apiv1.QoSPolicyBandwidthLimitRule{ + Name: "eip-ingress", + RateMax: strconv.Itoa(limit), + BurstMax: strconv.Itoa(limit), + Priority: 1, + Direction: apiv1.QoSDirectionIngress, + }, + apiv1.QoSPolicyBandwidthLimitRule{ + Name: "eip-egress", + RateMax: strconv.Itoa(limit), + BurstMax: strconv.Itoa(limit), + Priority: 1, + Direction: apiv1.QoSDirectionEgress, + }, + } +} + +func getSpecialQoSRule(limit int, ip string) apiv1.QoSPolicyBandwidthLimitRules { + return apiv1.QoSPolicyBandwidthLimitRules{ + apiv1.QoSPolicyBandwidthLimitRule{ + Name: "net1-extip-ingress", + Interface: "net1", + RateMax: strconv.Itoa(limit), + BurstMax: strconv.Itoa(limit), + Priority: 2, + Direction: apiv1.QoSDirectionIngress, + MatchType: apiv1.QoSMatchTypeIP, + MatchValue: "src " + ip + "/32", + }, + apiv1.QoSPolicyBandwidthLimitRule{ + Name: "net1-extip-egress", + Interface: "net1", + RateMax: strconv.Itoa(limit), + BurstMax: strconv.Itoa(limit), + Priority: 2, + Direction: apiv1.QoSDirectionEgress, + MatchType: apiv1.QoSMatchTypeIP, + MatchValue: "dst " + ip + "/32", + }, + } +} + +// defaultQoSCases test default qos policy= +func defaultQoSCases(f *framework.Framework, + vpcNatGwClient *framework.VpcNatGatewayClient, + podClient *framework.PodClient, + qosPolicyClient *framework.QoSPolicyClient, + vpc1Pod *corev1.Pod, + vpc2Pod *corev1.Pod, + vpc1EIP *apiv1.IptablesEIP, + vpc2EIP *apiv1.IptablesEIP, + natgwName string, +) { + ginkgo.GinkgoHelper() + + // create nic qos policy + qosPolicyName := "default-nic-qos-policy-" + framework.RandomSuffix() + ginkgo.By("Creating qos policy " + qosPolicyName) + rules := getNicDefaultQoSPolicy(defaultNicLimit) + + qosPolicy := framework.MakeQoSPolicy(qosPolicyName, true, apiv1.QoSBindingTypeNatGw, rules) + _ = qosPolicyClient.CreateSync(qosPolicy) + + ginkgo.By("Patch natgw " + natgwName + " with qos policy " + qosPolicyName) + _ = vpcNatGwClient.PatchQoSPolicySync(natgwName, qosPolicyName) + + ginkgo.By("Check qos " + qosPolicyName + " is limited to " + strconv.Itoa(defaultNicLimit) + "Mbps") + checkQos(f, vpc1Pod, 
+// defaultQoSCases tests the default nic qos policy
+func defaultQoSCases(f *framework.Framework,
+	vpcNatGwClient *framework.VpcNatGatewayClient,
+	podClient *framework.PodClient,
+	qosPolicyClient *framework.QoSPolicyClient,
+	vpc1Pod *corev1.Pod,
+	vpc2Pod *corev1.Pod,
+	vpc1EIP *apiv1.IptablesEIP,
+	vpc2EIP *apiv1.IptablesEIP,
+	natgwName string,
+) {
+	ginkgo.GinkgoHelper()
+
+	// create nic qos policy
+	qosPolicyName := "default-nic-qos-policy-" + framework.RandomSuffix()
+	ginkgo.By("Creating qos policy " + qosPolicyName)
+	rules := getNicDefaultQoSPolicy(defaultNicLimit)
+
+	qosPolicy := framework.MakeQoSPolicy(qosPolicyName, true, apiv1.QoSBindingTypeNatGw, rules)
+	_ = qosPolicyClient.CreateSync(qosPolicy)
+
+	ginkgo.By("Patch natgw " + natgwName + " with qos policy " + qosPolicyName)
+	_ = vpcNatGwClient.PatchQoSPolicySync(natgwName, qosPolicyName)
+
+	ginkgo.By("Check qos " + qosPolicyName + " is limited to " + strconv.Itoa(defaultNicLimit) + "Mbps")
+	checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, defaultNicLimit, true)
+
+	ginkgo.By("Delete natgw pod " + natgwName + "-0")
+	natGwPodName := util.GenNatGwPodName(natgwName)
+	podClient.DeleteSync(natGwPodName)
+
+	ginkgo.By("Wait for natgw " + natgwName + " qos rebuild")
+	time.Sleep(5 * time.Second)
+
+	ginkgo.By("Check qos " + qosPolicyName + " is limited to " + strconv.Itoa(defaultNicLimit) + "Mbps")
+	checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, defaultNicLimit, true)
+
+	ginkgo.By("Remove qos policy " + qosPolicyName + " from natgw " + natgwName)
+	_ = vpcNatGwClient.PatchQoSPolicySync(natgwName, "")
+
+	ginkgo.By("Deleting qos policy " + qosPolicyName)
+	qosPolicyClient.DeleteSync(qosPolicyName)
+
+	ginkgo.By("Check qos " + qosPolicyName + " is not limited to " + strconv.Itoa(defaultNicLimit) + "Mbps")
+	checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, defaultNicLimit, false)
+}
+
+// eipQoSCases tests eip qos policy
+func eipQoSCases(f *framework.Framework,
+	eipClient *framework.IptablesEIPClient,
+	podClient *framework.PodClient,
+	qosPolicyClient *framework.QoSPolicyClient,
+	vpc1Pod *corev1.Pod,
+	vpc2Pod *corev1.Pod,
+	vpc1EIP *apiv1.IptablesEIP,
+	vpc2EIP *apiv1.IptablesEIP,
+	eipName string,
+	natgwName string,
+) {
+	ginkgo.GinkgoHelper()
+
+	// create eip qos policy
+	qosPolicyName := "eip-qos-policy-" + framework.RandomSuffix()
+	ginkgo.By("Creating qos policy " + qosPolicyName)
+	rules := getEIPQoSRule(eipLimit)
+
+	qosPolicy := framework.MakeQoSPolicy(qosPolicyName, false, apiv1.QoSBindingTypeEIP, rules)
+	qosPolicy = qosPolicyClient.CreateSync(qosPolicy)
+
+	ginkgo.By("Patch eip " + eipName + " with qos policy " + qosPolicyName)
+	_ = eipClient.PatchQoSPolicySync(eipName, qosPolicyName)
+
+	ginkgo.By("Check qos " + qosPolicyName + " is limited to " + strconv.Itoa(eipLimit) + "Mbps")
+	checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, eipLimit, true)
+
+	ginkgo.By("Update qos policy " + qosPolicyName + " with new rate limit")
+
+	rules = getEIPQoSRule(updatedEIPLimit)
+	modifiedQoSPolicy := qosPolicy.DeepCopy()
+	modifiedQoSPolicy.Spec.BandwidthLimitRules = rules
+	qosPolicyClient.Patch(qosPolicy, modifiedQoSPolicy)
+	qosPolicyClient.WaitToQoSReady(qosPolicyName)
+
+	ginkgo.By("Check qos " + qosPolicyName + " is changed to " + strconv.Itoa(updatedEIPLimit) + "Mbps")
+	checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, updatedEIPLimit, true)
+
+	ginkgo.By("Delete natgw pod " + natgwName + "-0")
+	natGwPodName := util.GenNatGwPodName(natgwName)
+	podClient.DeleteSync(natGwPodName)
+
+	ginkgo.By("Wait for natgw " + natgwName + " qos rebuild")
+	time.Sleep(5 * time.Second)
+
+	ginkgo.By("Check qos " + qosPolicyName + " is limited to " + strconv.Itoa(updatedEIPLimit) + "Mbps")
+	checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, updatedEIPLimit, true)
+
+	newQoSPolicyName := "new-eip-qos-policy-" + framework.RandomSuffix()
+	newRules := getEIPQoSRule(newEIPLimit)
+	newQoSPolicy := framework.MakeQoSPolicy(newQoSPolicyName, false, apiv1.QoSBindingTypeEIP, newRules)
+	_ = qosPolicyClient.CreateSync(newQoSPolicy)
+
+	ginkgo.By("Change qos policy of eip " + eipName + " to " + newQoSPolicyName)
+	_ = eipClient.PatchQoSPolicySync(eipName, newQoSPolicyName)
+
+	ginkgo.By("Check qos " + newQoSPolicyName + " is limited to " + strconv.Itoa(newEIPLimit) + "Mbps")
+	checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, newEIPLimit, true)
+
+	ginkgo.By("Remove qos policy " + newQoSPolicyName + " from eip " + eipName)
+	_ = eipClient.PatchQoSPolicySync(eipName, "")
+
+	ginkgo.By("Deleting qos policy " + qosPolicyName)
+	qosPolicyClient.DeleteSync(qosPolicyName)
+
+	ginkgo.By("Deleting qos policy " + newQoSPolicyName)
+	qosPolicyClient.DeleteSync(newQoSPolicyName)
+
+	ginkgo.By("Check qos " + newQoSPolicyName + " is not limited to " + strconv.Itoa(newEIPLimit) + "Mbps")
+	checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, newEIPLimit, false)
+}
+
+// specifyingIPQoSCases tests qos policy that matches a specific external ip
+func specifyingIPQoSCases(f *framework.Framework,
+	vpcNatGwClient *framework.VpcNatGatewayClient,
+	qosPolicyClient *framework.QoSPolicyClient,
+	vpc1Pod *corev1.Pod,
+	vpc2Pod *corev1.Pod,
+	vpc1EIP *apiv1.IptablesEIP,
+	vpc2EIP *apiv1.IptablesEIP,
+	natgwName string,
+) {
+	ginkgo.GinkgoHelper()
+
+	// create nic qos policy
+	qosPolicyName := "specifying-ip-qos-policy-" + framework.RandomSuffix()
+	ginkgo.By("Creating qos policy " + qosPolicyName)
+
+	rules := getSpecialQoSRule(specificIPLimit, vpc2EIP.Status.IP)
+
+	qosPolicy := framework.MakeQoSPolicy(qosPolicyName, true, apiv1.QoSBindingTypeNatGw, rules)
+	_ = qosPolicyClient.CreateSync(qosPolicy)
+
+	ginkgo.By("Patch natgw " + natgwName + " with qos policy " + qosPolicyName)
+	_ = vpcNatGwClient.PatchQoSPolicySync(natgwName, qosPolicyName)
+
+	ginkgo.By("Check qos " + qosPolicyName + " is limited to " + strconv.Itoa(specificIPLimit) + "Mbps")
+	checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, specificIPLimit, true)
+
+	ginkgo.By("Remove qos policy " + qosPolicyName + " from natgw " + natgwName)
+	_ = vpcNatGwClient.PatchQoSPolicySync(natgwName, "")
+
+	ginkgo.By("Deleting qos policy " + qosPolicyName)
+	qosPolicyClient.DeleteSync(qosPolicyName)
+
+	ginkgo.By("Check qos " + qosPolicyName + " is not limited to " + strconv.Itoa(specificIPLimit) + "Mbps")
+	checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, specificIPLimit, false)
+}
+
+// priorityQoSCases tests qos rule priority matching
+func priorityQoSCases(f *framework.Framework,
+	vpcNatGwClient *framework.VpcNatGatewayClient,
+	eipClient *framework.IptablesEIPClient,
+	qosPolicyClient *framework.QoSPolicyClient,
+	vpc1Pod *corev1.Pod,
+	vpc2Pod *corev1.Pod,
+	vpc1EIP *apiv1.IptablesEIP,
+	vpc2EIP *apiv1.IptablesEIP,
+	natgwName string,
+	eipName string,
+) {
+	ginkgo.GinkgoHelper()
+
+	// create nic qos policy
+	natGwQoSPolicyName := "priority-nic-qos-policy-" + framework.RandomSuffix()
+	ginkgo.By("Creating qos policy " + natGwQoSPolicyName)
+	// default qos policy + special qos policy
+	natgwRules := getNicDefaultQoSPolicy(defaultNicLimit)
+	natgwRules = append(natgwRules, getSpecialQoSRule(specificIPLimit, vpc2EIP.Status.IP)...)
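+
+	// Expected precedence (inferred from the checks below, not from API
+	// docs): when several rules match the same traffic, the rule with the
+	// lowest Priority value wins, so the eip policy (1) beats the
+	// specific-ip rules (2), which beat the default nic rules (3).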
+
+	natgwQoSPolicy := framework.MakeQoSPolicy(natGwQoSPolicyName, true, apiv1.QoSBindingTypeNatGw, natgwRules)
+	_ = qosPolicyClient.CreateSync(natgwQoSPolicy)
+
+	ginkgo.By("Patch natgw " + natgwName + " with qos policy " + natGwQoSPolicyName)
+	_ = vpcNatGwClient.PatchQoSPolicySync(natgwName, natGwQoSPolicyName)
+
+	eipQoSPolicyName := "eip-qos-policy-" + framework.RandomSuffix()
+	ginkgo.By("Creating qos policy " + eipQoSPolicyName)
+	eipRules := getEIPQoSRule(eipLimit)
+
+	eipQoSPolicy := framework.MakeQoSPolicy(eipQoSPolicyName, false, apiv1.QoSBindingTypeEIP, eipRules)
+	_ = qosPolicyClient.CreateSync(eipQoSPolicy)
+
+	ginkgo.By("Patch eip " + eipName + " with qos policy " + eipQoSPolicyName)
+	_ = eipClient.PatchQoSPolicySync(eipName, eipQoSPolicyName)
+
+	// match qos of priority 1
+	ginkgo.By("Check qos to match priority 1 is limited to " + strconv.Itoa(eipLimit) + "Mbps")
+	checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, eipLimit, true)
+
+	ginkgo.By("Remove qos policy " + eipQoSPolicyName + " from eip " + eipName)
+	_ = eipClient.PatchQoSPolicySync(eipName, "")
+
+	ginkgo.By("Deleting qos policy " + eipQoSPolicyName)
+	qosPolicyClient.DeleteSync(eipQoSPolicyName)
+
+	// match qos of priority 2
+	ginkgo.By("Check qos to match priority 2 is limited to " + strconv.Itoa(specificIPLimit) + "Mbps")
+	checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, specificIPLimit, true)
+
+	// change qos policy of natgw
+	newNatGwQoSPolicyName := "new-priority-nic-qos-policy-" + framework.RandomSuffix()
+	ginkgo.By("Creating qos policy " + newNatGwQoSPolicyName)
+	newNatgwRules := getNicDefaultQoSPolicy(defaultNicLimit)
+
+	newNatgwQoSPolicy := framework.MakeQoSPolicy(newNatGwQoSPolicyName, true, apiv1.QoSBindingTypeNatGw, newNatgwRules)
+	_ = qosPolicyClient.CreateSync(newNatgwQoSPolicy)
+
+	ginkgo.By("Change qos policy of natgw " + natgwName + " to " + newNatGwQoSPolicyName)
+	_ = vpcNatGwClient.PatchQoSPolicySync(natgwName, newNatGwQoSPolicyName)
+
+	// match qos of priority 3
+	ginkgo.By("Check qos to match priority 3 is limited to " + strconv.Itoa(defaultNicLimit) + "Mbps")
+	checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, defaultNicLimit, true)
+
+	ginkgo.By("Remove qos policy " + newNatGwQoSPolicyName + " from natgw " + natgwName)
+	_ = vpcNatGwClient.PatchQoSPolicySync(natgwName, "")
+
+	ginkgo.By("Deleting qos policy " + natGwQoSPolicyName)
+	qosPolicyClient.DeleteSync(natGwQoSPolicyName)
+
+	ginkgo.By("Deleting qos policy " + newNatGwQoSPolicyName)
+	qosPolicyClient.DeleteSync(newNatGwQoSPolicyName)
+
+	ginkgo.By("Check qos " + newNatGwQoSPolicyName + " is not limited to " + strconv.Itoa(defaultNicLimit) + "Mbps")
+	checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, defaultNicLimit, false)
+}
+
+func createNatGwAndSetQosCases(f *framework.Framework,
+	vpcNatGwClient *framework.VpcNatGatewayClient,
+	ipClient *framework.IPClient,
+	eipClient *framework.IptablesEIPClient,
+	fipClient *framework.IptablesFIPClient,
+	subnetClient *framework.SubnetClient,
+	qosPolicyClient *framework.QoSPolicyClient,
+	vpc1Pod *corev1.Pod,
+	vpc2Pod *corev1.Pod,
+	vpc2EIP *apiv1.IptablesEIP,
+	natgwName string,
+	eipName string,
+	fipName string,
+	vpcName string,
+	overlaySubnetName string,
+	lanIP string,
+	attachDefName string,
+) {
+	ginkgo.GinkgoHelper()
+
+	// delete fip
+	ginkgo.By("Deleting fip " + fipName)
+	fipClient.DeleteSync(fipName)
+
+	ginkgo.By("Deleting eip " + eipName)
+	eipClient.DeleteSync(eipName)
+
+	// the only pod for vpc nat gateway
+	vpcNatGw1PodName := util.GenNatGwPodName(natgwName)
+
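+	// Rationale (inferred from the flow below): everything created without
+	// qos in BeforeEach is torn down here so that the gateway and eip can be
+	// recreated with qos policies already attached at creation time.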
+	// delete vpc nat gw statefulset remaining ip for eth0 and net1
+	ginkgo.By("Deleting custom vpc nat gw " + natgwName)
+	vpcNatGwClient.DeleteSync(natgwName)
+
+	overlaySubnet1 := subnetClient.Get(overlaySubnetName)
+	macvlanSubnet := subnetClient.Get(attachDefName)
+	eth0IpName := ovs.PodNameToPortName(vpcNatGw1PodName, framework.KubeOvnNamespace, overlaySubnet1.Spec.Provider)
+	net1IpName := ovs.PodNameToPortName(vpcNatGw1PodName, framework.KubeOvnNamespace, macvlanSubnet.Spec.Provider)
+	ginkgo.By("Deleting vpc nat gw eth0 ip " + eth0IpName)
+	ipClient.DeleteSync(eth0IpName)
+	ginkgo.By("Deleting vpc nat gw net1 ip " + net1IpName)
+	ipClient.DeleteSync(net1IpName)
+
+	natgwQoSPolicyName := "default-nic-qos-policy-" + framework.RandomSuffix()
+	ginkgo.By("Creating qos policy " + natgwQoSPolicyName)
+	rules := getNicDefaultQoSPolicy(defaultNicLimit)
+
+	qosPolicy := framework.MakeQoSPolicy(natgwQoSPolicyName, true, apiv1.QoSBindingTypeNatGw, rules)
+	_ = qosPolicyClient.CreateSync(qosPolicy)
+
+	ginkgo.By("Creating custom vpc nat gw " + natgwName)
+	vpcNatGw := framework.MakeVpcNatGateway(natgwName, vpcName, overlaySubnetName, lanIP, attachDefName, natgwQoSPolicyName)
+	_ = vpcNatGwClient.CreateSync(vpcNatGw, f.ClientSet)
+
+	eipQoSPolicyName := "eip-qos-policy-" + framework.RandomSuffix()
+	ginkgo.By("Creating qos policy " + eipQoSPolicyName)
+	rules = getEIPQoSRule(eipLimit)
+
+	eipQoSPolicy := framework.MakeQoSPolicy(eipQoSPolicyName, false, apiv1.QoSBindingTypeEIP, rules)
+	_ = qosPolicyClient.CreateSync(eipQoSPolicy)
+
+	ginkgo.By("Creating eip " + eipName)
+	vpc1EIP := framework.MakeIptablesEIP(eipName, "", "", "", natgwName, attachDefName, eipQoSPolicyName)
+	_ = eipClient.CreateSync(vpc1EIP)
+	vpc1EIP = waitForIptablesEIPReady(eipClient, eipName, 60*time.Second)
+
+	ginkgo.By("Creating fip " + fipName)
+	fip := framework.MakeIptablesFIPRule(fipName, eipName, vpc1Pod.Status.PodIP)
+	_ = fipClient.CreateSync(fip)
+
+	ginkgo.By("Check qos " + eipQoSPolicyName + " is limited to " + strconv.Itoa(eipLimit) + "Mbps")
+	checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, eipLimit, true)
+
+	ginkgo.By("Remove qos policy " + eipQoSPolicyName + " from eip " + eipName)
+	_ = eipClient.PatchQoSPolicySync(eipName, "")
+
+	ginkgo.By("Check qos " + natgwQoSPolicyName + " is limited to " + strconv.Itoa(defaultNicLimit) + "Mbps")
+	checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, defaultNicLimit, true)
+
+	ginkgo.By("Remove qos policy " + natgwQoSPolicyName + " from natgw " + natgwName)
+	_ = vpcNatGwClient.PatchQoSPolicySync(natgwName, "")
+
+	ginkgo.By("Check qos " + natgwQoSPolicyName + " is not limited to " + strconv.Itoa(defaultNicLimit) + "Mbps")
+	checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, defaultNicLimit, false)
+
+	ginkgo.By("Deleting qos policy " + natgwQoSPolicyName)
+	qosPolicyClient.DeleteSync(natgwQoSPolicyName)
+
+	ginkgo.By("Deleting qos policy " + eipQoSPolicyName)
+	qosPolicyClient.DeleteSync(eipQoSPolicyName)
+}
+
+// validRateLimit reports whether any per-interval sample in iperf's CSV
+// output ("--reportstyle C", where the last comma-separated field is the
+// measured rate in bits/s) falls within +/-20% of the expected limit in Mbps.
+func validRateLimit(text string, limit int) bool {
+	maxValue := float64(limit) * 1024 * 1024 * 1.2
+	minValue := float64(limit) * 1024 * 1024 * 0.8
+	lines := strings.SplitSeq(text, "\n")
+	for line := range lines {
+		if line == "" {
+			continue
+		}
+		fields := strings.Split(line, ",")
+		number, err := strconv.Atoi(fields[len(fields)-1])
+		if err != nil {
+			continue
+		}
+		if v := float64(number); v >= minValue && v <= maxValue {
+			return true
+		}
+	}
+	return false
+}
+
+var _ = framework.Describe("[group:qos-policy]", func() {
+	f := framework.NewDefaultFramework("qos-policy")
+
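+	// Suite topology (set up in BeforeEach below): two VPCs, each behind its
+	// own NAT gateway and exposed via an EIP/FIP on a shared docker macvlan
+	// external network; checkQos runs iperf between the two pods in both
+	// directions through those FIPs.
+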
+ var skip bool + var cs clientset.Interface + var attachNetClient *framework.NetworkAttachmentDefinitionClient + var clusterName string + var vpcClient *framework.VpcClient + var vpcNatGwClient *framework.VpcNatGatewayClient + var subnetClient *framework.SubnetClient + var podClient *framework.PodClient + var ipClient *framework.IPClient + var iptablesEIPClient *framework.IptablesEIPClient + var iptablesFIPClient *framework.IptablesFIPClient + var qosPolicyClient *framework.QoSPolicyClient + + var net1NicName string + var dockerExtNetName string + + // docker network + var dockerExtNetNetwork *dockernetwork.Inspect + + var vpcQosParams *qosParams + var vpc1Pod *corev1.Pod + var vpc2Pod *corev1.Pod + var vpc1EIP *apiv1.IptablesEIP + var vpc2EIP *apiv1.IptablesEIP + var vpc1FIP *apiv1.IptablesFIPRule + var vpc2FIP *apiv1.IptablesFIPRule + + var lanIP string + var overlaySubnetV4Cidr string + var overlaySubnetV4Gw string + var eth0Exist, net1Exist bool + var annotations1 map[string]string + var annotations2 map[string]string + var iperfServerCmd []string + + ginkgo.BeforeEach(func() { + randomSuffix := framework.RandomSuffix() + vpcQosParams = &qosParams{ + vpc1Name: "qos-vpc1-" + randomSuffix, + vpc2Name: "qos-vpc2-" + randomSuffix, + vpc1SubnetName: "qos-vpc1-subnet-" + randomSuffix, + vpc2SubnetName: "qos-vpc2-subnet-" + randomSuffix, + vpcNat1GwName: "qos-gw1-" + randomSuffix, + vpcNat2GwName: "qos-gw2-" + randomSuffix, + vpc1EIPName: "qos-vpc1-eip-" + randomSuffix, + vpc2EIPName: "qos-vpc2-eip-" + randomSuffix, + vpc1FIPName: "qos-vpc1-fip-" + randomSuffix, + vpc2FIPName: "qos-vpc2-fip-" + randomSuffix, + vpc1PodName: "qos-vpc1-pod-" + randomSuffix, + vpc2PodName: "qos-vpc2-pod-" + randomSuffix, + attachDefName: "qos-ovn-vpc-external-network-" + randomSuffix, + } + vpcQosParams.subnetProvider = fmt.Sprintf("%s.%s", vpcQosParams.attachDefName, framework.KubeOvnNamespace) + + dockerExtNetName = "kube-ovn-qos-" + randomSuffix + + cs = f.ClientSet + podClient = f.PodClient() + attachNetClient = f.NetworkAttachmentDefinitionClientNS(framework.KubeOvnNamespace) + subnetClient = f.SubnetClient() + vpcClient = f.VpcClient() + vpcNatGwClient = f.VpcNatGatewayClient() + iptablesEIPClient = f.IptablesEIPClient() + ipClient = f.IPClient() + iptablesFIPClient = f.IptablesFIPClient() + qosPolicyClient = f.QoSPolicyClient() + + if skip { + ginkgo.Skip("underlay spec only runs on kind clusters") + } + + if clusterName == "" { + ginkgo.By("Getting k8s nodes") + k8sNodes, err := e2enode.GetReadySchedulableNodes(context.Background(), cs) + framework.ExpectNoError(err) + + cluster, ok := kind.IsKindProvided(k8sNodes.Items[0].Spec.ProviderID) + if !ok { + skip = true + ginkgo.Skip("underlay spec only runs on kind clusters") + } + clusterName = cluster + } + + ginkgo.By("Ensuring docker network " + dockerExtNetName + " exists") + network, err := docker.NetworkCreate(dockerExtNetName, true, true) + framework.ExpectNoError(err, "creating docker network "+dockerExtNetName) + dockerExtNetNetwork = network + + ginkgo.By("Getting kind nodes") + nodes, err := kind.ListNodes(clusterName, "") + framework.ExpectNoError(err, "getting nodes in kind cluster") + framework.ExpectNotEmpty(nodes) + + ginkgo.By("Connecting nodes to the docker network") + err = kind.NetworkConnect(dockerExtNetNetwork.ID, nodes) + framework.ExpectNoError(err, "connecting nodes to network "+dockerExtNetName) + + ginkgo.By("Getting node links that belong to the docker network") + nodes, err = kind.ListNodes(clusterName, "") + 
framework.ExpectNoError(err, "getting nodes in kind cluster") + + ginkgo.By("Validating node links") + network1, err := docker.NetworkInspect(dockerExtNetName) + framework.ExpectNoError(err) + for _, node := range nodes { + links, err := node.ListLinks() + framework.ExpectNoError(err, "failed to list links on node %s: %v", node.Name(), err) + net1Mac := network1.Containers[node.ID].MacAddress + for _, link := range links { + ginkgo.By("exist node nic " + link.IfName) + if link.IfName == "eth0" { + eth0Exist = true + } + if link.Address == net1Mac.String() { + net1NicName = link.IfName + net1Exist = true + } + } + framework.ExpectTrue(eth0Exist) + framework.ExpectTrue(net1Exist) + } + setupNetworkAttachmentDefinition( + f, dockerExtNetNetwork, attachNetClient, + subnetClient, vpcQosParams.attachDefName, net1NicName, vpcQosParams.subnetProvider, dockerExtNetName) + }) + + ginkgo.AfterEach(func() { + ginkgo.By("Deleting macvlan underlay subnet " + vpcQosParams.attachDefName) + subnetClient.DeleteSync(vpcQosParams.attachDefName) + + // delete net1 attachment definition + ginkgo.By("Deleting nad " + vpcQosParams.attachDefName) + attachNetClient.Delete(vpcQosParams.attachDefName) + + ginkgo.By("Getting nodes") + nodes, err := kind.ListNodes(clusterName, "") + framework.ExpectNoError(err, "getting nodes in cluster") + + if dockerExtNetNetwork != nil { + ginkgo.By("Disconnecting nodes from the docker network") + err = kind.NetworkDisconnect(dockerExtNetNetwork.ID, nodes) + framework.ExpectNoError(err, "disconnecting nodes from network "+dockerExtNetName) + ginkgo.By("Deleting docker network " + dockerExtNetName + " exists") + err := docker.NetworkRemove(dockerExtNetNetwork.ID) + framework.ExpectNoError(err, "deleting docker network "+dockerExtNetName) + } + }) + + _ = framework.Describe("vpc qos", func() { + ginkgo.BeforeEach(func() { + iperfServerCmd = []string{"iperf", "-s", "-i", "1", "-p", iperf2Port} + overlaySubnetV4Cidr = "10.0.0.0/24" + overlaySubnetV4Gw = "10.0.0.1" + lanIP = "10.0.0.254" + natgwQoS := "" + setupVpcNatGwTestEnvironment( + f, dockerExtNetNetwork, attachNetClient, + subnetClient, vpcClient, vpcNatGwClient, + vpcQosParams.vpc1Name, vpcQosParams.vpc1SubnetName, vpcQosParams.vpcNat1GwName, + natgwQoS, overlaySubnetV4Cidr, overlaySubnetV4Gw, lanIP, + dockerExtNetName, vpcQosParams.attachDefName, net1NicName, + vpcQosParams.subnetProvider, + true, + ) + annotations1 = map[string]string{ + util.LogicalSwitchAnnotation: vpcQosParams.vpc1SubnetName, + } + ginkgo.By("Creating pod " + vpcQosParams.vpc1PodName) + vpc1Pod = framework.MakePod(f.Namespace.Name, vpcQosParams.vpc1PodName, nil, annotations1, framework.AgnhostImage, iperfServerCmd, nil) + vpc1Pod = podClient.CreateSync(vpc1Pod) + + ginkgo.By("Creating eip " + vpcQosParams.vpc1EIPName) + vpc1EIP = framework.MakeIptablesEIP(vpcQosParams.vpc1EIPName, "", "", "", vpcQosParams.vpcNat1GwName, vpcQosParams.attachDefName, "") + _ = iptablesEIPClient.CreateSync(vpc1EIP) + vpc1EIP = waitForIptablesEIPReady(iptablesEIPClient, vpcQosParams.vpc1EIPName, 60*time.Second) + + ginkgo.By("Creating fip " + vpcQosParams.vpc1FIPName) + vpc1FIP = framework.MakeIptablesFIPRule(vpcQosParams.vpc1FIPName, vpcQosParams.vpc1EIPName, vpc1Pod.Status.PodIP) + _ = iptablesFIPClient.CreateSync(vpc1FIP) + + setupVpcNatGwTestEnvironment( + f, dockerExtNetNetwork, attachNetClient, + subnetClient, vpcClient, vpcNatGwClient, + vpcQosParams.vpc2Name, vpcQosParams.vpc2SubnetName, vpcQosParams.vpcNat2GwName, + natgwQoS, overlaySubnetV4Cidr, overlaySubnetV4Gw, 
+				dockerExtNetName, vpcQosParams.attachDefName, net1NicName,
+				vpcQosParams.subnetProvider,
+				true,
+			)
+
+			annotations2 = map[string]string{
+				util.LogicalSwitchAnnotation: vpcQosParams.vpc2SubnetName,
+			}
+
+			ginkgo.By("Creating pod " + vpcQosParams.vpc2PodName)
+			vpc2Pod = framework.MakePod(f.Namespace.Name, vpcQosParams.vpc2PodName, nil, annotations2, framework.AgnhostImage, iperfServerCmd, nil)
+			vpc2Pod = podClient.CreateSync(vpc2Pod)
+
+			ginkgo.By("Creating eip " + vpcQosParams.vpc2EIPName)
+			vpc2EIP = framework.MakeIptablesEIP(vpcQosParams.vpc2EIPName, "", "", "", vpcQosParams.vpcNat2GwName, vpcQosParams.attachDefName, "")
+			_ = iptablesEIPClient.CreateSync(vpc2EIP)
+			vpc2EIP = waitForIptablesEIPReady(iptablesEIPClient, vpcQosParams.vpc2EIPName, 60*time.Second)
+
+			ginkgo.By("Creating fip " + vpcQosParams.vpc2FIPName)
+			vpc2FIP = framework.MakeIptablesFIPRule(vpcQosParams.vpc2FIPName, vpcQosParams.vpc2EIPName, vpc2Pod.Status.PodIP)
+			_ = iptablesFIPClient.CreateSync(vpc2FIP)
+		})
+		ginkgo.AfterEach(func() {
+			ginkgo.By("Deleting fip " + vpcQosParams.vpc1FIPName)
+			iptablesFIPClient.DeleteSync(vpcQosParams.vpc1FIPName)
+
+			ginkgo.By("Deleting fip " + vpcQosParams.vpc2FIPName)
+			iptablesFIPClient.DeleteSync(vpcQosParams.vpc2FIPName)
+
+			ginkgo.By("Deleting eip " + vpcQosParams.vpc1EIPName)
+			iptablesEIPClient.DeleteSync(vpcQosParams.vpc1EIPName)
+
+			ginkgo.By("Deleting eip " + vpcQosParams.vpc2EIPName)
+			iptablesEIPClient.DeleteSync(vpcQosParams.vpc2EIPName)
+
+			ginkgo.By("Deleting pod " + vpcQosParams.vpc1PodName)
+			podClient.DeleteSync(vpcQosParams.vpc1PodName)
+
+			ginkgo.By("Deleting pod " + vpcQosParams.vpc2PodName)
+			podClient.DeleteSync(vpcQosParams.vpc2PodName)
+
+			ginkgo.By("Deleting custom vpc nat gw " + vpcQosParams.vpcNat1GwName)
+			vpcNatGwClient.DeleteSync(vpcQosParams.vpcNat1GwName)
+
+			ginkgo.By("Deleting custom vpc nat gw " + vpcQosParams.vpcNat2GwName)
+			vpcNatGwClient.DeleteSync(vpcQosParams.vpcNat2GwName)
+
+			// the only pod for vpc nat gateway
+			vpcNatGw1PodName := util.GenNatGwPodName(vpcQosParams.vpcNat1GwName)
+
+			// delete vpc nat gw statefulset remaining ip for eth0 and net1
+			overlaySubnet1 := subnetClient.Get(vpcQosParams.vpc1SubnetName)
+			macvlanSubnet := subnetClient.Get(vpcQosParams.attachDefName)
+			eth0IpName := ovs.PodNameToPortName(vpcNatGw1PodName, framework.KubeOvnNamespace, overlaySubnet1.Spec.Provider)
+			net1IpName := ovs.PodNameToPortName(vpcNatGw1PodName, framework.KubeOvnNamespace, macvlanSubnet.Spec.Provider)
+			ginkgo.By("Deleting vpc nat gw eth0 ip " + eth0IpName)
+			ipClient.DeleteSync(eth0IpName)
+			ginkgo.By("Deleting vpc nat gw net1 ip " + net1IpName)
+			ipClient.DeleteSync(net1IpName)
+			ginkgo.By("Deleting overlay subnet " + vpcQosParams.vpc1SubnetName)
+			subnetClient.DeleteSync(vpcQosParams.vpc1SubnetName)
+
+			ginkgo.By("Getting overlay subnet " + vpcQosParams.vpc2SubnetName)
+			overlaySubnet2 := subnetClient.Get(vpcQosParams.vpc2SubnetName)
+
+			vpcNatGw2PodName := util.GenNatGwPodName(vpcQosParams.vpcNat2GwName)
+			eth0IpName = ovs.PodNameToPortName(vpcNatGw2PodName, framework.KubeOvnNamespace, overlaySubnet2.Spec.Provider)
+			net1IpName = ovs.PodNameToPortName(vpcNatGw2PodName, framework.KubeOvnNamespace, macvlanSubnet.Spec.Provider)
+			ginkgo.By("Deleting vpc nat gw eth0 ip " + eth0IpName)
+			ipClient.DeleteSync(eth0IpName)
+			ginkgo.By("Deleting vpc nat gw net1 ip " + net1IpName)
+			ipClient.DeleteSync(net1IpName)
+			ginkgo.By("Deleting overlay subnet " + vpcQosParams.vpc2SubnetName)
+			subnetClient.DeleteSync(vpcQosParams.vpc2SubnetName)
+ + ginkgo.By("Deleting custom vpc " + vpcQosParams.vpc1Name) + vpcClient.DeleteSync(vpcQosParams.vpc1Name) + + ginkgo.By("Deleting custom vpc " + vpcQosParams.vpc2Name) + vpcClient.DeleteSync(vpcQosParams.vpc2Name) + }) + framework.ConformanceIt("default nic qos", func() { + // case 1: set qos policy for natgw + // case 2: rebuild qos when natgw pod restart + defaultQoSCases(f, vpcNatGwClient, podClient, qosPolicyClient, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, vpcQosParams.vpcNat1GwName) + }) + framework.ConformanceIt("eip qos", func() { + // case 1: set qos policy for eip + // case 2: update qos policy for eip + // case 3: change qos policy of eip + // case 4: rebuild qos when natgw pod restart + eipQoSCases(f, iptablesEIPClient, podClient, qosPolicyClient, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, vpcQosParams.vpc1EIPName, vpcQosParams.vpcNat1GwName) + }) + framework.ConformanceIt("specifying ip qos", func() { + // case 1: set specific ip qos policy for natgw + specifyingIPQoSCases(f, vpcNatGwClient, qosPolicyClient, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, vpcQosParams.vpcNat1GwName) + }) + framework.ConformanceIt("qos priority matching", func() { + // case 1: test qos match priority + // case 2: change qos policy of natgw + priorityQoSCases(f, vpcNatGwClient, iptablesEIPClient, qosPolicyClient, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, vpcQosParams.vpcNat1GwName, vpcQosParams.vpc1EIPName) + }) + framework.ConformanceIt("create resource with qos policy", func() { + // case 1: test qos when create natgw with qos policy + // case 2: test qos when create eip with qos policy + createNatGwAndSetQosCases(f, + vpcNatGwClient, ipClient, iptablesEIPClient, iptablesFIPClient, + subnetClient, qosPolicyClient, vpc1Pod, vpc2Pod, vpc2EIP, vpcQosParams.vpcNat1GwName, + vpcQosParams.vpc1EIPName, vpcQosParams.vpc1FIPName, vpcQosParams.vpc1Name, + vpcQosParams.vpc1SubnetName, lanIP, vpcQosParams.attachDefName) + }) + }) +}) + +func init() { + klog.SetOutput(ginkgo.GinkgoWriter) + + // Register flags. 
+ config.CopyFlags(config.Flags, flag.CommandLine) + k8sframework.RegisterCommonFlags(flag.CommandLine) + k8sframework.RegisterClusterFlags(flag.CommandLine) +} + +func TestE2E(t *testing.T) { + k8sframework.AfterReadingAllFlags(&k8sframework.TestContext) + e2e.RunE2ETests(t) +} diff --git a/test/e2e/iptables-vpc-nat-gw/e2e_test.go b/test/e2e/iptables-vpc-nat-gw/e2e_test.go index 33c2f336613..2668da96fef 100644 --- a/test/e2e/iptables-vpc-nat-gw/e2e_test.go +++ b/test/e2e/iptables-vpc-nat-gw/e2e_test.go @@ -2,16 +2,14 @@ package ovn_eip import ( "context" - "errors" "flag" "fmt" - "strconv" "strings" "testing" "time" dockernetwork "github.com/moby/moby/api/types/network" - corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/klog/v2" @@ -39,36 +37,6 @@ const ( externalSubnetProvider = "ovn-vpc-external-network.kube-system" ) -const ( - iperf2Port = "20288" - skipIperf = false -) - -const ( - eipLimit = iota*5 + 10 - updatedEIPLimit - newEIPLimit - specificIPLimit - defaultNicLimit -) - -type qosParams struct { - vpc1Name string - vpc2Name string - vpc1SubnetName string - vpc2SubnetName string - vpcNat1GwName string - vpcNat2GwName string - vpc1EIPName string - vpc2EIPName string - vpc1FIPName string - vpc2FIPName string - vpc1PodName string - vpc2PodName string - attachDefName string - subnetProvider string -} - func setupNetworkAttachmentDefinition( f *framework.Framework, dockerExtNetNetwork *dockernetwork.Inspect, @@ -84,7 +52,7 @@ func setupNetworkAttachmentDefinition( ginkgo.By("Getting docker network " + dockerExtNetName) network, err := docker.NetworkInspect(dockerExtNetName) framework.ExpectNoError(err, "getting docker network "+dockerExtNetName) - ginkgo.By("Getting network attachment definition " + externalNetworkName) + ginkgo.By("Getting or creating network attachment definition " + externalNetworkName) attachConf := fmt.Sprintf(`{ "cniVersion": "0.3.0", "type": "macvlan", @@ -96,9 +64,16 @@ func setupNetworkAttachmentDefinition( "provider": "%s" } }`, nicName, provider) - attachNet := framework.MakeNetworkAttachmentDefinition(externalNetworkName, framework.KubeOvnNamespace, attachConf) - attachNetClient.Create(attachNet) - nad := attachNetClient.Get(externalNetworkName) + + // Try to get existing NAD first + nad, err := attachNetClient.NetworkAttachmentDefinitionInterface.Get(context.TODO(), externalNetworkName, metav1.GetOptions{}) + if err != nil && k8serrors.IsNotFound(err) { + // NAD doesn't exist, create it + attachNet := framework.MakeNetworkAttachmentDefinition(externalNetworkName, framework.KubeOvnNamespace, attachConf) + nad = attachNetClient.Create(attachNet) + } else { + framework.ExpectNoError(err, "getting network attachment definition "+externalNetworkName) + } ginkgo.By("Got network attachment definition " + nad.Name) @@ -137,8 +112,16 @@ func setupNetworkAttachmentDefinition( excludeIPs = append(excludeIPs, container.IPv6Address.Addr().String()) } } - macvlanSubnet := framework.MakeSubnet(externalNetworkName, "", strings.Join(cidr, ","), strings.Join(gateway, ","), "", provider, excludeIPs, nil, nil) - _ = subnetClient.CreateSync(macvlanSubnet) + + // Check if subnet already exists + _, err = subnetClient.SubnetInterface.Get(context.TODO(), externalNetworkName, metav1.GetOptions{}) + if err != nil && k8serrors.IsNotFound(err) { + // Subnet doesn't exist, create it + macvlanSubnet := framework.MakeSubnet(externalNetworkName, "", 
strings.Join(cidr, ","), strings.Join(gateway, ","), "", provider, excludeIPs, nil, nil) + _ = subnetClient.CreateSync(macvlanSubnet) + } else { + framework.ExpectNoError(err, "getting subnet "+externalNetworkName) + } } func setupVpcNatGwTestEnvironment( @@ -207,6 +190,54 @@ func cleanVpcNatGwTestEnvironment( vpcClient.DeleteSync(vpcName) } +// waitForIptablesEIPReady waits for an IptablesEIP to be ready +func waitForIptablesEIPReady(eipClient *framework.IptablesEIPClient, eipName string, timeout time.Duration) *apiv1.IptablesEIP { + ginkgo.GinkgoHelper() + var eip *apiv1.IptablesEIP + deadline := time.Now().Add(timeout) + for time.Now().Before(deadline) { + eip = eipClient.Get(eipName) + if eip != nil && eip.Status.IP != "" && eip.Status.Ready { + framework.Logf("IptablesEIP %s is ready with IP: %s", eipName, eip.Status.IP) + return eip + } + time.Sleep(2 * time.Second) + } + framework.Failf("Timeout waiting for IptablesEIP %s to be ready", eipName) + return nil +} + +// verifySubnetStatusAfterEIPOperation verifies subnet status after EIP operation +func verifySubnetStatusAfterEIPOperation(subnetClient *framework.SubnetClient, subnetName string, + protocol, operation, shouldContainIP string, +) { + ginkgo.GinkgoHelper() + + subnet := subnetClient.Get(subnetName) + framework.Logf("Verifying subnet %s status after %s: Protocol=%s", subnetName, operation, protocol) + + switch protocol { + case apiv1.ProtocolIPv4: + framework.Logf("V4 Status: Available=%.0f, Using=%.0f", + subnet.Status.V4AvailableIPs, subnet.Status.V4UsingIPs) + if shouldContainIP != "" { + framework.ExpectTrue(strings.Contains(subnet.Status.V4UsingIPRange, shouldContainIP), + "IP %s should be in V4UsingIPRange after %s", shouldContainIP, operation) + } + case apiv1.ProtocolIPv6: + framework.Logf("V6 Status: Available=%.0f, Using=%.0f", + subnet.Status.V6AvailableIPs, subnet.Status.V6UsingIPs) + if shouldContainIP != "" { + framework.ExpectTrue(strings.Contains(subnet.Status.V6UsingIPRange, shouldContainIP), + "IP %s should be in V6UsingIPRange after %s", shouldContainIP, operation) + } + case apiv1.ProtocolDual: + framework.Logf("Dual Stack Status: V4Available=%.0f, V4Using=%.0f, V6Available=%.0f, V6Using=%.0f", + subnet.Status.V4AvailableIPs, subnet.Status.V4UsingIPs, + subnet.Status.V6AvailableIPs, subnet.Status.V6UsingIPs) + } +} + var _ = framework.SerialDescribe("[group:iptables-vpc-nat-gw]", func() { f := framework.NewDefaultFramework("iptables-vpc-nat-gw") @@ -240,37 +271,38 @@ var _ = framework.SerialDescribe("[group:iptables-vpc-nat-gw]", func() { var net2VpcName string var net2EipName string - vpcName = "vpc-" + framework.RandomSuffix() - vpcNatGwName = "gw-" + framework.RandomSuffix() - - fipVipName = "fip-vip-" + framework.RandomSuffix() - fipEipName = "fip-eip-" + framework.RandomSuffix() - fipName = "fip-" + framework.RandomSuffix() - - dnatVipName = "dnat-vip-" + framework.RandomSuffix() - dnatEipName = "dnat-eip-" + framework.RandomSuffix() - dnatName = "dnat-" + framework.RandomSuffix() - - // sharing case - sharedVipName = "shared-vip-" + framework.RandomSuffix() - sharedEipName = "shared-eip-" + framework.RandomSuffix() - sharedEipDnatName = "shared-eip-dnat-" + framework.RandomSuffix() - sharedEipSnatName = "shared-eip-snat-" + framework.RandomSuffix() - sharedEipFipShouldOkName = "shared-eip-fip-should-ok-" + framework.RandomSuffix() - sharedEipFipShouldFailName = "shared-eip-fip-should-fail-" + framework.RandomSuffix() - - snatEipName = "snat-eip-" + framework.RandomSuffix() - snatName = "snat-" + 
framework.RandomSuffix() - overlaySubnetName = "overlay-subnet-" + framework.RandomSuffix() - - net2AttachDefName = "net2-ovn-vpc-external-network-" + framework.RandomSuffix() - net2SubnetProvider = fmt.Sprintf("%s.%s", net2AttachDefName, framework.KubeOvnNamespace) - net2OverlaySubnetName = "net2-overlay-subnet-" + framework.RandomSuffix() - net2VpcNatGwName = "net2-gw-" + framework.RandomSuffix() - net2VpcName = "net2-vpc-" + framework.RandomSuffix() - net2EipName = "net2-eip-" + framework.RandomSuffix() - ginkgo.BeforeEach(func() { + randomSuffix := framework.RandomSuffix() + vpcName = "vpc-" + randomSuffix + vpcNatGwName = "gw-" + randomSuffix + + fipVipName = "fip-vip-" + randomSuffix + fipEipName = "fip-eip-" + randomSuffix + fipName = "fip-" + randomSuffix + + dnatVipName = "dnat-vip-" + randomSuffix + dnatEipName = "dnat-eip-" + randomSuffix + dnatName = "dnat-" + randomSuffix + + // sharing case + sharedVipName = "shared-vip-" + randomSuffix + sharedEipName = "shared-eip-" + randomSuffix + sharedEipDnatName = "shared-eip-dnat-" + randomSuffix + sharedEipSnatName = "shared-eip-snat-" + randomSuffix + sharedEipFipShouldOkName = "shared-eip-fip-should-ok-" + randomSuffix + sharedEipFipShouldFailName = "shared-eip-fip-should-fail-" + randomSuffix + + snatEipName = "snat-eip-" + randomSuffix + snatName = "snat-" + randomSuffix + overlaySubnetName = "overlay-subnet-" + randomSuffix + + net2AttachDefName = "net2-ovn-vpc-external-network-" + randomSuffix + net2SubnetProvider = fmt.Sprintf("%s.%s", net2AttachDefName, framework.KubeOvnNamespace) + net2OverlaySubnetName = "net2-overlay-subnet-" + randomSuffix + net2VpcNatGwName = "net2-gw-" + randomSuffix + net2VpcName = "net2-vpc-" + randomSuffix + net2EipName = "net2-eip-" + randomSuffix + cs = f.ClientSet attachNetClient = f.NetworkAttachmentDefinitionClientNS(framework.KubeOvnNamespace) subnetClient = f.SubnetClient() @@ -399,7 +431,7 @@ var _ = framework.SerialDescribe("[group:iptables-vpc-nat-gw]", func() { cm, err := f.ClientSet.CoreV1().ConfigMaps(framework.KubeOvnNamespace).Get(context.Background(), vpcNatConfigName, metav1.GetOptions{}) framework.ExpectNoError(err) oldImage := cm.Data["image"] - cm.Data["image"] = "docker.io/kubeovn/vpc-nat-gateway:v1.12.18" + cm.Data["image"] = "docker.io/kubeovn/vpc-nat-gateway:v1.14.19" cm, err = f.ClientSet.CoreV1().ConfigMaps(framework.KubeOvnNamespace).Update(context.Background(), cm, metav1.UpdateOptions{}) framework.ExpectNoError(err) time.Sleep(3 * time.Second) @@ -520,10 +552,10 @@ var _ = framework.SerialDescribe("[group:iptables-vpc-nat-gw]", func() { iptablesFIPClient.DeleteSync(sharedEipFipShouldOkName) ginkgo.By("Deleting share iptables fip " + sharedEipFipShouldFailName) iptablesFIPClient.DeleteSync(sharedEipFipShouldFailName) - ginkgo.By("Deleting share iptables dnat " + dnatName) - iptablesDnatRuleClient.DeleteSync(dnatName) - ginkgo.By("Deleting share iptables snat " + snatName) - iptablesSnatRuleClient.DeleteSync(snatName) + ginkgo.By("Deleting share iptables dnat " + sharedEipDnatName) + iptablesDnatRuleClient.DeleteSync(sharedEipDnatName) + ginkgo.By("Deleting share iptables snat " + sharedEipSnatName) + iptablesSnatRuleClient.DeleteSync(sharedEipSnatName) ginkgo.By("Deleting iptables fip " + fipName) iptablesFIPClient.DeleteSync(fipName) @@ -616,779 +648,451 @@ var _ = framework.SerialDescribe("[group:iptables-vpc-nat-gw]", func() { ginkgo.By("Deleting custom vpc " + net2VpcName) vpcClient.DeleteSync(net2VpcName) }) -}) -func iperf(f *framework.Framework, iperfClientPod 
*corev1.Pod, iperfServerEIP *apiv1.IptablesEIP) string { - ginkgo.GinkgoHelper() - - for i := range 20 { - command := fmt.Sprintf("iperf -e -p %s --reportstyle C -i 1 -c %s -t 10", iperf2Port, iperfServerEIP.Status.IP) - stdOutput, errOutput, err := framework.ExecShellInPod(context.Background(), f, iperfClientPod.Namespace, iperfClientPod.Name, command) - framework.Logf("output from exec on client pod %s (eip %s)\n", iperfClientPod.Name, iperfServerEIP.Name) - if stdOutput != "" && err == nil { - framework.Logf("output:\n%s", stdOutput) - return stdOutput - } - framework.Logf("exec %s failed err: %v, errOutput: %s, stdOutput: %s, retried %d times.", command, err, errOutput, stdOutput, i) - time.Sleep(6 * time.Second) - } - framework.ExpectNoError(errors.New("iperf failed")) - return "" -} + framework.ConformanceIt("should properly manage IptablesEIP lifecycle with finalizer and update subnet status", func() { + f.SkipVersionPriorTo(1, 13, "This feature was introduced in v1.13") -func checkQos(f *framework.Framework, - vpc1Pod, vpc2Pod *corev1.Pod, vpc1EIP, vpc2EIP *apiv1.IptablesEIP, - limit int, expect bool, -) { - ginkgo.GinkgoHelper() + overlaySubnetV4Cidr := "10.0.3.0/24" + overlaySubnetV4Gw := "10.0.3.1" + lanIP := "10.0.3.254" + natgwQoS := "" + setupVpcNatGwTestEnvironment( + f, dockerExtNet1Network, attachNetClient, + subnetClient, vpcClient, vpcNatGwClient, + vpcName, overlaySubnetName, vpcNatGwName, natgwQoS, + overlaySubnetV4Cidr, overlaySubnetV4Gw, lanIP, + dockerExtNet1Name, networkAttachDefName, net1NicName, + externalSubnetProvider, + false, + ) - if !skipIperf { - if expect { - output := iperf(f, vpc1Pod, vpc2EIP) - framework.ExpectTrue(validRateLimit(output, limit)) - output = iperf(f, vpc2Pod, vpc1EIP) - framework.ExpectTrue(validRateLimit(output, limit)) - } else { - output := iperf(f, vpc1Pod, vpc2EIP) - framework.ExpectFalse(validRateLimit(output, limit)) - output = iperf(f, vpc2Pod, vpc1EIP) - framework.ExpectFalse(validRateLimit(output, limit)) + ginkgo.By("1. Get initial external subnet status") + externalSubnetName := util.GetExternalNetwork(networkAttachDefName) + initialSubnet := subnetClient.Get(externalSubnetName) + initialV4AvailableIPs := initialSubnet.Status.V4AvailableIPs + initialV4UsingIPs := initialSubnet.Status.V4UsingIPs + initialV6AvailableIPs := initialSubnet.Status.V6AvailableIPs + initialV6UsingIPs := initialSubnet.Status.V6UsingIPs + initialV4AvailableIPRange := initialSubnet.Status.V4AvailableIPRange + initialV4UsingIPRange := initialSubnet.Status.V4UsingIPRange + initialV6AvailableIPRange := initialSubnet.Status.V6AvailableIPRange + initialV6UsingIPRange := initialSubnet.Status.V6UsingIPRange + + ginkgo.By("2. Create IptablesEIP to trigger IP allocation") + eipName := "test-eip-finalizer-" + framework.RandomSuffix() + eip := framework.MakeIptablesEIP(eipName, "", "", "", vpcNatGwName, "", "") + _ = iptablesEIPClient.CreateSync(eip) + + ginkgo.By("3. Wait for IptablesEIP CR to be ready") + eipCR := waitForIptablesEIPReady(iptablesEIPClient, eipName, 60*time.Second) + framework.ExpectNotNil(eipCR, "IptablesEIP CR should be created and ready") + framework.ExpectNotEmpty(eipCR.Status.IP, "IptablesEIP should have IP assigned") + + ginkgo.By("4. 
Wait for IptablesEIP CR finalizer to be added") + for range 60 { + eipCR = iptablesEIPClient.Get(eipName) + if eipCR != nil && len(eipCR.Finalizers) > 0 { + break + } + time.Sleep(1 * time.Second) } - } -} - -func newVPCQoSParamsInit() *qosParams { - qosParams := &qosParams{ - vpc1Name: "qos-vpc1-" + framework.RandomSuffix(), - vpc2Name: "qos-vpc2-" + framework.RandomSuffix(), - vpc1SubnetName: "qos-vpc1-subnet-" + framework.RandomSuffix(), - vpc2SubnetName: "qos-vpc2-subnet-" + framework.RandomSuffix(), - vpcNat1GwName: "qos-vpc1-gw-" + framework.RandomSuffix(), - vpcNat2GwName: "qos-vpc2-gw-" + framework.RandomSuffix(), - vpc1EIPName: "qos-vpc1-eip-" + framework.RandomSuffix(), - vpc2EIPName: "qos-vpc2-eip-" + framework.RandomSuffix(), - vpc1FIPName: "qos-vpc1-fip-" + framework.RandomSuffix(), - vpc2FIPName: "qos-vpc2-fip-" + framework.RandomSuffix(), - vpc1PodName: "qos-vpc1-pod-" + framework.RandomSuffix(), - vpc2PodName: "qos-vpc2-pod-" + framework.RandomSuffix(), - attachDefName: "qos-ovn-vpc-external-network-" + framework.RandomSuffix(), - } - qosParams.subnetProvider = fmt.Sprintf("%s.%s", qosParams.attachDefName, framework.KubeOvnNamespace) - return qosParams -} - -func getNicDefaultQoSPolicy(limit int) apiv1.QoSPolicyBandwidthLimitRules { - return apiv1.QoSPolicyBandwidthLimitRules{ - apiv1.QoSPolicyBandwidthLimitRule{ - Name: "net1-ingress", - Interface: "net1", - RateMax: strconv.Itoa(limit), - BurstMax: strconv.Itoa(limit), - Priority: 3, - Direction: apiv1.QoSDirectionIngress, - }, - apiv1.QoSPolicyBandwidthLimitRule{ - Name: "net1-egress", - Interface: "net1", - RateMax: strconv.Itoa(limit), - BurstMax: strconv.Itoa(limit), - Priority: 3, - Direction: apiv1.QoSDirectionEgress, - }, - } -} - -func getEIPQoSRule(limit int) apiv1.QoSPolicyBandwidthLimitRules { - return apiv1.QoSPolicyBandwidthLimitRules{ - apiv1.QoSPolicyBandwidthLimitRule{ - Name: "eip-ingress", - RateMax: strconv.Itoa(limit), - BurstMax: strconv.Itoa(limit), - Priority: 1, - Direction: apiv1.QoSDirectionIngress, - }, - apiv1.QoSPolicyBandwidthLimitRule{ - Name: "eip-egress", - RateMax: strconv.Itoa(limit), - BurstMax: strconv.Itoa(limit), - Priority: 1, - Direction: apiv1.QoSDirectionEgress, - }, - } -} - -func getSpecialQoSRule(limit int, ip string) apiv1.QoSPolicyBandwidthLimitRules { - return apiv1.QoSPolicyBandwidthLimitRules{ - apiv1.QoSPolicyBandwidthLimitRule{ - Name: "net1-extip-ingress", - Interface: "net1", - RateMax: strconv.Itoa(limit), - BurstMax: strconv.Itoa(limit), - Priority: 2, - Direction: apiv1.QoSDirectionIngress, - MatchType: apiv1.QoSMatchTypeIP, - MatchValue: "src " + ip + "/32", - }, - apiv1.QoSPolicyBandwidthLimitRule{ - Name: "net1-extip-egress", - Interface: "net1", - RateMax: strconv.Itoa(limit), - BurstMax: strconv.Itoa(limit), - Priority: 2, - Direction: apiv1.QoSDirectionEgress, - MatchType: apiv1.QoSMatchTypeIP, - MatchValue: "dst " + ip + "/32", - }, - } -} - -// defaultQoSCases test default qos policy= -func defaultQoSCases(f *framework.Framework, - vpcNatGwClient *framework.VpcNatGatewayClient, - podClient *framework.PodClient, - qosPolicyClient *framework.QoSPolicyClient, - vpc1Pod *corev1.Pod, - vpc2Pod *corev1.Pod, - vpc1EIP *apiv1.IptablesEIP, - vpc2EIP *apiv1.IptablesEIP, - natgwName string, -) { - ginkgo.GinkgoHelper() - - // create nic qos policy - qosPolicyName := "default-nic-qos-policy-" + framework.RandomSuffix() - ginkgo.By("Creating qos policy " + qosPolicyName) - rules := getNicDefaultQoSPolicy(defaultNicLimit) - - qosPolicy := 
framework.MakeQoSPolicy(qosPolicyName, true, apiv1.QoSBindingTypeNatGw, rules) - _ = qosPolicyClient.CreateSync(qosPolicy) - - ginkgo.By("Patch natgw " + natgwName + " with qos policy " + qosPolicyName) - _ = vpcNatGwClient.PatchQoSPolicySync(natgwName, qosPolicyName) - - ginkgo.By("Check qos " + qosPolicyName + " is limited to " + strconv.Itoa(defaultNicLimit) + "Mbps") - checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, defaultNicLimit, true) - - ginkgo.By("Delete natgw pod " + natgwName + "-0") - natGwPodName := util.GenNatGwPodName(natgwName) - podClient.DeleteSync(natGwPodName) - - ginkgo.By("Wait for natgw " + natgwName + "qos rebuild") - time.Sleep(5 * time.Second) - - ginkgo.By("Check qos " + qosPolicyName + " is limited to " + strconv.Itoa(defaultNicLimit) + "Mbps") - checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, defaultNicLimit, true) - - ginkgo.By("Remove qos policy " + qosPolicyName + " from natgw " + natgwName) - _ = vpcNatGwClient.PatchQoSPolicySync(natgwName, "") - - ginkgo.By("Deleting qos policy " + qosPolicyName) - qosPolicyClient.DeleteSync(qosPolicyName) - - ginkgo.By("Check qos " + qosPolicyName + " is not limited to " + strconv.Itoa(defaultNicLimit) + "Mbps") - checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, defaultNicLimit, false) -} - -// eipQoSCases test default qos policy -func eipQoSCases(f *framework.Framework, - eipClient *framework.IptablesEIPClient, - podClient *framework.PodClient, - qosPolicyClient *framework.QoSPolicyClient, - vpc1Pod *corev1.Pod, - vpc2Pod *corev1.Pod, - vpc1EIP *apiv1.IptablesEIP, - vpc2EIP *apiv1.IptablesEIP, - eipName string, - natgwName string, -) { - ginkgo.GinkgoHelper() - - // create eip qos policy - qosPolicyName := "eip-qos-policy-" + framework.RandomSuffix() - ginkgo.By("Creating qos policy " + qosPolicyName) - rules := getEIPQoSRule(eipLimit) - - qosPolicy := framework.MakeQoSPolicy(qosPolicyName, false, apiv1.QoSBindingTypeEIP, rules) - qosPolicy = qosPolicyClient.CreateSync(qosPolicy) - - ginkgo.By("Patch eip " + eipName + " with qos policy " + qosPolicyName) - _ = eipClient.PatchQoSPolicySync(eipName, qosPolicyName) - - ginkgo.By("Check qos " + qosPolicyName + " is limited to " + strconv.Itoa(eipLimit) + "Mbps") - checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, eipLimit, true) - - ginkgo.By("Update qos policy " + qosPolicyName + " with new rate limit") - - rules = getEIPQoSRule(updatedEIPLimit) - modifiedqosPolicy := qosPolicy.DeepCopy() - modifiedqosPolicy.Spec.BandwidthLimitRules = rules - qosPolicyClient.Patch(qosPolicy, modifiedqosPolicy) - qosPolicyClient.WaitToQoSReady(qosPolicyName) - - ginkgo.By("Check qos " + qosPolicyName + " is changed to " + strconv.Itoa(updatedEIPLimit) + "Mbps") - checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, updatedEIPLimit, true) - - ginkgo.By("Delete natgw pod " + natgwName + "-0") - natGwPodName := util.GenNatGwPodName(natgwName) - podClient.DeleteSync(natGwPodName) - - ginkgo.By("Wait for natgw " + natgwName + "qos rebuid") - time.Sleep(5 * time.Second) - - ginkgo.By("Check qos " + qosPolicyName + " is limited to " + strconv.Itoa(updatedEIPLimit) + "Mbps") - checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, updatedEIPLimit, true) - - newQoSPolicyName := "new-eip-qos-policy-" + framework.RandomSuffix() - newRules := getEIPQoSRule(newEIPLimit) - newQoSPolicy := framework.MakeQoSPolicy(newQoSPolicyName, false, apiv1.QoSBindingTypeEIP, newRules) - _ = qosPolicyClient.CreateSync(newQoSPolicy) - - ginkgo.By("Change qos policy of eip " + eipName + " to " + newQoSPolicyName) - _ = 
eipClient.PatchQoSPolicySync(eipName, newQoSPolicyName) - - ginkgo.By("Check qos " + qosPolicyName + " is limited to " + strconv.Itoa(newEIPLimit) + "Mbps") - checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, newEIPLimit, true) - - ginkgo.By("Remove qos policy " + qosPolicyName + " from natgw " + eipName) - _ = eipClient.PatchQoSPolicySync(eipName, "") - - ginkgo.By("Deleting qos policy " + qosPolicyName) - qosPolicyClient.DeleteSync(qosPolicyName) - - ginkgo.By("Deleting qos policy " + newQoSPolicyName) - qosPolicyClient.DeleteSync(newQoSPolicyName) - - ginkgo.By("Check qos " + qosPolicyName + " is not limited to " + strconv.Itoa(newEIPLimit) + "Mbps") - checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, newEIPLimit, false) -} - -// specifyingIPQoSCases test default qos policy -func specifyingIPQoSCases(f *framework.Framework, - vpcNatGwClient *framework.VpcNatGatewayClient, - qosPolicyClient *framework.QoSPolicyClient, - vpc1Pod *corev1.Pod, - vpc2Pod *corev1.Pod, - vpc1EIP *apiv1.IptablesEIP, - vpc2EIP *apiv1.IptablesEIP, - natgwName string, -) { - ginkgo.GinkgoHelper() - - // create nic qos policy - qosPolicyName := "specifying-ip-qos-policy-" + framework.RandomSuffix() - ginkgo.By("Creating qos policy " + qosPolicyName) - - rules := getSpecialQoSRule(specificIPLimit, vpc2EIP.Status.IP) - - qosPolicy := framework.MakeQoSPolicy(qosPolicyName, true, apiv1.QoSBindingTypeNatGw, rules) - _ = qosPolicyClient.CreateSync(qosPolicy) - - ginkgo.By("Patch natgw " + natgwName + " with qos policy " + qosPolicyName) - _ = vpcNatGwClient.PatchQoSPolicySync(natgwName, qosPolicyName) - - ginkgo.By("Check qos " + qosPolicyName + " is limited to " + strconv.Itoa(specificIPLimit) + "Mbps") - checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, specificIPLimit, true) - - ginkgo.By("Remove qos policy " + qosPolicyName + " from natgw " + natgwName) - _ = vpcNatGwClient.PatchQoSPolicySync(natgwName, "") - - ginkgo.By("Deleting qos policy " + qosPolicyName) - qosPolicyClient.DeleteSync(qosPolicyName) - - ginkgo.By("Check qos " + qosPolicyName + " is not limited to " + strconv.Itoa(specificIPLimit) + "Mbps") - checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, specificIPLimit, false) -} - -// priorityQoSCases test qos match priority -func priorityQoSCases(f *framework.Framework, - vpcNatGwClient *framework.VpcNatGatewayClient, - eipClient *framework.IptablesEIPClient, - qosPolicyClient *framework.QoSPolicyClient, - vpc1Pod *corev1.Pod, - vpc2Pod *corev1.Pod, - vpc1EIP *apiv1.IptablesEIP, - vpc2EIP *apiv1.IptablesEIP, - natgwName string, - eipName string, -) { - ginkgo.GinkgoHelper() - - // create nic qos policy - natGwQoSPolicyName := "priority-nic-qos-policy-" + framework.RandomSuffix() - ginkgo.By("Creating qos policy " + natGwQoSPolicyName) - // default qos policy + special qos policy - natgwRules := getNicDefaultQoSPolicy(defaultNicLimit) - natgwRules = append(natgwRules, getSpecialQoSRule(specificIPLimit, vpc2EIP.Status.IP)...) 
- - natgwQoSPolicy := framework.MakeQoSPolicy(natGwQoSPolicyName, true, apiv1.QoSBindingTypeNatGw, natgwRules) - _ = qosPolicyClient.CreateSync(natgwQoSPolicy) - - ginkgo.By("Patch natgw " + natgwName + " with qos policy " + natGwQoSPolicyName) - _ = vpcNatGwClient.PatchQoSPolicySync(natgwName, natGwQoSPolicyName) - - eipQoSPolicyName := "eip-qos-policy-" + framework.RandomSuffix() - ginkgo.By("Creating qos policy " + eipQoSPolicyName) - eipRules := getEIPQoSRule(eipLimit) - - eipQoSPolicy := framework.MakeQoSPolicy(eipQoSPolicyName, false, apiv1.QoSBindingTypeEIP, eipRules) - _ = qosPolicyClient.CreateSync(eipQoSPolicy) - - ginkgo.By("Patch eip " + eipName + " with qos policy " + eipQoSPolicyName) - _ = eipClient.PatchQoSPolicySync(eipName, eipQoSPolicyName) - - // match qos of priority 1 - ginkgo.By("Check qos to match priority 1 is limited to " + strconv.Itoa(eipLimit) + "Mbps") - checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, eipLimit, true) - - ginkgo.By("Remove qos policy " + eipQoSPolicyName + " from natgw " + eipName) - _ = eipClient.PatchQoSPolicySync(eipName, "") - - ginkgo.By("Deleting qos policy " + eipQoSPolicyName) - qosPolicyClient.DeleteSync(eipQoSPolicyName) - - // match qos of priority 2 - ginkgo.By("Check qos to match priority 2 is limited to " + strconv.Itoa(specificIPLimit) + "Mbps") - checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, specificIPLimit, true) - - // change qos policy of natgw - newNatGwQoSPolicyName := "new-priority-nic-qos-policy-" + framework.RandomSuffix() - ginkgo.By("Creating qos policy " + newNatGwQoSPolicyName) - newNatgwRules := getNicDefaultQoSPolicy(defaultNicLimit) - - newNatgwQoSPolicy := framework.MakeQoSPolicy(newNatGwQoSPolicyName, true, apiv1.QoSBindingTypeNatGw, newNatgwRules) - _ = qosPolicyClient.CreateSync(newNatgwQoSPolicy) - - ginkgo.By("Change qos policy of natgw " + natgwName + " to " + newNatGwQoSPolicyName) - _ = vpcNatGwClient.PatchQoSPolicySync(natgwName, newNatGwQoSPolicyName) - - // match qos of priority 3 - ginkgo.By("Check qos to match priority 3 is limited to " + strconv.Itoa(specificIPLimit) + "Mbps") - checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, defaultNicLimit, true) - - ginkgo.By("Remove qos policy " + natGwQoSPolicyName + " from natgw " + natgwName) - _ = vpcNatGwClient.PatchQoSPolicySync(natgwName, "") - - ginkgo.By("Deleting qos policy " + natGwQoSPolicyName) - qosPolicyClient.DeleteSync(natGwQoSPolicyName) - - ginkgo.By("Deleting qos policy " + newNatGwQoSPolicyName) - qosPolicyClient.DeleteSync(newNatGwQoSPolicyName) - - ginkgo.By("Check qos " + natGwQoSPolicyName + " is not limited to " + strconv.Itoa(defaultNicLimit) + "Mbps") - checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, defaultNicLimit, false) -} - -func createNatGwAndSetQosCases(f *framework.Framework, - vpcNatGwClient *framework.VpcNatGatewayClient, - ipClient *framework.IPClient, - eipClient *framework.IptablesEIPClient, - fipClient *framework.IptablesFIPClient, - subnetClient *framework.SubnetClient, - qosPolicyClient *framework.QoSPolicyClient, - vpc1Pod *corev1.Pod, - vpc2Pod *corev1.Pod, - vpc2EIP *apiv1.IptablesEIP, - natgwName string, - eipName string, - fipName string, - vpcName string, - overlaySubnetName string, - lanIP string, - attachDefName string, -) { - ginkgo.GinkgoHelper() - - // delete fip - ginkgo.By("Deleting fip " + fipName) - fipClient.DeleteSync(fipName) - - ginkgo.By("Deleting eip " + eipName) - eipClient.DeleteSync(eipName) - - // the only pod for vpc nat gateway - vpcNatGw1PodName := util.GenNatGwPodName(natgwName) - - // 
delete vpc nat gw statefulset remaining ip for eth0 and net2 - ginkgo.By("Deleting custom vpc nat gw " + natgwName) - vpcNatGwClient.DeleteSync(natgwName) - - overlaySubnet1 := subnetClient.Get(overlaySubnetName) - macvlanSubnet := subnetClient.Get(attachDefName) - eth0IpName := ovs.PodNameToPortName(vpcNatGw1PodName, framework.KubeOvnNamespace, overlaySubnet1.Spec.Provider) - net1IpName := ovs.PodNameToPortName(vpcNatGw1PodName, framework.KubeOvnNamespace, macvlanSubnet.Spec.Provider) - ginkgo.By("Deleting vpc nat gw eth0 ip " + eth0IpName) - ipClient.DeleteSync(eth0IpName) - ginkgo.By("Deleting vpc nat gw net1 ip " + net1IpName) - ipClient.DeleteSync(net1IpName) - - natgwQoSPolicyName := "default-nic-qos-policy-" + framework.RandomSuffix() - ginkgo.By("Creating qos policy " + natgwQoSPolicyName) - rules := getNicDefaultQoSPolicy(defaultNicLimit) - - qosPolicy := framework.MakeQoSPolicy(natgwQoSPolicyName, true, apiv1.QoSBindingTypeNatGw, rules) - _ = qosPolicyClient.CreateSync(qosPolicy) - - ginkgo.By("Creating custom vpc nat gw") - vpcNatGw := framework.MakeVpcNatGateway(natgwName, vpcName, overlaySubnetName, lanIP, attachDefName, natgwQoSPolicyName) - _ = vpcNatGwClient.CreateSync(vpcNatGw, f.ClientSet) - - eipQoSPolicyName := "eip-qos-policy-" + framework.RandomSuffix() - ginkgo.By("Creating qos policy " + eipQoSPolicyName) - rules = getEIPQoSRule(eipLimit) - - eipQoSPolicy := framework.MakeQoSPolicy(eipQoSPolicyName, false, apiv1.QoSBindingTypeEIP, rules) - _ = qosPolicyClient.CreateSync(eipQoSPolicy) + framework.ExpectNotNil(eipCR, "IptablesEIP CR should exist") + framework.ExpectContainElement(eipCR.Finalizers, util.KubeOVNControllerFinalizer, + "IptablesEIP CR should have finalizer after creation") - ginkgo.By("Creating eip " + eipName) - vpc1EIP := framework.MakeIptablesEIP(eipName, "", "", "", natgwName, attachDefName, eipQoSPolicyName) - vpc1EIP = eipClient.CreateSync(vpc1EIP) + ginkgo.By("5. Wait for external subnet status to be updated after IptablesEIP creation") + time.Sleep(5 * time.Second) - ginkgo.By("Creating fip " + fipName) - fip := framework.MakeIptablesFIPRule(fipName, eipName, vpc1Pod.Status.PodIP) - _ = fipClient.CreateSync(fip) + ginkgo.By("6. 
Verify external subnet status after IptablesEIP CR creation") + afterCreateSubnet := subnetClient.Get(externalSubnetName) + verifySubnetStatusAfterEIPOperation(subnetClient, externalSubnetName, + afterCreateSubnet.Spec.Protocol, "IptablesEIP creation", eipCR.Status.IP) - ginkgo.By("Check qos " + eipQoSPolicyName + " is limited to " + strconv.Itoa(eipLimit) + "Mbps") - checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, eipLimit, true) - - ginkgo.By("Remove qos policy " + eipQoSPolicyName + " from natgw " + natgwName) - _ = eipClient.PatchQoSPolicySync(eipName, "") - - ginkgo.By("Check qos " + natgwQoSPolicyName + " is limited to " + strconv.Itoa(defaultNicLimit) + "Mbps") - checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, defaultNicLimit, true) - - ginkgo.By("Remove qos policy " + natgwQoSPolicyName + " from natgw " + natgwName) - _ = vpcNatGwClient.PatchQoSPolicySync(natgwName, "") - - ginkgo.By("Check qos " + natgwQoSPolicyName + " is not limited to " + strconv.Itoa(defaultNicLimit) + "Mbps") - checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, defaultNicLimit, false) - - ginkgo.By("Deleting qos policy " + natgwQoSPolicyName) - qosPolicyClient.DeleteSync(natgwQoSPolicyName) - - ginkgo.By("Deleting qos policy " + eipQoSPolicyName) - qosPolicyClient.DeleteSync(eipQoSPolicyName) -} - -func validRateLimit(text string, limit int) bool { - maxValue := float64(limit) * 1024 * 1024 * 1.2 - minValue := float64(limit) * 1024 * 1024 * 0.8 - lines := strings.SplitSeq(text, "\n") - for line := range lines { - if line == "" { - continue - } - fields := strings.Split(line, ",") - number, err := strconv.Atoi(fields[len(fields)-1]) - if err != nil { - continue - } - if v := float64(number); v >= minValue && v <= maxValue { - return true + // Verify IP count and range changes + switch afterCreateSubnet.Spec.Protocol { + case apiv1.ProtocolIPv4: + framework.ExpectEqual(initialV4AvailableIPs-1, afterCreateSubnet.Status.V4AvailableIPs, + "V4AvailableIPs should decrease by 1 after IptablesEIP creation") + framework.ExpectEqual(initialV4UsingIPs+1, afterCreateSubnet.Status.V4UsingIPs, + "V4UsingIPs should increase by 1 after IptablesEIP creation") + framework.ExpectNotEqual(initialV4AvailableIPRange, afterCreateSubnet.Status.V4AvailableIPRange, + "V4AvailableIPRange should change after IptablesEIP creation") + framework.ExpectNotEqual(initialV4UsingIPRange, afterCreateSubnet.Status.V4UsingIPRange, + "V4UsingIPRange should change after IptablesEIP creation") + case apiv1.ProtocolIPv6: + framework.ExpectEqual(initialV6AvailableIPs-1, afterCreateSubnet.Status.V6AvailableIPs, + "V6AvailableIPs should decrease by 1 after IptablesEIP creation") + framework.ExpectEqual(initialV6UsingIPs+1, afterCreateSubnet.Status.V6UsingIPs, + "V6UsingIPs should increase by 1 after IptablesEIP creation") + framework.ExpectNotEqual(initialV6AvailableIPRange, afterCreateSubnet.Status.V6AvailableIPRange, + "V6AvailableIPRange should change after IptablesEIP creation") + framework.ExpectNotEqual(initialV6UsingIPRange, afterCreateSubnet.Status.V6UsingIPRange, + "V6UsingIPRange should change after IptablesEIP creation") + default: + // Dual stack + framework.ExpectEqual(initialV4AvailableIPs-1, afterCreateSubnet.Status.V4AvailableIPs, + "V4AvailableIPs should decrease by 1 after IptablesEIP creation") + framework.ExpectEqual(initialV4UsingIPs+1, afterCreateSubnet.Status.V4UsingIPs, + "V4UsingIPs should increase by 1 after IptablesEIP creation") + framework.ExpectEqual(initialV6AvailableIPs-1, afterCreateSubnet.Status.V6AvailableIPs, + 
"V6AvailableIPs should decrease by 1 after IptablesEIP creation") + framework.ExpectEqual(initialV6UsingIPs+1, afterCreateSubnet.Status.V6UsingIPs, + "V6UsingIPs should increase by 1 after IptablesEIP creation") + framework.ExpectNotEqual(initialV4AvailableIPRange, afterCreateSubnet.Status.V4AvailableIPRange, + "V4AvailableIPRange should change after IptablesEIP creation") + framework.ExpectNotEqual(initialV4UsingIPRange, afterCreateSubnet.Status.V4UsingIPRange, + "V4UsingIPRange should change after IptablesEIP creation") + framework.ExpectNotEqual(initialV6AvailableIPRange, afterCreateSubnet.Status.V6AvailableIPRange, + "V6AvailableIPRange should change after IptablesEIP creation") + framework.ExpectNotEqual(initialV6UsingIPRange, afterCreateSubnet.Status.V6UsingIPRange, + "V6UsingIPRange should change after IptablesEIP creation") } - } - return false -} - -var _ = framework.Describe("[group:qos-policy]", func() { - f := framework.NewDefaultFramework("qos-policy") - var skip bool - var cs clientset.Interface - var attachNetClient *framework.NetworkAttachmentDefinitionClient - var clusterName string - var vpcClient *framework.VpcClient - var vpcNatGwClient *framework.VpcNatGatewayClient - var subnetClient *framework.SubnetClient - var podClient *framework.PodClient - var ipClient *framework.IPClient - var iptablesEIPClient *framework.IptablesEIPClient - var iptablesFIPClient *framework.IptablesFIPClient - var qosPolicyClient *framework.QoSPolicyClient + // Store the status after creation for later comparison + afterCreateV4AvailableIPs := afterCreateSubnet.Status.V4AvailableIPs + afterCreateV4UsingIPs := afterCreateSubnet.Status.V4UsingIPs + afterCreateV6AvailableIPs := afterCreateSubnet.Status.V6AvailableIPs + afterCreateV6UsingIPs := afterCreateSubnet.Status.V6UsingIPs + afterCreateV4AvailableIPRange := afterCreateSubnet.Status.V4AvailableIPRange + afterCreateV4UsingIPRange := afterCreateSubnet.Status.V4UsingIPRange + afterCreateV6AvailableIPRange := afterCreateSubnet.Status.V6AvailableIPRange + afterCreateV6UsingIPRange := afterCreateSubnet.Status.V6UsingIPRange + + ginkgo.By("7. Delete the IptablesEIP to trigger IP release") + iptablesEIPClient.DeleteSync(eipName) + + ginkgo.By("8. Wait for IptablesEIP CR to be deleted") + deleted := false + for range 30 { + _, err := f.KubeOVNClientSet.KubeovnV1().IptablesEIPs().Get(context.Background(), eipName, metav1.GetOptions{}) + if err != nil && k8serrors.IsNotFound(err) { + deleted = true + break + } + time.Sleep(1 * time.Second) + } + framework.ExpectTrue(deleted, "IptablesEIP CR should be deleted") - var net1NicName string - var dockerExtNetName string - - // docker network - var dockerExtNetNetwork *dockernetwork.Inspect - - var vpcQosParams *qosParams - var vpc1Pod *corev1.Pod - var vpc2Pod *corev1.Pod - var vpc1EIP *apiv1.IptablesEIP - var vpc2EIP *apiv1.IptablesEIP - var vpc1FIP *apiv1.IptablesFIPRule - var vpc2FIP *apiv1.IptablesFIPRule - - var lanIP string - var overlaySubnetV4Cidr string - var overlaySubnetV4Gw string - var eth0Exist, net1Exist bool - var annotations1 map[string]string - var annotations2 map[string]string - var iperfServerCmd []string + ginkgo.By("9. Wait for external subnet status to be updated after IptablesEIP deletion") + time.Sleep(5 * time.Second) - ginkgo.BeforeEach(func() { - vpcQosParams = newVPCQoSParamsInit() + ginkgo.By("10. 
Verify external subnet status after IptablesEIP CR deletion") + afterDeleteSubnet := subnetClient.Get(externalSubnetName) + verifySubnetStatusAfterEIPOperation(subnetClient, externalSubnetName, + afterDeleteSubnet.Spec.Protocol, "IptablesEIP deletion", "") - dockerExtNetName = "kube-ovn-qos-" + framework.RandomSuffix() + // Verify IP count and range restoration + switch afterDeleteSubnet.Spec.Protocol { + case apiv1.ProtocolIPv4: + // Verify IP count is restored + framework.ExpectEqual(afterCreateV4AvailableIPs+1, afterDeleteSubnet.Status.V4AvailableIPs, + "V4AvailableIPs should increase by 1 after IptablesEIP deletion") + framework.ExpectEqual(afterCreateV4UsingIPs-1, afterDeleteSubnet.Status.V4UsingIPs, + "V4UsingIPs should decrease by 1 after IptablesEIP deletion") + + // Verify IP range changed + framework.ExpectNotEqual(afterCreateV4AvailableIPRange, afterDeleteSubnet.Status.V4AvailableIPRange, + "V4AvailableIPRange should change after IptablesEIP deletion") + framework.ExpectNotEqual(afterCreateV4UsingIPRange, afterDeleteSubnet.Status.V4UsingIPRange, + "V4UsingIPRange should change after IptablesEIP deletion") + + // Verify counts match initial state + framework.ExpectEqual(initialV4AvailableIPs, afterDeleteSubnet.Status.V4AvailableIPs, + "V4AvailableIPs should return to initial value after IptablesEIP deletion") + framework.ExpectEqual(initialV4UsingIPs, afterDeleteSubnet.Status.V4UsingIPs, + "V4UsingIPs should return to initial value after IptablesEIP deletion") + case apiv1.ProtocolIPv6: + // Verify IP count is restored + framework.ExpectEqual(afterCreateV6AvailableIPs+1, afterDeleteSubnet.Status.V6AvailableIPs, + "V6AvailableIPs should increase by 1 after IptablesEIP deletion") + framework.ExpectEqual(afterCreateV6UsingIPs-1, afterDeleteSubnet.Status.V6UsingIPs, + "V6UsingIPs should decrease by 1 after IptablesEIP deletion") + + // Verify IP range changed + framework.ExpectNotEqual(afterCreateV6AvailableIPRange, afterDeleteSubnet.Status.V6AvailableIPRange, + "V6AvailableIPRange should change after IptablesEIP deletion") + framework.ExpectNotEqual(afterCreateV6UsingIPRange, afterDeleteSubnet.Status.V6UsingIPRange, + "V6UsingIPRange should change after IptablesEIP deletion") + + // Verify counts match initial state + framework.ExpectEqual(initialV6AvailableIPs, afterDeleteSubnet.Status.V6AvailableIPs, + "V6AvailableIPs should return to initial value after IptablesEIP deletion") + framework.ExpectEqual(initialV6UsingIPs, afterDeleteSubnet.Status.V6UsingIPs, + "V6UsingIPs should return to initial value after IptablesEIP deletion") + default: + // Dual stack + framework.ExpectEqual(afterCreateV4AvailableIPs+1, afterDeleteSubnet.Status.V4AvailableIPs, + "V4AvailableIPs should increase by 1 after IptablesEIP deletion") + framework.ExpectEqual(afterCreateV4UsingIPs-1, afterDeleteSubnet.Status.V4UsingIPs, + "V4UsingIPs should decrease by 1 after IptablesEIP deletion") + framework.ExpectEqual(afterCreateV6AvailableIPs+1, afterDeleteSubnet.Status.V6AvailableIPs, + "V6AvailableIPs should increase by 1 after IptablesEIP deletion") + framework.ExpectEqual(afterCreateV6UsingIPs-1, afterDeleteSubnet.Status.V6UsingIPs, + "V6UsingIPs should decrease by 1 after IptablesEIP deletion") + + framework.ExpectNotEqual(afterCreateV4AvailableIPRange, afterDeleteSubnet.Status.V4AvailableIPRange, + "V4AvailableIPRange should change after IptablesEIP deletion") + framework.ExpectNotEqual(afterCreateV4UsingIPRange, afterDeleteSubnet.Status.V4UsingIPRange, + "V4UsingIPRange should change after IptablesEIP 
deletion") + framework.ExpectNotEqual(afterCreateV6AvailableIPRange, afterDeleteSubnet.Status.V6AvailableIPRange, + "V6AvailableIPRange should change after IptablesEIP deletion") + framework.ExpectNotEqual(afterCreateV6UsingIPRange, afterDeleteSubnet.Status.V6UsingIPRange, + "V6UsingIPRange should change after IptablesEIP deletion") + + framework.ExpectEqual(initialV4AvailableIPs, afterDeleteSubnet.Status.V4AvailableIPs, + "V4AvailableIPs should return to initial value after IptablesEIP deletion") + framework.ExpectEqual(initialV4UsingIPs, afterDeleteSubnet.Status.V4UsingIPs, + "V4UsingIPs should return to initial value after IptablesEIP deletion") + framework.ExpectEqual(initialV6AvailableIPs, afterDeleteSubnet.Status.V6AvailableIPs, + "V6AvailableIPs should return to initial value after IptablesEIP deletion") + framework.ExpectEqual(initialV6UsingIPs, afterDeleteSubnet.Status.V6UsingIPs, + "V6UsingIPs should return to initial value after IptablesEIP deletion") + } - vpcQosParams.vpc1SubnetName = "qos-vpc1-subnet-" + framework.RandomSuffix() - vpcQosParams.vpc2SubnetName = "qos-vpc2-subnet-" + framework.RandomSuffix() + ginkgo.By("11. Test completed: IptablesEIP CR creation and deletion properly updates external subnet status via finalizer handlers") + }) - vpcQosParams.vpcNat1GwName = "qos-gw1-" + framework.RandomSuffix() - vpcQosParams.vpcNat2GwName = "qos-gw2-" + framework.RandomSuffix() + framework.ConformanceIt("Test IptablesEIP finalizer cannot be removed when used by NAT rules", func() { + f.SkipVersionPriorTo(1, 13, "This feature was introduced in v1.13") - vpcQosParams.vpc1EIPName = "qos-vpc1-eip-" + framework.RandomSuffix() - vpcQosParams.vpc2EIPName = "qos-vpc2-eip-" + framework.RandomSuffix() + overlaySubnetV4Cidr := "10.0.4.0/24" + overlaySubnetV4Gw := "10.0.4.1" + lanIP := "10.0.4.254" + natgwQoS := "" + setupVpcNatGwTestEnvironment( + f, dockerExtNet1Network, attachNetClient, + subnetClient, vpcClient, vpcNatGwClient, + vpcName, overlaySubnetName, vpcNatGwName, natgwQoS, + overlaySubnetV4Cidr, overlaySubnetV4Gw, lanIP, + dockerExtNet1Name, networkAttachDefName, net1NicName, + externalSubnetProvider, + false, + ) - vpcQosParams.vpc1FIPName = "qos-vpc1-fip-" + framework.RandomSuffix() - vpcQosParams.vpc2FIPName = "qos-vpc2-fip-" + framework.RandomSuffix() + ginkgo.By("1. Create a VIP for FIP") + vipName := "test-vip-" + framework.RandomSuffix() + vip := framework.MakeVip(f.Namespace.Name, vipName, overlaySubnetName, "", "", "") + _ = vipClient.CreateSync(vip) + vip = vipClient.Get(vipName) - vpcQosParams.vpc1PodName = "qos-vpc1-pod-" + framework.RandomSuffix() - vpcQosParams.vpc2PodName = "qos-vpc2-pod-" + framework.RandomSuffix() + ginkgo.By("2. Create IptablesEIP") + eipName := "test-eip-with-fip-" + framework.RandomSuffix() + eip := framework.MakeIptablesEIP(eipName, "", "", "", vpcNatGwName, "", "") + _ = iptablesEIPClient.CreateSync(eip) - vpcQosParams.attachDefName = "qos-ovn-vpc-external-network-" + framework.RandomSuffix() - vpcQosParams.subnetProvider = fmt.Sprintf("%s.%s", vpcQosParams.attachDefName, framework.KubeOvnNamespace) + ginkgo.By("3. 
Wait for IptablesEIP to be ready") + eipCR := waitForIptablesEIPReady(iptablesEIPClient, eipName, 60*time.Second) + framework.ExpectNotNil(eipCR, "IptablesEIP CR should be created and ready") - cs = f.ClientSet - podClient = f.PodClient() - attachNetClient = f.NetworkAttachmentDefinitionClientNS(framework.KubeOvnNamespace) - subnetClient = f.SubnetClient() - vpcClient = f.VpcClient() - vpcNatGwClient = f.VpcNatGatewayClient() - iptablesEIPClient = f.IptablesEIPClient() - ipClient = f.IPClient() - iptablesFIPClient = f.IptablesFIPClient() - qosPolicyClient = f.QoSPolicyClient() + ginkgo.By("4. Create IptablesFIP using the EIP") + fipName := "test-fip-" + framework.RandomSuffix() + fip := framework.MakeIptablesFIPRule(fipName, eipName, vip.Status.V4ip) + _ = iptablesFIPClient.CreateSync(fip) - if skip { - ginkgo.Skip("underlay spec only runs on kind clusters") + ginkgo.By("5. Wait for EIP status to show it's being used by FIP") + for range 60 { + eipCR = iptablesEIPClient.Get(eipName) + if eipCR != nil && strings.Contains(eipCR.Status.Nat, util.FipUsingEip) { + break + } + time.Sleep(1 * time.Second) } + framework.ExpectTrue(strings.Contains(eipCR.Status.Nat, util.FipUsingEip), + "EIP status.Nat should contain 'fip' when used by FIP rule") + + ginkgo.By("6. Delete the IptablesEIP (should not remove finalizer while FIP exists)") + err := f.KubeOVNClientSet.KubeovnV1().IptablesEIPs().Delete(context.Background(), eipName, metav1.DeleteOptions{}) + framework.ExpectNoError(err, "Deleting IptablesEIP should succeed") + + ginkgo.By("7. Wait and verify EIP still exists with finalizer (blocked by FIP)") + time.Sleep(5 * time.Second) + eipCR = iptablesEIPClient.Get(eipName) + framework.ExpectNotNil(eipCR, "IptablesEIP should still exist") + framework.ExpectNotNil(eipCR.DeletionTimestamp, "IptablesEIP should have DeletionTimestamp") + framework.ExpectContainElement(eipCR.Finalizers, util.KubeOVNControllerFinalizer, + "IptablesEIP should still have finalizer because it's being used by FIP") + + ginkgo.By("8. Delete the FIP to unblock EIP deletion") + iptablesFIPClient.DeleteSync(fipName) - if clusterName == "" { - ginkgo.By("Getting k8s nodes") - k8sNodes, err := e2enode.GetReadySchedulableNodes(context.Background(), cs) - framework.ExpectNoError(err) + ginkgo.By("9. Wait for FIP to be deleted") + fipDeleted := false + for range 30 { + _, err := f.KubeOVNClientSet.KubeovnV1().IptablesFIPRules().Get(context.Background(), fipName, metav1.GetOptions{}) + if err != nil && k8serrors.IsNotFound(err) { + fipDeleted = true + break + } + time.Sleep(1 * time.Second) + } + framework.ExpectTrue(fipDeleted, "FIP should be deleted") + + ginkgo.By("10. Wait for EIP status.Nat to be cleared or EIP to be deleted") + for range 30 { + eipCR, err := f.KubeOVNClientSet.KubeovnV1().IptablesEIPs().Get(context.Background(), eipName, metav1.GetOptions{}) + if err != nil && k8serrors.IsNotFound(err) { + // EIP already deleted, which is expected + break + } + framework.ExpectNoError(err, "Failed to get IptablesEIP") + if eipCR.Status.Nat == "" { + break + } + time.Sleep(1 * time.Second) + } - cluster, ok := kind.IsKindProvided(k8sNodes.Items[0].Spec.ProviderID) - if !ok { - skip = true - ginkgo.Skip("underlay spec only runs on kind clusters") + ginkgo.By("11. 
Verify EIP is now deleted after FIP is removed") + eipDeleted := false + for range 30 { + _, err := f.KubeOVNClientSet.KubeovnV1().IptablesEIPs().Get(context.Background(), eipName, metav1.GetOptions{}) + if err != nil && k8serrors.IsNotFound(err) { + eipDeleted = true + break } - clusterName = cluster + time.Sleep(1 * time.Second) } + framework.ExpectTrue(eipDeleted, "IptablesEIP should be deleted after FIP is removed") - ginkgo.By("Ensuring docker network " + dockerExtNetName + " exists") - network, err := docker.NetworkCreate(dockerExtNetName, true, true) - framework.ExpectNoError(err, "creating docker network "+dockerExtNetName) - dockerExtNetNetwork = network + ginkgo.By("12. Clean up VIP") + vipClient.DeleteSync(vipName) - ginkgo.By("Getting kind nodes") - nodes, err := kind.ListNodes(clusterName, "") - framework.ExpectNoError(err, "getting nodes in kind cluster") - framework.ExpectNotEmpty(nodes) - - ginkgo.By("Connecting nodes to the docker network") - err = kind.NetworkConnect(dockerExtNetNetwork.ID, nodes) - framework.ExpectNoError(err, "connecting nodes to network "+dockerExtNetName) + ginkgo.By("13. Test completed: IptablesEIP finalizer correctly blocks deletion when used by NAT rules") + }) - ginkgo.By("Getting node links that belong to the docker network") - nodes, err = kind.ListNodes(clusterName, "") - framework.ExpectNoError(err, "getting nodes in kind cluster") + framework.ConformanceIt("Test VPC NAT Gateway with no IPAM NAD and noDefaultEIP", func() { + f.SkipVersionPriorTo(1, 13, "This feature was introduced in v1.13") - ginkgo.By("Validating node links") - network1, err := docker.NetworkInspect(dockerExtNetName) - framework.ExpectNoError(err) - for _, node := range nodes { - links, err := node.ListLinks() - framework.ExpectNoError(err, "failed to list links on node %s: %v", node.Name(), err) - net1Mac := network1.Containers[node.ID].MacAddress - for _, link := range links { - ginkgo.By("exist node nic " + link.IfName) - if link.IfName == "eth0" { - eth0Exist = true + overlaySubnetV4Cidr := "10.0.5.0/24" + overlaySubnetV4Gw := "10.0.5.1" + lanIP := "10.0.5.254" + natgwQoS := "" + noIPAMNadName := "no-ipam-nad-" + framework.RandomSuffix() + noIPAMProvider := fmt.Sprintf("%s.%s", noIPAMNadName, framework.KubeOvnNamespace) + + ginkgo.By("1. 
Setting up NAD without IPAM and creating subnet using standard flow") + // Create NAD without IPAM section + ginkgo.By("Getting docker network " + dockerExtNet1Name) + network, err := docker.NetworkInspect(dockerExtNet1Name) + framework.ExpectNoError(err, "getting docker network "+dockerExtNet1Name) + + ginkgo.By("Creating network attachment definition without IPAM " + noIPAMNadName) + // NAD config without ipam - this is the key difference + attachConf := fmt.Sprintf(`{ + "cniVersion": "0.3.0", + "type": "macvlan", + "master": "%s", + "mode": "bridge" + }`, net1NicName) + + attachNet := framework.MakeNetworkAttachmentDefinition(noIPAMNadName, framework.KubeOvnNamespace, attachConf) + nad := attachNetClient.Create(attachNet) + ginkgo.By("Got network attachment definition " + nad.Name) + + ginkgo.By("Creating underlay macvlan subnet " + noIPAMNadName) + var cidrV4, cidrV6, gatewayV4, gatewayV6 string + for _, config := range dockerExtNet1Network.IPAM.Config { + switch util.CheckProtocol(config.Subnet.Addr().String()) { + case apiv1.ProtocolIPv4: + if f.HasIPv4() { + cidrV4 = config.Subnet.String() + gatewayV4 = config.Gateway.String() } - if link.Address == net1Mac.String() { - net1NicName = link.IfName - net1Exist = true + case apiv1.ProtocolIPv6: + if f.HasIPv6() { + cidrV6 = config.Subnet.String() + gatewayV6 = config.Gateway.String() } } - framework.ExpectTrue(eth0Exist) - framework.ExpectTrue(net1Exist) } - setupNetworkAttachmentDefinition( - f, dockerExtNetNetwork, attachNetClient, - subnetClient, vpcQosParams.attachDefName, net1NicName, vpcQosParams.subnetProvider, dockerExtNetName) - }) - - ginkgo.AfterEach(func() { - ginkgo.By("Deleting macvlan underlay subnet " + vpcQosParams.attachDefName) - subnetClient.DeleteSync(vpcQosParams.attachDefName) - - // delete net1 attachment definition - ginkgo.By("Deleting nad " + vpcQosParams.attachDefName) - attachNetClient.Delete(vpcQosParams.attachDefName) - - ginkgo.By("Getting nodes") - nodes, err := kind.ListNodes(clusterName, "") - framework.ExpectNoError(err, "getting nodes in cluster") - - if dockerExtNetNetwork != nil { - ginkgo.By("Disconnecting nodes from the docker network") - err = kind.NetworkDisconnect(dockerExtNetNetwork.ID, nodes) - framework.ExpectNoError(err, "disconnecting nodes from network "+dockerExtNetName) - ginkgo.By("Deleting docker network " + dockerExtNetName + " exists") - err := docker.NetworkRemove(dockerExtNetNetwork.ID) - framework.ExpectNoError(err, "deleting docker network "+dockerExtNetName) + cidr := make([]string, 0, 2) + gateway := make([]string, 0, 2) + if f.HasIPv4() { + cidr = append(cidr, cidrV4) + gateway = append(gateway, gatewayV4) } - }) - - _ = framework.Describe("vpc qos", func() { - ginkgo.BeforeEach(func() { - iperfServerCmd = []string{"iperf", "-s", "-i", "1", "-p", iperf2Port} - overlaySubnetV4Cidr = "10.0.0.0/24" - overlaySubnetV4Gw = "10.0.0.1" - lanIP = "10.0.0.254" - natgwQoS := "" - setupVpcNatGwTestEnvironment( - f, dockerExtNetNetwork, attachNetClient, - subnetClient, vpcClient, vpcNatGwClient, - vpcQosParams.vpc1Name, vpcQosParams.vpc1SubnetName, vpcQosParams.vpcNat1GwName, - natgwQoS, overlaySubnetV4Cidr, overlaySubnetV4Gw, lanIP, - dockerExtNetName, vpcQosParams.attachDefName, net1NicName, - vpcQosParams.subnetProvider, - true, - ) - annotations1 = map[string]string{ - util.LogicalSwitchAnnotation: vpcQosParams.vpc1SubnetName, + if f.HasIPv6() { + cidr = append(cidr, cidrV6) + gateway = append(gateway, gatewayV6) + } + excludeIPs := make([]string, 0, len(network.Containers)*2) + for 
_, container := range network.Containers { + if container.IPv4Address.IsValid() && f.HasIPv4() { + excludeIPs = append(excludeIPs, container.IPv4Address.Addr().String()) + } + if container.IPv6Address.IsValid() && f.HasIPv6() { + excludeIPs = append(excludeIPs, container.IPv6Address.Addr().String()) } - ginkgo.By("Creating pod " + vpcQosParams.vpc1PodName) - vpc1Pod = framework.MakePod(f.Namespace.Name, vpcQosParams.vpc1PodName, nil, annotations1, framework.AgnhostImage, iperfServerCmd, nil) - vpc1Pod = podClient.CreateSync(vpc1Pod) - - ginkgo.By("Creating eip " + vpcQosParams.vpc1EIPName) - vpc1EIP = framework.MakeIptablesEIP(vpcQosParams.vpc1EIPName, "", "", "", vpcQosParams.vpcNat1GwName, vpcQosParams.attachDefName, "") - vpc1EIP = iptablesEIPClient.CreateSync(vpc1EIP) - - ginkgo.By("Creating fip " + vpcQosParams.vpc1FIPName) - vpc1FIP = framework.MakeIptablesFIPRule(vpcQosParams.vpc1FIPName, vpcQosParams.vpc1EIPName, vpc1Pod.Status.PodIP) - _ = iptablesFIPClient.CreateSync(vpc1FIP) - - setupVpcNatGwTestEnvironment( - f, dockerExtNetNetwork, attachNetClient, - subnetClient, vpcClient, vpcNatGwClient, - vpcQosParams.vpc2Name, vpcQosParams.vpc2SubnetName, vpcQosParams.vpcNat2GwName, - natgwQoS, overlaySubnetV4Cidr, overlaySubnetV4Gw, lanIP, - dockerExtNetName, vpcQosParams.attachDefName, net1NicName, - vpcQosParams.subnetProvider, - true, - ) - - annotations2 = map[string]string{ - util.LogicalSwitchAnnotation: vpcQosParams.vpc2SubnetName, + } + macvlanSubnet := framework.MakeSubnet(noIPAMNadName, "", strings.Join(cidr, ","), strings.Join(gateway, ","), "", noIPAMProvider, excludeIPs, nil, nil) + _ = subnetClient.CreateSync(macvlanSubnet) + + ginkgo.By("2. Creating custom vpc " + vpcName) + vpc := framework.MakeVpc(vpcName, lanIP, false, false, nil) + _ = vpcClient.CreateSync(vpc) + + ginkgo.By("3. Creating custom overlay subnet " + overlaySubnetName) + overlaySubnet := framework.MakeSubnet(overlaySubnetName, "", overlaySubnetV4Cidr, overlaySubnetV4Gw, vpcName, "", nil, nil, nil) + _ = subnetClient.CreateSync(overlaySubnet) + + ginkgo.By("4. Creating custom vpc nat gw with noDefaultEIP=true " + vpcNatGwName) + vpcNatGw := framework.MakeVpcNatGatewayWithNoDefaultEIP(vpcNatGwName, vpcName, overlaySubnetName, lanIP, noIPAMNadName, natgwQoS, true) + _ = vpcNatGwClient.CreateSync(vpcNatGw, f.ClientSet) + + ginkgo.By("5. Verifying VPC NAT Gateway is created") + createdGw := vpcNatGwClient.Get(vpcNatGwName) + framework.ExpectNotNil(createdGw, "VPC NAT Gateway should be created") + framework.ExpectTrue(createdGw.Spec.NoDefaultEIP, "noDefaultEIP should be true") + + ginkgo.By("6. Verifying no default EIP is created") + time.Sleep(10 * time.Second) + eips, err := f.KubeOVNClientSet.KubeovnV1().IptablesEIPs().List(context.Background(), metav1.ListOptions{}) + framework.ExpectNoError(err, "Failed to list IptablesEIPs") + hasDefaultEIP := false + for _, eip := range eips.Items { + if eip.Spec.NatGwDp == vpcNatGwName { + hasDefaultEIP = true + break } + } + framework.ExpectFalse(hasDefaultEIP, "No default EIP should be created when noDefaultEIP is true") + + ginkgo.By("7. Testing manual EIP creation") + eipName := "manual-eip-" + framework.RandomSuffix() + eip := framework.MakeIptablesEIP(eipName, "", "", "", vpcNatGwName, noIPAMNadName, "") + _ = iptablesEIPClient.CreateSync(eip) + + ginkgo.By("8. 
Verifying manually created EIP") + eipCR := waitForIptablesEIPReady(iptablesEIPClient, eipName, 60*time.Second) + framework.ExpectNotNil(eipCR, "Manual EIP should be created successfully") + framework.ExpectNotEmpty(eipCR.Status.IP, "Manual EIP should have IP assigned") + + ginkgo.By("9. Testing VIP and FIP with manual EIP") + vipName := "test-vip-no-ipam-" + framework.RandomSuffix() + vip := framework.MakeVip(f.Namespace.Name, vipName, overlaySubnetName, "", "", "") + _ = vipClient.CreateSync(vip) + vip = vipClient.Get(vipName) + + fipName := "test-fip-no-ipam-" + framework.RandomSuffix() + fip := framework.MakeIptablesFIPRule(fipName, eipName, vip.Status.V4ip) + _ = iptablesFIPClient.CreateSync(fip) + + ginkgo.By("10. Verifying FIP is created successfully") + createdFip := iptablesFIPClient.Get(fipName) + framework.ExpectNotNil(createdFip, "FIP should be created successfully") + framework.ExpectTrue(createdFip.Status.Ready, "FIP should be ready") + + ginkgo.By("11. Cleaning up resources") + iptablesFIPClient.DeleteSync(fipName) + vipClient.DeleteSync(vipName) + iptablesEIPClient.DeleteSync(eipName) + + vpcNatGwClient.DeleteSync(vpcNatGwName) + subnetClient.DeleteSync(overlaySubnetName) + subnetClient.DeleteSync(noIPAMNadName) + vpcClient.DeleteSync(vpcName) + attachNetClient.Delete(noIPAMNadName) - ginkgo.By("Creating pod " + vpcQosParams.vpc2PodName) - vpc2Pod = framework.MakePod(f.Namespace.Name, vpcQosParams.vpc2PodName, nil, annotations2, framework.AgnhostImage, iperfServerCmd, nil) - vpc2Pod = podClient.CreateSync(vpc2Pod) - - ginkgo.By("Creating eip " + vpcQosParams.vpc2EIPName) - vpc2EIP = framework.MakeIptablesEIP(vpcQosParams.vpc2EIPName, "", "", "", vpcQosParams.vpcNat2GwName, vpcQosParams.attachDefName, "") - vpc2EIP = iptablesEIPClient.CreateSync(vpc2EIP) - - ginkgo.By("Creating fip " + vpcQosParams.vpc2FIPName) - vpc2FIP = framework.MakeIptablesFIPRule(vpcQosParams.vpc2FIPName, vpcQosParams.vpc2EIPName, vpc2Pod.Status.PodIP) - _ = iptablesFIPClient.CreateSync(vpc2FIP) - }) - ginkgo.AfterEach(func() { - ginkgo.By("Deleting fip " + vpcQosParams.vpc1FIPName) - iptablesFIPClient.DeleteSync(vpcQosParams.vpc1FIPName) - - ginkgo.By("Deleting fip " + vpcQosParams.vpc2FIPName) - iptablesFIPClient.DeleteSync(vpcQosParams.vpc2FIPName) - - ginkgo.By("Deleting eip " + vpcQosParams.vpc1EIPName) - iptablesEIPClient.DeleteSync(vpcQosParams.vpc1EIPName) - - ginkgo.By("Deleting eip " + vpcQosParams.vpc2EIPName) - iptablesEIPClient.DeleteSync(vpcQosParams.vpc2EIPName) - - ginkgo.By("Deleting pod " + vpcQosParams.vpc1PodName) - podClient.DeleteSync(vpcQosParams.vpc1PodName) - - ginkgo.By("Deleting pod " + vpcQosParams.vpc2PodName) - podClient.DeleteSync(vpcQosParams.vpc2PodName) - - ginkgo.By("Deleting custom vpc nat gw " + vpcQosParams.vpcNat1GwName) - vpcNatGwClient.DeleteSync(vpcQosParams.vpcNat1GwName) - - ginkgo.By("Deleting custom vpc nat gw " + vpcQosParams.vpcNat2GwName) - vpcNatGwClient.DeleteSync(vpcQosParams.vpcNat2GwName) - - // the only pod for vpc nat gateway - vpcNatGw1PodName := util.GenNatGwPodName(vpcQosParams.vpcNat1GwName) - - // delete vpc nat gw statefulset remaining ip for eth0 and net2 - overlaySubnet1 := subnetClient.Get(vpcQosParams.vpc1SubnetName) - macvlanSubnet := subnetClient.Get(vpcQosParams.attachDefName) - eth0IpName := ovs.PodNameToPortName(vpcNatGw1PodName, framework.KubeOvnNamespace, overlaySubnet1.Spec.Provider) - net1IpName := ovs.PodNameToPortName(vpcNatGw1PodName, framework.KubeOvnNamespace, macvlanSubnet.Spec.Provider) - ginkgo.By("Deleting vpc nat gw 
eth0 ip " + eth0IpName) - ipClient.DeleteSync(eth0IpName) - ginkgo.By("Deleting vpc nat gw net1 ip " + net1IpName) - ipClient.DeleteSync(net1IpName) - ginkgo.By("Deleting overlay subnet " + vpcQosParams.vpc1SubnetName) - subnetClient.DeleteSync(vpcQosParams.vpc1SubnetName) - - ginkgo.By("Getting overlay subnet " + vpcQosParams.vpc2SubnetName) - overlaySubnet2 := subnetClient.Get(vpcQosParams.vpc2SubnetName) - - vpcNatGw2PodName := util.GenNatGwPodName(vpcQosParams.vpcNat2GwName) - eth0IpName = ovs.PodNameToPortName(vpcNatGw2PodName, framework.KubeOvnNamespace, overlaySubnet2.Spec.Provider) - net1IpName = ovs.PodNameToPortName(vpcNatGw2PodName, framework.KubeOvnNamespace, macvlanSubnet.Spec.Provider) - ginkgo.By("Deleting vpc nat gw eth0 ip " + eth0IpName) - ipClient.DeleteSync(eth0IpName) - ginkgo.By("Deleting vpc nat gw net1 ip " + net1IpName) - ipClient.DeleteSync(net1IpName) - ginkgo.By("Deleting overlay subnet " + vpcQosParams.vpc2SubnetName) - subnetClient.DeleteSync(vpcQosParams.vpc2SubnetName) - - ginkgo.By("Deleting custom vpc " + vpcQosParams.vpc1Name) - vpcClient.DeleteSync(vpcQosParams.vpc1Name) - - ginkgo.By("Deleting custom vpc " + vpcQosParams.vpc2Name) - vpcClient.DeleteSync(vpcQosParams.vpc2Name) - }) - framework.ConformanceIt("default nic qos", func() { - // case 1: set qos policy for natgw - // case 2: rebuild qos when natgw pod restart - defaultQoSCases(f, vpcNatGwClient, podClient, qosPolicyClient, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, vpcQosParams.vpcNat1GwName) - }) - framework.ConformanceIt("eip qos", func() { - // case 1: set qos policy for eip - // case 2: update qos policy for eip - // case 3: change qos policy of eip - // case 4: rebuild qos when natgw pod restart - eipQoSCases(f, iptablesEIPClient, podClient, qosPolicyClient, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, vpcQosParams.vpc1EIPName, vpcQosParams.vpcNat1GwName) - }) - framework.ConformanceIt("specifying ip qos", func() { - // case 1: set specific ip qos policy for natgw - specifyingIPQoSCases(f, vpcNatGwClient, qosPolicyClient, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, vpcQosParams.vpcNat1GwName) - }) - framework.ConformanceIt("qos priority matching", func() { - // case 1: test qos match priority - // case 2: change qos policy of natgw - priorityQoSCases(f, vpcNatGwClient, iptablesEIPClient, qosPolicyClient, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, vpcQosParams.vpcNat1GwName, vpcQosParams.vpc1EIPName) - }) - framework.ConformanceIt("create resource with qos policy", func() { - // case 1: test qos when create natgw with qos policy - // case 2: test qos when create eip with qos policy - createNatGwAndSetQosCases(f, - vpcNatGwClient, ipClient, iptablesEIPClient, iptablesFIPClient, - subnetClient, qosPolicyClient, vpc1Pod, vpc2Pod, vpc2EIP, vpcQosParams.vpcNat1GwName, - vpcQosParams.vpc1EIPName, vpcQosParams.vpc1FIPName, vpcQosParams.vpc1Name, - vpcQosParams.vpc1SubnetName, lanIP, vpcQosParams.attachDefName) - }) + ginkgo.By("12. Test completed: VPC NAT Gateway with no IPAM NAD and noDefaultEIP works correctly") }) }) diff --git a/test/e2e/vip/e2e_test.go b/test/e2e/vip/e2e_test.go index 580cd78e765..a0ec0b1ed53 100644 --- a/test/e2e/vip/e2e_test.go +++ b/test/e2e/vip/e2e_test.go @@ -199,13 +199,198 @@ var _ = framework.Describe("[group:vip]", func() { securityGroupClient.DeleteSync(securityGroupName) }) + framework.ConformanceIt("Test vip subnet status update with finalizer", func() { + f.SkipVersionPriorTo(1, 13, "This feature was introduced in v1.13") + + ginkgo.By("1. 
Get initial subnet status")
+		initialSubnet := subnetClient.Get(subnetName)
+		initialV4AvailableIPs := initialSubnet.Status.V4AvailableIPs
+		initialV4UsingIPs := initialSubnet.Status.V4UsingIPs
+		initialV6AvailableIPs := initialSubnet.Status.V6AvailableIPs
+		initialV6UsingIPs := initialSubnet.Status.V6UsingIPs
+		initialV4AvailableIPRange := initialSubnet.Status.V4AvailableIPRange
+		initialV4UsingIPRange := initialSubnet.Status.V4UsingIPRange
+		initialV6AvailableIPRange := initialSubnet.Status.V6AvailableIPRange
+		initialV6UsingIPRange := initialSubnet.Status.V6UsingIPRange
+
+		ginkgo.By("2. Create a VIP and verify finalizer is added")
+		testVipName := "test-vip-finalizer-" + framework.RandomSuffix()
+		testVip := makeOvnVip(namespaceName, testVipName, subnetName, "", "", "")
+		testVip = vipClient.CreateSync(testVip)
+
+		// Verify VIP has finalizer
+		framework.ExpectContainElement(testVip.Finalizers, util.KubeOVNControllerFinalizer)
+
+		ginkgo.By("3. Wait for subnet status to be updated after VIP creation")
+		time.Sleep(5 * time.Second)
+
+		ginkgo.By("4. Verify subnet status after VIP creation")
+		afterCreateSubnet := subnetClient.Get(subnetName)
+		switch afterCreateSubnet.Spec.Protocol {
+		case apiv1.ProtocolIPv4:
+			// Verify IP count changed
+			framework.ExpectEqual(initialV4AvailableIPs-1, afterCreateSubnet.Status.V4AvailableIPs,
+				"V4AvailableIPs should decrease by 1 after VIP creation")
+			framework.ExpectEqual(initialV4UsingIPs+1, afterCreateSubnet.Status.V4UsingIPs,
+				"V4UsingIPs should increase by 1 after VIP creation")
+
+			// Verify IP range changed
+			framework.ExpectNotEqual(initialV4AvailableIPRange, afterCreateSubnet.Status.V4AvailableIPRange,
+				"V4AvailableIPRange should change after VIP creation")
+			framework.ExpectNotEqual(initialV4UsingIPRange, afterCreateSubnet.Status.V4UsingIPRange,
+				"V4UsingIPRange should change after VIP creation")
+
+			// Verify the VIP's IP is in the using range
+			vipIP := testVip.Status.V4ip
+			framework.ExpectTrue(strings.Contains(afterCreateSubnet.Status.V4UsingIPRange, vipIP),
+				"VIP IP %s should be in V4UsingIPRange %s", vipIP, afterCreateSubnet.Status.V4UsingIPRange)
+		case apiv1.ProtocolIPv6:
+			// Verify IP count changed
+			framework.ExpectEqual(initialV6AvailableIPs-1, afterCreateSubnet.Status.V6AvailableIPs,
+				"V6AvailableIPs should decrease by 1 after VIP creation")
+			framework.ExpectEqual(initialV6UsingIPs+1, afterCreateSubnet.Status.V6UsingIPs,
+				"V6UsingIPs should increase by 1 after VIP creation")
+
+			// Verify IP range changed
+			framework.ExpectNotEqual(initialV6AvailableIPRange, afterCreateSubnet.Status.V6AvailableIPRange,
+				"V6AvailableIPRange should change after VIP creation")
+			framework.ExpectNotEqual(initialV6UsingIPRange, afterCreateSubnet.Status.V6UsingIPRange,
+				"V6UsingIPRange should change after VIP creation")
+
+			// Verify the VIP's IP is in the using range
+			vipIP := testVip.Status.V6ip
+			framework.ExpectTrue(strings.Contains(afterCreateSubnet.Status.V6UsingIPRange, vipIP),
+				"VIP IP %s should be in V6UsingIPRange %s", vipIP, afterCreateSubnet.Status.V6UsingIPRange)
+		default:
+			// Dual stack
+			framework.ExpectEqual(initialV4AvailableIPs-1, afterCreateSubnet.Status.V4AvailableIPs,
+				"V4AvailableIPs should decrease by 1 after VIP creation")
+			framework.ExpectEqual(initialV4UsingIPs+1, afterCreateSubnet.Status.V4UsingIPs,
+				"V4UsingIPs should increase by 1 after VIP creation")
+			framework.ExpectEqual(initialV6AvailableIPs-1, afterCreateSubnet.Status.V6AvailableIPs,
+				"V6AvailableIPs should decrease by 1 after VIP creation")
+			
framework.ExpectEqual(initialV6UsingIPs+1, afterCreateSubnet.Status.V6UsingIPs, + "V6UsingIPs should increase by 1 after VIP creation") + + framework.ExpectNotEqual(initialV4AvailableIPRange, afterCreateSubnet.Status.V4AvailableIPRange, + "V4AvailableIPRange should change after VIP creation") + framework.ExpectNotEqual(initialV4UsingIPRange, afterCreateSubnet.Status.V4UsingIPRange, + "V4UsingIPRange should change after VIP creation") + framework.ExpectNotEqual(initialV6AvailableIPRange, afterCreateSubnet.Status.V6AvailableIPRange, + "V6AvailableIPRange should change after VIP creation") + framework.ExpectNotEqual(initialV6UsingIPRange, afterCreateSubnet.Status.V6UsingIPRange, + "V6UsingIPRange should change after VIP creation") + } + + // Store the status after creation for later comparison + afterCreateV4AvailableIPs := afterCreateSubnet.Status.V4AvailableIPs + afterCreateV4UsingIPs := afterCreateSubnet.Status.V4UsingIPs + afterCreateV6AvailableIPs := afterCreateSubnet.Status.V6AvailableIPs + afterCreateV6UsingIPs := afterCreateSubnet.Status.V6UsingIPs + afterCreateV4AvailableIPRange := afterCreateSubnet.Status.V4AvailableIPRange + afterCreateV4UsingIPRange := afterCreateSubnet.Status.V4UsingIPRange + afterCreateV6AvailableIPRange := afterCreateSubnet.Status.V6AvailableIPRange + afterCreateV6UsingIPRange := afterCreateSubnet.Status.V6UsingIPRange + + ginkgo.By("5. Delete the VIP") + vipClient.DeleteSync(testVipName) + + ginkgo.By("6. Wait for subnet status to be updated after VIP deletion") + time.Sleep(5 * time.Second) + + ginkgo.By("7. Verify subnet status after VIP deletion") + afterDeleteSubnet := subnetClient.Get(subnetName) + switch afterDeleteSubnet.Spec.Protocol { + case apiv1.ProtocolIPv4: + // Verify IP count is restored + framework.ExpectEqual(afterCreateV4AvailableIPs+1, afterDeleteSubnet.Status.V4AvailableIPs, + "V4AvailableIPs should increase by 1 after VIP deletion") + framework.ExpectEqual(afterCreateV4UsingIPs-1, afterDeleteSubnet.Status.V4UsingIPs, + "V4UsingIPs should decrease by 1 after VIP deletion") + + // Verify IP range changed + framework.ExpectNotEqual(afterCreateV4AvailableIPRange, afterDeleteSubnet.Status.V4AvailableIPRange, + "V4AvailableIPRange should change after VIP deletion") + framework.ExpectNotEqual(afterCreateV4UsingIPRange, afterDeleteSubnet.Status.V4UsingIPRange, + "V4UsingIPRange should change after VIP deletion") + + // Verify counts match initial state + framework.ExpectEqual(initialV4AvailableIPs, afterDeleteSubnet.Status.V4AvailableIPs, + "V4AvailableIPs should return to initial value after VIP deletion") + framework.ExpectEqual(initialV4UsingIPs, afterDeleteSubnet.Status.V4UsingIPs, + "V4UsingIPs should return to initial value after VIP deletion") + case apiv1.ProtocolIPv6: + // Verify IP count is restored + framework.ExpectEqual(afterCreateV6AvailableIPs+1, afterDeleteSubnet.Status.V6AvailableIPs, + "V6AvailableIPs should increase by 1 after VIP deletion") + framework.ExpectEqual(afterCreateV6UsingIPs-1, afterDeleteSubnet.Status.V6UsingIPs, + "V6UsingIPs should decrease by 1 after VIP deletion") + + // Verify IP range changed + framework.ExpectNotEqual(afterCreateV6AvailableIPRange, afterDeleteSubnet.Status.V6AvailableIPRange, + "V6AvailableIPRange should change after VIP deletion") + framework.ExpectNotEqual(afterCreateV6UsingIPRange, afterDeleteSubnet.Status.V6UsingIPRange, + "V6UsingIPRange should change after VIP deletion") + + // Verify counts match initial state + framework.ExpectEqual(initialV6AvailableIPs, 
afterDeleteSubnet.Status.V6AvailableIPs,
+				"V6AvailableIPs should return to initial value after VIP deletion")
+			framework.ExpectEqual(initialV6UsingIPs, afterDeleteSubnet.Status.V6UsingIPs,
+				"V6UsingIPs should return to initial value after VIP deletion")
+		default:
+			// Dual stack
+			framework.ExpectEqual(afterCreateV4AvailableIPs+1, afterDeleteSubnet.Status.V4AvailableIPs,
+				"V4AvailableIPs should increase by 1 after VIP deletion")
+			framework.ExpectEqual(afterCreateV4UsingIPs-1, afterDeleteSubnet.Status.V4UsingIPs,
+				"V4UsingIPs should decrease by 1 after VIP deletion")
+			framework.ExpectEqual(afterCreateV6AvailableIPs+1, afterDeleteSubnet.Status.V6AvailableIPs,
+				"V6AvailableIPs should increase by 1 after VIP deletion")
+			framework.ExpectEqual(afterCreateV6UsingIPs-1, afterDeleteSubnet.Status.V6UsingIPs,
+				"V6UsingIPs should decrease by 1 after VIP deletion")
+
+			framework.ExpectNotEqual(afterCreateV4AvailableIPRange, afterDeleteSubnet.Status.V4AvailableIPRange,
+				"V4AvailableIPRange should change after VIP deletion")
+			framework.ExpectNotEqual(afterCreateV4UsingIPRange, afterDeleteSubnet.Status.V4UsingIPRange,
+				"V4UsingIPRange should change after VIP deletion")
+			framework.ExpectNotEqual(afterCreateV6AvailableIPRange, afterDeleteSubnet.Status.V6AvailableIPRange,
+				"V6AvailableIPRange should change after VIP deletion")
+			framework.ExpectNotEqual(afterCreateV6UsingIPRange, afterDeleteSubnet.Status.V6UsingIPRange,
+				"V6UsingIPRange should change after VIP deletion")
+
+			framework.ExpectEqual(initialV4AvailableIPs, afterDeleteSubnet.Status.V4AvailableIPs,
+				"V4AvailableIPs should return to initial value after VIP deletion")
+			framework.ExpectEqual(initialV4UsingIPs, afterDeleteSubnet.Status.V4UsingIPs,
+				"V4UsingIPs should return to initial value after VIP deletion")
+			framework.ExpectEqual(initialV6AvailableIPs, afterDeleteSubnet.Status.V6AvailableIPs,
+				"V6AvailableIPs should return to initial value after VIP deletion")
+			framework.ExpectEqual(initialV6UsingIPs, afterDeleteSubnet.Status.V6UsingIPs,
+				"V6UsingIPs should return to initial value after VIP deletion")
+		}
+
+		ginkgo.By("8. Test completed: VIP creation and deletion properly updates subnet status via finalizer handlers")
+	})
+
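Aside on the waits used in the spec above: it settles subnet status with fixed time.Sleep(5 * time.Second) calls. If those ever prove flaky, a bounded poll is less timing-sensitive, since it returns as soon as the controller has caught up and fails loudly on timeout. The sketch below is only illustrative: it assumes apimachinery's wait package, and getUsingIPs is a hypothetical stand-in for subnetClient.Get(subnetName).Status.V4UsingIPs.

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// waitUsingIPs polls until the subnet's using-IP counter reaches want instead
// of sleeping a fixed interval and hoping the controller has finished.
func waitUsingIPs(ctx context.Context, getUsingIPs func() float64, want float64) error {
	return wait.PollUntilContextTimeout(ctx, time.Second, 30*time.Second, true,
		func(context.Context) (bool, error) {
			return getUsingIPs() == want, nil
		})
}

func main() {
	// Toy stand-in for subnetClient.Get(subnetName).Status.V4UsingIPs:
	// the counter converges after three polls.
	polls := 0
	get := func() float64 {
		if polls < 3 {
			polls++
		}
		return float64(polls)
	}
	fmt.Println(waitUsingIPs(context.Background(), get, 3)) // <nil>
}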
 	framework.ConformanceIt("Test vip", func() {
 		f.SkipVersionPriorTo(1, 13, "This feature was introduced in v1.13")
 
 		ginkgo.By("0. Test subnet status counting vip")
 		oldSubnet := subnetClient.Get(subnetName)
 		countingVip := makeOvnVip(namespaceName, countingVipName, subnetName, "", "", "")
-		_ = vipClient.CreateSync(countingVip)
-		time.Sleep(3 * time.Second)
+		countingVip = vipClient.CreateSync(countingVip)
+
+		// Wait for finalizer to be added
+		ginkgo.By("Waiting for VIP finalizer to be added")
+		for range 10 {
+			countingVip = vipClient.Get(countingVipName)
+			if len(countingVip.Finalizers) > 0 {
+				break
+			}
+			time.Sleep(1 * time.Second)
+		}
+		framework.ExpectContainElement(countingVip.Finalizers, util.KubeOVNControllerFinalizer)
+
+		// Wait for subnet status to be updated
+		ginkgo.By("Waiting for subnet status to be updated after VIP creation")
+		time.Sleep(5 * time.Second)
 		newSubnet := subnetClient.Get(subnetName)
 		if newSubnet.Spec.Protocol == apiv1.ProtocolIPv4 {
 			framework.ExpectEqual(oldSubnet.Status.V4AvailableIPs-1, newSubnet.Status.V4AvailableIPs)
@@ -220,8 +405,9 @@ var _ = framework.Describe("[group:vip]", func() {
 		}
 		oldSubnet = newSubnet
 		// delete counting vip
+		ginkgo.By("Deleting counting VIP and waiting for subnet status update")
 		vipClient.DeleteSync(countingVipName)
-		time.Sleep(3 * time.Second)
+		time.Sleep(5 * time.Second)
 		newSubnet = subnetClient.Get(subnetName)
 		if newSubnet.Spec.Protocol == apiv1.ProtocolIPv4 {
 			framework.ExpectEqual(oldSubnet.Status.V4AvailableIPs+1, newSubnet.Status.V4AvailableIPs)
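One mechanism in the first patch worth spelling out: the "finalizer cannot be removed when used by NAT rules" spec only passes if the controller refuses to drop the IptablesEIP finalizer while Status.Nat still names a NAT rule. That handler is not shown in this series, so the following is a self-contained sketch of the guard's shape under assumed names; the struct, helper, and finalizer value are illustrative stand-ins, not the controller's actual code.

package main

import (
	"errors"
	"fmt"
)

// iptablesEIP mirrors only the fields the guard needs from kubeovnv1.IptablesEIP.
type iptablesEIP struct {
	Name       string
	Finalizers []string
	Nat        string // Status.Nat: non-empty while fip/dnat/snat rules reference the EIP
}

var errEIPInUse = errors.New("eip is still referenced by nat rules")

// tryRemoveFinalizer drops the finalizer only once Status.Nat is empty, so the
// CR, and the subnet IP accounting behind it, survives until the FIP is gone.
func tryRemoveFinalizer(eip *iptablesEIP) error {
	if eip.Nat != "" {
		return fmt.Errorf("%w: %s used by %q", errEIPInUse, eip.Name, eip.Nat)
	}
	eip.Finalizers = nil // the real controller patches the CR on the API server instead
	return nil
}

func main() {
	eip := &iptablesEIP{Name: "test-eip", Finalizers: []string{"kube-ovn-controller"}, Nat: "fip"}
	fmt.Println(tryRemoveFinalizer(eip)) // deletion stays blocked while the FIP exists
	eip.Nat = ""
	fmt.Println(tryRemoveFinalizer(eip), eip.Finalizers) // <nil> []
}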
From 297750b391d751d25a8a669a06a45f32009c48e1 Mon Sep 17 00:00:00 2001
From: zbb88888
Date: Thu, 18 Dec 2025 14:48:22 +0800
Subject: [PATCH 2/2] fix after review: drop DeepCopy before enqueue and use
 AddAfter instead of sleep-then-Add

Signed-off-by: zbb88888
---
 pkg/controller/ippool.go         | 2 +-
 pkg/controller/ovn_eip.go        | 8 +++-----
 pkg/controller/subnet.go         | 2 +-
 pkg/controller/vip.go            | 8 +++-----
 pkg/controller/vpc_nat_gw_eip.go | 8 +++-----
 5 files changed, 11 insertions(+), 17 deletions(-)

diff --git a/pkg/controller/ippool.go b/pkg/controller/ippool.go
index d91c028b7ae..967298ce2ca 100644
--- a/pkg/controller/ippool.go
+++ b/pkg/controller/ippool.go
@@ -52,7 +52,7 @@ func (c *Controller) enqueueUpdateIPPool(oldObj, newObj any) {
 	newIPPool := newObj.(*kubeovnv1.IPPool)
 	if !newIPPool.DeletionTimestamp.IsZero() {
 		klog.V(3).Infof("enqueue delete ippool %s due to deletion timestamp", cache.MetaObjectToName(newIPPool).String())
-		c.deleteIPPoolQueue.Add(newIPPool.DeepCopy())
+		c.deleteIPPoolQueue.Add(newIPPool)
 		return
 	}
 	if !slices.Equal(oldIPPool.Spec.Namespaces, newIPPool.Spec.Namespaces) ||
diff --git a/pkg/controller/ovn_eip.go b/pkg/controller/ovn_eip.go
index 1e23ca1def0..ca615cf67c2 100644
--- a/pkg/controller/ovn_eip.go
+++ b/pkg/controller/ovn_eip.go
@@ -73,7 +73,7 @@ func (c *Controller) enqueueDelOvnEip(obj any) {
 
 	key := cache.MetaObjectToName(eip).String()
 	klog.Infof("enqueue del ovn eip %s", key)
-	c.delOvnEipQueue.Add(eip.DeepCopy())
+	c.delOvnEipQueue.Add(eip)
 }
 
 func (c *Controller) handleAddOvnEip(key string) error {
@@ -410,8 +410,7 @@ func (c *Controller) createOrUpdateOvnEipCR(key, subnet, v4ip, v6ip, mac, usageT
 		}
 	}
 	// Trigger subnet status update after CR creation or update
-	time.Sleep(300 * time.Millisecond)
-	c.updateSubnetStatusQueue.Add(subnet)
+	c.updateSubnetStatusQueue.AddAfter(subnet, 300*time.Millisecond)
 	return nil
 }
 
@@ -603,8 +602,7 @@ func (c *Controller) handleDelOvnEipFinalizer(cachedEip *kubeovnv1.OvnEip) error
 	// Trigger subnet status update after finalizer is removed
 	// This ensures subnet status reflects the IP release
 	// Add delay to ensure API server completes the finalizer removal
-	time.Sleep(300 * time.Millisecond)
-	c.updateSubnetStatusQueue.Add(cachedEip.Spec.ExternalSubnet)
+	c.updateSubnetStatusQueue.AddAfter(cachedEip.Spec.ExternalSubnet, 300*time.Millisecond)
 	return nil
 }
 
diff --git a/pkg/controller/subnet.go b/pkg/controller/subnet.go
index e6a7d64fa2c..9ce55204d6b 100644
--- a/pkg/controller/subnet.go
+++ b/pkg/controller/subnet.go
@@ -53,7 +53,7 @@ func (c *Controller) enqueueDeleteSubnet(obj any) {
 	}
 
 	klog.V(3).Infof("enqueue delete subnet %s", subnet.Name)
-	c.deleteSubnetQueue.Add(subnet.DeepCopy())
+	c.deleteSubnetQueue.Add(subnet)
 }
 
 func readyToRemoveFinalizer(subnet *kubeovnv1.Subnet) bool {
diff --git a/pkg/controller/vip.go b/pkg/controller/vip.go
index 901d830dac1..72950cd4d00 100644
--- a/pkg/controller/vip.go
+++ b/pkg/controller/vip.go
@@ -64,7 +64,7 @@ func (c *Controller) enqueueDelVirtualIP(obj any) {
 
 	key := cache.MetaObjectToName(vip).String()
 	klog.Infof("enqueue del vip %s", key)
-	c.delVirtualIPQueue.Add(vip.DeepCopy())
+	c.delVirtualIPQueue.Add(vip)
 }
 
 func (c *Controller) handleAddVirtualIP(key string) error {
@@ -434,8 +434,7 @@ func (c *Controller) createOrUpdateVipCR(key, ns, subnet, v4ip, v6ip, mac string
 		}
 	}
 	// Trigger subnet status update after CR creation or update
-	time.Sleep(300 * time.Millisecond)
-	c.updateSubnetStatusQueue.Add(subnet)
+	c.updateSubnetStatusQueue.AddAfter(subnet, 300*time.Millisecond)
 	return nil
 }
 
@@ -582,8 +581,7 @@ func (c *Controller) handleDelVipFinalizer(key string) error {
 	// Trigger subnet status update after finalizer is removed
 	// This ensures subnet status reflects the IP release
 	// Add delay to ensure API server completes the finalizer removal
-	time.Sleep(300 * time.Millisecond)
-	c.updateSubnetStatusQueue.Add(cachedVip.Spec.Subnet)
+	c.updateSubnetStatusQueue.AddAfter(cachedVip.Spec.Subnet, 300*time.Millisecond)
 	return nil
 }
 
diff --git a/pkg/controller/vpc_nat_gw_eip.go b/pkg/controller/vpc_nat_gw_eip.go
index 8c51e1d3aab..35bba4a09c5 100644
--- a/pkg/controller/vpc_nat_gw_eip.go
+++ b/pkg/controller/vpc_nat_gw_eip.go
@@ -59,7 +59,7 @@ func (c *Controller) enqueueDelIptablesEip(obj any) {
 
 	key := cache.MetaObjectToName(eip).String()
 	klog.Infof("enqueue del iptables eip %s", key)
-	c.delIptablesEipQueue.Add(eip.DeepCopy())
+	c.delIptablesEipQueue.Add(eip)
 }
 
 func (c *Controller) handleAddIptablesEip(key string) error {
@@ -685,8 +685,7 @@ func (c *Controller) createOrUpdateEipCR(key, v4ip, v6ip, mac, natGwDp, qos, ext
 		}
 	}
 	// Trigger subnet status update after all operations complete
-	time.Sleep(300 * time.Millisecond)
-	c.updateSubnetStatusQueue.Add(externalNet)
+	c.updateSubnetStatusQueue.AddAfter(externalNet, 300*time.Millisecond)
 	return nil
 }
 
@@ -770,9 +769,8 @@ func (c *Controller) handleDelIptablesEipFinalizer(key string) error {
 	// Trigger subnet status update after finalizer is removed
 	// This ensures subnet status reflects the IP release
 	// Add delay to ensure API server completes the finalizer removal
-	time.Sleep(300 * time.Millisecond)
 	externalNetwork := util.GetExternalNetwork(cachedIptablesEip.Spec.ExternalSubnet)
-	c.updateSubnetStatusQueue.Add(externalNetwork)
+	c.updateSubnetStatusQueue.AddAfter(externalNetwork, 300*time.Millisecond)
 	return nil
 }
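The recurring change in this second patch is mechanical: each blocking time.Sleep(300 * time.Millisecond) followed by queue.Add(...) becomes a single queue.AddAfter(...), which moves the delay onto the workqueue's own timer instead of stalling a controller worker for 300 ms per event. A minimal runnable sketch of the difference, assuming client-go v0.30+ typed workqueues (the item value here is illustrative):

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	q := workqueue.NewTypedRateLimitingQueue(workqueue.DefaultTypedControllerRateLimiter[string]())
	defer q.ShutDown()

	// Old pattern (removed above): blocks the calling goroutine, i.e. a
	// reconcile worker, for the whole delay.
	//   time.Sleep(300 * time.Millisecond)
	//   q.Add("external-subnet")

	// New pattern: returns immediately; the item only becomes visible to
	// Get once the delay elapses, timed by the queue itself.
	q.AddAfter("external-subnet", 300*time.Millisecond)

	start := time.Now()
	item, shutdown := q.Get() // blocks until the delayed item is ready
	if !shutdown {
		fmt.Printf("got %q after ~%v\n", item, time.Since(start).Round(time.Millisecond))
		q.Done(item)
	}
}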