Skip to content

Commit b98e704

Browse files
authored
bugfix: fix eds updates when discovery enabled (#10391)
1 parent e2ca028 commit b98e704

File tree

8 files changed

+145
-14
lines changed

8 files changed

+145
-14
lines changed
+5
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
changelog:
2+
- type: NON_USER_FACING
3+
description: >-
4+
Fix an issue introduced earlier in this version, where changes in EDS would not be detected
5+
properly if upstream discovery was turned on.

projects/gateway2/krtcollections/endpoints.go

+21-4
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@ package krtcollections
22

33
import (
44
"context"
5+
"encoding/binary"
56
"encoding/json"
67
"fmt"
78
"hash/fnv"
@@ -128,14 +129,16 @@ type EndpointsForUpstream struct {
128129
Hostname string
129130

130131
LbEpsEqualityHash uint64
132+
upstreamHash uint64
133+
epsEqualityHash uint64
131134
}
132135

133136
func NewEndpointsForUpstream(us UpstreamWrapper, logger *zap.Logger) *EndpointsForUpstream {
134137
// start with a hash of the cluster name. technically we dont need it for krt, as we can compare the upstream name. but it helps later
135138
// to compute the hash we present envoy with.
136139
h := fnv.New64()
137140
h.Write([]byte(us.Inner.GetMetadata().Ref().String()))
138-
lbEpsEqualityHash := h.Sum64()
141+
upstreamHash := h.Sum64()
139142

140143
// add the upstream hash to the clustername, so that if it changes the envoy cluster will become warm again.
141144
clusterName := GetEndpointClusterName(us.Inner)
@@ -148,12 +151,13 @@ func NewEndpointsForUpstream(us UpstreamWrapper, logger *zap.Logger) *EndpointsF
148151
},
149152
Port: ggv2utils.GetPortForUpstream(us.Inner),
150153
Hostname: ggv2utils.GetHostnameForUpstream(us.Inner),
151-
LbEpsEqualityHash: lbEpsEqualityHash,
154+
LbEpsEqualityHash: upstreamHash,
155+
upstreamHash: upstreamHash,
152156
}
153157
}
154158

155159
func hashEndpoints(l PodLocality, emd EndpointWithMd) uint64 {
156-
hasher := fnv.New64()
160+
hasher := fnv.New64a()
157161
hasher.Write([]byte(l.Region))
158162
hasher.Write([]byte(l.Zone))
159163
hasher.Write([]byte(l.Subzone))
@@ -163,10 +167,23 @@ func hashEndpoints(l PodLocality, emd EndpointWithMd) uint64 {
163167
return hasher.Sum64()
164168
}
165169

170+
func hash(a, b uint64) uint64 {
171+
hasher := fnv.New64a()
172+
var buf [16]byte
173+
binary.NativeEndian.PutUint64(buf[:8], a)
174+
binary.NativeEndian.PutUint64(buf[8:], b)
175+
hasher.Write(buf[:])
176+
return hasher.Sum64()
177+
}
178+
166179
func (e *EndpointsForUpstream) Add(l PodLocality, emd EndpointWithMd) {
167180
// xor it as we dont care about order - if we have the same endpoints in the same locality
168181
// we are good.
169-
e.LbEpsEqualityHash ^= hashEndpoints(l, emd)
182+
e.epsEqualityHash ^= hashEndpoints(l, emd)
183+
// we can't xor the endpoint hash with the upstream hash, because upstreams with
184+
// different names and similar endpoints will cancel out, so endpoint changes
185+
// won't result in different equality hashes.
186+
e.LbEpsEqualityHash = hash(e.epsEqualityHash, e.upstreamHash)
170187
e.LbEps[l] = append(e.LbEps[l], emd)
171188
}
172189

projects/gateway2/krtcollections/endpoints_test.go

+108
Original file line numberDiff line numberDiff line change
@@ -134,6 +134,114 @@ func TestEndpointsForUpstreamOrderDoesntMatter(t *testing.T) {
134134

135135
}
136136

137+
func TestEndpointsForUpstreamWithDiscoveredUpstream(t *testing.T) {
138+
g := gomega.NewWithT(t)
139+
140+
us := UpstreamWrapper{
141+
Inner: &gloov1.Upstream{
142+
Metadata: &core.Metadata{Name: "name", Namespace: "ns"},
143+
UpstreamType: &gloov1.Upstream_Kube{
144+
Kube: &kubernetes.UpstreamSpec{
145+
ServiceName: "svc",
146+
ServiceNamespace: "ns",
147+
ServicePort: 8080,
148+
},
149+
},
150+
},
151+
}
152+
usd := UpstreamWrapper{
153+
Inner: &gloov1.Upstream{
154+
Metadata: &core.Metadata{Name: "discovered-name", Namespace: "ns"},
155+
UpstreamType: &gloov1.Upstream_Kube{
156+
Kube: &kubernetes.UpstreamSpec{
157+
ServiceName: "svc",
158+
ServiceNamespace: "ns",
159+
ServicePort: 8080,
160+
},
161+
},
162+
},
163+
}
164+
// input
165+
emd1 := EndpointWithMd{
166+
LbEndpoint: &endpointv3.LbEndpoint{
167+
HostIdentifier: &endpointv3.LbEndpoint_Endpoint{
168+
Endpoint: &endpointv3.Endpoint{
169+
Address: &envoy_config_core_v3.Address{
170+
Address: &envoy_config_core_v3.Address_SocketAddress{
171+
SocketAddress: &envoy_config_core_v3.SocketAddress{
172+
Address: "1.2.3.4",
173+
PortSpecifier: &envoy_config_core_v3.SocketAddress_PortValue{
174+
PortValue: 8080,
175+
},
176+
},
177+
},
178+
},
179+
},
180+
},
181+
},
182+
EndpointMd: EndpointMetadata{
183+
Labels: map[string]string{
184+
corev1.LabelTopologyRegion: "region",
185+
corev1.LabelTopologyZone: "zone",
186+
},
187+
},
188+
}
189+
emd2 := EndpointWithMd{
190+
LbEndpoint: &endpointv3.LbEndpoint{
191+
HostIdentifier: &endpointv3.LbEndpoint_Endpoint{
192+
Endpoint: &endpointv3.Endpoint{
193+
Address: &envoy_config_core_v3.Address{
194+
Address: &envoy_config_core_v3.Address_SocketAddress{
195+
SocketAddress: &envoy_config_core_v3.SocketAddress{
196+
Address: "1.2.3.5",
197+
PortSpecifier: &envoy_config_core_v3.SocketAddress_PortValue{
198+
PortValue: 8080,
199+
},
200+
},
201+
},
202+
},
203+
},
204+
},
205+
},
206+
EndpointMd: EndpointMetadata{
207+
Labels: map[string]string{
208+
corev1.LabelTopologyRegion: "region",
209+
corev1.LabelTopologyZone: "zone",
210+
},
211+
},
212+
}
213+
214+
result1 := NewEndpointsForUpstream(us, nil)
215+
result1.Add(PodLocality{
216+
Region: "region",
217+
Zone: "zone",
218+
}, emd1)
219+
220+
result2 := NewEndpointsForUpstream(usd, nil)
221+
result2.Add(PodLocality{
222+
Region: "region",
223+
Zone: "zone",
224+
}, emd1)
225+
226+
result3 := NewEndpointsForUpstream(us, nil)
227+
result3.Add(PodLocality{
228+
Region: "region",
229+
Zone: "zone",
230+
}, emd2)
231+
232+
result4 := NewEndpointsForUpstream(usd, nil)
233+
result4.Add(PodLocality{
234+
Region: "region",
235+
Zone: "zone",
236+
}, emd2)
237+
238+
h1 := result1.LbEpsEqualityHash ^ result2.LbEpsEqualityHash
239+
h2 := result3.LbEpsEqualityHash ^ result4.LbEpsEqualityHash
240+
241+
g.Expect(h1).NotTo(Equal(h2), "not expected %v, got %v", h1, h2)
242+
243+
}
244+
137245
func TestEndpoints(t *testing.T) {
138246
testCases := []struct {
139247
name string

projects/gateway2/krtcollections/uniqueclients.go

+3-2
Original file line numberDiff line numberDiff line change
@@ -91,7 +91,7 @@ type callbacks struct {
9191
collection atomic.Pointer[callbacksCollection]
9292
}
9393

94-
type UniquelyConnectedClientsBulider func(ctx context.Context, augmentedPods krt.Collection[LocalityPod]) krt.Collection[UniqlyConnectedClient]
94+
type UniquelyConnectedClientsBulider func(ctx context.Context, handler *krt.DebugHandler, augmentedPods krt.Collection[LocalityPod]) krt.Collection[UniqlyConnectedClient]
9595

9696
// THIS IS THE SET OF THINGS WE RUN TRANSLATION FOR
9797
// add returned callbacks to the xds server.
@@ -102,7 +102,7 @@ func NewUniquelyConnectedClients() (xdsserver.Callbacks, UniquelyConnectedClient
102102
}
103103

104104
func buildCollection(callbacks *callbacks) UniquelyConnectedClientsBulider {
105-
return func(ctx context.Context, augmentedPods krt.Collection[LocalityPod]) krt.Collection[UniqlyConnectedClient] {
105+
return func(ctx context.Context, handler *krt.DebugHandler, augmentedPods krt.Collection[LocalityPod]) krt.Collection[UniqlyConnectedClient] {
106106
trigger := krt.NewRecomputeTrigger(true)
107107
col := &callbacksCollection{
108108
logger: contextutils.LoggerFrom(ctx).Desugar(),
@@ -121,6 +121,7 @@ func buildCollection(callbacks *callbacks) UniquelyConnectedClientsBulider {
121121
return col.getClients()
122122
},
123123
krt.WithName("UniqueConnectedClients"),
124+
krt.WithDebugging(handler),
124125
)
125126
}
126127
}

projects/gateway2/krtcollections/uniqueclients_test.go

+1-1
Original file line numberDiff line numberDiff line change
@@ -76,7 +76,7 @@ func TestUniqueClients(t *testing.T) {
7676
pods.Synced().WaitUntilSynced(context.Background().Done())
7777

7878
cb, uccBuilder := NewUniquelyConnectedClients()
79-
ucc := uccBuilder(context.Background(), pods)
79+
ucc := uccBuilder(context.Background(), nil, pods)
8080
ucc.Synced().WaitUntilSynced(context.Background().Done())
8181

8282
// check fetch as well

projects/gateway2/setup/ggv2setup.go

+1-1
Original file line numberDiff line numberDiff line change
@@ -118,7 +118,7 @@ func StartGGv2WithConfig(ctx context.Context,
118118
settingsGVR,
119119
krt.WithName("GlooSettings"))
120120

121-
ucc := uccBuilder(ctx, augmentedPods)
121+
ucc := uccBuilder(ctx, setupOpts.KrtDebugger, augmentedPods)
122122

123123
settingsSingle := krt.NewSingleton(func(ctx krt.HandlerContext) *glookubev1.Settings {
124124
s := krt.FetchOne(ctx, setting,

projects/gateway2/setup/ggv2setup_test.go

+4-4
Original file line numberDiff line numberDiff line change
@@ -580,7 +580,7 @@ type xdsFetcher struct {
580580

581581
func (x *xdsFetcher) getclusters(t *testing.T, ctx context.Context) []*envoycluster.Cluster {
582582

583-
ctx, cancel := context.WithTimeout(ctx, time.Second)
583+
ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
584584
defer cancel()
585585

586586
cds := envoy_service_cluster_v3.NewClusterDiscoveryServiceClient(x.conn)
@@ -634,7 +634,7 @@ func getroutesnames(l *envoylistener.Listener) []string {
634634

635635
func (x *xdsFetcher) getlisteners(t *testing.T, ctx context.Context) []*envoylistener.Listener {
636636

637-
ctx, cancel := context.WithTimeout(ctx, time.Second)
637+
ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
638638
defer cancel()
639639

640640
ds := envoy_service_listener_v3.NewListenerDiscoveryServiceClient(x.conn)
@@ -667,7 +667,7 @@ func (x *xdsFetcher) getlisteners(t *testing.T, ctx context.Context) []*envoylis
667667
func (x *xdsFetcher) getendpoints(t *testing.T, ctx context.Context, clusterServiceNames []string) []*envoyendpoint.ClusterLoadAssignment {
668668

669669
eds := envoy_service_endpoint_v3.NewEndpointDiscoveryServiceClient(x.conn)
670-
ctx, cancel := context.WithTimeout(ctx, time.Second)
670+
ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
671671
defer cancel()
672672

673673
epcli, err := eds.StreamEndpoints(ctx)
@@ -700,7 +700,7 @@ func (x *xdsFetcher) getendpoints(t *testing.T, ctx context.Context, clusterServ
700700
func (x *xdsFetcher) getroutes(t *testing.T, ctx context.Context, rosourceNames []string) []*envoy_config_route_v3.RouteConfiguration {
701701

702702
eds := envoy_service_route_v3.NewRouteDiscoveryServiceClient(x.conn)
703-
ctx, cancel := context.WithTimeout(ctx, time.Second)
703+
ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
704704
defer cancel()
705705

706706
epcli, err := eds.StreamRoutes(ctx)

projects/gateway2/utils/hash.go

+2-2
Original file line numberDiff line numberDiff line change
@@ -14,12 +14,12 @@ import (
1414
)
1515

1616
func HashProto(resource proto.Message) uint64 {
17-
hasher := fnv.New64()
17+
hasher := fnv.New64a()
1818
HashProtoWithHasher(hasher, resource)
1919
return hasher.Sum64()
2020
}
2121

22-
func HashProtoWithHasher(hasher hash.Hash64, resource proto.Message) {
22+
func HashProtoWithHasher(hasher hash.Hash, resource proto.Message) {
2323
var buffer [1024]byte
2424
mo := proto.MarshalOptions{Deterministic: true}
2525
buf := buffer[:0]

0 commit comments

Comments
 (0)