
Commit 0b44cb2

Merge pull request #246 from kubescape/fix
use same cooldownqueue as node-agent and synchronizer
2 parents 20df76f + 73bd4e8 commit 0b44cb2

File tree

8 files changed (+137, -184 lines)

continuousscanning/cooldownqueue.go (-81)

This file was deleted.

continuousscanning/service.go (+7 -7)

@@ -2,8 +2,8 @@ package continuousscanning
 
 import (
 	"context"
-	"time"
 
+	"github.com/kubescape/operator/watcher"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/watch"
 	"k8s.io/client-go/dynamic"
@@ -19,7 +19,7 @@ type ContinuousScanningService struct {
 	workDone      chan struct{}
 	k8sdynamic    dynamic.Interface
 	eventHandlers []EventHandler
-	eventQueue    *cooldownQueue
+	eventQueue    *watcher.CooldownQueue
 }
 
 func (s *ContinuousScanningService) listen(ctx context.Context) <-chan armoapi.Command {
@@ -34,8 +34,8 @@ func (s *ContinuousScanningService) listen(ctx context.Context) <-chan armoapi.C
 	wp.Run(ctx, resourceEventsCh)
 	logger.L().Info("ran watch pool")
 
-	go func(shutdownCh <-chan struct{}, resourceEventsCh <-chan watch.Event, out *cooldownQueue) {
-		defer out.Stop(ctx)
+	go func(shutdownCh <-chan struct{}, resourceEventsCh <-chan watch.Event, out *watcher.CooldownQueue) {
+		defer out.Stop()
 
 		for {
 			select {
@@ -44,7 +44,7 @@ func (s *ContinuousScanningService) listen(ctx context.Context) <-chan armoapi.C
 					"got event from channel",
 					helpers.Interface("event", e),
 				)
-				out.Enqueue(ctx, e)
+				out.Enqueue(e)
 			case <-shutdownCh:
 				return
 			}
@@ -100,9 +100,9 @@ func (s *ContinuousScanningService) Stop() {
 	<-s.workDone
 }
 
-func NewContinuousScanningService(client dynamic.Interface, tl TargetLoader, queueSize int, sameEventCooldown time.Duration, h ...EventHandler) *ContinuousScanningService {
+func NewContinuousScanningService(client dynamic.Interface, tl TargetLoader, h ...EventHandler) *ContinuousScanningService {
 	doneCh := make(chan struct{})
-	eventQueue := NewCooldownQueue(queueSize, sameEventCooldown)
+	eventQueue := watcher.NewCooldownQueue()
 	workDone := make(chan struct{})
 
 	return &ContinuousScanningService{
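
For reference, a minimal sketch (not part of this diff) of how a caller might wire the simplified constructor; the helper and package names are hypothetical. The point is that queue size and cooldown are no longer passed in, because watcher.NewCooldownQueue now owns those defaults:

package scanwiring // hypothetical package, for illustration only

import (
	"context"

	cs "github.com/kubescape/operator/continuousscanning"
	"k8s.io/client-go/dynamic"
)

// wireContinuousScanning builds and launches the service with the new
// signature: only a dynamic client, a target loader, and event handlers.
func wireContinuousScanning(ctx context.Context, dynClient dynamic.Interface, loader cs.TargetLoader, handlers ...cs.EventHandler) *cs.ContinuousScanningService {
	svc := cs.NewContinuousScanningService(dynClient, loader, handlers...)
	svc.Launch(ctx)
	return svc
}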

continuousscanning/service_test.go (+2 -2)

@@ -151,7 +151,7 @@ func TestAddEventHandler(t *testing.T) {
 	tl := NewTargetLoader(f)
 	// We use the spy handler later to verify if it's been called
 	spyH := &spyHandler{called: false, wg: resourcesCreatedWg, mx: &sync.RWMutex{}}
-	css := NewContinuousScanningService(dynClient, tl, DefaultQueueSize, DefaultTTL, spyH)
+	css := NewContinuousScanningService(dynClient, tl, spyH)
 	css.Launch(ctx)
 
 	// Create Pods to be listened
@@ -265,7 +265,7 @@ func TestContinuousScanningService(t *testing.T) {
 	triggeringHandler := NewTriggeringHandler(wp, operatorConfig)
 	stubFetcher := &stubFetcher{podMatchRules}
 	loader := NewTargetLoader(stubFetcher)
-	css := NewContinuousScanningService(dynClient, loader, DefaultQueueSize, DefaultTTL, triggeringHandler)
+	css := NewContinuousScanningService(dynClient, loader, triggeringHandler)
 	css.Launch(ctx)
 
 	// Create Pods to be listened

main.go (+1 -2)

@@ -22,7 +22,6 @@ import (
 	rulebindingcachev1 "github.com/kubescape/operator/admission/rulebinding/cache"
 	"github.com/kubescape/operator/admission/webhook"
 	"github.com/kubescape/operator/config"
-	cs "github.com/kubescape/operator/continuousscanning"
 	"github.com/kubescape/operator/mainhandler"
 	"github.com/kubescape/operator/notificationhandler"
 	"github.com/kubescape/operator/objectcache"
@@ -150,7 +149,7 @@ func main() {
 
 	if operatorConfig.ContinuousScanEnabled() {
 		go func(mh *mainhandler.MainHandler) {
-			err := mh.SetupContinuousScanning(ctx, cs.DefaultQueueSize, cfg.EventDeduplicationInterval)
+			err := mh.SetupContinuousScanning(ctx)
 			logger.L().Info("set up cont scanning service")
 			if err != nil {
 				logger.L().Ctx(ctx).Fatal(err.Error(), helpers.Error(err))

mainhandler/handlerequests.go (+6 -6)

@@ -114,7 +114,7 @@ func NewActionHandler(config config.IConfig, k8sAPI *k8sinterface.KubernetesApi,
 }
 
 // SetupContinuousScanning sets up the continuous cluster scanning function
-func (mainHandler *MainHandler) SetupContinuousScanning(ctx context.Context, queueSize int, eventCooldown time.Duration) error {
+func (mainHandler *MainHandler) SetupContinuousScanning(ctx context.Context) error {
 	ksStorageClient, err := kssc.NewForConfig(k8sinterface.GetK8sConfig())
 	if err != nil {
 		logger.L().Ctx(ctx).Fatal(fmt.Sprintf("Unable to initialize the storage client: %v", err))
@@ -133,7 +133,7 @@ func (mainHandler *MainHandler) SetupContinuousScanning(ctx context.Context, que
 	loader := cs.NewTargetLoader(fetcher)
 
 	dynClient := mainHandler.k8sAPI.DynamicClient
-	svc := cs.NewContinuousScanningService(dynClient, loader, queueSize, eventCooldown, triggeringHandler, deletingHandler)
+	svc := cs.NewContinuousScanningService(dynClient, loader, triggeringHandler, deletingHandler)
 	svc.Launch(ctx)
 
 	return nil
@@ -150,7 +150,7 @@ func (mainHandler *MainHandler) HandleWatchers(ctx context.Context) {
 	if err != nil {
 		logger.L().Ctx(ctx).Fatal(fmt.Sprintf("Unable to initialize the storage client: %v", err))
 	}
-	eventQueue := watcher.NewCooldownQueue(watcher.DefaultQueueSize, watcher.DefaultTTL)
+	eventQueue := watcher.NewCooldownQueue()
 	watchHandler := watcher.NewWatchHandler(ctx, mainHandler.config, mainHandler.k8sAPI, ksStorageClient, eventQueue)
 
 	// wait for the kubevuln component to be ready
@@ -433,16 +433,16 @@ func (mainHandler *MainHandler) HandleImageScanningScopedRequest(ctx context.Con
 			CommandName: apis.TypeScanImages,
 			Args: map[string]interface{}{
 				utils.ArgsContainerData: containerData,
-				utils.ArgsPod:           &pod,
+				utils.ArgsPod:           pod,
 			},
 		}
 
 		// send specific command to the channel
 		newSessionObj := utils.NewSessionObj(ctx, mainHandler.config, cmd, "Websocket", sessionObj.Reporter.GetJobID(), "", 1)
 
-		logger.L().Info("triggering", helpers.String("id", newSessionObj.Command.GetID()), helpers.String("slug", s), helpers.String("containerName", containerData.ContainerName), helpers.String("imageTag", containerData.ImageTag), helpers.String("imageID", containerData.ImageID))
+		logger.L().Info("triggering scan image", helpers.String("id", newSessionObj.Command.GetID()), helpers.String("slug", s), helpers.String("containerName", containerData.ContainerName), helpers.String("imageTag", containerData.ImageTag), helpers.String("imageID", containerData.ImageID))
 		if err := mainHandler.HandleSingleRequest(ctx, newSessionObj); err != nil {
-			logger.L().Info("failed to complete action", helpers.String("id", newSessionObj.Command.GetID()), helpers.String("slug", s), helpers.String("containerName", containerData.ContainerName), helpers.String("imageTag", containerData.ImageTag), helpers.String("imageID", containerData.ImageID))
+			logger.L().Info("failed to complete action", helpers.Error(err), helpers.String("id", newSessionObj.Command.GetID()), helpers.String("slug", s), helpers.String("containerName", containerData.ContainerName), helpers.String("imageTag", containerData.ImageTag), helpers.String("imageID", containerData.ImageID))
			newSessionObj.Reporter.SendError(err, mainHandler.sendReport, true)
			continue
		}

watcher/cooldownqueue.go (+35 -30)

@@ -1,76 +1,81 @@
 package watcher
 
 import (
+	"strings"
+	"sync"
 	"time"
 
-	lru "github.com/hashicorp/golang-lru/v2/expirable"
-	v1 "k8s.io/api/core/v1"
+	"istio.io/pkg/cache"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/watch"
 )
 
 const (
-	// Default size for the cooldown queue
-	DefaultQueueSize = 512
-	// Default TTL for events put in the queue
-	DefaultTTL = 1 * time.Second
+	defaultExpiration = 5 * time.Second
+	evictionInterval  = 1 * time.Second
 )
 
 // CooldownQueue is a queue that lets clients put events into it with a cooldown
 //
-// When a client puts an event into a queue, it forwards the event to its
-// output channel and starts a cooldown for this event. If a client attempts to
-// put the same event into the queue while the cooldown is running, the queue
-// will silently drop the event. When the cooldown resets and a client puts the
-// same event into the queue, it will be forwarded to the output channel
+// When a client puts an event into a queue, it waits for a cooldown period before
+// the event is forwarded to the consumer. If an event for the same key is put into the queue
+// again before the cooldown period is over, the event is overridden and the cooldown period is reset.
 type CooldownQueue struct {
-	seenEvents *lru.LRU[string, bool]
+	closed     bool
+	mu         sync.Mutex  // mutex for closed
+	chanMu     *sync.Mutex // mutex for innerChan
+	seenEvents cache.ExpiringCache
 	innerChan  chan watch.Event
 	ResultChan <-chan watch.Event
-	closed     bool
 }
 
 // NewCooldownQueue returns a new Cooldown Queue
-func NewCooldownQueue(size int, cooldown time.Duration) *CooldownQueue {
-	cache := lru.NewLRU[string, bool](size, nil, cooldown)
+func NewCooldownQueue() *CooldownQueue {
 	events := make(chan watch.Event)
+	chanMu := sync.Mutex{}
+	callback := func(key, value any) {
+		chanMu.Lock()
+		defer chanMu.Unlock()
+		events <- value.(watch.Event)
+	}
+	c := cache.NewTTLWithCallback(defaultExpiration, evictionInterval, callback)
 	return &CooldownQueue{
-		seenEvents: cache,
+		chanMu:     &chanMu,
+		seenEvents: c,
 		innerChan:  events,
 		ResultChan: events,
 	}
 }
 
 // makeEventKey creates a unique key for an event from a watcher
 func makeEventKey(e watch.Event) string {
-	object, ok := e.Object.(*v1.Pod)
-	if !ok {
-		return ""
-	}
-	eventKey := string(e.Type) + "-" + string(object.GetUID())
-	return eventKey
+	gvk := e.Object.GetObjectKind().GroupVersionKind()
+	meta := e.Object.(metav1.Object)
+	return strings.Join([]string{gvk.Group, gvk.Version, gvk.Kind, meta.GetNamespace(), meta.GetName()}, "/")
 }
 
 func (q *CooldownQueue) Closed() bool {
+	q.mu.Lock()
+	defer q.mu.Unlock()
 	return q.closed
 }
 
 // Enqueue enqueues an event in the Cooldown Queue
 func (q *CooldownQueue) Enqueue(e watch.Event) {
+	q.mu.Lock()
+	defer q.mu.Unlock()
 	if q.closed {
 		return
 	}
 	eventKey := makeEventKey(e)
-	_, exists := q.seenEvents.Get(eventKey)
-	if exists {
-		return
-	}
-	go func() {
-		q.innerChan <- e
-	}()
-	q.seenEvents.Add(eventKey, true)
+	q.seenEvents.Set(eventKey, e)
 }
 
 func (q *CooldownQueue) Stop() {
+	q.chanMu.Lock()
+	defer q.chanMu.Unlock()
+	q.mu.Lock()
+	defer q.mu.Unlock()
 	q.closed = true
 	close(q.innerChan)
 }
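
To make the new cooldown semantics concrete, here is a small, hypothetical end-to-end sketch (not part of the commit). It assumes the operator module is importable as github.com/kubescape/operator/watcher; the burst size and sleep duration are illustrative only. A burst of events for the same object collapses into a single delivery once the 5-second expiration elapses:

package main

import (
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"

	"github.com/kubescape/operator/watcher"
)

func main() {
	q := watcher.NewCooldownQueue()

	// Consumer: debounced events come out of the exported ResultChan.
	done := make(chan struct{})
	go func() {
		defer close(done)
		for e := range q.ResultChan {
			pod := e.Object.(*corev1.Pod)
			fmt.Println("delivered:", e.Type, pod.Namespace+"/"+pod.Name)
		}
	}()

	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "nginx", Namespace: "default"}}

	// Producer: these five events share one key (group/version/kind/namespace/name),
	// so only the last one should be forwarded after the cooldown expires.
	for i := 0; i < 5; i++ {
		q.Enqueue(watch.Event{Type: watch.Modified, Object: pod})
	}

	// Wait (generously, for illustration) for the TTL cache to evict and
	// forward the event, then shut the queue down.
	time.Sleep(8 * time.Second)
	q.Stop()
	<-done
}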
