Skip to content

Commit 20df76f

Browse files
authored
Merge pull request #245 from kubescape/bump
optimize memory with pagers
2 parents c62c243 + 52c31b8 commit 20df76f

14 files changed

+185
-235
lines changed

.github/workflows/pr-merged.yaml

+1-1
Original file line numberDiff line numberDiff line change
@@ -37,7 +37,7 @@ jobs:
3737
BUILD_PLATFORM: linux/amd64,linux/arm64
3838
GO_VERSION: "1.23"
3939
REQUIRED_TESTS: '[
40-
"vuln_scan",
40+
"vuln_v2_views",
4141
"vuln_scan_trigger_scan_public_registry",
4242
"vuln_scan_trigger_scan_public_registry_excluded",
4343
"vuln_scan_trigger_scan_private_quay_registry",

continuousscanning/handlers.go

+5-7
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@ import (
66

77
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
88
"k8s.io/apimachinery/pkg/runtime"
9-
watch "k8s.io/apimachinery/pkg/watch"
9+
"k8s.io/apimachinery/pkg/watch"
1010
"k8s.io/utils/ptr"
1111

1212
armoapi "github.com/armosec/armoapi-go/apis"
@@ -27,7 +27,7 @@ import (
2727
sets "github.com/deckarep/golang-set/v2"
2828
)
2929

30-
var orphanableWorkloadTypes sets.Set[string] = sets.NewSet[string]("Pod", "ReplicaSet", "Job")
30+
var orphanableWorkloadTypes = sets.NewSet[string]("Pod", "ReplicaSet", "Job")
3131

3232
type EventHandler interface {
3333
Handle(ctx context.Context, e watch.Event) error
@@ -97,7 +97,7 @@ func unstructuredToScanObject(uObject *unstructured.Unstructured) (*objectsenvel
9797
}
9898

9999
func triggerScanFor(ctx context.Context, uObject *unstructured.Unstructured, isDelete bool, wp *ants.PoolWithFunc, clusterConfig config.IConfig) error {
100-
logger.L().Ctx(ctx).Info(
100+
logger.L().Info(
101101
"triggering scan",
102102
helpers.String("kind", uObject.GetKind()),
103103
helpers.String("name", uObject.GetName()),
@@ -122,7 +122,6 @@ func (h *poolInvokerHandler) Handle(ctx context.Context, e watch.Event) error {
122122
if e.Type != watch.Added && e.Type != watch.Modified {
123123
return nil
124124
}
125-
isDelete := false
126125

127126
uObject, err := eventToUnstructured(e)
128127
if err != nil {
@@ -134,7 +133,7 @@ func (h *poolInvokerHandler) Handle(ctx context.Context, e watch.Event) error {
134133
return nil
135134
}
136135

137-
return triggerScanFor(ctx, uObject, isDelete, h.wp, h.clusterConfig)
136+
return triggerScanFor(ctx, uObject, false, h.wp, h.clusterConfig)
138137
}
139138

140139
func NewTriggeringHandler(wp *ants.PoolWithFunc, clusterConfig config.IConfig) EventHandler {
@@ -220,7 +219,6 @@ func (h *deletedCleanerHandler) Handle(ctx context.Context, e watch.Event) error
220219
if e.Type != watch.Deleted {
221220
return nil
222221
}
223-
isDelete := true
224222

225223
uObject, err := eventToUnstructured(e)
226224
if err != nil {
@@ -237,6 +235,6 @@ func (h *deletedCleanerHandler) Handle(ctx context.Context, e watch.Event) error
237235
logger.L().Ctx(ctx).Error("failed to delete CRDs", helpers.Error(err))
238236
}
239237

240-
err = triggerScanFor(ctx, uObject, isDelete, h.wp, h.clusterConfig)
238+
err = triggerScanFor(ctx, uObject, true, h.wp, h.clusterConfig)
241239
return err
242240
}

continuousscanning/service.go

+5-5
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@ import (
55
"time"
66

77
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
8-
watch "k8s.io/apimachinery/pkg/watch"
8+
"k8s.io/apimachinery/pkg/watch"
99
"k8s.io/client-go/dynamic"
1010

1111
armoapi "github.com/armosec/armoapi-go/apis"
@@ -29,18 +29,18 @@ func (s *ContinuousScanningService) listen(ctx context.Context) <-chan armoapi.C
2929
resourceEventsCh := make(chan watch.Event, 100)
3030

3131
gvrs := s.tl.LoadGVRs(ctx)
32-
logger.L().Ctx(ctx).Info("fetched gvrs", helpers.Interface("gvrs", gvrs))
32+
logger.L().Info("fetched gvrs", helpers.Interface("gvrs", gvrs))
3333
wp, _ := NewWatchPool(ctx, s.k8sdynamic, gvrs, listOpts)
3434
wp.Run(ctx, resourceEventsCh)
35-
logger.L().Ctx(ctx).Info("ran watch pool")
35+
logger.L().Info("ran watch pool")
3636

3737
go func(shutdownCh <-chan struct{}, resourceEventsCh <-chan watch.Event, out *cooldownQueue) {
3838
defer out.Stop(ctx)
3939

4040
for {
4141
select {
4242
case e := <-resourceEventsCh:
43-
logger.L().Ctx(ctx).Debug(
43+
logger.L().Debug(
4444
"got event from channel",
4545
helpers.Interface("event", e),
4646
)
@@ -57,7 +57,7 @@ func (s *ContinuousScanningService) listen(ctx context.Context) <-chan armoapi.C
5757

5858
func (s *ContinuousScanningService) work(ctx context.Context) {
5959
for e := range s.eventQueue.ResultChan {
60-
logger.L().Ctx(ctx).Debug(
60+
logger.L().Debug(
6161
"got an event to process",
6262
helpers.Interface("event", e),
6363
)

continuousscanning/watchbuilder.go

+9-9
Original file line numberDiff line numberDiff line change
@@ -66,8 +66,8 @@ func (w *SelfHealingWatch) Run(ctx context.Context, readyWg *sync.WaitGroup, out
6666
return ctx.Err()
6767
default:
6868
gvr := helpers.String("gvr", w.gvr.String())
69-
logger.L().Ctx(ctx).Debug("creating watch for GVR", gvr)
70-
watch, err := w.makeWatchFunc(ctx, w.client, w.gvr, w.opts)
69+
logger.L().Debug("creating watch for GVR", gvr)
70+
watchFunc, err := w.makeWatchFunc(ctx, w.client, w.gvr, w.opts)
7171
if err != nil {
7272
logger.L().Ctx(ctx).Warning(
7373
"got error when creating a watch for gvr",
@@ -76,8 +76,8 @@ func (w *SelfHealingWatch) Run(ctx context.Context, readyWg *sync.WaitGroup, out
7676
)
7777
continue
7878
}
79-
logger.L().Ctx(ctx).Debug("watch created\n")
80-
w.currWatch = watch
79+
logger.L().Debug("watch created\n")
80+
w.currWatch = watchFunc
8181

8282
// Watch is considered ready once it is successfully acquired
8383
// Signal we are done only the first time because
@@ -97,7 +97,7 @@ type WatchPool struct {
9797
}
9898

9999
func (wp *WatchPool) Run(ctx context.Context, out chan<- watch.Event) {
100-
logger.L().Ctx(ctx).Info("Watch pool: starting")
100+
logger.L().Info("Watch pool: starting")
101101

102102
wg := &sync.WaitGroup{}
103103
for idx := range wp.pool {
@@ -106,17 +106,17 @@ func (wp *WatchPool) Run(ctx context.Context, out chan<- watch.Event) {
106106
}
107107
wg.Wait()
108108

109-
logger.L().Ctx(ctx).Info("Watch pool: started ok")
109+
logger.L().Info("Watch pool: started ok")
110110
}
111111

112-
func NewWatchPool(ctx context.Context, client dynamic.Interface, gvrs []schema.GroupVersionResource, opts metav1.ListOptions) (*WatchPool, error) {
112+
func NewWatchPool(_ context.Context, client dynamic.Interface, gvrs []schema.GroupVersionResource, opts metav1.ListOptions) (*WatchPool, error) {
113113
watches := make([]*SelfHealingWatch, len(gvrs))
114114

115115
for idx := range gvrs {
116116
gvr := gvrs[idx]
117-
watch := NewSelfHealingWatch(client, gvr, opts)
117+
selfHealingWatch := NewSelfHealingWatch(client, gvr, opts)
118118

119-
watches[idx] = watch
119+
watches[idx] = selfHealingWatch
120120
}
121121

122122
pool := &WatchPool{pool: watches}

main.go

+4-5
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,7 @@ import (
3333

3434
"github.com/armosec/utils-k8s-go/probes"
3535
beUtils "github.com/kubescape/backend/pkg/utils"
36-
logger "github.com/kubescape/go-logger"
36+
"github.com/kubescape/go-logger"
3737

3838
"github.com/kubescape/operator/servicehandler"
3939
)
@@ -107,12 +107,12 @@ func main() {
107107
restclient.SetDefaultWarningHandler(restclient.NoWarnings{})
108108

109109
kubernetesCache := objectcache.NewKubernetesCache(k8sApi)
110-
110+
111111
// Creating the ObjectCache using KubernetesCache
112112
objectCache := objectcache.NewObjectCache(kubernetesCache)
113113

114114
if components.ServiceScanConfig.Enabled {
115-
logger.L().Ctx(ctx).Info("service discovery enabeld and started with interval: ", helpers.String("interval", components.ServiceScanConfig.Interval.String()))
115+
logger.L().Info("service discovery enabled and started with interval: ", helpers.String("interval", components.ServiceScanConfig.Interval.String()))
116116
go servicehandler.DiscoveryServiceHandler(ctx, k8sApi, components.ServiceScanConfig.Interval)
117117
}
118118

@@ -151,7 +151,7 @@ func main() {
151151
if operatorConfig.ContinuousScanEnabled() {
152152
go func(mh *mainhandler.MainHandler) {
153153
err := mh.SetupContinuousScanning(ctx, cs.DefaultQueueSize, cfg.EventDeduplicationInterval)
154-
logger.L().Ctx(ctx).Info("set up cont scanning service")
154+
logger.L().Info("set up cont scanning service")
155155
if err != nil {
156156
logger.L().Ctx(ctx).Fatal(err.Error(), helpers.Error(err))
157157
}
@@ -178,7 +178,6 @@ func main() {
178178
ruleBindingNotify := make(chan rulebindingmanager.RuleBindingNotify, 100)
179179
ruleBindingCache.AddNotifier(&ruleBindingNotify)
180180

181-
182181
admissionController := webhook.New(addr, "/etc/certs/tls.crt", "/etc/certs/tls.key", runtime.NewScheme(), webhook.NewAdmissionValidator(k8sApi, objectCache, exporter, ruleBindingCache), ruleBindingCache)
183182
// Start HTTP REST server for webhook
184183
go func() {

0 commit comments

Comments (0)