@@ -20,18 +20,15 @@ func dedupAlerts(alerts []pipeline.RuntimeAlert) []*models.Alert {
 
 	for idx, alert := range alerts {
 		log.Tracef("alert %d/%d", idx, len(alerts))
-		// if we have more than one source, we need to dedup
-		if len(alert.Sources) == 0 || len(alert.Sources) == 1 {
+		if len(alert.Sources) <= 1 {
 			dedupCache = append(dedupCache, alert.Alert)
 			continue
 		}
 
-		for k := range alert.Sources {
-			refsrc := *alert.Alert // copy
-
+		// if we have more than one source, we need to dedup
+		for k, src := range alert.Sources {
 			log.Tracef("source[%s]", k)
-
-			src := alert.Sources[k]
+			refsrc := *alert.Alert // copy
 			refsrc.Source = &src
 			dedupCache = append(dedupCache, &refsrc)
 		}
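The new `for k, src := range alert.Sources` form takes each source by value and points the per-source copy of the alert at it. A minimal sketch of that pattern, with hypothetical simplified types (not the project's real ones) and assuming Go 1.22+ per-iteration loop variables so `&src` is a distinct pointer on every pass:

```go
package main

import "fmt"

type Source struct{ Name string }

type Alert struct {
	Message string
	Source  *Source
}

func main() {
	sources := map[string]Source{"a": {Name: "a"}, "b": {Name: "b"}}
	base := Alert{Message: "overflow"}

	var out []*Alert
	for k, src := range sources {
		ref := base       // fresh copy of the shared alert for this source
		ref.Source = &src // Go 1.22+: src is a new variable each iteration
		out = append(out, &ref)
		fmt.Println("added source", k)
	}

	for _, a := range out {
		fmt.Println(a.Message, a.Source.Name)
	}
}
```

On Go versions before 1.22 the same pattern would need an explicit `src := src` copy inside the loop body, since the range variable was shared across iterations.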
@@ -57,8 +54,15 @@ func PushAlerts(ctx context.Context, alerts []pipeline.RuntimeAlert, client *api
 
 var bucketOverflows []pipeline.Event
 
-func runOutput(ctx context.Context, input chan pipeline.Event, overflow chan pipeline.Event, buckets *leaky.Buckets, postOverflowCTX parser.UnixParserCtx,
-	postOverflowNodes []parser.Node, client *apiclient.ApiClient) error {
+func runOutput(
+	ctx context.Context,
+	input chan pipeline.Event,
+	overflow chan pipeline.Event,
+	buckets *leaky.Buckets,
+	postOverflowCTX parser.UnixParserCtx,
+	postOverflowNodes []parser.Node,
+	client *apiclient.ApiClient,
+) error {
 	var (
 		cache      []pipeline.RuntimeAlert
 		cacheMutex sync.Mutex
@@ -73,8 +77,7 @@ func runOutput(ctx context.Context, input chan pipeline.Event, overflow chan pip
 			if len(cache) > 0 {
 				cacheMutex.Lock()
 				cachecopy := cache
-				newcache := make([]pipeline.RuntimeAlert, 0)
-				cache = newcache
+				cache = nil
 				cacheMutex.Unlock()
 				/*
 					This loop needs to block as little as possible as scenarios directly write to the input chan
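Resetting with `cache = nil` instead of `make([]pipeline.RuntimeAlert, 0)` works because a nil slice is a valid empty slice: `len`, `range`, and `append` all treat it the same, and the next `append` only allocates a backing array if something is actually queued. A tiny self-contained check (hypothetical names):

```go
package main

import "fmt"

func main() {
	var cache []string // nil slice

	fmt.Println(cache == nil, len(cache)) // true 0

	cache = append(cache, "alert-1") // first append allocates the backing array
	fmt.Println(cache)               // [alert-1]

	cache = nil                           // reset without allocating anything
	fmt.Println(cache == nil, len(cache)) // true 0
}
```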
@@ -103,48 +106,47 @@ func runOutput(ctx context.Context, input chan pipeline.Event, overflow chan pip
 			}
 			return nil
 		case event := <-overflow:
+			ov := event.Overflow
 			// if alert is empty and mapKey is present, the overflow is just to cleanup bucket
-			if event.Overflow.Alert == nil && event.Overflow.Mapkey != "" {
+			if ov.Alert == nil && ov.Mapkey != "" {
 				buckets.Bucket_map.Delete(event.Overflow.Mapkey)
 				break
 			}
+
 			/* process post overflow parser nodes */
 			event, err := parser.Parse(postOverflowCTX, event, postOverflowNodes)
 			if err != nil {
 				return fmt.Errorf("postoverflow failed: %w", err)
 			}
 
-			log.Info(*event.Overflow.Alert.Message)
+			log.Info(*ov.Alert.Message)
 
 			// if the Alert is nil, it's to signal bucket is ready for GC, don't track this
 			// dump after postoveflow processing to avoid missing whitelist info
-			if dumpStates && event.Overflow.Alert != nil {
-				if bucketOverflows == nil {
-					bucketOverflows = make([]pipeline.Event, 0)
-				}
-
+			if dumpStates && ov.Alert != nil {
 				bucketOverflows = append(bucketOverflows, event)
 			}
 
-			if event.Overflow.Whitelisted {
-				log.Infof("[%s] is whitelisted, skip.", *event.Overflow.Alert.Message)
+			if ov.Whitelisted {
+				log.Infof("[%s] is whitelisted, skip.", *ov.Alert.Message)
 				continue
 			}
 
-			if event.Overflow.Reprocess {
+			if ov.Reprocess {
 				select {
 				case input <- event:
 					log.Debug("Reprocessing overflow event")
 				case <-ctx.Done():
 					log.Debug("Reprocessing overflow event: parsing is dead, skipping")
 				}
 			}
+
 			if dumpStates {
 				continue
 			}
 
 			cacheMutex.Lock()
-			cache = append(cache, event.Overflow)
+			cache = append(cache, ov)
 			cacheMutex.Unlock()
 		}
 	}
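If `pipeline.RuntimeAlert` is a struct type, as the `[]pipeline.RuntimeAlert` cache suggests, then `ov := event.Overflow` takes a value copy at the top of the case: reassigning `event` later (for example via `parser.Parse`) does not change `ov`, while pointer fields such as `Alert` keep aliasing the same underlying data. A small sketch of that semantics with placeholder types, not the project's own:

```go
package main

import "fmt"

type Msg struct{ Text string }

type RuntimeAlert struct {
	Alert  *Msg
	Mapkey string
}

type Event struct{ Overflow RuntimeAlert }

func main() {
	ev := Event{Overflow: RuntimeAlert{Alert: &Msg{Text: "ssh-bf"}, Mapkey: "k1"}}

	ov := ev.Overflow // value copy of the struct field

	ev = Event{Overflow: RuntimeAlert{Mapkey: "k2"}} // later reassignment of the event
	fmt.Println(ov.Mapkey)                           // k1: the copy is unaffected

	ov.Alert.Text = "updated"  // pointer fields still alias the original data
	fmt.Println(ov.Alert.Text) // updated
}
```

Whether that distinction matters here depends on what `parser.Parse` does with the event's `Overflow`; the sketch only illustrates the copy semantics, not the project's behavior.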