Skip to content

Commit d5ab84d

Browse files
authored
fix: fix kafka writer configuration (#3517)
Signed-off-by: Song Gao <disxiaofei@163.com>
1 parent f3cb876 commit d5ab84d

File tree

4 files changed

+28
-45
lines changed

4 files changed

+28
-45
lines changed

extensions/sinks/kafka/ext/kafka.go

Lines changed: 26 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -47,11 +47,11 @@ type sinkConf struct {
4747
}
4848

4949
type kafkaConf struct {
50-
MaxAttempts int `json:"maxAttempts"`
51-
RequiredACKs int `json:"requiredACKs"`
52-
Key string `json:"key"`
53-
Headers interface{} `json:"headers"`
54-
WriterConf kafkaWriterConf `json:"writerConf"`
50+
kafkaWriterConf
51+
MaxAttempts int `json:"maxAttempts"`
52+
RequiredACKs int `json:"requiredACKs"`
53+
Key string `json:"key"`
54+
Headers interface{} `json:"headers"`
5555
}
5656

5757
type kafkaWriterConf struct {
@@ -101,7 +101,7 @@ func (m *kafkaSink) Configure(props map[string]interface{}) error {
101101
}
102102
m.tlsConfig = tlsConfig
103103
kc := getDefaultKafkaConf()
104-
if err := cast.MapToStruct(props, kc); err != nil {
104+
if err := kc.configure(props); err != nil {
105105
return err
106106
}
107107
m.kc = kc
@@ -112,15 +112,6 @@ func (m *kafkaSink) Configure(props map[string]interface{}) error {
112112
return m.buildKafkaWriter()
113113
}
114114

115-
func (m *kafkaSink) ConfigureBatch(batchSize int, lingerInterval time.Duration) {
116-
if batchSize > 0 {
117-
m.kc.WriterConf.BatchSize = batchSize
118-
}
119-
if lingerInterval > 0 {
120-
m.kc.WriterConf.BatchTimeout = lingerInterval
121-
}
122-
}
123-
124115
func (m *kafkaSink) buildKafkaWriter() error {
125116
mechanism, err := m.sc.GetMechanism()
126117
if err != nil {
@@ -136,15 +127,15 @@ func (m *kafkaSink) buildKafkaWriter() error {
136127
AllowAutoTopicCreation: true,
137128
MaxAttempts: m.kc.MaxAttempts,
138129
RequiredAcks: kafkago.RequiredAcks(m.kc.RequiredACKs),
139-
BatchSize: m.kc.WriterConf.BatchSize,
140-
BatchBytes: m.kc.WriterConf.BatchBytes,
141-
BatchTimeout: m.kc.WriterConf.BatchTimeout,
130+
BatchSize: m.kc.BatchSize,
131+
BatchBytes: m.kc.BatchBytes,
132+
BatchTimeout: m.kc.BatchTimeout,
142133
Transport: &kafkago.Transport{
143134
SASL: mechanism,
144135
TLS: m.tlsConfig,
145136
},
146137
}
147-
conf.Log.Infof("kafka writer batchSize:%v, batchTimeout:%v", m.kc.WriterConf.BatchSize, m.kc.WriterConf.BatchTimeout.String())
138+
conf.Log.Infof("kafka writer batchSize:%v, batchTimeout:%v", m.kc.BatchSize, m.kc.BatchTimeout.String())
148139
m.writer = w
149140
return nil
150141
}
@@ -332,11 +323,22 @@ func getDefaultKafkaConf() *kafkaConf {
332323
c := &kafkaConf{
333324
RequiredACKs: -1,
334325
MaxAttempts: 1,
335-
WriterConf: kafkaWriterConf{
336-
BatchSize: 5000,
337-
BatchTimeout: 200 * time.Millisecond,
338-
BatchBytes: 1048576 * 10, // 10MB
339-
},
326+
}
327+
c.kafkaWriterConf = kafkaWriterConf{
328+
BatchSize: 5000,
329+
// send batch ASAP
330+
BatchTimeout: time.Microsecond,
331+
BatchBytes: 1048576 * 10, // 10MB
340332
}
341333
return c
342334
}
335+
336+
func (kc *kafkaConf) configure(props map[string]interface{}) error {
337+
if err := cast.MapToStruct(props, kc); err != nil {
338+
return err
339+
}
340+
if err := cast.MapToStruct(props, &kc.kafkaWriterConf); err != nil {
341+
return err
342+
}
343+
return nil
344+
}

internal/topo/node/sink_node.go

Lines changed: 0 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,6 @@ package node
1717
import (
1818
"fmt"
1919
"sync"
20-
"time"
2120

2221
"github.com/lf-edge/ekuiper/internal/binder/io"
2322
"github.com/lf-edge/ekuiper/internal/conf"
@@ -555,11 +554,6 @@ func resendDataToSink(ctx api.StreamContext, sink api.Sink, outData interface{},
555554
}
556555
}
557556

558-
type batchConf struct {
559-
BatchSize int `json:"batchSize"`
560-
LingerInterval time.Duration `json:"lingerInterval"`
561-
}
562-
563557
func getSink(name string, action map[string]interface{}) (api.Sink, error) {
564558
var (
565559
s api.Sink
@@ -572,11 +566,6 @@ func getSink(name string, action map[string]interface{}) (api.Sink, error) {
572566
if err != nil {
573567
return nil, err
574568
}
575-
if bas, ok := s.(api.BatchAbleSink); ok {
576-
bc := batchConf{}
577-
cast.MapToStruct(newAction, &bc)
578-
bas.ConfigureBatch(bc.BatchSize, bc.LingerInterval)
579-
}
580569
return s, nil
581570
} else {
582571
if err != nil {

internal/topo/planner/sink_planner.go

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -44,7 +44,7 @@ func buildActions(tp *topo.Topo, rule *api.Rule, inputs []api.Emitter) error {
4444
}
4545
// Split sink node
4646
sinkName := fmt.Sprintf("%s_%d", name, i)
47-
newInputs, err := splitSink(s, tp, inputs, sinkName, rule.Options, commonConf)
47+
newInputs, err := splitSink(tp, inputs, sinkName, rule.Options, commonConf)
4848
if err != nil {
4949
return err
5050
}
@@ -65,10 +65,7 @@ func fulfillProps(rule *api.Rule, props map[string]any) map[string]any {
6565
}
6666

6767
// Split sink node according to the sink configuration. Return the new input emitters.
68-
func splitSink(sink api.Sink, tp *topo.Topo, inputs []api.Emitter, sinkName string, options *api.RuleOption, sc *node.SinkConf) ([]api.Emitter, error) {
69-
if _, ok := sink.(api.BatchAbleSink); ok {
70-
return inputs, nil
71-
}
68+
func splitSink(tp *topo.Topo, inputs []api.Emitter, sinkName string, options *api.RuleOption, sc *node.SinkConf) ([]api.Emitter, error) {
7269
index := 0
7370
newInputs := inputs
7471
// Batch enabled

pkg/api/stream.go

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -136,11 +136,6 @@ type LookupSource interface {
136136
Closable
137137
}
138138

139-
type BatchAbleSink interface {
140-
Sink
141-
ConfigureBatch(batchSize int, lingerDuration time.Duration)
142-
}
143-
144139
type Sink interface {
145140
// Open Should be sync function for normal case. The container will run it in go func
146141
Open(ctx StreamContext) error

0 commit comments

Comments (0)