 /**
  * {@link MessageListenerStrategy} commits records for each partition. In case of processing errors
- * an error handler can decide if processing should be retried or stopped.
+ * the message will be retried a configured number of times.
+ *
+ * <p>After each error, an error handler can decide whether processing should be retried or stopped.
+ *
+ * <p>If the maximum retry count is exceeded:
+ *
+ * <ul>
+ * <li>the failed message handling will be logged as ERROR
+ * <li>the retryLimitExceededErrorHandler will be called, allowing further error handling (e.g.
+ *     writing to a DLT)
+ * <li>the offset will be increased, so the failed message is skipped
+ * </ul>
  */
 public class RetryProcessingErrorMLS<K, V> extends MessageListenerStrategy<K, V> {
 
   private static final Logger LOGGER = LoggerFactory.getLogger(RetryProcessingErrorMLS.class);
   private final MessageHandler<K, V> handler;
   private final ErrorHandler<K, V> errorHandler;
+  private final ErrorHandler<K, V> retryLimitExceededErrorHandler;
   private String consumerName;
+  private final RetryCounter retryCounter;
 
+  /**
+   * Creates a new instance of {@link RetryProcessingErrorMLS} that retries the message on error an
+   * unlimited number of times.
+   *
+   * @param handler the message handler
+   * @param errorHandler the error handler called after each error
+   */
   public RetryProcessingErrorMLS(MessageHandler<K, V> handler, ErrorHandler<K, V> errorHandler) {
+    this(handler, errorHandler, Long.MAX_VALUE, null);
+  }
+
+  /**
+   * Creates a new instance of {@link RetryProcessingErrorMLS} that retries the message on error up
+   * to a given number of times.
+   *
+   * @param handler the message handler
+   * @param errorHandler the error handler called after each error
+   * @param maxRetryCount the maximum number of retries
+   * @param retryLimitExceededErrorHandler the error handler called if the retry limit is exceeded
+   */
+  public RetryProcessingErrorMLS(
+      MessageHandler<K, V> handler,
+      ErrorHandler<K, V> errorHandler,
+      long maxRetryCount,
+      ErrorHandler<K, V> retryLimitExceededErrorHandler) {
     this.handler = handler;
     this.errorHandler = errorHandler;
+    this.retryLimitExceededErrorHandler = retryLimitExceededErrorHandler;
+    this.retryCounter = new RetryCounter(maxRetryCount);
   }
 
   @Override
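
To illustrate the constructors above: a minimal wiring sketch. Only the constructor signatures and the handler.handle(record) / errorHandler.handleError(record, e, consumer) call shapes from this diff are taken as given; the lambdas, the String types and the retry count of 3 are illustrative assumptions (they presume MessageHandler and ErrorHandler are functional interfaces and that handle returns void).

    // Hypothetical wiring, not part of this change.
    MessageHandler<String, String> handler =
        record -> System.out.println("processing " + record.value());
    ErrorHandler<String, String> errorHandler =
        (record, e, consumer) -> true; // true: keep the listener running, the record is retried
    ErrorHandler<String, String> retryLimitExceededErrorHandler =
        (record, e, consumer) -> true; // e.g. forward the exhausted record to a DLT here

    // Retry each failed record at most 3 times, then call the handler above and skip it.
    RetryProcessingErrorMLS<String, String> strategy =
        new RetryProcessingErrorMLS<>(handler, errorHandler, 3, retryLimitExceededErrorHandler);
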
@@ -57,9 +96,7 @@ private void processRecordsByPartition(
         try {
           Instant timerStart = Instant.now();
           handler.handle(consumerRecord);
-          // mark last successful processed record for commit
-          lastCommitOffset = new OffsetAndMetadata(consumerRecord.offset() + 1);
-          addOffsetToCommitOnClose(consumerRecord);
+          lastCommitOffset = markConsumerRecordProcessed(consumerRecord);
 
           Instant timerEnd = Instant.now();
           if (LOGGER.isTraceEnabled()) {
@@ -69,23 +106,28 @@ private void processRecordsByPartition(
                 consumerName,
                 consumerRecord.topic());
           }
-
         } catch (RuntimeException e) {
-          LOGGER.error(
-              "Error while handling record {} in message handler {}",
-              consumerRecord.key(),
-              handler.getClass(),
-              e);
-          boolean shouldContinue = errorHandler.handleError(consumerRecord, e, consumer);
-          if (!shouldContinue) {
-            throw new StopListenerException(e);
+          retryCounter.incErrorCount(consumerRecord);
+          if (retryCounter.isMaxRetryCountReached(consumerRecord)) {
+            LOGGER.error(
+                "Error while handling record {} in message handler {}, no more retries",
+                consumerRecord.key(),
+                handler.getClass(),
+                e);
+
+            callErrorHandler(retryLimitExceededErrorHandler, consumerRecord, e, consumer);
+            lastCommitOffset = markConsumerRecordProcessed(consumerRecord);
           } else {
             LOGGER.warn(
-                "Error while handling record {} in message handler {}, will be retried",
+                "Error while handling record {} in message handler {}, will be retried ({} / {})... ",
                 consumerRecord.key(),
                 handler.getClass(),
+                retryCounter.getOffsetCounter(consumerRecord),
+                retryCounter.getMaxRetryCount(),
                 e);
 
+            callErrorHandler(errorHandler, consumerRecord, e, consumer);
+
             // seek to the current offset of the failing record for retry
             consumer.seek(partition, consumerRecord.offset());
             break;
@@ -94,6 +136,7 @@ private void processRecordsByPartition(
       }
     }
     if (lastCommitOffset != null) {
+
       consumer.commitSync(Collections.singletonMap(partition, lastCommitOffset));
     }
   }
@@ -105,4 +148,22 @@ public void verifyConsumerConfig(Map<String, String> config) {
           "The strategy should commit explicitly by partition but property 'enable.auto.commit' in consumer config is set to 'true'");
     }
   }
+
+  private void callErrorHandler(
+      ErrorHandler<K, V> errorHandler,
+      ConsumerRecord<K, V> consumerRecord,
+      RuntimeException e,
+      KafkaConsumer<K, V> consumer) {
+    if (errorHandler != null) {
+      boolean shouldContinue = errorHandler.handleError(consumerRecord, e, consumer);
+      if (!shouldContinue) {
+        throw new StopListenerException(e);
+      }
+    }
+  }
+
+  private OffsetAndMetadata markConsumerRecordProcessed(ConsumerRecord<K, V> consumerRecord) {
+    addOffsetToCommitOnClose(consumerRecord);
+    return new OffsetAndMetadata(consumerRecord.offset() + 1);
+  }
 }
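
The class javadoc mentions writing to a dead letter topic (DLT) from the retryLimitExceededErrorHandler. A sketch of such a handler follows, assuming ErrorHandler declares exactly the boolean handleError(record, exception, consumer) method used in this diff; the producer wiring and topic name are illustrative, not part of this change.

    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.ProducerRecord;

    /** Forwards records that exhausted their retries to a dead letter topic (illustrative). */
    public class DeadLetterTopicErrorHandler<K, V> implements ErrorHandler<K, V> {

      private final KafkaProducer<K, V> producer;
      private final String deadLetterTopic;

      public DeadLetterTopicErrorHandler(KafkaProducer<K, V> producer, String deadLetterTopic) {
        this.producer = producer;
        this.deadLetterTopic = deadLetterTopic;
      }

      @Override
      public boolean handleError(
          ConsumerRecord<K, V> record, RuntimeException e, KafkaConsumer<K, V> consumer) {
        // Copy the failed record to the DLT; the strategy then commits past the failed offset.
        producer.send(new ProducerRecord<>(deadLetterTopic, record.key(), record.value()));
        return true; // keep the listener running
      }
    }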