From 78c61b243bc66f801f26389380198d3be050ec80 Mon Sep 17 00:00:00 2001 From: aliehsaeedii Date: Mon, 28 Apr 2025 11:39:25 +0200 Subject: [PATCH 01/26] initial impl --- .../kafka/tools/consumer/group/CsvUtils.java | 4 +- .../tools/streams/StreamsGroupCommand.java | 498 +++++++++++++++++- .../streams/StreamsGroupCommandOptions.java | 101 +++- .../streams/ResetOffsetStreamsGroupTest.java | 271 ++++++++++ 4 files changed, 866 insertions(+), 8 deletions(-) create mode 100644 tools/src/test/java/org/apache/kafka/tools/streams/ResetOffsetStreamsGroupTest.java diff --git a/tools/src/main/java/org/apache/kafka/tools/consumer/group/CsvUtils.java b/tools/src/main/java/org/apache/kafka/tools/consumer/group/CsvUtils.java index e2ca0e325a779..233792060a788 100644 --- a/tools/src/main/java/org/apache/kafka/tools/consumer/group/CsvUtils.java +++ b/tools/src/main/java/org/apache/kafka/tools/consumer/group/CsvUtils.java @@ -25,11 +25,11 @@ public class CsvUtils { private static final CsvMapper MAPPER = new CsvMapper(); - static ObjectReader readerFor(Class clazz) { + public static ObjectReader readerFor(Class clazz) { return MAPPER.readerFor(clazz).with(getSchema(clazz)); } - static ObjectWriter writerFor(Class clazz) { + public static ObjectWriter writerFor(Class clazz) { return MAPPER.writerFor(clazz).with(getSchema(clazz)); } diff --git a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java index bbf09c3f36a62..f13f00327356e 100644 --- a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java +++ b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java @@ -16,30 +16,42 @@ */ package org.apache.kafka.tools.streams; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectReader; +import com.fasterxml.jackson.databind.ObjectWriter; import org.apache.kafka.clients.CommonClientConfigs; import org.apache.kafka.clients.admin.Admin; import org.apache.kafka.clients.admin.DescribeStreamsGroupsResult; import org.apache.kafka.clients.admin.GroupListing; -import org.apache.kafka.clients.admin.ListConsumerGroupOffsetsSpec; import org.apache.kafka.clients.admin.ListGroupsOptions; import org.apache.kafka.clients.admin.ListGroupsResult; import org.apache.kafka.clients.admin.ListOffsetsResult; +import org.apache.kafka.clients.admin.ListStreamsGroupOffsetsSpec; import org.apache.kafka.clients.admin.OffsetSpec; import org.apache.kafka.clients.admin.StreamsGroupDescription; import org.apache.kafka.clients.admin.StreamsGroupMemberAssignment; import org.apache.kafka.clients.admin.StreamsGroupMemberDescription; import org.apache.kafka.clients.admin.StreamsGroupSubtopologyDescription; +import org.apache.kafka.clients.admin.TopicDescription; import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.common.GroupState; import org.apache.kafka.common.GroupType; +import org.apache.kafka.common.KafkaException; +import org.apache.kafka.common.KafkaFuture; import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.errors.GroupIdNotFoundException; +import org.apache.kafka.common.requests.ListOffsetsResponse; import org.apache.kafka.common.utils.Utils; import org.apache.kafka.server.util.CommandLineUtils; import java.io.IOException; +import java.text.ParseException; +import java.time.Duration; +import java.time.Instant; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; 
+import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -48,10 +60,15 @@ import java.util.Properties; import java.util.Set; import java.util.concurrent.ExecutionException; +import java.util.function.Function; +import java.util.function.ToIntFunction; import java.util.stream.Collectors; import java.util.stream.Stream; import joptsimple.OptionException; +import org.apache.kafka.tools.consumer.group.CsvUtils; + +import static org.apache.kafka.tools.streams.StreamsGroupCommandOptions.LOGGER; public class StreamsGroupCommand { @@ -61,9 +78,13 @@ public static void main(String[] args) { opts.checkArgs(); // should have exactly one action - long numberOfActions = Stream.of(opts.listOpt, opts.describeOpt).filter(opts.options::has).count(); + long numberOfActions = Stream.of( + opts.listOpt, + opts.describeOpt, + opts.resetOffsetsOpt + ).filter(opts.options::has).count(); if (numberOfActions != 1) - CommandLineUtils.printUsageAndExit(opts.parser, "Command must include exactly one action: --list, or --describe."); + CommandLineUtils.printUsageAndExit(opts.parser, "Command must include exactly one action: --list, --describe, or --reset-offsets."); run(opts); } catch (OptionException e) { @@ -77,6 +98,13 @@ public static void run(StreamsGroupCommandOptions opts) { streamsGroupService.listGroups(); } else if (opts.options.has(opts.describeOpt)) { streamsGroupService.describeGroups(); + } else if (opts.options.has(opts.resetOffsetsOpt)) { + Map> offsetsToReset = streamsGroupService.resetOffsets(); + if (opts.options.has(opts.exportOpt)) { + String exported = streamsGroupService.exportOffsetsToCsv(offsetsToReset); + System.out.println(exported); + } else + printOffsetsToReset(offsetsToReset); } else { throw new IllegalArgumentException("Unknown action!"); } @@ -87,6 +115,21 @@ public static void run(StreamsGroupCommandOptions opts) { } } + static void printOffsetsToReset(Map> groupAssignmentsToReset) { + String format = "%n%-30s %-30s %-10s %-15s"; + if (!groupAssignmentsToReset.isEmpty()) + System.out.printf(format, "GROUP", "TOPIC", "PARTITION", "NEW-OFFSET"); + + groupAssignmentsToReset.forEach((groupId, assignment) -> + assignment.forEach((streamsAssignment, offsetAndMetadata) -> + System.out.printf(format, + groupId, + streamsAssignment.topic(), + streamsAssignment.partition(), + offsetAndMetadata.offset()))); + System.out.println(); + } + static Set groupStatesFromString(String input) { Set parsedStates = Arrays.stream(input.split(",")).map(s -> GroupState.parse(s.trim())).collect(Collectors.toSet()); @@ -223,6 +266,32 @@ private void printMembers(StreamsGroupDescription description, boolean verbose) } } + String exportOffsetsToCsv(Map> assignments) { + boolean isSingleGroupQuery = opts.options.valuesOf(opts.groupOpt).size() == 1; + ObjectWriter csvWriter = isSingleGroupQuery + ? CsvUtils.writerFor(CsvUtils.CsvRecordNoGroup.class) + : CsvUtils.writerFor(CsvUtils.CsvRecordWithGroup.class); + + return assignments.entrySet().stream().flatMap(e -> { + String groupId = e.getKey(); + Map partitionInfo = e.getValue(); + + return partitionInfo.entrySet().stream().map(e1 -> { + TopicPartition k = e1.getKey(); + OffsetAndMetadata v = e1.getValue(); + Object csvRecord = isSingleGroupQuery + ? 
new CsvUtils.CsvRecordNoGroup(k.topic(), k.partition(), v.offset()) + : new CsvUtils.CsvRecordWithGroup(groupId, k.topic(), k.partition(), v.offset()); + + try { + return csvWriter.writeValueAsString(csvRecord); + } catch (JsonProcessingException err) { + throw new RuntimeException(err); + } + }); + }).collect(Collectors.joining()); + } + private String prepareTaskType(List tasks, String taskType) { if (tasks.isEmpty()) { return ""; @@ -330,13 +399,432 @@ Map getOffsets(StreamsGroupDescription description) Map getCommittedOffsets(String groupId) { try { - return adminClient.listConsumerGroupOffsets( - Map.of(groupId, new ListConsumerGroupOffsetsSpec())).partitionsToOffsetAndMetadata(groupId).get(); + return adminClient.listStreamsGroupOffsets( + Map.of(groupId, new ListStreamsGroupOffsetsSpec())).partitionsToOffsetAndMetadata(groupId).get(); + } catch (InterruptedException | ExecutionException e) { + throw new RuntimeException(e); + } + } + + Map> resetOffsets() { + Map> result = new HashMap<>(); + List groupIds = listStreamsGroups(); + if (!groupIds.isEmpty()) { + Map> streamsGroups = adminClient.describeStreamsGroups( + groupIds + ).describedGroups(); + + streamsGroups.forEach((groupId, groupDescription) -> { + try { + String state = groupDescription.get().groupState().toString(); + switch (state) { + case "Empty": + case "Dead": + result.put(groupId, resetOffsetsForInactiveGroup(groupId)); + break; + default: + printError("Assignments can only be reset if the group '" + groupId + "' is inactive, but the current state is " + state + ".", Optional.empty()); + result.put(groupId, Collections.emptyMap()); + } + } catch (InterruptedException ie) { + throw new RuntimeException(ie); + } catch (ExecutionException ee) { + if (ee.getCause() instanceof GroupIdNotFoundException) { + result.put(groupId, resetOffsetsForInactiveGroup(groupId)); + } else { + throw new RuntimeException(ee); + } + } + }); + } + return result; + } + + private Map resetOffsetsForInactiveGroup(String groupId) { + try { + Collection partitionsToReset = getPartitionsToReset(groupId); + Map preparedOffsets = prepareOffsetsToReset(groupId, partitionsToReset); + + // Dry-run is the default behavior if --execute is not specified + boolean dryRun = opts.options.has(opts.dryRunOpt) || !opts.options.has(opts.executeOpt); + if (!dryRun) { + adminClient.alterStreamsGroupOffsets( + groupId, + preparedOffsets + ).all().get(); + } + + return preparedOffsets; + } catch (InterruptedException ie) { + throw new RuntimeException(ie); + } catch (ExecutionException ee) { + Throwable cause = ee.getCause(); + if (cause instanceof KafkaException) { + throw (KafkaException) cause; + } else { + throw new RuntimeException(cause); + } + } + } + + private Collection getPartitionsToReset(String groupId) throws ExecutionException, InterruptedException { + if (opts.options.has(opts.allTopicsOpt)) { + return getCommittedOffsets(groupId).keySet(); + } else if (opts.options.has(opts.topicOpt)) { + List topics = opts.options.valuesOf(opts.topicOpt); + return parseTopicPartitionsToReset(topics); + } else { + if (!opts.options.has(opts.resetFromFileOpt)) + CommandLineUtils.printUsageAndExit(opts.parser, "One of the reset scopes should be defined: --all-topics, --topic."); + + return Collections.emptyList(); + } + } + + private List parseTopicPartitionsToReset(List topicArgs) throws ExecutionException, InterruptedException { + List topicsWithPartitions = new ArrayList<>(); + List topics = new ArrayList<>(); + + topicArgs.forEach(topicArg -> { + if 
(topicArg.contains(":")) + topicsWithPartitions.add(topicArg); + else + topics.add(topicArg); + }); + + List specifiedPartitions = topicsWithPartitions.stream().flatMap(this::parseTopicsWithPartitions).collect(Collectors.toList()); + + List unspecifiedPartitions = new ArrayList<>(); + + if (!topics.isEmpty()) { + Map descriptionMap = adminClient.describeTopics( + topics + ).allTopicNames().get(); + + descriptionMap.forEach((topic, description) -> + description.partitions().forEach(tpInfo -> unspecifiedPartitions.add(new TopicPartition(topic, tpInfo.partition()))) + ); + } + + specifiedPartitions.addAll(unspecifiedPartitions); + + return specifiedPartitions; + } + + private Stream parseTopicsWithPartitions(String topicArg) { + ToIntFunction partitionNum = partition -> { + try { + return Integer.parseInt(partition); + } catch (NumberFormatException e) { + throw new IllegalArgumentException("Invalid partition '" + partition + "' specified in topic arg '" + topicArg + "'"); + } + }; + + String[] arr = topicArg.split(":"); + + if (arr.length != 2) + throw new IllegalArgumentException("Invalid topic arg '" + topicArg + "', expected topic name and partitions"); + + String topic = arr[0]; + String partitions = arr[1]; + + return Arrays.stream(partitions.split(",")) + .map(partition -> new TopicPartition(topic, partitionNum.applyAsInt(partition))); + } + + @SuppressWarnings("CyclomaticComplexity") + private Map prepareOffsetsToReset(String groupId, Collection partitionsToReset) { + if (opts.options.has(opts.resetToOffsetOpt)) { + long offset = opts.options.valueOf(opts.resetToOffsetOpt); + return checkOffsetsRange(partitionsToReset.stream().collect(Collectors.toMap(Function.identity(), tp -> offset))) + .entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, e -> new OffsetAndMetadata(e.getValue()))); + } else if (opts.options.has(opts.resetToEarliestOpt)) { + Map logStartOffsets = getLogStartOffsets(partitionsToReset); + return partitionsToReset.stream().collect(Collectors.toMap(Function.identity(), topicPartition -> { + LogOffsetResult logOffsetResult = logStartOffsets.get(topicPartition); + + if (!(logOffsetResult instanceof LogOffset)) { + CommandLineUtils.printUsageAndExit(opts.parser, "Error getting starting offset of topic partition: " + topicPartition); + } + + return new OffsetAndMetadata(((LogOffset) logOffsetResult).value); + })); + } else if (opts.options.has(opts.resetToLatestOpt)) { + Map logEndOffsets = getLogEndOffsets(partitionsToReset); + return partitionsToReset.stream().collect(Collectors.toMap(Function.identity(), topicPartition -> { + LogOffsetResult logOffsetResult = logEndOffsets.get(topicPartition); + + if (!(logOffsetResult instanceof LogOffset)) { + CommandLineUtils.printUsageAndExit(opts.parser, "Error getting ending offset of topic partition: " + topicPartition); + } + + return new OffsetAndMetadata(((LogOffset) logOffsetResult).value); + })); + } else if (opts.options.has(opts.resetShiftByOpt)) { + Map currentCommittedOffsets = getCommittedOffsets(groupId); + Map requestedOffsets = partitionsToReset.stream().collect(Collectors.toMap(Function.identity(), topicPartition -> { + long shiftBy = opts.options.valueOf(opts.resetShiftByOpt); + OffsetAndMetadata currentOffset = currentCommittedOffsets.get(topicPartition); + + if (currentOffset == null) { + throw new IllegalArgumentException("Cannot shift offset for partition " + topicPartition + " since there is no current committed offset"); + } + + return currentOffset.offset() + shiftBy; + })); + return 
checkOffsetsRange(requestedOffsets).entrySet().stream() + .collect(Collectors.toMap(Map.Entry::getKey, e -> new OffsetAndMetadata(e.getValue()))); + } else if (opts.options.has(opts.resetToDatetimeOpt)) { + try { + long timestamp = Utils.getDateTime(opts.options.valueOf(opts.resetToDatetimeOpt)); + Map logTimestampOffsets = getLogTimestampOffsets(partitionsToReset, timestamp); + return partitionsToReset.stream().collect(Collectors.toMap(Function.identity(), topicPartition -> { + LogOffsetResult logTimestampOffset = logTimestampOffsets.get(topicPartition); + + if (!(logTimestampOffset instanceof LogOffset)) { + CommandLineUtils.printUsageAndExit(opts.parser, "Error getting offset by timestamp of topic partition: " + topicPartition); + } + + return new OffsetAndMetadata(((LogOffset) logTimestampOffset).value); + })); + } catch (ParseException e) { + throw new RuntimeException(e); + } + } else if (opts.options.has(opts.resetByDurationOpt)) { + String duration = opts.options.valueOf(opts.resetByDurationOpt); + Duration durationParsed = Duration.parse(duration); + Instant now = Instant.now(); + long timestamp = now.minus(durationParsed).toEpochMilli(); + Map logTimestampOffsets = getLogTimestampOffsets(partitionsToReset, timestamp); + return partitionsToReset.stream().collect(Collectors.toMap(Function.identity(), topicPartition -> { + LogOffsetResult logTimestampOffset = logTimestampOffsets.get(topicPartition); + + if (!(logTimestampOffset instanceof LogOffset)) { + CommandLineUtils.printUsageAndExit(opts.parser, "Error getting offset by timestamp of topic partition: " + topicPartition); + } + + return new OffsetAndMetadata(((LogOffset) logTimestampOffset).value); + })); + } else if (resetPlanFromFile().isPresent()) { + return resetPlanFromFile().map(resetPlan -> { + Map resetPlanForGroup = resetPlan.get(groupId); + + if (resetPlanForGroup == null) { + printError("No reset plan for group " + groupId + " found", Optional.empty()); + return Collections.emptyMap(); + } + + Map requestedOffsets = resetPlanForGroup.keySet().stream().collect(Collectors.toMap( + Function.identity(), + topicPartition -> resetPlanForGroup.get(topicPartition).offset())); + + return checkOffsetsRange(requestedOffsets).entrySet().stream() + .collect(Collectors.toMap(Map.Entry::getKey, e -> new OffsetAndMetadata(e.getValue()))); + }).orElseGet(Collections::emptyMap); + } else if (opts.options.has(opts.resetToCurrentOpt)) { + Map currentCommittedOffsets = getCommittedOffsets(groupId); + Collection partitionsToResetWithCommittedOffset = new ArrayList<>(); + Collection partitionsToResetWithoutCommittedOffset = new ArrayList<>(); + + for (TopicPartition topicPartition : partitionsToReset) { + if (currentCommittedOffsets.containsKey(topicPartition)) + partitionsToResetWithCommittedOffset.add(topicPartition); + else + partitionsToResetWithoutCommittedOffset.add(topicPartition); + } + + Map preparedOffsetsForPartitionsWithCommittedOffset = partitionsToResetWithCommittedOffset.stream() + .collect(Collectors.toMap(Function.identity(), topicPartition -> { + OffsetAndMetadata committedOffset = currentCommittedOffsets.get(topicPartition); + + if (committedOffset == null) { + throw new IllegalStateException("Expected a valid current offset for topic partition: " + topicPartition); + } + + return new OffsetAndMetadata(committedOffset.offset()); + })); + + Map preparedOffsetsForPartitionsWithoutCommittedOffset = getLogEndOffsets(partitionsToResetWithoutCommittedOffset) + 
.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, e -> { + if (!(e.getValue() instanceof LogOffset)) { + CommandLineUtils.printUsageAndExit(opts.parser, "Error getting ending offset of topic partition: " + e.getKey()); + } + + return new OffsetAndMetadata(((LogOffset) e.getValue()).value); + })); + + preparedOffsetsForPartitionsWithCommittedOffset.putAll(preparedOffsetsForPartitionsWithoutCommittedOffset); + + return preparedOffsetsForPartitionsWithCommittedOffset; + } + + CommandLineUtils.printUsageAndExit(opts.parser, String.format("Option '%s' requires one of the following scenarios: %s", opts.resetOffsetsOpt, opts.allResetOffsetScenarioOpts)); + return null; + } + + Optional>> resetPlanFromFile() { + if (opts.options.has(opts.resetFromFileOpt)) { + try { + String resetPlanPath = opts.options.valueOf(opts.resetFromFileOpt); + String resetPlanCsv = Utils.readFileAsString(resetPlanPath); + Map> resetPlan = parseResetPlan(resetPlanCsv); + return Optional.of(resetPlan); + } catch (IOException e) { + throw new RuntimeException(e); + } + } else return Optional.empty(); + } + + private Map> parseResetPlan(String resetPlanCsv) { + ObjectReader csvReader = CsvUtils.readerFor(CsvUtils.CsvRecordNoGroup.class); + String[] lines = resetPlanCsv.split("\n"); + boolean isSingleGroupQuery = opts.options.valuesOf(opts.groupOpt).size() == 1; + boolean isOldCsvFormat = false; + try { + if (lines.length > 0) { + csvReader.readValue(lines[0], CsvUtils.CsvRecordNoGroup.class); + isOldCsvFormat = true; + } + } catch (IOException e) { + e.printStackTrace(); + // Ignore. + } + + Map> dataMap = new HashMap<>(); + + try { + // Single group CSV format: "topic,partition,offset" + if (isSingleGroupQuery && isOldCsvFormat) { + String group = opts.options.valueOf(opts.groupOpt); + for (String line : lines) { + CsvUtils.CsvRecordNoGroup rec = csvReader.readValue(line, CsvUtils.CsvRecordNoGroup.class); + dataMap.computeIfAbsent(group, k -> new HashMap<>()) + .put(new TopicPartition(rec.getTopic(), rec.getPartition()), new OffsetAndMetadata(rec.getOffset())); + } + } else { + csvReader = CsvUtils.readerFor(CsvUtils.CsvRecordWithGroup.class); + for (String line : lines) { + CsvUtils.CsvRecordWithGroup rec = csvReader.readValue(line, CsvUtils.CsvRecordWithGroup.class); + dataMap.computeIfAbsent(rec.getGroup(), k -> new HashMap<>()) + .put(new TopicPartition(rec.getTopic(), rec.getPartition()), new OffsetAndMetadata(rec.getOffset())); + } + } + } catch (IOException e) { + throw new RuntimeException(e); + } + + return dataMap; + } + + private Map checkOffsetsRange(Map requestedOffsets) { + Map logStartOffsets = getLogStartOffsets(requestedOffsets.keySet()); + Map logEndOffsets = getLogEndOffsets(requestedOffsets.keySet()); + + Map res = new HashMap<>(); + + requestedOffsets.forEach((topicPartition, offset) -> { + LogOffsetResult logEndOffset = logEndOffsets.get(topicPartition); + + if (logEndOffset != null) { + if (logEndOffset instanceof LogOffset && offset > ((LogOffset) logEndOffset).value) { + long endOffset = ((LogOffset) logEndOffset).value; + LOGGER.warn("New offset (" + offset + ") is higher than latest offset for topic partition " + topicPartition + ". 
Value will be set to " + endOffset); + res.put(topicPartition, endOffset); + } else { + LogOffsetResult logStartOffset = logStartOffsets.get(topicPartition); + + if (logStartOffset instanceof LogOffset && offset < ((LogOffset) logStartOffset).value) { + long startOffset = ((LogOffset) logStartOffset).value; + LOGGER.warn("New offset (" + offset + ") is lower than earliest offset for topic partition " + topicPartition + ". Value will be set to " + startOffset); + res.put(topicPartition, startOffset); + } else + res.put(topicPartition, offset); + } + } else { + // the control should not reach here + throw new IllegalStateException("Unexpected non-existing offset value for topic partition " + topicPartition); + } + }); + + return res; + } + + private Map getLogTimestampOffsets(Collection topicPartitions, long timestamp) { + try { + Map timestampOffsets = topicPartitions.stream() + .collect(Collectors.toMap(Function.identity(), tp -> OffsetSpec.forTimestamp(timestamp))); + + Map offsets = adminClient.listOffsets( + timestampOffsets).all().get(); + + Map successfulOffsetsForTimes = new HashMap<>(); + Map unsuccessfulOffsetsForTimes = new HashMap<>(); + + offsets.forEach((tp, offsetsResultInfo) -> { + if (offsetsResultInfo.offset() != ListOffsetsResponse.UNKNOWN_OFFSET) + successfulOffsetsForTimes.put(tp, offsetsResultInfo); + else + unsuccessfulOffsetsForTimes.put(tp, offsetsResultInfo); + }); + + Map successfulLogTimestampOffsets = successfulOffsetsForTimes.entrySet().stream() + .collect(Collectors.toMap(Map.Entry::getKey, e -> new LogOffset(e.getValue().offset()))); + + unsuccessfulOffsetsForTimes.forEach((tp, offsetResultInfo) -> + System.out.println("\nWarn: Partition " + tp.partition() + " from topic " + tp.topic() + + " is empty. Falling back to latest known offset.")); + + successfulLogTimestampOffsets.putAll(getLogEndOffsets(unsuccessfulOffsetsForTimes.keySet())); + + return successfulLogTimestampOffsets; + } catch (InterruptedException | ExecutionException e) { + throw new RuntimeException(e); + } + } + + private Map getLogStartOffsets(Collection topicPartitions) { + return getLogOffsets(topicPartitions, OffsetSpec.earliest()); + } + + private Map getLogEndOffsets(Collection topicPartitions) { + return getLogOffsets(topicPartitions, OffsetSpec.latest()); + } + + private Map getLogOffsets(Collection topicPartitions, OffsetSpec offsetSpec) { + try { + Map startOffsets = topicPartitions.stream() + .collect(Collectors.toMap(Function.identity(), tp -> offsetSpec)); + + Map offsets = adminClient.listOffsets( + startOffsets + ).all().get(); + + return topicPartitions.stream().collect(Collectors.toMap( + Function.identity(), + tp -> offsets.containsKey(tp) + ? new LogOffset(offsets.get(tp).offset()) + : new Unknown() + )); } catch (InterruptedException | ExecutionException e) { throw new RuntimeException(e); } } + interface LogOffsetResult { } + + private static class LogOffset implements LogOffsetResult { + final long value; + + LogOffset(long value) { + this.value = value; + } + } + + private static class Unknown implements LogOffsetResult { } + + private static class Ignore implements LogOffsetResult { } + /** * Prints an error message if the group state indicates that the group is either dead or empty. 
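The --to-datetime and --by-duration branches of prepareOffsetsToReset() above both reduce to getLogTimestampOffsets(): each partition is mapped to OffsetSpec.forTimestamp(...), and any partition for which listOffsets returns UNKNOWN_OFFSET (no record at or after that timestamp) falls back to its log-end offset. Below is a minimal standalone sketch of that resolution; it is not part of the patch, and the bootstrap server, topic, partition, and duration are placeholder values.

    import org.apache.kafka.clients.admin.Admin;
    import org.apache.kafka.clients.admin.AdminClientConfig;
    import org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo;
    import org.apache.kafka.clients.admin.OffsetSpec;
    import org.apache.kafka.common.TopicPartition;

    import java.time.Duration;
    import java.time.Instant;
    import java.util.Map;
    import java.util.Properties;

    public class ByDurationResolutionSketch {
        public static void main(String[] args) throws Exception {
            // --by-duration PT30M: subtract the ISO-8601 duration from the current time.
            long timestamp = Instant.now().minus(Duration.parse("PT30M")).toEpochMilli();

            Properties props = new Properties();
            props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
            try (Admin admin = Admin.create(props)) {
                TopicPartition tp = new TopicPartition("customInputTopic", 0);
                // Ask the broker for the earliest offset whose record timestamp is at or
                // after the computed instant, as getLogTimestampOffsets() does per partition.
                ListOffsetsResultInfo info = admin
                    .listOffsets(Map.of(tp, OffsetSpec.forTimestamp(timestamp)))
                    .partitionResult(tp)
                    .get();
                // offset() is -1 (ListOffsetsResponse.UNKNOWN_OFFSET) when no such record
                // exists; the tool then substitutes the log-end offset for that partition.
                System.out.println(tp + " -> " + info.offset());
            }
        }
    }

Whichever scenario produced them, the candidate offsets then pass through checkOffsetsRange(), which clamps values outside the log-start/log-end window, and they are only committed via alterStreamsGroupOffsets() when --execute is given; with the default dry run they are merely printed.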
* diff --git a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java index 3990a36f7771c..ee70c27a1c3ee 100644 --- a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java +++ b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java @@ -23,16 +23,25 @@ import org.slf4j.LoggerFactory; import java.util.Arrays; +import java.util.HashSet; import java.util.List; +import java.util.Set; import java.util.stream.Collectors; import joptsimple.OptionSpec; +import static org.apache.kafka.tools.ToolsUtils.minus; + public class StreamsGroupCommandOptions extends CommandDefaultOptions { + private static final String NL = System.lineSeparator(); public static final Logger LOGGER = LoggerFactory.getLogger(StreamsGroupCommandOptions.class); public static final String BOOTSTRAP_SERVER_DOC = "REQUIRED: The server(s) to connect to."; public static final String GROUP_DOC = "The streams group we wish to act on."; + private static final String TOPIC_DOC = "The topic whose streams group information should be deleted, or the topic which should be included in the reset offset process. " + + "In the `reset-offsets` case, partitions can be specified using this format: `topic1:0,1,2`, where 0,1,2 are the partitions to be included in the process. " + + "Reset-offsets also supports multiple topic inputs."; + private static final String ALL_TOPICS_DOC = "Consider all topics assigned to a group in the `reset-offsets` process."; public static final String LIST_DOC = "List all streams groups."; public static final String DESCRIBE_DOC = "Describe streams group and list offset lag related to given group."; public static final String TIMEOUT_MS_DOC = "The timeout that can be set for some use cases. For example, it can be used when describing the group " + @@ -43,6 +52,22 @@ public class StreamsGroupCommandOptions extends CommandDefaultOptions { public static final String MEMBERS_DOC = "Describe members of the group. This option may be used with the '--describe' option only."; public static final String OFFSETS_DOC = "Describe the group and list all topic partitions in the group along with their offset information." + "This is the default sub-action and may be used with the '--describe' option only."; + private static final String RESET_OFFSETS_DOC = "Reset offsets of Streams group. The instances should be inactive." + NL + + "Has 2 execution options: --dry-run (the default) to plan which offsets to reset, and --execute to update the offsets." + NL + + "You must choose one of the following reset specifications: --to-datetime, --by-duration, --to-earliest, " + + "--to-latest, --shift-by, --from-file, --to-current, --to-offset." + NL + + "To define the scope use --all-topics or --topic. One scope must be specified unless you use '--from-file'."; + private static final String DRY_RUN_DOC = "Only show results without executing changes on Streams Group. Supported operations: reset-offsets."; + private static final String EXECUTE_DOC = "Execute operation. Supported operations: reset-offsets."; + private static final String EXPORT_DOC = "Export operation execution to a CSV file. 
Supported operations: reset-offsets."; + private static final String RESET_TO_OFFSET_DOC = "Reset offsets to a specific offset."; + private static final String RESET_FROM_FILE_DOC = "Reset offsets to values defined in CSV file."; + private static final String RESET_TO_DATETIME_DOC = "Reset offsets to offset from datetime. Format: 'YYYY-MM-DDThh:mm:ss.sss'"; + private static final String RESET_BY_DURATION_DOC = "Reset offsets to offset by duration from current timestamp. Format: 'PnDTnHnMnS'"; + private static final String RESET_TO_EARLIEST_DOC = "Reset offsets to earliest offset."; + private static final String RESET_TO_LATEST_DOC = "Reset offsets to latest offset."; + private static final String RESET_TO_CURRENT_DOC = "Reset offsets to current offset."; + private static final String RESET_SHIFT_BY_DOC = "Reset offsets shifting current offset by 'n', where 'n' can be positive or negative."; public static final String VERBOSE_DOC = """ Use with --describe --state to show group epoch and target assignment epoch. Use with --describe --members to show for each member the member epoch, target assignment epoch, current assignment, target assignment, and whether member is still using the classic rebalance protocol. @@ -50,6 +75,8 @@ public class StreamsGroupCommandOptions extends CommandDefaultOptions { public final OptionSpec bootstrapServerOpt; public final OptionSpec groupOpt; + final OptionSpec topicOpt; + final OptionSpec allTopicsOpt; public final OptionSpec listOpt; public final OptionSpec describeOpt; public final OptionSpec timeoutMsOpt; @@ -57,8 +84,23 @@ public class StreamsGroupCommandOptions extends CommandDefaultOptions { public final OptionSpec stateOpt; public final OptionSpec membersOpt; public final OptionSpec offsetsOpt; + public final OptionSpec resetOffsetsOpt; + public final OptionSpec resetToOffsetOpt; + public final OptionSpec resetFromFileOpt; + public final OptionSpec resetToDatetimeOpt; + public final OptionSpec resetByDurationOpt; + public final OptionSpec resetToEarliestOpt; + public final OptionSpec resetToLatestOpt; + public final OptionSpec resetToCurrentOpt; + public final OptionSpec resetShiftByOpt; + public final OptionSpec dryRunOpt; + public final OptionSpec executeOpt; + public final OptionSpec exportOpt; public final OptionSpec verboseOpt; + final Set> allResetOffsetScenarioOpts; + + public static StreamsGroupCommandOptions fromArgs(String[] args) { StreamsGroupCommandOptions opts = new StreamsGroupCommandOptions(args); opts.checkArgs(); @@ -76,6 +118,11 @@ public StreamsGroupCommandOptions(String[] args) { .withRequiredArg() .describedAs("streams group") .ofType(String.class); + topicOpt = parser.accepts("topic", TOPIC_DOC) + .withRequiredArg() + .describedAs("topic") + .ofType(String.class); + allTopicsOpt = parser.accepts("all-topics", ALL_TOPICS_DOC); listOpt = parser.accepts("list", LIST_DOC); describeOpt = parser.accepts("describe", DESCRIBE_DOC); timeoutMsOpt = parser.accepts("timeout", TIMEOUT_MS_DOC) @@ -88,6 +135,7 @@ public StreamsGroupCommandOptions(String[] args) { .withRequiredArg() .describedAs("command config property file") .ofType(String.class); + stateOpt = parser.accepts("state", STATE_DOC) .availableIf(listOpt, describeOpt) .withOptionalArg() @@ -96,10 +144,40 @@ public StreamsGroupCommandOptions(String[] args) { .availableIf(describeOpt); offsetsOpt = parser.accepts("offsets", OFFSETS_DOC) .availableIf(describeOpt); + resetOffsetsOpt = parser.accepts("reset-offsets", RESET_OFFSETS_DOC); + resetToOffsetOpt = parser.accepts("to-offset", 
RESET_TO_OFFSET_DOC) + .withRequiredArg() + .describedAs("offset") + .ofType(Long.class); + resetFromFileOpt = parser.accepts("from-file", RESET_FROM_FILE_DOC) + .withRequiredArg() + .describedAs("path to CSV file") + .ofType(String.class); + resetToDatetimeOpt = parser.accepts("to-datetime", RESET_TO_DATETIME_DOC) + .withRequiredArg() + .describedAs("datetime") + .ofType(String.class); + resetByDurationOpt = parser.accepts("by-duration", RESET_BY_DURATION_DOC) + .withRequiredArg() + .describedAs("duration") + .ofType(String.class); + resetToEarliestOpt = parser.accepts("to-earliest", RESET_TO_EARLIEST_DOC); + resetToLatestOpt = parser.accepts("to-latest", RESET_TO_LATEST_DOC); + resetToCurrentOpt = parser.accepts("to-current", RESET_TO_CURRENT_DOC); + resetShiftByOpt = parser.accepts("shift-by", RESET_SHIFT_BY_DOC) + .withRequiredArg() + .describedAs("number-of-offsets") + .ofType(Long.class); + verboseOpt = parser.accepts("verbose", VERBOSE_DOC) .availableIf(describeOpt); - + dryRunOpt = parser.accepts("dry-run", DRY_RUN_DOC); + executeOpt = parser.accepts("execute", EXECUTE_DOC); + exportOpt = parser.accepts("export", EXPORT_DOC); options = parser.parse(args); + + allResetOffsetScenarioOpts = new HashSet<>(Arrays.asList(resetToOffsetOpt, resetShiftByOpt, + resetToDatetimeOpt, resetByDurationOpt, resetToEarliestOpt, resetToLatestOpt, resetToCurrentOpt, resetFromFileOpt)); } public void checkArgs() { @@ -121,6 +199,27 @@ public void checkArgs() { LOGGER.debug("Option " + timeoutMsOpt + " is applicable only when " + describeOpt + " is used."); } + if (options.has(resetOffsetsOpt)) { + if (options.has(dryRunOpt) && options.has(executeOpt)) + CommandLineUtils.printUsageAndExit(parser, "Option " + resetOffsetsOpt + " only accepts one of " + executeOpt + " and " + dryRunOpt); + + if (!options.has(dryRunOpt) && !options.has(executeOpt)) { + System.err.println("WARN: No action will be performed as the --execute option is missing. " + + "In a future major release, the default behavior of this command will be to prompt the user before " + + "executing the reset rather than doing a dry run. 
You should add the --dry-run option explicitly " + + "if you are scripting this command and want to keep the current default behavior without prompting."); + } + + CommandLineUtils.checkInvalidArgs(parser, options, resetToOffsetOpt, minus(allResetOffsetScenarioOpts, resetToOffsetOpt)); + CommandLineUtils.checkInvalidArgs(parser, options, resetToDatetimeOpt, minus(allResetOffsetScenarioOpts, resetToDatetimeOpt)); + CommandLineUtils.checkInvalidArgs(parser, options, resetByDurationOpt, minus(allResetOffsetScenarioOpts, resetByDurationOpt)); + CommandLineUtils.checkInvalidArgs(parser, options, resetToEarliestOpt, minus(allResetOffsetScenarioOpts, resetToEarliestOpt)); + CommandLineUtils.checkInvalidArgs(parser, options, resetToLatestOpt, minus(allResetOffsetScenarioOpts, resetToLatestOpt)); + CommandLineUtils.checkInvalidArgs(parser, options, resetToCurrentOpt, minus(allResetOffsetScenarioOpts, resetToCurrentOpt)); + CommandLineUtils.checkInvalidArgs(parser, options, resetShiftByOpt, minus(allResetOffsetScenarioOpts, resetShiftByOpt)); + CommandLineUtils.checkInvalidArgs(parser, options, resetFromFileOpt, minus(allResetOffsetScenarioOpts, resetFromFileOpt)); + } + CommandLineUtils.checkInvalidArgs(parser, options, listOpt, membersOpt, offsetsOpt); } } diff --git a/tools/src/test/java/org/apache/kafka/tools/streams/ResetOffsetStreamsGroupTest.java b/tools/src/test/java/org/apache/kafka/tools/streams/ResetOffsetStreamsGroupTest.java new file mode 100644 index 0000000000000..a496fd74ce582 --- /dev/null +++ b/tools/src/test/java/org/apache/kafka/tools/streams/ResetOffsetStreamsGroupTest.java @@ -0,0 +1,271 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.tools.streams; + +import org.apache.kafka.clients.admin.Admin; +import org.apache.kafka.clients.admin.AdminClientConfig; +import org.apache.kafka.clients.consumer.ConsumerConfig; +import org.apache.kafka.clients.consumer.OffsetAndMetadata; +import org.apache.kafka.clients.producer.Producer; +import org.apache.kafka.clients.producer.ProducerRecord; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.serialization.LongSerializer; +import org.apache.kafka.common.serialization.Serdes; +import org.apache.kafka.common.test.ClusterInstance; +import org.apache.kafka.common.test.api.ClusterTest; +import org.apache.kafka.coordinator.group.GroupCoordinatorConfig; +import org.apache.kafka.streams.GroupProtocol; +import org.apache.kafka.streams.KafkaStreams; +import org.apache.kafka.streams.KeyValue; +import org.apache.kafka.streams.StreamsBuilder; +import org.apache.kafka.streams.StreamsConfig; +import org.apache.kafka.streams.Topology; +import org.apache.kafka.streams.integration.utils.EmbeddedKafkaCluster; +import org.apache.kafka.streams.integration.utils.IntegrationTestUtils; +import org.apache.kafka.streams.kstream.Consumed; +import org.apache.kafka.streams.kstream.Produced; +import org.apache.kafka.test.TestUtils; +import org.apache.kafka.tools.ToolsTestUtils; +import org.apache.kafka.tools.consumer.group.ConsumerGroupCommand; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; +import java.util.Properties; +import java.util.Set; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static java.util.Arrays.asList; +import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; +import static java.util.stream.Collectors.toMap; +import static org.apache.kafka.clients.CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG; +import static org.apache.kafka.streams.integration.utils.IntegrationTestUtils.startApplicationAndWaitUntilRunning; +import static org.apache.kafka.test.TestUtils.DEFAULT_MAX_WAIT_MS; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + +@Timeout(600) +@Tag("integration") +public class ResetOffsetStreamsGroupTest { + private static final String TOPIC_PREFIX = "foo-"; + + public static EmbeddedKafkaCluster cluster = null; + static KafkaStreams streams; + private static final String APP_ID = "streams-group-command-test"; + private static final String INPUT_TOPIC = "customInputTopic"; + private static final String OUTPUT_TOPIC = "customOutputTopic"; + + @BeforeAll + public static void setup() throws Exception { + // start the cluster and create the input topic + final Properties props = new Properties(); + props.setProperty(GroupCoordinatorConfig.GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG, "classic,consumer,streams"); + cluster = new EmbeddedKafkaCluster(1, props); + cluster.start(); + cluster.createTopic(INPUT_TOPIC, 2, 1); + + + // start kafka streams + Properties streamsProp = new Properties(); + streamsProp.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); + 
streamsProp.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.bootstrapServers()); + streamsProp.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName()); + streamsProp.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName()); + streamsProp.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath()); + streamsProp.put(StreamsConfig.APPLICATION_ID_CONFIG, APP_ID); + streamsProp.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, 2); + streamsProp.put(StreamsConfig.GROUP_PROTOCOL_CONFIG, GroupProtocol.STREAMS.name().toLowerCase(Locale.getDefault())); + + streams = new KafkaStreams(topology(), streamsProp); + startApplicationAndWaitUntilRunning(streams); + } + + @AfterAll + public static void closeCluster() { + streams.close(); + cluster.stop(); + cluster = null; + } + + @Test + public void testResetOffsetsExistingTopic() { + String topic = APP_ID; + String[] args = new String[]{"--bootstrap-server", cluster.bootstrapServers(), "--reset-offsets", "--topic", topic, "--to-offset", "50"}; + produceMessages(100); + resetAndAssertOffsets(args, 50, true, List.of(topic)); +// resetAndAssertOffsets(addTo(args, "--dry-run"), +// 50, true, singletonList(topic)); +// resetAndAssertOffsets(addTo(args, "--execute"), +// 50, false, singletonList(topic)); + } + + +// private void resetAndAssertOffsets(String topic, +// String[] args, +// long expectedOffset) { +// resetAndAssertOffsets(args, expectedOffset, false, singletonList(topic)); +// } + + private void resetAndAssertOffsets(String[] args, + long expectedOffset, + boolean dryRun, + List topics) { + try (StreamsGroupCommand.StreamsGroupService service = getStreamsGroupService(args)) { + Map> topicToExpectedOffsets = getTopicExceptOffsets(topics, expectedOffset); + Map> resetOffsetsResultByGroup = + resetOffsets(service); + for (final String topic : topics) { + resetOffsetsResultByGroup.forEach((group, partitionInfo) -> { + Map priorOffsets = committedOffsets(topic, group); + assertEquals(topicToExpectedOffsets.get(topic), partitionToOffsets(topic, partitionInfo)); + assertEquals(dryRun ? 
priorOffsets : topicToExpectedOffsets.get(topic), + committedOffsets(topic, group)); + }); + } + } + } + + private Map committedOffsets(String topic, + String group) { + try (Admin admin = Admin.create(singletonMap(BOOTSTRAP_SERVERS_CONFIG, cluster.bootstrapServers()))) { + return admin.listConsumerGroupOffsets(group) + .all().get() + .get(group).entrySet() + .stream() + .filter(e -> e.getKey().topic().equals(topic)) + .collect(toMap(Map.Entry::getKey, e -> e.getValue().offset())); + } catch (ExecutionException | InterruptedException e) { + throw new RuntimeException(e); + } + } + + private Map partitionToOffsets(String topic, + Map partitionInfo) { + return partitionInfo.entrySet() + .stream() + .filter(entry -> Objects.equals(entry.getKey().topic(), topic)) + .collect(toMap(Map.Entry::getKey, e -> e.getValue().offset())); + } + + + private Map> getTopicExceptOffsets(List topics, + long expectedOffset) { + return topics.stream() + .collect(toMap(Function.identity(), + topic -> singletonMap(new TopicPartition(topic, 0), + expectedOffset))); + } + + private Map> resetOffsets( + StreamsGroupCommand.StreamsGroupService service) { + return service.resetOffsets(); + } + + private void produceMessages(int numMessages) { + final List> data = prepareData(0L, numMessages, 0L); + + IntegrationTestUtils.produceKeyValuesSynchronously( + INPUT_TOPIC, + data, + TestUtils.producerConfig(cluster.bootstrapServers(), LongSerializer.class, LongSerializer.class), + cluster.time + ); + } + + private List> prepareData(final long fromInclusive, + final long toExclusive, + final Long... keys) { + final long dataSize = keys.length * (toExclusive - fromInclusive); + final List> data = new ArrayList<>((int) dataSize); + + for (final Long k : keys) { + for (long v = fromInclusive; v < toExclusive; ++v) { + data.add(new KeyValue<>(k, v)); + } + } + + return data; + } + + + + private static Topology topology() { + final StreamsBuilder builder = new StreamsBuilder(); + builder.stream(INPUT_TOPIC, Consumed.with(Serdes.String(), Serdes.String())) + .flatMapValues(value -> Arrays.asList(value.toLowerCase(Locale.getDefault()).split("\\W+"))) + .groupBy((key, value) -> value) + .count() + .toStream().to(OUTPUT_TOPIC, Produced.with(Serdes.String(), Serdes.Long())); + return builder.build(); + } + + private StreamsGroupCommand.StreamsGroupService getStreamsGroupService(String[] args) { + StreamsGroupCommandOptions opts = StreamsGroupCommandOptions.fromArgs(args); + return new StreamsGroupCommand.StreamsGroupService( + opts, + Map.of(AdminClientConfig.RETRIES_CONFIG, Integer.toString(Integer.MAX_VALUE)) + ); + } + + private static void validateDescribeOutput( + List args, + List expectedHeader, + Set> expectedRows, + List dontCareIndices + ) throws InterruptedException { + final AtomicReference out = new AtomicReference<>(""); + TestUtils.waitForCondition(() -> { + String output = ToolsTestUtils.grabConsoleOutput(() -> StreamsGroupCommand.main(args.toArray(new String[0]))); + out.set(output); + + String[] lines = output.split("\n"); + if (lines.length == 1 && lines[0].isEmpty()) lines = new String[]{}; + + if (lines.length == 0) return false; + List header = Arrays.asList(lines[0].split("\\s+")); + if (!expectedHeader.equals(header)) return false; + + Set> groupDesc = Arrays.stream(Arrays.copyOfRange(lines, 1, lines.length)) + .map(line -> Arrays.asList(line.split("\\s+"))) + .collect(Collectors.toSet()); + if (groupDesc.size() != expectedRows.size()) return false; + // clear the dontCare fields and then compare two sets + 
return expectedRows + .equals( + groupDesc.stream() + .map(list -> { + List listCloned = new ArrayList<>(list); + dontCareIndices.forEach(index -> listCloned.set(index, "")); + return listCloned; + }).collect(Collectors.toSet()) + ); + }, () -> String.format("Expected header=%s and groups=%s, but found:%n%s", expectedHeader, expectedRows, out.get())); + } +} From 1acb651fd7eca9881b6fbb6b16a4f6b9b3c5edb6 Mon Sep 17 00:00:00 2001 From: aliehsaeedii Date: Mon, 5 May 2025 20:55:46 +0200 Subject: [PATCH 02/26] impl reset offset and all tests --- .../kafka/clients/admin/KafkaAdminClient.java | 20 +- .../kafka/clients/admin/ListGroupsResult.java | 2 +- .../admin/ListStreamsGroupOffsetsSpec.java | 4 +- .../consumer/group/ConsumerGroupCommand.java | 8 +- .../tools/streams/StreamsGroupCommand.java | 12 +- .../streams/ResetOffsetStreamsGroupTest.java | 271 --------- .../streams/ResetStreamsGroupOffsetTest.java | 526 ++++++++++++++++++ .../streams/StreamsGroupCommandTest.java | 141 ++++- 8 files changed, 673 insertions(+), 311 deletions(-) delete mode 100644 tools/src/test/java/org/apache/kafka/tools/streams/ResetOffsetStreamsGroupTest.java create mode 100644 tools/src/test/java/org/apache/kafka/tools/streams/ResetStreamsGroupOffsetTest.java diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java b/clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java index 844f6962160ff..f3c0280675144 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java @@ -3779,7 +3790,14 @@ public ListStreamsGroupOffsetsResult listStreamsGroupOffsets(Map new ListConsumerGroupOffsetsSpec().topicPartitions(entry.getValue().topicPartitions()) )); - return new ListStreamsGroupOffsetsResult(listConsumerGroupOffsets(consumerGroupSpecs, new ListConsumerGroupOffsetsOptions())); + ListConsumerGroupOffsetsResult res = listConsumerGroupOffsets(consumerGroupSpecs, new ListConsumerGroupOffsetsOptions()); + return new ListStreamsGroupOffsetsResult(res); } @Override diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ListGroupsResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/ListGroupsResult.java index b19c3e38e9cf5..795d8523d52cf 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/ListGroupsResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/ListGroupsResult.java @@ -37,7 +37,7 @@ public class ListGroupsResult { private final KafkaFutureImpl> valid; private final KafkaFutureImpl> errors; - 
ListGroupsResult(KafkaFuture> future) { + public ListGroupsResult(KafkaFuture> future) { this.all = new KafkaFutureImpl<>(); this.valid = new KafkaFutureImpl<>(); this.errors = new KafkaFutureImpl<>(); diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ListStreamsGroupOffsetsSpec.java b/clients/src/main/java/org/apache/kafka/clients/admin/ListStreamsGroupOffsetsSpec.java index c3fb9babb9a97..dc49942f5c05b 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/ListStreamsGroupOffsetsSpec.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/ListStreamsGroupOffsetsSpec.java @@ -35,7 +35,7 @@ public class ListStreamsGroupOffsetsSpec { /** * Set the topic partitions whose offsets are to be listed for a Streams group. */ - ListStreamsGroupOffsetsSpec topicPartitions(Collection topicPartitions) { + public ListStreamsGroupOffsetsSpec topicPartitions(Collection topicPartitions) { this.topicPartitions = topicPartitions; return this; } @@ -43,7 +43,7 @@ ListStreamsGroupOffsetsSpec topicPartitions(Collection topicPart /** * Returns the topic partitions whose offsets are to be listed for a Streams group. */ - Collection topicPartitions() { + public Collection topicPartitions() { return topicPartitions; } } diff --git a/tools/src/main/java/org/apache/kafka/tools/consumer/group/ConsumerGroupCommand.java b/tools/src/main/java/org/apache/kafka/tools/consumer/group/ConsumerGroupCommand.java index b001ae7c6f7fd..7db6ed5fdfade 100644 --- a/tools/src/main/java/org/apache/kafka/tools/consumer/group/ConsumerGroupCommand.java +++ b/tools/src/main/java/org/apache/kafka/tools/consumer/group/ConsumerGroupCommand.java @@ -29,6 +29,7 @@ import org.apache.kafka.clients.admin.DescribeTopicsResult; import org.apache.kafka.clients.admin.GroupListing; import org.apache.kafka.clients.admin.ListConsumerGroupOffsetsOptions; +import org.apache.kafka.clients.admin.ListConsumerGroupOffsetsResult; import org.apache.kafka.clients.admin.ListConsumerGroupOffsetsSpec; import org.apache.kafka.clients.admin.ListGroupsOptions; import org.apache.kafka.clients.admin.ListGroupsResult; @@ -1089,10 +1090,11 @@ private Collection getPartitionsToReset(String groupId) throws E private Map getCommittedOffsets(String groupId) { try { - return adminClient.listConsumerGroupOffsets( + ListConsumerGroupOffsetsResult res = adminClient.listConsumerGroupOffsets( Collections.singletonMap(groupId, new ListConsumerGroupOffsetsSpec()), - withTimeoutMs(new ListConsumerGroupOffsetsOptions()) - ).partitionsToOffsetAndMetadata(groupId).get(); + withTimeoutMs(new ListConsumerGroupOffsetsOptions())); + return res. 
+ partitionsToOffsetAndMetadata(groupId).get(); } catch (InterruptedException | ExecutionException e) { throw new RuntimeException(e); } diff --git a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java index f13f00327356e..d342d3b1fa7fa 100644 --- a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java +++ b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java @@ -16,9 +16,6 @@ */ package org.apache.kafka.tools.streams; -import com.fasterxml.jackson.core.JsonProcessingException; -import com.fasterxml.jackson.databind.ObjectReader; -import com.fasterxml.jackson.databind.ObjectWriter; import org.apache.kafka.clients.CommonClientConfigs; import org.apache.kafka.clients.admin.Admin; import org.apache.kafka.clients.admin.DescribeStreamsGroupsResult; @@ -43,6 +40,11 @@ import org.apache.kafka.common.requests.ListOffsetsResponse; import org.apache.kafka.common.utils.Utils; import org.apache.kafka.server.util.CommandLineUtils; +import org.apache.kafka.tools.consumer.group.CsvUtils; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectReader; +import com.fasterxml.jackson.databind.ObjectWriter; import java.io.IOException; import java.text.ParseException; @@ -66,7 +68,6 @@ import java.util.stream.Stream; import joptsimple.OptionException; -import org.apache.kafka.tools.consumer.group.CsvUtils; import static org.apache.kafka.tools.streams.StreamsGroupCommandOptions.LOGGER; @@ -492,7 +493,8 @@ private List parseTopicPartitionsToReset(List topicArgs) topics.add(topicArg); }); - List specifiedPartitions = topicsWithPartitions.stream().flatMap(this::parseTopicsWithPartitions).collect(Collectors.toList()); + List specifiedPartitions = + topicsWithPartitions.stream().flatMap(this::parseTopicsWithPartitions).collect(Collectors.toList()); List unspecifiedPartitions = new ArrayList<>(); diff --git a/tools/src/test/java/org/apache/kafka/tools/streams/ResetOffsetStreamsGroupTest.java b/tools/src/test/java/org/apache/kafka/tools/streams/ResetOffsetStreamsGroupTest.java deleted file mode 100644 index a496fd74ce582..0000000000000 --- a/tools/src/test/java/org/apache/kafka/tools/streams/ResetOffsetStreamsGroupTest.java +++ /dev/null @@ -1,271 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.kafka.tools.streams; - -import org.apache.kafka.clients.admin.Admin; -import org.apache.kafka.clients.admin.AdminClientConfig; -import org.apache.kafka.clients.consumer.ConsumerConfig; -import org.apache.kafka.clients.consumer.OffsetAndMetadata; -import org.apache.kafka.clients.producer.Producer; -import org.apache.kafka.clients.producer.ProducerRecord; -import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.serialization.LongSerializer; -import org.apache.kafka.common.serialization.Serdes; -import org.apache.kafka.common.test.ClusterInstance; -import org.apache.kafka.common.test.api.ClusterTest; -import org.apache.kafka.coordinator.group.GroupCoordinatorConfig; -import org.apache.kafka.streams.GroupProtocol; -import org.apache.kafka.streams.KafkaStreams; -import org.apache.kafka.streams.KeyValue; -import org.apache.kafka.streams.StreamsBuilder; -import org.apache.kafka.streams.StreamsConfig; -import org.apache.kafka.streams.Topology; -import org.apache.kafka.streams.integration.utils.EmbeddedKafkaCluster; -import org.apache.kafka.streams.integration.utils.IntegrationTestUtils; -import org.apache.kafka.streams.kstream.Consumed; -import org.apache.kafka.streams.kstream.Produced; -import org.apache.kafka.test.TestUtils; -import org.apache.kafka.tools.ToolsTestUtils; -import org.apache.kafka.tools.consumer.group.ConsumerGroupCommand; -import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.Tag; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.Timeout; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.Objects; -import java.util.Properties; -import java.util.Set; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.atomic.AtomicReference; -import java.util.function.Function; -import java.util.stream.Collectors; -import java.util.stream.IntStream; - -import static java.util.Arrays.asList; -import static java.util.Collections.singletonList; -import static java.util.Collections.singletonMap; -import static java.util.stream.Collectors.toMap; -import static org.apache.kafka.clients.CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG; -import static org.apache.kafka.streams.integration.utils.IntegrationTestUtils.startApplicationAndWaitUntilRunning; -import static org.apache.kafka.test.TestUtils.DEFAULT_MAX_WAIT_MS; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertTrue; - -@Timeout(600) -@Tag("integration") -public class ResetOffsetStreamsGroupTest { - private static final String TOPIC_PREFIX = "foo-"; - - public static EmbeddedKafkaCluster cluster = null; - static KafkaStreams streams; - private static final String APP_ID = "streams-group-command-test"; - private static final String INPUT_TOPIC = "customInputTopic"; - private static final String OUTPUT_TOPIC = "customOutputTopic"; - - @BeforeAll - public static void setup() throws Exception { - // start the cluster and create the input topic - final Properties props = new Properties(); - props.setProperty(GroupCoordinatorConfig.GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG, "classic,consumer,streams"); - cluster = new EmbeddedKafkaCluster(1, props); - cluster.start(); - cluster.createTopic(INPUT_TOPIC, 2, 1); - - - // start kafka streams - Properties streamsProp = new Properties(); - streamsProp.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); - 
streamsProp.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.bootstrapServers()); - streamsProp.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName()); - streamsProp.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName()); - streamsProp.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath()); - streamsProp.put(StreamsConfig.APPLICATION_ID_CONFIG, APP_ID); - streamsProp.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, 2); - streamsProp.put(StreamsConfig.GROUP_PROTOCOL_CONFIG, GroupProtocol.STREAMS.name().toLowerCase(Locale.getDefault())); - - streams = new KafkaStreams(topology(), streamsProp); - startApplicationAndWaitUntilRunning(streams); - } - - @AfterAll - public static void closeCluster() { - streams.close(); - cluster.stop(); - cluster = null; - } - - @Test - public void testResetOffsetsExistingTopic() { - String topic = APP_ID; - String[] args = new String[]{"--bootstrap-server", cluster.bootstrapServers(), "--reset-offsets", "--topic", topic, "--to-offset", "50"}; - produceMessages(100); - resetAndAssertOffsets(args, 50, true, List.of(topic)); -// resetAndAssertOffsets(addTo(args, "--dry-run"), -// 50, true, singletonList(topic)); -// resetAndAssertOffsets(addTo(args, "--execute"), -// 50, false, singletonList(topic)); - } - - -// private void resetAndAssertOffsets(String topic, -// String[] args, -// long expectedOffset) { -// resetAndAssertOffsets(args, expectedOffset, false, singletonList(topic)); -// } - - private void resetAndAssertOffsets(String[] args, - long expectedOffset, - boolean dryRun, - List topics) { - try (StreamsGroupCommand.StreamsGroupService service = getStreamsGroupService(args)) { - Map> topicToExpectedOffsets = getTopicExceptOffsets(topics, expectedOffset); - Map> resetOffsetsResultByGroup = - resetOffsets(service); - for (final String topic : topics) { - resetOffsetsResultByGroup.forEach((group, partitionInfo) -> { - Map priorOffsets = committedOffsets(topic, group); - assertEquals(topicToExpectedOffsets.get(topic), partitionToOffsets(topic, partitionInfo)); - assertEquals(dryRun ? 
priorOffsets : topicToExpectedOffsets.get(topic), - committedOffsets(topic, group)); - }); - } - } - } - - private Map committedOffsets(String topic, - String group) { - try (Admin admin = Admin.create(singletonMap(BOOTSTRAP_SERVERS_CONFIG, cluster.bootstrapServers()))) { - return admin.listConsumerGroupOffsets(group) - .all().get() - .get(group).entrySet() - .stream() - .filter(e -> e.getKey().topic().equals(topic)) - .collect(toMap(Map.Entry::getKey, e -> e.getValue().offset())); - } catch (ExecutionException | InterruptedException e) { - throw new RuntimeException(e); - } - } - - private Map partitionToOffsets(String topic, - Map partitionInfo) { - return partitionInfo.entrySet() - .stream() - .filter(entry -> Objects.equals(entry.getKey().topic(), topic)) - .collect(toMap(Map.Entry::getKey, e -> e.getValue().offset())); - } - - - private Map> getTopicExceptOffsets(List topics, - long expectedOffset) { - return topics.stream() - .collect(toMap(Function.identity(), - topic -> singletonMap(new TopicPartition(topic, 0), - expectedOffset))); - } - - private Map> resetOffsets( - StreamsGroupCommand.StreamsGroupService service) { - return service.resetOffsets(); - } - - private void produceMessages(int numMessages) { - final List> data = prepareData(0L, numMessages, 0L); - - IntegrationTestUtils.produceKeyValuesSynchronously( - INPUT_TOPIC, - data, - TestUtils.producerConfig(cluster.bootstrapServers(), LongSerializer.class, LongSerializer.class), - cluster.time - ); - } - - private List> prepareData(final long fromInclusive, - final long toExclusive, - final Long... keys) { - final long dataSize = keys.length * (toExclusive - fromInclusive); - final List> data = new ArrayList<>((int) dataSize); - - for (final Long k : keys) { - for (long v = fromInclusive; v < toExclusive; ++v) { - data.add(new KeyValue<>(k, v)); - } - } - - return data; - } - - - - private static Topology topology() { - final StreamsBuilder builder = new StreamsBuilder(); - builder.stream(INPUT_TOPIC, Consumed.with(Serdes.String(), Serdes.String())) - .flatMapValues(value -> Arrays.asList(value.toLowerCase(Locale.getDefault()).split("\\W+"))) - .groupBy((key, value) -> value) - .count() - .toStream().to(OUTPUT_TOPIC, Produced.with(Serdes.String(), Serdes.Long())); - return builder.build(); - } - - private StreamsGroupCommand.StreamsGroupService getStreamsGroupService(String[] args) { - StreamsGroupCommandOptions opts = StreamsGroupCommandOptions.fromArgs(args); - return new StreamsGroupCommand.StreamsGroupService( - opts, - Map.of(AdminClientConfig.RETRIES_CONFIG, Integer.toString(Integer.MAX_VALUE)) - ); - } - - private static void validateDescribeOutput( - List args, - List expectedHeader, - Set> expectedRows, - List dontCareIndices - ) throws InterruptedException { - final AtomicReference out = new AtomicReference<>(""); - TestUtils.waitForCondition(() -> { - String output = ToolsTestUtils.grabConsoleOutput(() -> StreamsGroupCommand.main(args.toArray(new String[0]))); - out.set(output); - - String[] lines = output.split("\n"); - if (lines.length == 1 && lines[0].isEmpty()) lines = new String[]{}; - - if (lines.length == 0) return false; - List header = Arrays.asList(lines[0].split("\\s+")); - if (!expectedHeader.equals(header)) return false; - - Set> groupDesc = Arrays.stream(Arrays.copyOfRange(lines, 1, lines.length)) - .map(line -> Arrays.asList(line.split("\\s+"))) - .collect(Collectors.toSet()); - if (groupDesc.size() != expectedRows.size()) return false; - // clear the dontCare fields and then compare two sets - 
return expectedRows - .equals( - groupDesc.stream() - .map(list -> { - List listCloned = new ArrayList<>(list); - dontCareIndices.forEach(index -> listCloned.set(index, "")); - return listCloned; - }).collect(Collectors.toSet()) - ); - }, () -> String.format("Expected header=%s and groups=%s, but found:%n%s", expectedHeader, expectedRows, out.get())); - } -} diff --git a/tools/src/test/java/org/apache/kafka/tools/streams/ResetStreamsGroupOffsetTest.java b/tools/src/test/java/org/apache/kafka/tools/streams/ResetStreamsGroupOffsetTest.java new file mode 100644 index 0000000000000..afd2342efb28e --- /dev/null +++ b/tools/src/test/java/org/apache/kafka/tools/streams/ResetStreamsGroupOffsetTest.java @@ -0,0 +1,526 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.tools.streams; + +import org.apache.kafka.clients.admin.Admin; +import org.apache.kafka.clients.admin.AdminClientConfig; +import org.apache.kafka.clients.consumer.ConsumerConfig; +import org.apache.kafka.clients.consumer.OffsetAndMetadata; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.serialization.Serdes; +import org.apache.kafka.common.serialization.StringSerializer; +import org.apache.kafka.coordinator.group.GroupCoordinatorConfig; +import org.apache.kafka.streams.GroupProtocol; +import org.apache.kafka.streams.KafkaStreams; +import org.apache.kafka.streams.KeyValueTimestamp; +import org.apache.kafka.streams.StreamsBuilder; +import org.apache.kafka.streams.StreamsConfig; +import org.apache.kafka.streams.integration.utils.EmbeddedKafkaCluster; +import org.apache.kafka.streams.integration.utils.IntegrationTestUtils; +import org.apache.kafka.streams.kstream.KStream; +import org.apache.kafka.streams.kstream.KTable; +import org.apache.kafka.streams.kstream.Materialized; +import org.apache.kafka.test.TestUtils; + +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; + +import java.io.BufferedWriter; +import java.io.File; +import java.io.FileWriter; +import java.io.IOException; +import java.time.LocalDateTime; +import java.time.format.DateTimeFormatter; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Optional; +import java.util.Properties; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; + +import joptsimple.OptionException; + +import static java.time.LocalDateTime.now; +import static java.util.Arrays.asList; +import static java.util.stream.Collectors.toMap; +import static 
org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; + +@Timeout(600) +@Tag("integration") +public class ResetStreamsGroupOffsetTest { + private static final String TOPIC_PREFIX = "foo-"; + private static final String APP_ID_PREFIX = "streams-group-command-test"; + public static EmbeddedKafkaCluster cluster; + private static String bootstrapServers; + private static Admin adminClient; + + @BeforeAll + public static void startCluster() { + final Properties props = new Properties(); + props.setProperty(GroupCoordinatorConfig.GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG, "classic,consumer,streams"); + cluster = new EmbeddedKafkaCluster(2, props); + cluster.start(); + + bootstrapServers = cluster.bootstrapServers(); + adminClient = cluster.createAdminClient(); + + STREAMS_CONFIG.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); + STREAMS_CONFIG.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); + STREAMS_CONFIG.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.StringSerde.class); + STREAMS_CONFIG.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.StringSerde.class); + STREAMS_CONFIG.put(StreamsConfig.GROUP_PROTOCOL_CONFIG, GroupProtocol.STREAMS.name().toLowerCase(Locale.getDefault())); + } + + @AfterAll + public static void closeCluster() { + cluster.stop(); + } + + private static final Properties STREAMS_CONFIG = new Properties(); + private static final int RECORD_TOTAL = 10; + + @Test + public void testResetWithUnrecognizedOption() { + String[] args = new String[]{"--unrecognized-option", "--bootstrap-server", bootstrapServers, "--reset-offsets", "--all-topics", "--to-offset", "5"}; + assertThrows(OptionException.class, () -> getStreamsGroupService(args)); + } + + @Test + public void testResetOffset() throws Exception { + final String appId = generateRandomAppId(); + final String topic1 = generateRandomTopic(); + final String topic2 = generateRandomTopic(); + final int numOfPartitions = 2; + String[] args; + produceConsumeShutdown(appId, topic1, topic2, RECORD_TOTAL * numOfPartitions * 2); + produceMessagesOnTwoPartitions(RECORD_TOTAL, topic1); + produceMessagesOnTwoPartitions(RECORD_TOTAL, topic2); + /////////////////////////////////////////////// Specific topic (--topic topic1) //////////////////////////////////////////////// + // reset to specific offset, offset already on 10 + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--topic", topic1, "--to-offset", "5"}; + resetOffsetsAndAssert(args, appId, topic1, 5L, 10L, 0, 1); + resetOffsetsAndAssert(addTo(args, "--dry-run"), appId, topic1, 5L, 10L, 0, 1); + resetOffsetsAndAssert(addTo(args, "--execute"), appId, topic1, 5L, 5L, 0, 1); + + resetForNextTest(appId, 10L, topic1); + + // reset to specific date time + DateTimeFormatter format = DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSS"); + LocalDateTime dateTime = now().minusDays(1); + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--topic", topic1, "--to-datetime", format.format(dateTime)}; + resetOffsetsAndAssert(args, appId, topic1, 0L, 10L, 0, 1); + resetOffsetsAndAssert(addTo(args, "--dry-run"), appId, topic1, 0L, 10L, 0, 1); + resetOffsetsAndAssert(addTo(args, "--execute"), appId, topic1, 0L, 0L, 0, 1); + + resetForNextTest(appId, 10L, topic1); + + // reset by duration to earliest + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--topic", topic1, "--by-duration", "PT5M"}; + 
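// A minimal sketch of what "--by-duration PT5M" is expected to resolve to, assuming the tool
// mirrors the consumer-group implementation (admin, topicPartitions and the other names below
// are illustrative, not taken from this patch): the duration is subtracted from the current
// wall-clock time and the broker returns, per partition, the first offset whose record
// timestamp is at or after that instant. Every record in this test was produced well inside
// the last five minutes, so the lookup lands on offset 0, which is what the assertions below
// expect.
//
//     long timestamp = System.currentTimeMillis() - Duration.parse("PT5M").toMillis();
//     Map<TopicPartition, OffsetSpec> request = topicPartitions.stream()
//         .collect(Collectors.toMap(Function.identity(), tp -> OffsetSpec.forTimestamp(timestamp)));
//     Map<TopicPartition, ListOffsetsResult.ListOffsetsResultInfo> resolved =
//         admin.listOffsets(request).all().get();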
resetOffsetsAndAssert(args, appId, topic1, 0L, 10L, 0, 1); + resetOffsetsAndAssert(addTo(args, "--dry-run"), appId, topic1, 0L, 10L, 0, 1); + resetOffsetsAndAssert(addTo(args, "--execute"), appId, topic1, 0L, 0L, 0, 1); + + resetForNextTest(appId, 10L, topic1); + + // reset to earliest + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--topic", topic1, "--to-earliest"}; + resetOffsetsAndAssert(args, appId, topic1, 0L, 10L, 0, 1); + resetOffsetsAndAssert(addTo(args, "--dry-run"), appId, topic1, 0L, 10L, 0, 1); + resetOffsetsAndAssert(addTo(args, "--execute"), appId, topic1, 0L, 0L, 0, 1); + + resetForNextTest(appId, 10L, topic1); + + // reset to latest + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--topic", topic1, "--to-latest"}; + resetOffsetsAndAssert(args, appId, topic1, 20L, 10L, 0, 1); + resetOffsetsAndAssert(addTo(args, "--dry-run"), appId, topic1, 20L, 10L, 0, 1); + resetOffsetsAndAssert(addTo(args, "--execute"), appId, topic1, 20L, 20L, 0, 1); + + resetForNextTest(appId, 5L, topic1); + + // reset to current + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--topic", topic1, "--to-current"}; + resetOffsetsAndAssert(args, appId, topic1, 5L, 5L, 0, 1); + resetOffsetsAndAssert(addTo(args, "--dry-run"), appId, topic1, 5L, 5L, 0, 1); + resetOffsetsAndAssert(addTo(args, "--execute"), appId, topic1, 5L, 5L, 0, 1); + + // reset offset shift+. The current offset is 5, as of the prev test is executed (by --execute) + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--topic", topic1, "--shift-by", "3"}; + resetOffsetsAndAssert(args, appId, topic1, 8L, 5L, 0, 1); + resetOffsetsAndAssert(addTo(args, "--dry-run"), appId, topic1, 8L, 5L, 0, 1); + resetOffsetsAndAssert(addTo(args, "--execute"), appId, topic1, 8L, 8L, 0, 1); + + // reset offset shift-. The current offset is 8, as of the prev test is executed (by --execute) + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--topic", topic1, "--shift-by", "-3"}; + resetOffsetsAndAssert(args, appId, topic1, 5L, 8L, 0, 1); + resetOffsetsAndAssert(addTo(args, "--dry-run"), appId, topic1, 5L, 8L, 0, 1); + resetOffsetsAndAssert(addTo(args, "--execute"), appId, topic1, 5L, 5L, 0, 1); + + // reset offset shift by lower than earliest. The current offset is 5, as of the prev test is executed (by --execute) + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--topic", topic1, "--shift-by", "-150"}; + resetOffsetsAndAssert(args, appId, topic1, 0L, 5L, 0, 1); + resetOffsetsAndAssert(addTo(args, "--dry-run"), appId, topic1, 0L, 5L, 0, 1); + resetOffsetsAndAssert(addTo(args, "--execute"), appId, topic1, 0L, 0L, 0, 1); + + // reset offset shift by higher than latest. 
The current offset is 0, as of the prev test is executed (by --execute) + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--topic", topic1, "--shift-by", "150"}; + resetOffsetsAndAssert(args, appId, topic1, 20L, 0L, 0, 1); + resetOffsetsAndAssert(addTo(args, "--dry-run"), appId, topic1, 20L, 0L, 0, 1); + resetOffsetsAndAssert(addTo(args, "--execute"), appId, topic1, 20L, 20L, 0, 1); + + // export to file + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--topic", topic1, "--to-offset", "5", "--export"}; + File file = TestUtils.tempFile("reset", ".csv"); + Map exp = Map.of(new TopicPartition(topic1, 0), 5L, new TopicPartition(topic1, 1), 5L); + try (StreamsGroupCommand.StreamsGroupService service = getStreamsGroupService(args)) { + Map> exportedOffsets = service.resetOffsets(); + writeContentToFile(file, service.exportOffsetsToCsv(exportedOffsets)); + + assertEquals(exp, toOffsetMap(exportedOffsets.get(appId))); + } + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--topic", topic1, "--from-file", file.getCanonicalPath()}; + try (StreamsGroupCommand.StreamsGroupService service = getStreamsGroupService(args)) { + Map> importedOffsets = service.resetOffsets(); + assertEquals(exp, toOffsetMap(importedOffsets.get(appId))); + } + ///////////////////////////////////////// Specific topic and partition (--topic topic1, --topic topic2) ///////////////////////////////////////// + resetForNextTest(appId, 10L, topic1); + + // reset to specific offset + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--topic", topic1 + ":1", "--to-offset", "5"}; + resetOffsetsAndAssert(args, appId, topic1, 5L, 10L, 1); + resetOffsetsAndAssert(addTo(args, "--dry-run"), appId, topic1, 5L, 10L, 1); + resetOffsetsAndAssert(addTo(args, "--execute"), appId, topic1, 5L, 5L, 1); + + resetForNextTest(appId, 10L, topic1); + + // reset both partitions of topic1 and topic2:1 to specific offset + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", + "--topic", topic1, "--topic", topic2 + ":1", "--to-offset", "5"}; + final Map expectedOffsets = Map.of( + new TopicPartition(topic1, 0), 5L, + new TopicPartition(topic1, 1), 5L, + new TopicPartition(topic2, 1), 5L); + + resetOffsetsAndAssert(args, appId, List.of(topic1, topic2), expectedOffsets, + Map.of( + new TopicPartition(topic1, 0), 10L, + new TopicPartition(topic1, 1), 10L, + new TopicPartition(topic2, 0), 10L, + new TopicPartition(topic2, 1), 10L)); + resetOffsetsAndAssert(addTo(args, "--execute"), appId, List.of(topic1, topic2), expectedOffsets, + Map.of(new TopicPartition(topic1, 0), 5L, + new TopicPartition(topic1, 1), 5L, + new TopicPartition(topic2, 0), 10L, + new TopicPartition(topic2, 1), 5L)); + ///////////////////////////////////////// All topics (--all-topics) ///////////////////////////////////////// + resetForNextTest(appId, 10L, topic1, topic2); + + // reset to specific offset + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--all-topics", "--to-offset", "5"}; + resetOffsetsAndAssert(args, appId, topic1, topic2, 5L, 10L); + resetOffsetsAndAssert(addTo(args, "--dry-run"), appId, topic1, topic2, 5L, 10L); + resetOffsetsAndAssert(addTo(args, "--execute"), appId, topic1, topic2, 5L, 5L); + + resetForNextTest(appId, 10L, topic1, topic2); + + // reset to specific offset with two --topic options + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--topic", 
topic1, "--topic", topic2, "--to-offset", "5"}; + resetOffsetsAndAssert(args, appId, topic1, topic2, 5L, 10L); + resetOffsetsAndAssert(addTo(args, "--dry-run"), appId, topic1, topic2, 5L, 10L); + resetOffsetsAndAssert(addTo(args, "--execute"), appId, topic1, topic2, 5L, 5L); + + resetForNextTest(appId, 10L, topic1, topic2); + + // export to file + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--all-topics", "--to-offset", "5", "--export"}; + file = TestUtils.tempFile("reset-all", ".csv"); + exp = Map.of(new TopicPartition(topic1, 0), 5L, + new TopicPartition(topic1, 1), 5L, + new TopicPartition(topic2, 0), 5L, + new TopicPartition(topic2, 1), 5L); + try (StreamsGroupCommand.StreamsGroupService service = getStreamsGroupService(args)) { + Map> exportedOffsets = service.resetOffsets(); + writeContentToFile(file, service.exportOffsetsToCsv(exportedOffsets)); + + assertEquals(exp, toOffsetMap(exportedOffsets.get(appId))); + } + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--topic", topic1, "--from-file", file.getCanonicalPath()}; + try (StreamsGroupCommand.StreamsGroupService service = getStreamsGroupService(args)) { + Map> importedOffsets = service.resetOffsets(); + + assertEquals(exp, toOffsetMap(importedOffsets.get(appId))); + } + + adminClient.deleteTopics(List.of(topic1, topic2)).all().get(); + } + + private void resetForNextTest(String appId, long desiredOffset, String... topics) throws ExecutionException, InterruptedException { + Map offsets = new HashMap<>(); + for (String topic : topics) { + offsets.put(new TopicPartition(topic, 0), new OffsetAndMetadata(desiredOffset)); + offsets.put(new TopicPartition(topic, 1), new OffsetAndMetadata(desiredOffset)); + } + adminClient.alterStreamsGroupOffsets(appId, offsets).all().get(); + Map committedOffsets = committedOffsets(List.of(topics), appId); + for (TopicPartition tp: offsets.keySet()) { + assertEquals(desiredOffset, committedOffsets.get(tp)); + } + } + + private void AssertCommittedOffsets(String appId, + String topic, + long expectedCommittedOffset, + int... partitions) throws ExecutionException, InterruptedException { + List affectedTPs = Arrays.stream(partitions) + .mapToObj(partition -> new TopicPartition(topic, partition)) + .toList(); + Map committedOffsets = committedOffsets(List.of(topic), appId); + for (TopicPartition tp: affectedTPs) { + assertEquals(expectedCommittedOffset, committedOffsets.get(tp)); + } + } + + private void AssertCommittedOffsets(String appId, + String topic1, + String topic2, + long expectedCommittedOffset) throws ExecutionException, InterruptedException { + TopicPartition tp10 = new TopicPartition(topic1, 0); + TopicPartition tp11 = new TopicPartition(topic2, 0); + TopicPartition tp20 = new TopicPartition(topic1, 1); + TopicPartition tp21 = new TopicPartition(topic2, 1); + Map committedOffsets = committedOffsets(List.of(topic1, topic2), appId); + assertEquals(Map.of( + tp10, expectedCommittedOffset, + tp20, expectedCommittedOffset, + tp11, expectedCommittedOffset, + tp21, expectedCommittedOffset), committedOffsets); + } + + // Performs resetting offsets and assertion for one topic + private void resetOffsetsAndAssert(String[] args, + String appId, + String topic, + long expectedOffset, + long expectedCommittedOffset, + int... 
partitions) throws ExecutionException, InterruptedException { + Map<String, Map<TopicPartition, Long>> resetOffsetsResultByGroup; + Map<TopicPartition, Long> expectedOffsetMap = Arrays.stream(partitions) + .boxed() + .collect(Collectors.toMap( + partition -> new TopicPartition(topic, partition), + partition -> expectedOffset + )); + Map<String, Map<TopicPartition, Long>> expectedResetResults = Map.of(appId, expectedOffsetMap); + + try (StreamsGroupCommand.StreamsGroupService service = getStreamsGroupService(args)) { + resetOffsetsResultByGroup = convertOffsetsToLong(service.resetOffsets()); + } + // assert that the reset offsets are as expected + assertEquals(expectedResetResults, resetOffsetsResultByGroup); + + // assert that the committed offsets are as expected + AssertCommittedOffsets(appId, topic, expectedCommittedOffset, partitions); + } + + // Performs resetting offsets and assertion for two topics + private void resetOffsetsAndAssert(String[] args, + String appId, + String topic1, + String topic2, + long expectedOffset, + long expectedCommittedOffset) throws ExecutionException, InterruptedException { + Map<String, Map<TopicPartition, Long>> resetOffsetsResultByGroup; + Map<String, Map<TopicPartition, Long>> expectedResetResults = Map.of( + appId, Map.of( + new TopicPartition(topic1, 0), expectedOffset, + new TopicPartition(topic2, 0), expectedOffset, + new TopicPartition(topic1, 1), expectedOffset, + new TopicPartition(topic2, 1), expectedOffset + ) + ); + + try (StreamsGroupCommand.StreamsGroupService service = getStreamsGroupService(args)) { + resetOffsetsResultByGroup = convertOffsetsToLong(service.resetOffsets()); + } + // assert that the reset offsets are as expected + assertEquals(expectedResetResults, resetOffsetsResultByGroup); + // assert that the committed offsets are as expected + AssertCommittedOffsets(appId, topic1, topic2, expectedCommittedOffset); + } + + // Performs resetting offsets and assertion for given topic partitions + private void resetOffsetsAndAssert(String[] args, + String appId, + List<String> topics, + Map<TopicPartition, Long> expectedOffsets, + Map<TopicPartition, Long> expectedCommittedOffsets) throws ExecutionException, InterruptedException { + Map<TopicPartition, Long> resetOffsetsResult; + + try (StreamsGroupCommand.StreamsGroupService service = getStreamsGroupService(args)) { + resetOffsetsResult = convertOffsetsToLong(service.resetOffsets()).get(appId); + } + // assert that the reset offsets are as expected + assertEquals(expectedOffsets, resetOffsetsResult); + // assert that the committed offsets are as expected + assertEquals(expectedCommittedOffsets, committedOffsets(topics, appId)); + } + + private Map<TopicPartition, Long> committedOffsets(List<String> topics, + String group) throws ExecutionException, InterruptedException { + return adminClient.listConsumerGroupOffsets(group) + .all().get() + .get(group).entrySet() + .stream() + .filter(e -> topics.contains(e.getKey().topic())) + .collect(toMap(Map.Entry::getKey, e -> e.getValue().offset())); + } + + private static Map<String, Map<TopicPartition, Long>> convertOffsetsToLong(Map<String, Map<TopicPartition, OffsetAndMetadata>> map) { + return map.entrySet() + .stream() + .collect(toMap(Map.Entry::getKey, e -> e.getValue().entrySet() + .stream() + .collect(toMap(Map.Entry::getKey, e1 -> e1.getValue().offset())))); + } + + private StreamsGroupCommand.StreamsGroupService getStreamsGroupService(String[] args) { + StreamsGroupCommandOptions opts = StreamsGroupCommandOptions.fromArgs(args); + return new StreamsGroupCommand.StreamsGroupService( + opts, + Map.of(AdminClientConfig.RETRIES_CONFIG, Integer.toString(Integer.MAX_VALUE)) + ); + } + + private void writeContentToFile(File file, String content) throws IOException { + try (BufferedWriter bw = new BufferedWriter(new FileWriter(file))) { + bw.write(content); + } + } + + private Map<TopicPartition, Long> 
toOffsetMap(Map<TopicPartition, OffsetAndMetadata> map) { + return map.entrySet() + .stream() + .collect(toMap(Map.Entry::getKey, e -> e.getValue().offset())); + } + + private String[] addTo(String[] args, String... extra) { + List<String> res = new ArrayList<>(asList(args)); + res.addAll(asList(extra)); + return res.toArray(new String[0]); + } + + private String generateRandomTopic() { + return TOPIC_PREFIX + TestUtils.randomString(10); + } + + private String generateRandomAppId() { + return APP_ID_PREFIX + TestUtils.randomString(10); + } + + private void produceConsumeShutdown(String appId, String topic1, String topic2, long numOfCommittedMessages) throws Exception { + STREAMS_CONFIG.put(StreamsConfig.APPLICATION_ID_CONFIG, appId); + STREAMS_CONFIG.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE_V2); + + cluster.createTopic(topic1, 2); + cluster.createTopic(topic2, 2); + + final StreamsBuilder builder = new StreamsBuilder(); + + final KStream<String, String> inputStream1 = builder.stream(topic1); + final KStream<String, String> inputStream2 = builder.stream(topic2); + + final AtomicInteger recordCount = new AtomicInteger(0); + + final KTable<String, String> valueCounts = inputStream1.merge(inputStream2) + .groupByKey() + .aggregate( + () -> "()", + (key, value, aggregate) -> aggregate + ",(" + key + ": " + value + ")", + Materialized.as("aggregated_value")); + + valueCounts.toStream().peek((key, value) -> { + if (recordCount.incrementAndGet() > numOfCommittedMessages) { + throw new IllegalStateException("Crash on the " + numOfCommittedMessages + " record"); + } + }); + + + final KafkaStreams streams = new KafkaStreams(builder.build(), STREAMS_CONFIG); + streams.cleanUp(); + streams.start(); + + produceMessagesOnTwoPartitions(RECORD_TOTAL, topic1); + produceMessagesOnTwoPartitions(RECORD_TOTAL, topic2); + + + TestUtils.waitForCondition(() -> streams.state().equals(KafkaStreams.State.RUNNING), + "Expected RUNNING state but streams is on " + streams.state()); + + + try { + TestUtils.waitForCondition(() -> recordCount.get() == numOfCommittedMessages, + "Expected " + numOfCommittedMessages + " records processed but only got " + recordCount.get()); + } catch (final Exception e) { + e.printStackTrace(); + } finally { + assertEquals(numOfCommittedMessages, recordCount.get(), "Expected " + numOfCommittedMessages + " records processed but only got " + recordCount.get()); + streams.close(); +// IntegrationTestUtils.waitForEmptyConsumerGroup(CLUSTER.createAdminClient(), appId, 60000); + } + } + + private static void produceMessagesOnTwoPartitions(final int numOfMessages, final String topic) { + + // partition 0 + List<KeyValueTimestamp<String, String>> data = new ArrayList<>(numOfMessages); + for (long v = 0; v < numOfMessages; ++v) { + data.add(new KeyValueTimestamp<>(v + "0" + topic, v + "0", cluster.time.milliseconds())); + } + + IntegrationTestUtils.produceSynchronously( + TestUtils.producerConfig(bootstrapServers, StringSerializer.class, StringSerializer.class), + false, + topic, + Optional.of(0), + data + ); + + // partition 1 + data = new ArrayList<>(numOfMessages); + for (long v = 0; v < numOfMessages; ++v) { + data.add(new KeyValueTimestamp<>(v + "1" + topic, v + "1", cluster.time.milliseconds())); + } + + IntegrationTestUtils.produceSynchronously( + TestUtils.producerConfig(bootstrapServers, StringSerializer.class, StringSerializer.class), + false, + topic, + Optional.of(1), + data + ); + } +} diff --git a/tools/src/test/java/org/apache/kafka/tools/streams/StreamsGroupCommandTest.java b/tools/src/test/java/org/apache/kafka/tools/streams/StreamsGroupCommandTest.java index 
65027ee3b79fb..3f499a0ba672b 100644 --- a/tools/src/test/java/org/apache/kafka/tools/streams/StreamsGroupCommandTest.java +++ b/tools/src/test/java/org/apache/kafka/tools/streams/StreamsGroupCommandTest.java @@ -17,30 +17,37 @@ package org.apache.kafka.tools.streams; import org.apache.kafka.clients.admin.Admin; +import org.apache.kafka.clients.admin.AdminClientTestUtils; import org.apache.kafka.clients.admin.DescribeStreamsGroupsResult; +import org.apache.kafka.clients.admin.DescribeTopicsResult; import org.apache.kafka.clients.admin.GroupListing; import org.apache.kafka.clients.admin.KafkaAdminClient; -import org.apache.kafka.clients.admin.ListConsumerGroupOffsetsResult; import org.apache.kafka.clients.admin.ListGroupsOptions; import org.apache.kafka.clients.admin.ListGroupsResult; import org.apache.kafka.clients.admin.ListOffsetsResult; -import org.apache.kafka.clients.admin.MockAdminClient; +import org.apache.kafka.clients.admin.ListStreamsGroupOffsetsResult; import org.apache.kafka.clients.admin.StreamsGroupDescription; import org.apache.kafka.clients.admin.StreamsGroupMemberAssignment; import org.apache.kafka.clients.admin.StreamsGroupMemberDescription; import org.apache.kafka.clients.admin.StreamsGroupSubtopologyDescription; +import org.apache.kafka.clients.admin.TopicDescription; import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.common.GroupState; import org.apache.kafka.common.GroupType; import org.apache.kafka.common.KafkaFuture; import org.apache.kafka.common.Node; import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.TopicPartitionInfo; +import org.apache.kafka.common.internals.KafkaFutureImpl; import org.apache.kafka.test.TestUtils; import org.junit.jupiter.api.Test; import org.mockito.ArgumentMatchers; +import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -48,7 +55,9 @@ import java.util.Objects; import java.util.Optional; import java.util.Set; +import java.util.function.Function; import java.util.stream.Collectors; +import java.util.stream.IntStream; import joptsimple.OptionException; @@ -58,25 +67,28 @@ import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; public class StreamsGroupCommandTest { + private static final Admin ADMIN_CLIENT = mock(KafkaAdminClient.class); + private static final String BOOTSTRAP_SERVERS = "localhost:9092"; + @Test public void testListStreamsGroups() throws Exception { String firstGroup = "first-group"; String secondGroup = "second-group"; - String bootstrapServer = "localhost:9092"; - String[] cgcArgs = new String[]{"--bootstrap-server", bootstrapServer, "--list"}; - Admin adminClient = mock(KafkaAdminClient.class); + String[] cgcArgs = new String[]{"--bootstrap-server", BOOTSTRAP_SERVERS, "--list"}; ListGroupsResult result = mock(ListGroupsResult.class); when(result.all()).thenReturn(KafkaFuture.completedFuture(Arrays.asList( new GroupListing(firstGroup, Optional.of(GroupType.STREAMS), "streams", Optional.of(GroupState.STABLE)), new GroupListing(secondGroup, Optional.of(GroupType.STREAMS), "streams", Optional.of(GroupState.EMPTY)) ))); - when(adminClient.listGroups(any(ListGroupsOptions.class))).thenReturn(result); - 
StreamsGroupCommand.StreamsGroupService service = getStreamsGroupService(cgcArgs, adminClient); + when(ADMIN_CLIENT.listGroups(any(ListGroupsOptions.class))).thenReturn(result); + StreamsGroupCommand.StreamsGroupService service = getStreamsGroupService(cgcArgs); Set expectedGroups = new HashSet<>(Arrays.asList(firstGroup, secondGroup)); final Set[] foundGroups = new Set[]{Set.of()}; @@ -89,10 +101,9 @@ public void testListStreamsGroups() throws Exception { @Test public void testListWithUnrecognizedOption() { - String bootstrapServer = "localhost:9092"; - String[] cgcArgs = new String[]{"--frivolous-nonsense", "--bootstrap-server", bootstrapServer, "--list"}; + String[] cgcArgs = new String[]{"--frivolous-nonsense", "--bootstrap-server", BOOTSTRAP_SERVERS, "--list"}; final Exception exception = assertThrows(OptionException.class, () -> { - getStreamsGroupService(cgcArgs, new MockAdminClient()); + getStreamsGroupService(cgcArgs); }); assertEquals("frivolous-nonsense is not a recognized option", exception.getMessage()); } @@ -101,17 +112,15 @@ public void testListWithUnrecognizedOption() { public void testListStreamsGroupsWithStates() throws Exception { String firstGroup = "first-group"; String secondGroup = "second-group"; - String bootstrapServer = "localhost:9092"; - String[] cgcArgs = new String[]{"--bootstrap-server", bootstrapServer, "--list", "--state"}; - Admin adminClient = mock(KafkaAdminClient.class); + String[] cgcArgs = new String[]{"--bootstrap-server", BOOTSTRAP_SERVERS, "--list", "--state"}; ListGroupsResult resultWithAllStates = mock(ListGroupsResult.class); when(resultWithAllStates.all()).thenReturn(KafkaFuture.completedFuture(Arrays.asList( new GroupListing(firstGroup, Optional.of(GroupType.STREAMS), "streams", Optional.of(GroupState.STABLE)), new GroupListing(secondGroup, Optional.of(GroupType.STREAMS), "streams", Optional.of(GroupState.EMPTY)) ))); - when(adminClient.listGroups(any(ListGroupsOptions.class))).thenReturn(resultWithAllStates); - StreamsGroupCommand.StreamsGroupService service = getStreamsGroupService(cgcArgs, adminClient); + when(ADMIN_CLIENT.listGroups(any(ListGroupsOptions.class))).thenReturn(resultWithAllStates); + StreamsGroupCommand.StreamsGroupService service = getStreamsGroupService(cgcArgs); Set expectedListing = new HashSet<>(Arrays.asList( new GroupListing(firstGroup, Optional.of(GroupType.STREAMS), "streams", Optional.of(GroupState.STABLE)), new GroupListing(secondGroup, Optional.of(GroupType.STREAMS), "streams", Optional.of(GroupState.EMPTY)))); @@ -126,7 +135,7 @@ public void testListStreamsGroupsWithStates() throws Exception { when(resultWithStableState.all()).thenReturn(KafkaFuture.completedFuture(List.of( new GroupListing(firstGroup, Optional.of(GroupType.STREAMS), "streams", Optional.of(GroupState.STABLE)) ))); - when(adminClient.listGroups(any(ListGroupsOptions.class))).thenReturn(resultWithStableState); + when(ADMIN_CLIENT.listGroups(any(ListGroupsOptions.class))).thenReturn(resultWithStableState); Set expectedListingStable = Set.of( new GroupListing(firstGroup, Optional.of(GroupType.STREAMS), "streams", Optional.of(GroupState.STABLE))); @@ -141,8 +150,7 @@ public void testListStreamsGroupsWithStates() throws Exception { @Test public void testDescribeStreamsGroups() throws Exception { - String firstGroup = "group1"; - Admin adminClient = mock(KafkaAdminClient.class); + String firstGroup = "foo-group"; DescribeStreamsGroupsResult result = mock(DescribeStreamsGroupsResult.class); Map resultMap = new HashMap<>(); StreamsGroupDescription exp = 
new StreamsGroupDescription( @@ -158,16 +166,14 @@ public void testDescribeStreamsGroups() throws Exception { resultMap.put(firstGroup, exp); when(result.all()).thenReturn(KafkaFuture.completedFuture(resultMap)); - when(adminClient.describeStreamsGroups(ArgumentMatchers.anyCollection())).thenReturn(result); - StreamsGroupCommand.StreamsGroupService service = new StreamsGroupCommand.StreamsGroupService(null, adminClient); + when(ADMIN_CLIENT.describeStreamsGroups(ArgumentMatchers.anyCollection())).thenReturn(result); + StreamsGroupCommand.StreamsGroupService service = new StreamsGroupCommand.StreamsGroupService(null, ADMIN_CLIENT); assertEquals(exp, service.getDescribeGroup(firstGroup)); service.close(); } @Test public void testDescribeStreamsGroupsGetOffsets() throws Exception { - Admin adminClient = mock(KafkaAdminClient.class); - ListOffsetsResult startOffset = mock(ListOffsetsResult.class); Map startOffsetResultMap = new HashMap<>(); startOffsetResultMap.put(new TopicPartition("topic1", 0), new ListOffsetsResult.ListOffsetsResultInfo(10, -1, Optional.empty())); @@ -179,13 +185,13 @@ public void testDescribeStreamsGroupsGetOffsets() throws Exception { when(startOffset.all()).thenReturn(KafkaFuture.completedFuture(startOffsetResultMap)); when(endOffset.all()).thenReturn(KafkaFuture.completedFuture(endOffsetResultMap)); - when(adminClient.listOffsets(ArgumentMatchers.anyMap())).thenReturn(startOffset, endOffset); + when(ADMIN_CLIENT.listOffsets(ArgumentMatchers.anyMap())).thenReturn(startOffset, endOffset); - ListConsumerGroupOffsetsResult result = mock(ListConsumerGroupOffsetsResult.class); + ListStreamsGroupOffsetsResult result = mock(ListStreamsGroupOffsetsResult.class); Map committedOffsetsMap = new HashMap<>(); committedOffsetsMap.put(new TopicPartition("topic1", 0), new OffsetAndMetadata(12, Optional.of(0), "")); - when(adminClient.listConsumerGroupOffsets(ArgumentMatchers.anyMap())).thenReturn(result); + when(ADMIN_CLIENT.listStreamsGroupOffsets(ArgumentMatchers.anyMap())).thenReturn(result); when(result.partitionsToOffsetAndMetadata(ArgumentMatchers.anyString())).thenReturn(KafkaFuture.completedFuture(committedOffsetsMap)); StreamsGroupMemberDescription description = new StreamsGroupMemberDescription("foo", 0, Optional.empty(), @@ -203,7 +209,7 @@ public void testDescribeStreamsGroupsGetOffsets() throws Exception { GroupState.STABLE, new Node(0, "host", 0), null); - StreamsGroupCommand.StreamsGroupService service = new StreamsGroupCommand.StreamsGroupService(null, adminClient); + StreamsGroupCommand.StreamsGroupService service = new StreamsGroupCommand.StreamsGroupService(null, ADMIN_CLIENT); Map lags = service.getOffsets(x); assertEquals(1, lags.size()); assertEquals(new StreamsGroupCommand.OffsetsInfo(Optional.of(12L), Optional.of(0), 30L, 18L), lags.get(new TopicPartition("topic1", 0))); @@ -258,9 +264,46 @@ public void testGroupStatesFromString() { assertThrow(" , ,"); } - StreamsGroupCommand.StreamsGroupService getStreamsGroupService(String[] args, Admin adminClient) { + @Test + public void testAdminRequestsForResetOffsets() { + String groupId = "foo-group"; + List args = new ArrayList<>(Arrays.asList("--bootstrap-server", "localhost:9092", "--group", groupId, "--reset-offsets", "--topic", "topic1", "--to-latest")); + List topics = List.of("topic1"); + + + when(ADMIN_CLIENT.describeStreamsGroups(List.of(groupId))) + .thenReturn(describeStreamsResult(groupId, GroupState.DEAD)); + when(ADMIN_CLIENT.describeTopics(topics)) + .thenReturn(describeTopicsResult(topics, 1)); + 
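// The DEAD state stubbed above is deliberate: offsets can presumably only be reset while the
// group is inactive (the consumer-group tool rejects resets for groups with live members, and
// this command appears to follow the same rule), so the mocked describe result must not report
// an active assignment. describeTopics() supplies the partition count used to expand "topic1"
// into concrete TopicPartitions; the listOffsets() and listGroups() stubs below back the
// --to-latest lookup and the group-existence check.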
when(ADMIN_CLIENT.listOffsets(any())) + .thenReturn(listOffsetsResult()); + when(ADMIN_CLIENT.listGroups(any())).thenReturn(listGroupResult(groupId)); + + StreamsGroupCommand.StreamsGroupService service = getStreamsGroupService(args.toArray(new String[0])); + Map> resetResult = service.resetOffsets(); + + assertEquals(Collections.singleton(groupId), resetResult.keySet()); + assertEquals(new HashSet<>(List.of(new TopicPartition(topics.get(0), 0))), + resetResult.get(groupId).keySet()); + + verify(ADMIN_CLIENT, times(1)).describeStreamsGroups(List.of(groupId)); + verify(ADMIN_CLIENT, times(1)).describeTopics(topics); + verify(ADMIN_CLIENT, times(1)).listOffsets(any()); + verify(ADMIN_CLIENT, times(1)).listGroups(any()); + + service.close(); + } + + private ListGroupsResult listGroupResult(String groupId) { + KafkaFutureImpl> future = new KafkaFutureImpl<>(); + GroupListing groupListing = new GroupListing(groupId, Optional.of(GroupType.STREAMS), "streams", Optional.of(GroupState.DEAD)); + future.complete(List.of(groupListing)); + return new ListGroupsResult(future); + } + + StreamsGroupCommand.StreamsGroupService getStreamsGroupService(String[] args) { StreamsGroupCommandOptions opts = new StreamsGroupCommandOptions(args); - return new StreamsGroupCommand.StreamsGroupService(opts, adminClient); + return new StreamsGroupCommand.StreamsGroupService(opts, ADMIN_CLIENT); } private static void assertThrow(final String wrongState) { @@ -276,4 +319,46 @@ private static void assertThrow(final String wrongState) { .map(String::trim) .collect(Collectors.toSet()), validStates); } + + private DescribeStreamsGroupsResult describeStreamsResult(String groupId, GroupState groupState) { + StreamsGroupMemberDescription memberDescription = new StreamsGroupMemberDescription("foo", 0, Optional.empty(), + Optional.empty(), "bar", "baz", 0, "qux", + Optional.empty(), Map.of(), List.of(), List.of(), + new StreamsGroupMemberAssignment(List.of(), List.of(), List.of()), new StreamsGroupMemberAssignment(List.of(), List.of(), List.of()), + false); + StreamsGroupDescription description = new StreamsGroupDescription(groupId, + 0, + 0, + 0, + Collections.singletonList(new StreamsGroupSubtopologyDescription("subtopologyId", Collections.emptyList(), Collections.emptyList(), Map.of(), Map.of())), + List.of(memberDescription), + groupState, + new Node(1, "localhost", 9092), + Set.of()); + KafkaFutureImpl future = new KafkaFutureImpl<>(); + future.complete(description); + return new DescribeStreamsGroupsResult(Collections.singletonMap(groupId, future)); + } + + private DescribeTopicsResult describeTopicsResult(Collection topics, int numOfPartitions) { + Map topicDescriptions = new HashMap<>(); + + topics.forEach(topic -> { + List partitions = IntStream.range(0, numOfPartitions) + .mapToObj(i -> new TopicPartitionInfo(i, null, Collections.emptyList(), Collections.emptyList())) + .collect(Collectors.toList()); + topicDescriptions.put(topic, new TopicDescription(topic, false, partitions)); + }); + return AdminClientTestUtils.describeTopicsResult(topicDescriptions); + } + + private ListOffsetsResult listOffsetsResult() { + List topicPartitions = new ArrayList<>(); + topicPartitions.add(new TopicPartition("topic1", 0)); + ListOffsetsResult.ListOffsetsResultInfo resultInfo = new ListOffsetsResult.ListOffsetsResultInfo(100, System.currentTimeMillis(), Optional.of(1)); + Map> futures = topicPartitions.stream().collect(Collectors.toMap( + Function.identity(), + __ -> KafkaFuture.completedFuture(resultInfo))); + return new 
ListOffsetsResult(futures); + } } From 42bfde43d0d4dfb37cb21d9ccea4f84e3e4d565c Mon Sep 17 00:00:00 2001 From: aliehsaeedii Date: Mon, 5 May 2025 21:28:17 +0200 Subject: [PATCH 03/26] refactor and adding tests --- .../streams/ResetStreamsGroupOffsetTest.java | 174 +++++++++++++----- 1 file changed, 124 insertions(+), 50 deletions(-) diff --git a/tools/src/test/java/org/apache/kafka/tools/streams/ResetStreamsGroupOffsetTest.java b/tools/src/test/java/org/apache/kafka/tools/streams/ResetStreamsGroupOffsetTest.java index afd2342efb28e..2ccf142340576 100644 --- a/tools/src/test/java/org/apache/kafka/tools/streams/ResetStreamsGroupOffsetTest.java +++ b/tools/src/test/java/org/apache/kafka/tools/streams/ResetStreamsGroupOffsetTest.java @@ -121,9 +121,17 @@ public void testResetOffset() throws Exception { /////////////////////////////////////////////// Specific topic (--topic topic1) //////////////////////////////////////////////// // reset to specific offset, offset already on 10 args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--topic", topic1, "--to-offset", "5"}; - resetOffsetsAndAssert(args, appId, topic1, 5L, 10L, 0, 1); - resetOffsetsAndAssert(addTo(args, "--dry-run"), appId, topic1, 5L, 10L, 0, 1); - resetOffsetsAndAssert(addTo(args, "--execute"), appId, topic1, 5L, 5L, 0, 1); + resetOffsetsAndAssertForDryRunAndExecute(args, appId, topic1, 5L, 10L, 0, 1); + + resetForNextTest(appId, 10L, topic1); + + // reset to specific offset when after end offset, offset already on 10 + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--topic", topic1, "--to-offset", "30"}; + resetOffsetsAndAssertForDryRunAndExecute(args, appId, topic1, 20L, 10L, 0, 1); + + // reset to specific offset when before begin offset, offset already on 20 + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--topic", topic1, "--to-offset", "-30"}; + resetOffsetsAndAssertForDryRunAndExecute(args, appId, topic1, 0L, 20L, 0, 1); resetForNextTest(appId, 10L, topic1); @@ -131,65 +139,47 @@ public void testResetOffset() throws Exception { DateTimeFormatter format = DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSS"); LocalDateTime dateTime = now().minusDays(1); args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--topic", topic1, "--to-datetime", format.format(dateTime)}; - resetOffsetsAndAssert(args, appId, topic1, 0L, 10L, 0, 1); - resetOffsetsAndAssert(addTo(args, "--dry-run"), appId, topic1, 0L, 10L, 0, 1); - resetOffsetsAndAssert(addTo(args, "--execute"), appId, topic1, 0L, 0L, 0, 1); + resetOffsetsAndAssertForDryRunAndExecute(args, appId, topic1, 0L, 10L, 0, 1); resetForNextTest(appId, 10L, topic1); // reset by duration to earliest args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--topic", topic1, "--by-duration", "PT5M"}; - resetOffsetsAndAssert(args, appId, topic1, 0L, 10L, 0, 1); - resetOffsetsAndAssert(addTo(args, "--dry-run"), appId, topic1, 0L, 10L, 0, 1); - resetOffsetsAndAssert(addTo(args, "--execute"), appId, topic1, 0L, 0L, 0, 1); + resetOffsetsAndAssertForDryRunAndExecute(args, appId, topic1, 0L, 10L, 0, 1); resetForNextTest(appId, 10L, topic1); // reset to earliest args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--topic", topic1, "--to-earliest"}; - resetOffsetsAndAssert(args, appId, topic1, 0L, 10L, 0, 1); - resetOffsetsAndAssert(addTo(args, "--dry-run"), appId, topic1, 0L, 10L, 0, 1); - resetOffsetsAndAssert(addTo(args, 
"--execute"), appId, topic1, 0L, 0L, 0, 1); + resetOffsetsAndAssertForDryRunAndExecute(args, appId, topic1, 0L, 10L, 0, 1); resetForNextTest(appId, 10L, topic1); // reset to latest args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--topic", topic1, "--to-latest"}; - resetOffsetsAndAssert(args, appId, topic1, 20L, 10L, 0, 1); - resetOffsetsAndAssert(addTo(args, "--dry-run"), appId, topic1, 20L, 10L, 0, 1); - resetOffsetsAndAssert(addTo(args, "--execute"), appId, topic1, 20L, 20L, 0, 1); + resetOffsetsAndAssertForDryRunAndExecute(args, appId, topic1, 20L, 10L, 0, 1); resetForNextTest(appId, 5L, topic1); // reset to current args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--topic", topic1, "--to-current"}; - resetOffsetsAndAssert(args, appId, topic1, 5L, 5L, 0, 1); - resetOffsetsAndAssert(addTo(args, "--dry-run"), appId, topic1, 5L, 5L, 0, 1); - resetOffsetsAndAssert(addTo(args, "--execute"), appId, topic1, 5L, 5L, 0, 1); + resetOffsetsAndAssertForDryRunAndExecute(args, appId, topic1, 5L, 5L, 0, 1); // reset offset shift+. The current offset is 5, as of the prev test is executed (by --execute) args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--topic", topic1, "--shift-by", "3"}; - resetOffsetsAndAssert(args, appId, topic1, 8L, 5L, 0, 1); - resetOffsetsAndAssert(addTo(args, "--dry-run"), appId, topic1, 8L, 5L, 0, 1); - resetOffsetsAndAssert(addTo(args, "--execute"), appId, topic1, 8L, 8L, 0, 1); + resetOffsetsAndAssertForDryRunAndExecute(args, appId, topic1, 8L, 5L, 0, 1); // reset offset shift-. The current offset is 8, as of the prev test is executed (by --execute) args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--topic", topic1, "--shift-by", "-3"}; - resetOffsetsAndAssert(args, appId, topic1, 5L, 8L, 0, 1); - resetOffsetsAndAssert(addTo(args, "--dry-run"), appId, topic1, 5L, 8L, 0, 1); - resetOffsetsAndAssert(addTo(args, "--execute"), appId, topic1, 5L, 5L, 0, 1); + resetOffsetsAndAssertForDryRunAndExecute(args, appId, topic1, 5L, 8L, 0, 1); // reset offset shift by lower than earliest. The current offset is 5, as of the prev test is executed (by --execute) args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--topic", topic1, "--shift-by", "-150"}; - resetOffsetsAndAssert(args, appId, topic1, 0L, 5L, 0, 1); - resetOffsetsAndAssert(addTo(args, "--dry-run"), appId, topic1, 0L, 5L, 0, 1); - resetOffsetsAndAssert(addTo(args, "--execute"), appId, topic1, 0L, 0L, 0, 1); + resetOffsetsAndAssertForDryRunAndExecute(args, appId, topic1, 0L, 5L, 0, 1); // reset offset shift by higher than latest. 
The current offset is 0, as of the prev test is executed (by --execute) args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--topic", topic1, "--shift-by", "150"}; - resetOffsetsAndAssert(args, appId, topic1, 20L, 0L, 0, 1); - resetOffsetsAndAssert(addTo(args, "--dry-run"), appId, topic1, 20L, 0L, 0, 1); - resetOffsetsAndAssert(addTo(args, "--execute"), appId, topic1, 20L, 20L, 0, 1); + resetOffsetsAndAssertForDryRunAndExecute(args, appId, topic1, 20L, 0L, 0, 1); // export to file args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--topic", topic1, "--to-offset", "5", "--export"}; @@ -206,14 +196,13 @@ public void testResetOffset() throws Exception { Map> importedOffsets = service.resetOffsets(); assertEquals(exp, toOffsetMap(importedOffsets.get(appId))); } + ///////////////////////////////////////// Specific topic and partition (--topic topic1, --topic topic2) ///////////////////////////////////////// resetForNextTest(appId, 10L, topic1); // reset to specific offset args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--topic", topic1 + ":1", "--to-offset", "5"}; - resetOffsetsAndAssert(args, appId, topic1, 5L, 10L, 1); - resetOffsetsAndAssert(addTo(args, "--dry-run"), appId, topic1, 5L, 10L, 1); - resetOffsetsAndAssert(addTo(args, "--execute"), appId, topic1, 5L, 5L, 1); + resetOffsetsAndAssertForDryRunAndExecute(args, appId, topic1, 5L, 10L, 1); resetForNextTest(appId, 10L, topic1); @@ -236,22 +225,19 @@ public void testResetOffset() throws Exception { new TopicPartition(topic1, 1), 5L, new TopicPartition(topic2, 0), 10L, new TopicPartition(topic2, 1), 5L)); - ///////////////////////////////////////// All topics (--all-topics) ///////////////////////////////////////// + + ///////////////////////////////////////// All topics (--all-topics) ///////////////////////////////////////// resetForNextTest(appId, 10L, topic1, topic2); // reset to specific offset args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--all-topics", "--to-offset", "5"}; - resetOffsetsAndAssert(args, appId, topic1, topic2, 5L, 10L); - resetOffsetsAndAssert(addTo(args, "--dry-run"), appId, topic1, topic2, 5L, 10L); - resetOffsetsAndAssert(addTo(args, "--execute"), appId, topic1, topic2, 5L, 5L); + resetOffsetsAndAssertForDryRunAndExecute(args, appId, topic1, topic2, 5L, 10L); resetForNextTest(appId, 10L, topic1, topic2); // reset to specific offset with two --topic options args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--topic", topic1, "--topic", topic2, "--to-offset", "5"}; - resetOffsetsAndAssert(args, appId, topic1, topic2, 5L, 10L); - resetOffsetsAndAssert(addTo(args, "--dry-run"), appId, topic1, topic2, 5L, 10L); - resetOffsetsAndAssert(addTo(args, "--execute"), appId, topic1, topic2, 5L, 5L); + resetOffsetsAndAssertForDryRunAndExecute(args, appId, topic1, topic2, 5L, 10L); resetForNextTest(appId, 10L, topic1, topic2); @@ -320,7 +306,25 @@ private void AssertCommittedOffsets(String appId, tp21, expectedCommittedOffset), committedOffsets); } - // Performs resetting offsets and assertion for one topic + /** + * Resets offsets for a specific topic and partition(s) and verifies the results. + * + *

<p>This method performs the following steps:</p>
+ * <ul>
+ *   <li>Resets offsets for the specified topic and partitions using the provided arguments.</li>
+ *   <li>Asserts that the reset offsets match the expected offsets.</li>
+ *   <li>Asserts that the committed offsets match the expected committed offsets.</li>
+ * </ul>
+ * + * @param args The command-line arguments for resetting offsets. + * @param appId The application ID for the Kafka Streams application. + * @param topic The topic for which offsets will be reset. + * @param expectedOffset The expected offset value after the reset. + * @param expectedCommittedOffset The expected committed offset value after the reset. + * @param partitions The partitions of the topic to reset offsets for. + * @throws ExecutionException If an error occurs during the execution of the reset operation. + * @throws InterruptedException If the thread is interrupted during the reset operation. + */ private void resetOffsetsAndAssert(String[] args, String appId, String topic, @@ -346,7 +350,25 @@ private void resetOffsetsAndAssert(String[] args, AssertCommittedOffsets(appId, topic, expectedCommittedOffset, partitions); } - // Performs resetting offsets and assertion for two topics + /** + * Resets offsets for two topics and verifies the results. + * + *

<p>This method performs the following steps:</p>
+ * <ul>
+ *   <li>Resets offsets for the specified topics using the provided arguments.</li>
+ *   <li>Asserts that the reset offsets match the expected offsets.</li>
+ *   <li>Asserts that the committed offsets match the expected committed offsets.</li>
+ * </ul>
+ * + * @param args The command-line arguments for resetting offsets. + * @param appId The application ID for the Kafka Streams application. + * @param topic1 The first topic for which offsets will be reset. + * @param topic2 The second topic for which offsets will be reset. + * @param expectedOffset The expected offset value after the reset. + * @param expectedCommittedOffset The expected committed offset value after the reset. + * @throws ExecutionException If an error occurs during the execution of the reset operation. + * @throws InterruptedException If the thread is interrupted during the reset operation. + */ private void resetOffsetsAndAssert(String[] args, String appId, String topic1, @@ -372,7 +394,24 @@ private void resetOffsetsAndAssert(String[] args, AssertCommittedOffsets(appId, topic1, topic2, expectedCommittedOffset); } - // Performs resetting offsets and assertion for given topic partitions + /** + * Resets offsets for the specified topics and verifies the results. + * + *

<p>This method performs the following steps:</p>
+ * <ul>
+ *   <li>Resets offsets for the given topics using the provided arguments.</li>
+ *   <li>Asserts that the reset offsets match the expected offsets.</li>
+ *   <li>Asserts that the committed offsets match the expected committed offsets.</li>
+ * </ul>
+ * + * @param args The command-line arguments for resetting offsets. + * @param appId The application ID for the Kafka Streams application. + * @param topics The list of topics for which offsets will be reset. + * @param expectedOffsets A map of expected offsets for each topic partition after the reset. + * @param expectedCommittedOffsets A map of expected committed offsets for each topic partition after the reset. + * @throws ExecutionException If an error occurs during the execution of the reset operation. + * @throws InterruptedException If the thread is interrupted during the reset operation. + */ private void resetOffsetsAndAssert(String[] args, String appId, List topics, @@ -389,6 +428,28 @@ private void resetOffsetsAndAssert(String[] args, assertEquals(expectedCommittedOffsets, committedOffsets(topics, appId)); } + private void resetOffsetsAndAssertForDryRunAndExecute(String[] args, + String appId, + String topic, + long expectedOffset, + long expectedCommittedOffset, + int... partitions) throws ExecutionException, InterruptedException { + resetOffsetsAndAssert(args, appId, topic, expectedOffset, expectedCommittedOffset, partitions); + resetOffsetsAndAssert(addTo(args, "--dry-run"), appId, topic, expectedOffset, expectedCommittedOffset, partitions); + resetOffsetsAndAssert(addTo(args, "--execute"), appId, topic, expectedOffset, expectedOffset, partitions); + } + + private void resetOffsetsAndAssertForDryRunAndExecute(String[] args, + String appId, + String topic1, + String topic2, + long expectedOffset, + long expectedCommittedOffset) throws ExecutionException, InterruptedException { + resetOffsetsAndAssert(args, appId, topic1, topic2, expectedOffset, expectedCommittedOffset); + resetOffsetsAndAssert(addTo(args, "--dry-run"), appId, topic1, topic2, expectedOffset, expectedCommittedOffset); + resetOffsetsAndAssert(addTo(args, "--execute"), appId, topic1, topic2, expectedOffset, expectedOffset); + } + private Map committedOffsets(List topics, String group) throws ExecutionException, InterruptedException { return adminClient.listConsumerGroupOffsets(group) @@ -407,6 +468,12 @@ private static Map> convertOffsetsToLong(Map e1.getValue().offset())))); } + private Map toOffsetMap(Map map) { + return map.entrySet() + .stream() + .collect(toMap(Map.Entry::getKey, e -> e.getValue().offset())); + } + private StreamsGroupCommand.StreamsGroupService getStreamsGroupService(String[] args) { StreamsGroupCommandOptions opts = StreamsGroupCommandOptions.fromArgs(args); return new StreamsGroupCommand.StreamsGroupService( @@ -421,12 +488,6 @@ private void writeContentToFile(File file, String content) throws IOException { } } - private Map toOffsetMap(Map map) { - return map.entrySet() - .stream() - .collect(toMap(Map.Entry::getKey, e -> e.getValue().offset())); - } - private String[] addTo(String[] args, String... extra) { List res = new ArrayList<>(asList(args)); res.addAll(asList(extra)); @@ -441,6 +502,14 @@ private String generateRandomAppId() { return APP_ID_PREFIX + TestUtils.randomString(10); } + /** + * Produces messages to two partitions of the specified topic and consumes them. + * + * @param appId The application ID for the Kafka Streams application. + * @param topic1 The first topic to produce and consume messages from. + * @param topic2 The second topic to produce and consume messages from. + * @param numOfCommittedMessages The number of committed messages to process before shutting down. 
+ */ private void produceConsumeShutdown(String appId, String topic1, String topic2, long numOfCommittedMessages) throws Exception { STREAMS_CONFIG.put(StreamsConfig.APPLICATION_ID_CONFIG, appId); STREAMS_CONFIG.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE_V2); cluster.createTopic(topic1, 2); cluster.createTopic(topic2, 2); @@ -489,10 +558,15 @@ private void produceConsumeShutdown(String appId, String topic1, String topic2, } finally { assertEquals(numOfCommittedMessages, recordCount.get(), "Expected " + numOfCommittedMessages + " records processed but only got " + recordCount.get()); streams.close(); -// IntegrationTestUtils.waitForEmptyConsumerGroup(CLUSTER.createAdminClient(), appId, 60000); } } + /** + * Produces messages to two partitions of the specified topic. + * + * @param numOfMessages The number of messages to produce for each partition. + * @param topic The topic to which the messages will be produced. + */ private static void produceMessagesOnTwoPartitions(final int numOfMessages, final String topic) { // partition 0 From 204cd032362f96025b1b07870f0f5ca104b3371d Mon Sep 17 00:00:00 2001 From: aliehsaeedii Date: Mon, 5 May 2025 21:57:06 +0200 Subject: [PATCH 04/26] revert irrelevant changes --- .../kafka/clients/admin/KafkaAdminClient.java | 20 +------------------ .../consumer/group/ConsumerGroupCommand.java | 8 +++----- 2 files changed, 4 insertions(+), 24 deletions(-) diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java b/clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java index f09b8cff4cae7..3206d6f19ed96 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java @@ -3771,17 +3771,6 @@ public ListConsumerGroupOffsetsResult listConsumerGroupOffsets(Map groupSpecs, -// ListStreamsGroupOffsetsOptions options) { -// SimpleAdminApiFuture> future = -// ListStteamsGroupOffsetsHandler.newFuture(groupSpecs.keySet()); -// ListConsumerGroupOffsetsHandler handler = -// new ListConsumerGroupOffsetsHandler(groupSpecs, options.requireStable(), logContext); -// invokeDriver(handler, future, options.timeoutMs); -// return new ListConsumerGroupOffsetsResult(future.all()); -// } - @Override public ListStreamsGroupOffsetsResult listStreamsGroupOffsets(Map groupSpecs, ListStreamsGroupOffsetsOptions options) { @@ -3790,14 +3779,7 @@ public ListStreamsGroupOffsetsResult listStreamsGroupOffsets(Map new ListConsumerGroupOffsetsSpec().topicPartitions(entry.getValue().topicPartitions()) )); -// Map consumerGroupSpecs = new HashMap<>(); -// for (Map.Entry entry : groupSpecs.entrySet()) { -// ListConsumerGroupOffsetsSpec spec = new ListConsumerGroupOffsetsSpec(); -// spec.topicPartitions(entry.getValue().topicPartitions()); -// consumerGroupSpecs.put(entry.getKey(), spec); -// } - ListConsumerGroupOffsetsResult res = listConsumerGroupOffsets(consumerGroupSpecs, new ListConsumerGroupOffsetsOptions()); - return new ListStreamsGroupOffsetsResult(res); + return new ListStreamsGroupOffsetsResult(listConsumerGroupOffsets(consumerGroupSpecs, new ListConsumerGroupOffsetsOptions())); } @Override diff --git a/tools/src/main/java/org/apache/kafka/tools/consumer/group/ConsumerGroupCommand.java b/tools/src/main/java/org/apache/kafka/tools/consumer/group/ConsumerGroupCommand.java index 7db6ed5fdfade..b001ae7c6f7fd 100644 --- a/tools/src/main/java/org/apache/kafka/tools/consumer/group/ConsumerGroupCommand.java +++
b/tools/src/main/java/org/apache/kafka/tools/consumer/group/ConsumerGroupCommand.java @@ -29,7 +29,6 @@ import org.apache.kafka.clients.admin.DescribeTopicsResult; import org.apache.kafka.clients.admin.GroupListing; import org.apache.kafka.clients.admin.ListConsumerGroupOffsetsOptions; -import org.apache.kafka.clients.admin.ListConsumerGroupOffsetsResult; import org.apache.kafka.clients.admin.ListConsumerGroupOffsetsSpec; import org.apache.kafka.clients.admin.ListGroupsOptions; import org.apache.kafka.clients.admin.ListGroupsResult; @@ -1090,11 +1089,10 @@ private Collection getPartitionsToReset(String groupId) throws E private Map getCommittedOffsets(String groupId) { try { - ListConsumerGroupOffsetsResult res = adminClient.listConsumerGroupOffsets( + return adminClient.listConsumerGroupOffsets( Collections.singletonMap(groupId, new ListConsumerGroupOffsetsSpec()), - withTimeoutMs(new ListConsumerGroupOffsetsOptions())); - return res. - partitionsToOffsetAndMetadata(groupId).get(); + withTimeoutMs(new ListConsumerGroupOffsetsOptions()) + ).partitionsToOffsetAndMetadata(groupId).get(); } catch (InterruptedException | ExecutionException e) { throw new RuntimeException(e); } From 52d709b8739804b44d8c6e1e1a04746c563a8c39 Mon Sep 17 00:00:00 2001 From: aliehsaeedii Date: Tue, 6 May 2025 01:11:41 +0200 Subject: [PATCH 05/26] clean-up --- .../tools/streams/ResetStreamsGroupOffsetTest.java | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/tools/src/test/java/org/apache/kafka/tools/streams/ResetStreamsGroupOffsetTest.java b/tools/src/test/java/org/apache/kafka/tools/streams/ResetStreamsGroupOffsetTest.java index 2ccf142340576..2120d9e54e6a0 100644 --- a/tools/src/test/java/org/apache/kafka/tools/streams/ResetStreamsGroupOffsetTest.java +++ b/tools/src/test/java/org/apache/kafka/tools/streams/ResetStreamsGroupOffsetTest.java @@ -73,6 +73,8 @@ public class ResetStreamsGroupOffsetTest { private static final String TOPIC_PREFIX = "foo-"; private static final String APP_ID_PREFIX = "streams-group-command-test"; + private static final Properties STREAMS_CONFIG = new Properties(); + private static final int RECORD_TOTAL = 10; public static EmbeddedKafkaCluster cluster; private static String bootstrapServers; private static Admin adminClient; @@ -87,11 +89,16 @@ public static void startCluster() { bootstrapServers = cluster.bootstrapServers(); adminClient = cluster.createAdminClient(); + createStreamsConfig(bootstrapServers); + } + + private static void createStreamsConfig(String bootstrapServers) { STREAMS_CONFIG.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); STREAMS_CONFIG.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); STREAMS_CONFIG.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.StringSerde.class); STREAMS_CONFIG.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.StringSerde.class); STREAMS_CONFIG.put(StreamsConfig.GROUP_PROTOCOL_CONFIG, GroupProtocol.STREAMS.name().toLowerCase(Locale.getDefault())); + STREAMS_CONFIG.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE_V2); } @AfterAll @@ -99,9 +106,6 @@ public static void closeCluster() { cluster.stop(); } - private static final Properties STREAMS_CONFIG = new Properties(); - private static final int RECORD_TOTAL = 10; - @Test public void testResetWithUnrecognizedOption() { String[] args = new String[]{"--unrecognized-option", "--bootstrap-server", bootstrapServers, "--reset-offsets", "--all-topics", "--to-offset", "5"}; @@ -512,7 +516,6 @@ private String 
generateRandomAppId() { */ private void produceConsumeShutdown(String appId, String topic1, String topic2, long numOfCommittedMessages) throws Exception { STREAMS_CONFIG.put(StreamsConfig.APPLICATION_ID_CONFIG, appId); - STREAMS_CONFIG.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE_V2); cluster.createTopic(topic1, 2); cluster.createTopic(topic2, 2); From 557a76c6cd03eccdc8c1fe6b06a1ec5055311817 Mon Sep 17 00:00:00 2001 From: aliehsaeedii Date: Mon, 26 May 2025 12:48:29 +0200 Subject: [PATCH 06/26] address reviews --- .../kafka/tools/streams/StreamsGroupCommand.java | 11 +++++++---- .../tools/streams/StreamsGroupCommandOptions.java | 2 +- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java index 5cfa386df6127..9c66104bbbe71 100644 --- a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java +++ b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java @@ -73,6 +73,8 @@ public class StreamsGroupCommand { + private static final String TOPIC_PARTITION_SEPARATOR = ":"; + public static void main(String[] args) { StreamsGroupCommandOptions opts = new StreamsGroupCommandOptions(args); try { @@ -118,8 +120,9 @@ public static void run(StreamsGroupCommandOptions opts) { static void printOffsetsToReset(Map> groupAssignmentsToReset) { String format = "%n%-30s %-30s %-10s %-15s"; - if (!groupAssignmentsToReset.isEmpty()) + if (!groupAssignmentsToReset.isEmpty()) { System.out.printf(format, "GROUP", "TOPIC", "PARTITION", "NEW-OFFSET"); + } groupAssignmentsToReset.forEach((groupId, assignment) -> assignment.forEach((streamsAssignment, offsetAndMetadata) -> @@ -487,7 +490,7 @@ private List parseTopicPartitionsToReset(List topicArgs) List topics = new ArrayList<>(); topicArgs.forEach(topicArg -> { - if (topicArg.contains(":")) + if (topicArg.contains(TOPIC_PARTITION_SEPARATOR)) topicsWithPartitions.add(topicArg); else topics.add(topicArg); @@ -837,9 +840,9 @@ private static class Ignore implements LogOffsetResult { } */ private static void maybePrintEmptyGroupState(String group, GroupState state) { if (state == GroupState.DEAD) { - printError("Streams group '" + group + "' does not exist.", Optional.empty()); + printError("streams group '" + group + "' does not exist.", Optional.empty()); } else if (state == GroupState.EMPTY) { - printError("Streams group '" + group + "' has no active members.", Optional.empty()); + printError("streams group '" + group + "' has no active members.", Optional.empty()); } } diff --git a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java index ee70c27a1c3ee..645e13b0bc2d3 100644 --- a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java +++ b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java @@ -52,7 +52,7 @@ public class StreamsGroupCommandOptions extends CommandDefaultOptions { public static final String MEMBERS_DOC = "Describe members of the group. This option may be used with the '--describe' option only."; public static final String OFFSETS_DOC = "Describe the group and list all topic partitions in the group along with their offset information." 
+ "This is the default sub-action and may be used with the '--describe' option only."; - private static final String RESET_OFFSETS_DOC = "Reset offsets of Streams group. The instances should be inactive" + NL + + private static final String RESET_OFFSETS_DOC = "Reset offsets of streams group. The instances should be inactive" + NL + "Has 2 execution options: --dry-run (the default) to plan which offsets to reset, and --execute to update the offsets." + NL + "You must choose one of the following reset specifications: --to-datetime, --by-duration, --to-earliest, " + "--to-latest, --shift-by, --from-file, --to-current, --to-offset." + NL + From ab7d858c630699c04ddbb32bff6d11ac5141ce89 Mon Sep 17 00:00:00 2001 From: aliehsaeedii Date: Mon, 26 May 2025 23:16:24 +0200 Subject: [PATCH 07/26] correct comments/msgs --- .../kafka/clients/admin/ListStreamsGroupOffsetsSpec.java | 4 ++-- .../org/apache/kafka/tools/streams/StreamsGroupCommand.java | 4 ++-- .../kafka/tools/streams/StreamsGroupCommandOptions.java | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ListStreamsGroupOffsetsSpec.java b/clients/src/main/java/org/apache/kafka/clients/admin/ListStreamsGroupOffsetsSpec.java index dc49942f5c05b..6daef6b0b0746 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/ListStreamsGroupOffsetsSpec.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/ListStreamsGroupOffsetsSpec.java @@ -33,7 +33,7 @@ public class ListStreamsGroupOffsetsSpec { private Collection topicPartitions; /** - * Set the topic partitions whose offsets are to be listed for a Streams group. + * Set the topic partitions whose offsets are to be listed for a streams group. */ public ListStreamsGroupOffsetsSpec topicPartitions(Collection topicPartitions) { this.topicPartitions = topicPartitions; @@ -41,7 +41,7 @@ public ListStreamsGroupOffsetsSpec topicPartitions(Collection to } /** - * Returns the topic partitions whose offsets are to be listed for a Streams group. + * Returns the topic partitions whose offsets are to be listed for a streams group. 
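// ---------------------------------------------------------------------------
// Editor's sketch, not part of the patch: how a spec like the one documented
// here can be passed to Admin#listStreamsGroupOffsets, assuming an Admin
// instance named `admin`; the group id and topic partition are placeholders.
// The call shape mirrors the tool's own use of
// listStreamsGroupOffsets(Map.of(groupId, new ListStreamsGroupOffsetsSpec())).
ListStreamsGroupOffsetsSpec spec = new ListStreamsGroupOffsetsSpec()
    .topicPartitions(List.of(new TopicPartition("input-topic", 0)));
Map<TopicPartition, OffsetAndMetadata> committed =
    admin.listStreamsGroupOffsets(Map.of("my-streams-app", spec))
        .partitionsToOffsetAndMetadata("my-streams-app")
        .get();
// ---------------------------------------------------------------------------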
*/ public Collection topicPartitions() { return topicPartitions; diff --git a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java index 9c66104bbbe71..a350180c0a4dc 100644 --- a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java +++ b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java @@ -840,9 +840,9 @@ private static class Ignore implements LogOffsetResult { } */ private static void maybePrintEmptyGroupState(String group, GroupState state) { if (state == GroupState.DEAD) { - printError("streams group '" + group + "' does not exist.", Optional.empty()); + printError("Streams group '" + group + "' does not exist.", Optional.empty()); } else if (state == GroupState.EMPTY) { - printError("streams group '" + group + "' has no active members.", Optional.empty()); + printError("Streams group '" + group + "' has no active members.", Optional.empty()); } } diff --git a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java index 645e13b0bc2d3..37f79bf84d2b7 100644 --- a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java +++ b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java @@ -57,7 +57,7 @@ public class StreamsGroupCommandOptions extends CommandDefaultOptions { "You must choose one of the following reset specifications: --to-datetime, --by-duration, --to-earliest, " + "--to-latest, --shift-by, --from-file, --to-current, --to-offset." + NL + "To define the scope use --all-topics or --topic. One scope must be specified unless you use '--from-file'."; - private static final String DRY_RUN_DOC = "Only show results without executing changes on Streams Group. Supported operations: reset-offsets."; + private static final String DRY_RUN_DOC = "Only show results without executing changes on streams group. Supported operations: reset-offsets."; private static final String EXECUTE_DOC = "Execute operation. Supported operations: reset-offsets."; private static final String EXPORT_DOC = "Export operation execution to a CSV file. 
Supported operations: reset-offsets."; private static final String RESET_TO_OFFSET_DOC = "Reset offsets to a specific offset."; From 78aed575f6909b49301a6d672af89c9d7da75a09 Mon Sep 17 00:00:00 2001 From: aliehsaeedii Date: Mon, 2 Jun 2025 12:44:00 +0200 Subject: [PATCH 08/26] reset offsets of src topics --- .../tools/streams/StreamsGroupCommand.java | 81 ++++++++++++++++++- .../streams/ResetStreamsGroupOffsetTest.java | 12 ++- 2 files changed, 90 insertions(+), 3 deletions(-) diff --git a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java index a350180c0a4dc..82ec993ca0694 100644 --- a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java +++ b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java @@ -18,6 +18,7 @@ import org.apache.kafka.clients.CommonClientConfigs; import org.apache.kafka.clients.admin.Admin; +import org.apache.kafka.clients.admin.DeleteTopicsResult; import org.apache.kafka.clients.admin.DescribeStreamsGroupsResult; import org.apache.kafka.clients.admin.GroupListing; import org.apache.kafka.clients.admin.ListGroupsOptions; @@ -37,6 +38,7 @@ import org.apache.kafka.common.KafkaFuture; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.errors.GroupIdNotFoundException; +import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.requests.ListOffsetsResponse; import org.apache.kafka.common.utils.Utils; import org.apache.kafka.server.util.CommandLineUtils; @@ -403,8 +405,17 @@ Map getOffsets(StreamsGroupDescription description) Map getCommittedOffsets(String groupId) { try { - return adminClient.listStreamsGroupOffsets( - Map.of(groupId, new ListStreamsGroupOffsetsSpec())).partitionsToOffsetAndMetadata(groupId).get(); + var sourceTopics = adminClient.describeStreamsGroups(List.of(groupId)) + .all().get().get(groupId) + .subtopologies().stream() + .flatMap(subtopology -> subtopology.sourceTopics().stream()) + .collect(Collectors.toSet()); + + var allTopicPartitions = adminClient.listStreamsGroupOffsets(Map.of(groupId, new ListStreamsGroupOffsetsSpec())) + .partitionsToOffsetAndMetadata(groupId).get(); + + allTopicPartitions.keySet().removeIf(tp -> !sourceTopics.contains(tp.topic())); + return allTopicPartitions; } catch (InterruptedException | ExecutionException e) { throw new RuntimeException(e); } @@ -424,7 +435,17 @@ Map> resetOffsets() { switch (state) { case "Empty": case "Dead": + // reset offsets in source topics result.put(groupId, resetOffsetsForInactiveGroup(groupId)); + // delete internal topics + List internalTopics = retrieveInternalTopics(List.of(groupId)).get(groupId); + if (internalTopics != null && !internalTopics.isEmpty()) { + try { + adminClient.deleteTopics(internalTopics).all().get(); + } catch (InterruptedException | ExecutionException e) { + printError("Deleting internal topics for group '" + groupId + "' failed due to " + e.getMessage(), Optional.of(e)); + } + } break; default: printError("Assignments can only be reset if the group '" + groupId + "' is inactive, but the current state is " + state + ".", Optional.empty()); @@ -444,6 +465,50 @@ Map> resetOffsets() { return result; } + // Visibility for testing + Map> retrieveInternalTopics(List groupIds) { + Map> groupToInternalTopics = new HashMap<>(); + try { + Map descriptionMap = adminClient.describeStreamsGroups(groupIds).all().get(); + for (StreamsGroupDescription description : 
descriptionMap.values()) { + + List nonInternalTopics = description.subtopologies().stream() + .flatMap(subtopology -> Stream.concat( + subtopology.sourceTopics().stream(), + subtopology.repartitionSinkTopics().stream())) + .distinct() + .toList(); + + + List internalTopics = description.subtopologies().stream() + .flatMap(subtopology -> Stream.concat( + subtopology.repartitionSourceTopics().keySet().stream(), + subtopology.stateChangelogTopics().keySet().stream())) + .filter(topic -> !nonInternalTopics.contains(topic)) + .collect(Collectors.toList()); + internalTopics.removeIf(topic -> { + if (!isInferredInternalTopic(topic, description.groupId())) { + printError("The internal topic '" + topic + "' is not inferred as internal " + + "and thus will not be deleted with the group '" + description.groupId() + "'.", Optional.empty()); + return true; + } + return false; + }); + if (!internalTopics.isEmpty()) { + groupToInternalTopics.put(description.groupId(), internalTopics); + } + } + } catch (InterruptedException | ExecutionException e) { + if (e.getCause() instanceof UnsupportedVersionException) { + printError("Retrieving internal topics is not supported by the broker version. " + + "Use 'kafka-topics.sh' to list and delete the group's internal topics.", Optional.of(e.getCause())); + } else { + printError("Retrieving internal topics failed due to " + e.getMessage(), Optional.of(e)); + } + } + return groupToInternalTopics; + } + private Map resetOffsetsForInactiveGroup(String groupId) { try { Collection partitionsToReset = getPartitionsToReset(groupId); @@ -816,6 +881,18 @@ private Map getLogOffsets(Collection inputStream2 = builder.stream(topic2); final AtomicInteger recordCount = new AtomicInteger(0); +// +// final KTable valueCounts = inputStream1.merge(inputStream2) +// .groupByKey() +// .aggregate( +// () -> "()", +// (key, value, aggregate) -> aggregate + ",(" + key + ": " + value + ")", +// Materialized.as("aggregated_value")); final KTable valueCounts = inputStream1.merge(inputStream2) - .groupByKey() + // Explicit repartition step with a custom internal topic name + .groupBy((key, value) -> key, Grouped.with(Serdes.String(), Serdes.String())) .aggregate( () -> "()", (key, value, aggregate) -> aggregate + ",(" + key + ": " + value + ")", From a5fe8c8f3b664ca09448624907b99b65eb94bedd Mon Sep 17 00:00:00 2001 From: aliehsaeedii Date: Tue, 3 Jun 2025 07:50:00 +0200 Subject: [PATCH 09/26] backup --- .../org/apache/kafka/tools/OffsetsUtils.java | 4 + .../tools/streams/StreamsGroupCommand.java | 78 ++++++++++--- .../streams/StreamsGroupCommandOptions.java | 12 ++ .../streams/ResetStreamsGroupOffsetTest.java | 104 ++++++++++++------ 4 files changed, 149 insertions(+), 49 deletions(-) create mode 100644 tools/src/main/java/org/apache/kafka/tools/OffsetsUtils.java diff --git a/tools/src/main/java/org/apache/kafka/tools/OffsetsUtils.java b/tools/src/main/java/org/apache/kafka/tools/OffsetsUtils.java new file mode 100644 index 0000000000000..6541cb67b6ac7 --- /dev/null +++ b/tools/src/main/java/org/apache/kafka/tools/OffsetsUtils.java @@ -0,0 +1,4 @@ +package org.apache.kafka.tools; + +public class OffsetsUtils { +} diff --git a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java index 82ec993ca0694..59d99b39a2085 100644 --- a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java +++ b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java @@ -18,7 
+18,6 @@ import org.apache.kafka.clients.CommonClientConfigs; import org.apache.kafka.clients.admin.Admin; -import org.apache.kafka.clients.admin.DeleteTopicsResult; import org.apache.kafka.clients.admin.DescribeStreamsGroupsResult; import org.apache.kafka.clients.admin.GroupListing; import org.apache.kafka.clients.admin.ListGroupsOptions; @@ -38,10 +37,12 @@ import org.apache.kafka.common.KafkaFuture; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.errors.GroupIdNotFoundException; +import org.apache.kafka.common.errors.UnknownTopicOrPartitionException; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.requests.ListOffsetsResponse; import org.apache.kafka.common.utils.Utils; import org.apache.kafka.server.util.CommandLineUtils; +import org.apache.kafka.tools.consumer.group.ConsumerGroupCommand; import org.apache.kafka.tools.consumer.group.CsvUtils; import com.fasterxml.jackson.core.JsonProcessingException; @@ -421,9 +422,30 @@ Map getCommittedOffsets(String groupId) { } } + private List filterExistingGroupTopics(String groupId, List topicPartitions) { + try { + var allTopicPartitions = adminClient.listStreamsGroupOffsets(Map.of(groupId, new ListStreamsGroupOffsetsSpec())) + .partitionsToOffsetAndMetadata(groupId).get(); + boolean allPresent = topicPartitions.stream().allMatch(allTopicPartitions::containsKey); + if (!allPresent) { + printError("One or more topics are not part of the group '" + groupId + "'.", Optional.empty()); + return Collections.emptyList(); + } + return topicPartitions; + } catch (InterruptedException | ExecutionException e) { + throw new RuntimeException(e); + } + } + + Map> resetOffsets() { + // Dry-run is the default behavior if --execute is not specified + boolean dryRun = opts.options.has(opts.dryRunOpt) || !opts.options.has(opts.executeOpt); + Map> result = new HashMap<>(); - List groupIds = listStreamsGroups(); + List groupIds = opts.options.has(opts.allGroupsOpt) + ? listStreamsGroups() + : opts.options.valuesOf(opts.groupOpt); if (!groupIds.isEmpty()) { Map> streamsGroups = adminClient.describeStreamsGroups( groupIds @@ -436,14 +458,23 @@ Map> resetOffsets() { case "Empty": case "Dead": // reset offsets in source topics - result.put(groupId, resetOffsetsForInactiveGroup(groupId)); + result.put(groupId, resetOffsetsForInactiveGroup(groupId, dryRun)); // delete internal topics - List internalTopics = retrieveInternalTopics(List.of(groupId)).get(groupId); - if (internalTopics != null && !internalTopics.isEmpty()) { - try { - adminClient.deleteTopics(internalTopics).all().get(); - } catch (InterruptedException | ExecutionException e) { - printError("Deleting internal topics for group '" + groupId + "' failed due to " + e.getMessage(), Optional.of(e)); + if (!dryRun) { + List internalTopics = retrieveInternalTopics(List.of(groupId)).get(groupId); + if (internalTopics != null && !internalTopics.isEmpty()) { + try { + adminClient.deleteTopics(internalTopics).all().get(); + } catch (InterruptedException | ExecutionException e) { + if (e.getCause() instanceof UnknownTopicOrPartitionException) { + printError("Deleting internal topics for group '" + groupId + "' failed because the topics do not exist.", Optional.empty()); + } else if (e.getCause() instanceof UnsupportedVersionException) { + printError("Deleting internal topics is not supported by the broker version. 
" + + "Use 'kafka-topics.sh' to delete the group's internal topics.", Optional.of(e.getCause())); + } else { + printError("Deleting internal topics for group '" + groupId + "' failed due to " + e.getMessage(), Optional.of(e)); + } + } } } break; @@ -455,7 +486,7 @@ Map> resetOffsets() { throw new RuntimeException(ie); } catch (ExecutionException ee) { if (ee.getCause() instanceof GroupIdNotFoundException) { - result.put(groupId, resetOffsetsForInactiveGroup(groupId)); + result.put(groupId, resetOffsetsForInactiveGroup(groupId, dryRun)); } else { throw new RuntimeException(ee); } @@ -509,13 +540,10 @@ Map> retrieveInternalTopics(List groupIds) { return groupToInternalTopics; } - private Map resetOffsetsForInactiveGroup(String groupId) { + private Map resetOffsetsForInactiveGroup(String groupId, boolean dryRun) { try { Collection partitionsToReset = getPartitionsToReset(groupId); Map preparedOffsets = prepareOffsetsToReset(groupId, partitionsToReset); - - // Dry-run is the default behavior if --execute is not specified - boolean dryRun = opts.options.has(opts.dryRunOpt) || !opts.options.has(opts.executeOpt); if (!dryRun) { adminClient.alterStreamsGroupOffsets( groupId, @@ -541,7 +569,11 @@ private Collection getPartitionsToReset(String groupId) throws E return getCommittedOffsets(groupId).keySet(); } else if (opts.options.has(opts.topicOpt)) { List topics = opts.options.valuesOf(opts.topicOpt); - return parseTopicPartitionsToReset(topics); + + List partitions = parseTopicPartitionsToReset(topics); + // if the user specified topics that do not belong to this group, we filter them out + partitions = filterExistingGroupTopics(groupId, partitions); + return partitions; } else { if (!opts.options.has(opts.resetFromFileOpt)) CommandLineUtils.printUsageAndExit(opts.parser, "One of the reset scopes should be defined: --all-topics, --topic."); @@ -893,6 +925,22 @@ public static boolean matchesInternalTopicFormat(final String topicName) { || topicName.matches(".+-KTABLE-FK-JOIN-SUBSCRIPTION-RESPONSE-\\d+-topic"); } + List collectAllTopics(String groupId) { + try { + return adminClient.describeStreamsGroups(List.of(groupId)) + .all().get().get(groupId) + .subtopologies().stream() + .flatMap(subtopology -> Stream.of( + subtopology.sourceTopics().stream(), + subtopology.repartitionSinkTopics().stream(), + subtopology.repartitionSourceTopics().keySet().stream(), + subtopology.stateChangelogTopics().keySet().stream() + ).flatMap(s -> s)).distinct().collect(Collectors.toList()); + } catch (InterruptedException | ExecutionException e) { + throw new RuntimeException(e); + } + } + interface LogOffsetResult { } private static class LogOffset implements LogOffsetResult { diff --git a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java index 37f79bf84d2b7..fd496d6816dc1 100644 --- a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java +++ b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java @@ -38,6 +38,7 @@ public class StreamsGroupCommandOptions extends CommandDefaultOptions { public static final String BOOTSTRAP_SERVER_DOC = "REQUIRED: The server(s) to connect to."; public static final String GROUP_DOC = "The streams group we wish to act on."; + private static final String ALL_GROUPS_DOC = "Apply to all streams groups."; private static final String TOPIC_DOC = "The topic whose streams group information should be deleted or topic whose 
should be included in the reset offset process. " + "In `reset-offsets` case, partitions can be specified using this format: `topic1:0,1,2`, where 0,1,2 are the partition to be included in the process. " + "Reset-offsets also supports multiple topic inputs."; @@ -79,6 +80,7 @@ public class StreamsGroupCommandOptions extends CommandDefaultOptions { final OptionSpec allTopicsOpt; public final OptionSpec listOpt; public final OptionSpec describeOpt; + final OptionSpec allGroupsOpt; public final OptionSpec timeoutMsOpt; public final OptionSpec commandConfigOpt; public final OptionSpec stateOpt; @@ -99,6 +101,7 @@ public class StreamsGroupCommandOptions extends CommandDefaultOptions { public final OptionSpec verboseOpt; final Set> allResetOffsetScenarioOpts; + final Set> allGroupSelectionScopeOpts; public static StreamsGroupCommandOptions fromArgs(String[] args) { @@ -125,6 +128,7 @@ public StreamsGroupCommandOptions(String[] args) { allTopicsOpt = parser.accepts("all-topics", ALL_TOPICS_DOC); listOpt = parser.accepts("list", LIST_DOC); describeOpt = parser.accepts("describe", DESCRIBE_DOC); + allGroupsOpt = parser.accepts("all-groups", ALL_GROUPS_DOC); timeoutMsOpt = parser.accepts("timeout", TIMEOUT_MS_DOC) .availableIf(describeOpt) .withRequiredArg() @@ -178,6 +182,7 @@ public StreamsGroupCommandOptions(String[] args) { allResetOffsetScenarioOpts = new HashSet<>(Arrays.asList(resetToOffsetOpt, resetShiftByOpt, resetToDatetimeOpt, resetByDurationOpt, resetToEarliestOpt, resetToLatestOpt, resetToCurrentOpt, resetFromFileOpt)); + allGroupSelectionScopeOpts = new HashSet<>(Arrays.asList(groupOpt, allGroupsOpt)); } public void checkArgs() { @@ -186,6 +191,9 @@ public void checkArgs() { CommandLineUtils.checkRequiredArgs(parser, options, bootstrapServerOpt); if (options.has(describeOpt)) { + if (!options.has(groupOpt) && !options.has(allGroupsOpt)) + CommandLineUtils.printUsageAndExit(parser, + "Option " + describeOpt + " takes one of these options: " + allGroupSelectionScopeOpts.stream().map(Object::toString).collect(Collectors.joining(", "))); List> mutuallyExclusiveOpts = Arrays.asList(membersOpt, offsetsOpt, stateOpt); if (mutuallyExclusiveOpts.stream().mapToInt(o -> options.has(o) ? 
1 : 0).sum() > 1) { CommandLineUtils.printUsageAndExit(parser, @@ -199,6 +207,10 @@ public void checkArgs() { LOGGER.debug("Option " + timeoutMsOpt + " is applicable only when " + describeOpt + " is used."); } + if (!options.has(groupOpt) && !options.has(allGroupsOpt)) + CommandLineUtils.printUsageAndExit(parser, + "Option " + resetOffsetsOpt + " takes one of these options: " + allGroupSelectionScopeOpts.stream().map(Object::toString).collect(Collectors.joining(", "))); + if (options.has(resetOffsetsOpt)) { if (options.has(dryRunOpt) && options.has(executeOpt)) CommandLineUtils.printUsageAndExit(parser, "Option " + resetOffsetsOpt + " only accepts one of " + executeOpt + " and " + dryRunOpt); diff --git a/tools/src/test/java/org/apache/kafka/tools/streams/ResetStreamsGroupOffsetTest.java b/tools/src/test/java/org/apache/kafka/tools/streams/ResetStreamsGroupOffsetTest.java index c27fb342601f7..4f4956d446a29 100644 --- a/tools/src/test/java/org/apache/kafka/tools/streams/ResetStreamsGroupOffsetTest.java +++ b/tools/src/test/java/org/apache/kafka/tools/streams/ResetStreamsGroupOffsetTest.java @@ -26,7 +26,6 @@ import org.apache.kafka.coordinator.group.GroupCoordinatorConfig; import org.apache.kafka.streams.GroupProtocol; import org.apache.kafka.streams.KafkaStreams; -import org.apache.kafka.streams.KeyValue; import org.apache.kafka.streams.KeyValueTimestamp; import org.apache.kafka.streams.StreamsBuilder; import org.apache.kafka.streams.StreamsConfig; @@ -110,15 +109,15 @@ public static void closeCluster() { @Test public void testResetWithUnrecognizedOption() { - String[] args = new String[]{"--unrecognized-option", "--bootstrap-server", bootstrapServers, "--reset-offsets", "--all-topics", "--to-offset", "5"}; + String[] args = new String[]{"--unrecognized-option", "--bootstrap-server", bootstrapServers, "--reset-offsets", "--all-group", "--all-topics", "--to-offset", "5"}; assertThrows(OptionException.class, () -> getStreamsGroupService(args)); } @Test public void testResetOffset() throws Exception { - final String appId = generateRandomAppId(); - final String topic1 = generateRandomTopic(); - final String topic2 = generateRandomTopic(); + final String appId = "app2";//generateRandomAppId(); + final String topic1 = "test2-in";//generateRandomTopic(); + final String topic2 = "test2-out";//generateRandomTopic(); final int numOfPartitions = 2; String[] args; produceConsumeShutdown(appId, topic1, topic2, RECORD_TOTAL * numOfPartitions * 2); @@ -126,17 +125,17 @@ public void testResetOffset() throws Exception { produceMessagesOnTwoPartitions(RECORD_TOTAL, topic2); /////////////////////////////////////////////// Specific topic (--topic topic1) //////////////////////////////////////////////// // reset to specific offset, offset already on 10 - args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--topic", topic1, "--to-offset", "5"}; + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--topic", topic1, "--to-offset", "5"}; resetOffsetsAndAssertForDryRunAndExecute(args, appId, topic1, 5L, 10L, 0, 1); resetForNextTest(appId, 10L, topic1); // reset to specific offset when after end offset, offset already on 10 - args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--topic", topic1, "--to-offset", "30"}; + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--topic", topic1, "--to-offset", "30"}; resetOffsetsAndAssertForDryRunAndExecute(args, appId, topic1, 
20L, 10L, 0, 1); // reset to specific offset when before begin offset, offset already on 20 - args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--topic", topic1, "--to-offset", "-30"}; + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--topic", topic1, "--to-offset", "-30"}; resetOffsetsAndAssertForDryRunAndExecute(args, appId, topic1, 0L, 20L, 0, 1); resetForNextTest(appId, 10L, topic1); @@ -144,51 +143,51 @@ public void testResetOffset() throws Exception { // reset to specific date time DateTimeFormatter format = DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSS"); LocalDateTime dateTime = now().minusDays(1); - args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--topic", topic1, "--to-datetime", format.format(dateTime)}; + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--topic", topic1, "--to-datetime", format.format(dateTime)}; resetOffsetsAndAssertForDryRunAndExecute(args, appId, topic1, 0L, 10L, 0, 1); resetForNextTest(appId, 10L, topic1); // reset by duration to earliest - args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--topic", topic1, "--by-duration", "PT5M"}; + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--topic", topic1, "--by-duration", "PT5M"}; resetOffsetsAndAssertForDryRunAndExecute(args, appId, topic1, 0L, 10L, 0, 1); resetForNextTest(appId, 10L, topic1); // reset to earliest - args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--topic", topic1, "--to-earliest"}; + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--topic", topic1, "--to-earliest"}; resetOffsetsAndAssertForDryRunAndExecute(args, appId, topic1, 0L, 10L, 0, 1); resetForNextTest(appId, 10L, topic1); // reset to latest - args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--topic", topic1, "--to-latest"}; + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--topic", topic1, "--to-latest"}; resetOffsetsAndAssertForDryRunAndExecute(args, appId, topic1, 20L, 10L, 0, 1); resetForNextTest(appId, 5L, topic1); // reset to current - args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--topic", topic1, "--to-current"}; + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--topic", topic1, "--to-current"}; resetOffsetsAndAssertForDryRunAndExecute(args, appId, topic1, 5L, 5L, 0, 1); // reset offset shift+. The current offset is 5, as of the prev test is executed (by --execute) - args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--topic", topic1, "--shift-by", "3"}; + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--topic", topic1, "--shift-by", "3"}; resetOffsetsAndAssertForDryRunAndExecute(args, appId, topic1, 8L, 5L, 0, 1); // reset offset shift-. 
The current offset is 8, as of the prev test is executed (by --execute) - args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--topic", topic1, "--shift-by", "-3"}; + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--topic", topic1, "--shift-by", "-3"}; resetOffsetsAndAssertForDryRunAndExecute(args, appId, topic1, 5L, 8L, 0, 1); // reset offset shift by lower than earliest. The current offset is 5, as of the prev test is executed (by --execute) - args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--topic", topic1, "--shift-by", "-150"}; + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--topic", topic1, "--shift-by", "-150"}; resetOffsetsAndAssertForDryRunAndExecute(args, appId, topic1, 0L, 5L, 0, 1); // reset offset shift by higher than latest. The current offset is 0, as of the prev test is executed (by --execute) - args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--topic", topic1, "--shift-by", "150"}; + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--topic", topic1, "--shift-by", "150"}; resetOffsetsAndAssertForDryRunAndExecute(args, appId, topic1, 20L, 0L, 0, 1); // export to file - args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--topic", topic1, "--to-offset", "5", "--export"}; + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--topic", topic1, "--to-offset", "5", "--export"}; File file = TestUtils.tempFile("reset", ".csv"); Map exp = Map.of(new TopicPartition(topic1, 0), 5L, new TopicPartition(topic1, 1), 5L); try (StreamsGroupCommand.StreamsGroupService service = getStreamsGroupService(args)) { @@ -197,7 +196,7 @@ public void testResetOffset() throws Exception { assertEquals(exp, toOffsetMap(exportedOffsets.get(appId))); } - args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--topic", topic1, "--from-file", file.getCanonicalPath()}; + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--topic", topic1, "--from-file", file.getCanonicalPath()}; try (StreamsGroupCommand.StreamsGroupService service = getStreamsGroupService(args)) { Map> importedOffsets = service.resetOffsets(); assertEquals(exp, toOffsetMap(importedOffsets.get(appId))); @@ -207,13 +206,13 @@ public void testResetOffset() throws Exception { resetForNextTest(appId, 10L, topic1); // reset to specific offset - args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--topic", topic1 + ":1", "--to-offset", "5"}; + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--topic", topic1 + ":1", "--to-offset", "5"}; resetOffsetsAndAssertForDryRunAndExecute(args, appId, topic1, 5L, 10L, 1); resetForNextTest(appId, 10L, topic1); // reset both partitions of topic1 and topic2:1 to specific offset - args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--topic", topic1, "--topic", topic2 + ":1", "--to-offset", "5"}; final Map expectedOffsets = Map.of( new TopicPartition(topic1, 0), 5L, @@ -236,19 +235,19 @@ public void testResetOffset() throws Exception { resetForNextTest(appId, 10L, topic1, topic2); // reset to specific offset - args = new 
String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--all-topics", "--to-offset", "5"}; + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--all-topics", "--to-offset", "5"}; resetOffsetsAndAssertForDryRunAndExecute(args, appId, topic1, topic2, 5L, 10L); resetForNextTest(appId, 10L, topic1, topic2); // reset to specific offset with two --topic options - args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--topic", topic1, "--topic", topic2, "--to-offset", "5"}; + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--topic", topic1, "--topic", topic2, "--to-offset", "5"}; resetOffsetsAndAssertForDryRunAndExecute(args, appId, topic1, topic2, 5L, 10L); resetForNextTest(appId, 10L, topic1, topic2); // export to file - args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--all-topics", "--to-offset", "5", "--export"}; + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--all-topics", "--to-offset", "5", "--export"}; file = TestUtils.tempFile("reset-all", ".csv"); exp = Map.of(new TopicPartition(topic1, 0), 5L, new TopicPartition(topic1, 1), 5L, @@ -260,7 +259,7 @@ public void testResetOffset() throws Exception { assertEquals(exp, toOffsetMap(exportedOffsets.get(appId))); } - args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--topic", topic1, "--from-file", file.getCanonicalPath()}; + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--topic", topic1, "--from-file", file.getCanonicalPath()}; try (StreamsGroupCommand.StreamsGroupService service = getStreamsGroupService(args)) { Map> importedOffsets = service.resetOffsets(); @@ -270,6 +269,23 @@ public void testResetOffset() throws Exception { adminClient.deleteTopics(List.of(topic1, topic2)).all().get(); } + @Test + public void testTopicsWhenResettingOffset() throws Exception { + final String appId = "app3";//generateRandomAppId(); + final String topic1 = "test3-in";//generateRandomTopic(); + final String topic2 = "test3-out";//generateRandomTopic(); + final int numOfPartitions = 2; + String[] args; + produceConsumeShutdown(appId, topic1, topic2, RECORD_TOTAL * numOfPartitions * 2); + produceMessagesOnTwoPartitions(RECORD_TOTAL, topic1); + produceMessagesOnTwoPartitions(RECORD_TOTAL, topic2); + + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--all-topics", "--to-offset", "5"}; + resetOffsetsAndAssertInternalTopicDeletionForDryRunAndExecute(args, appId); + + adminClient.deleteTopics(List.of(topic1, topic2)).all().get(); + } + private void resetForNextTest(String appId, long desiredOffset, String... 
topics) throws ExecutionException, InterruptedException { Map offsets = new HashMap<>(); for (String topic : topics) { @@ -345,17 +361,35 @@ private void resetOffsetsAndAssert(String[] args, partition -> expectedOffset )); Map> expectedResetResults = Map.of(appId, expectedOffetMap); - try (StreamsGroupCommand.StreamsGroupService service = getStreamsGroupService(args)) { resetOffsetsResultByGroup = convertOffsetsToLong(service.resetOffsets()); } // assert that the reset offsets are as expected assertEquals(expectedResetResults, resetOffsetsResultByGroup); - + assertEquals(expectedResetResults.values().size(), resetOffsetsResultByGroup.values().size()); // assert that the committed offsets are as expected AssertCommittedOffsets(appId, topic, expectedCommittedOffset, partitions); } + private void resetOffsetsAndAssertInternalTopicDeletion(String[] args, + String appId) throws InterruptedException { + final boolean executeMode = Arrays.asList(args).contains("--execute"); + List internalTopics; + List allTopics; + try (StreamsGroupCommand.StreamsGroupService service = getStreamsGroupService(args)) { + internalTopics = service.retrieveInternalTopics(List.of(appId)).get(appId); + allTopics = service.collectAllTopics(appId); + service.resetOffsets(); + } + + // assert that the internal topics are deleted in --execute mode and not in --dry-run mode + allTopics.addAll(List.of("__consumer_offsets", "__transaction_state")); + if (executeMode) { + allTopics.removeAll(internalTopics); + } + cluster.waitForRemainingTopics(30000, allTopics.toArray(new String[0])); + } + /** * Resets offsets for two topics and verifies the results. * @@ -396,6 +430,7 @@ private void resetOffsetsAndAssert(String[] args, } // assert that the reset offsets are as expected assertEquals(expectedResetResults, resetOffsetsResultByGroup); + assertEquals(expectedResetResults.values().size(), resetOffsetsResultByGroup.values().size()); // assert that the committed offsets are as expected AssertCommittedOffsets(appId, topic1, topic2, expectedCommittedOffset); } @@ -430,6 +465,7 @@ private void resetOffsetsAndAssert(String[] args, } // assert that the reset offsets are as expected assertEquals(expectedOffsets, resetOffsetsResult); + assertEquals(expectedOffsets.values().size(), resetOffsetsResult.values().size()); // assert that the committed offsets are as expected assertEquals(expectedCommittedOffsets, committedOffsets(topics, appId)); } @@ -445,6 +481,13 @@ private void resetOffsetsAndAssertForDryRunAndExecute(String[] args, resetOffsetsAndAssert(addTo(args, "--execute"), appId, topic, expectedOffset, expectedOffset, partitions); } + private void resetOffsetsAndAssertInternalTopicDeletionForDryRunAndExecute(String[] args, + String appId) throws InterruptedException { + resetOffsetsAndAssertInternalTopicDeletion(args, appId); + resetOffsetsAndAssertInternalTopicDeletion(addTo(args, "--dry-run"), appId); + resetOffsetsAndAssertInternalTopicDeletion(addTo(args, "--execute"), appId); + } + private void resetOffsetsAndAssertForDryRunAndExecute(String[] args, String appId, String topic1, @@ -528,13 +571,6 @@ private void produceConsumeShutdown(String appId, String topic1, String topic2, final KStream inputStream2 = builder.stream(topic2); final AtomicInteger recordCount = new AtomicInteger(0); -// -// final KTable valueCounts = inputStream1.merge(inputStream2) -// .groupByKey() -// .aggregate( -// () -> "()", -// (key, value, aggregate) -> aggregate + ",(" + key + ": " + value + ")", -// Materialized.as("aggregated_value")); final KTable 
valueCounts = inputStream1.merge(inputStream2) // Explicit repartition step with a custom internal topic name From 12c9f078ad9d52d3273393e7a71f2876446b5009 Mon Sep 17 00:00:00 2001 From: aliehsaeedii Date: Tue, 3 Jun 2025 09:33:05 +0200 Subject: [PATCH 10/26] address reviews --- .../java/org/apache/kafka/tools/OffsetsUtils.java | 4 ---- .../kafka/tools/streams/StreamsGroupCommand.java | 1 - .../tools/streams/StreamsGroupCommandOptions.java | 6 +++--- .../tools/streams/ResetStreamsGroupOffsetTest.java | 12 ++++++------ 4 files changed, 9 insertions(+), 14 deletions(-) delete mode 100644 tools/src/main/java/org/apache/kafka/tools/OffsetsUtils.java diff --git a/tools/src/main/java/org/apache/kafka/tools/OffsetsUtils.java b/tools/src/main/java/org/apache/kafka/tools/OffsetsUtils.java deleted file mode 100644 index 6541cb67b6ac7..0000000000000 --- a/tools/src/main/java/org/apache/kafka/tools/OffsetsUtils.java +++ /dev/null @@ -1,4 +0,0 @@ -package org.apache.kafka.tools; - -public class OffsetsUtils { -} diff --git a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java index 59d99b39a2085..cfe1dbaa20367 100644 --- a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java +++ b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java @@ -42,7 +42,6 @@ import org.apache.kafka.common.requests.ListOffsetsResponse; import org.apache.kafka.common.utils.Utils; import org.apache.kafka.server.util.CommandLineUtils; -import org.apache.kafka.tools.consumer.group.ConsumerGroupCommand; import org.apache.kafka.tools.consumer.group.CsvUtils; import com.fasterxml.jackson.core.JsonProcessingException; diff --git a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java index fd496d6816dc1..33034dd0ae543 100644 --- a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java +++ b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java @@ -207,9 +207,9 @@ public void checkArgs() { LOGGER.debug("Option " + timeoutMsOpt + " is applicable only when " + describeOpt + " is used."); } - if (!options.has(groupOpt) && !options.has(allGroupsOpt)) - CommandLineUtils.printUsageAndExit(parser, - "Option " + resetOffsetsOpt + " takes one of these options: " + allGroupSelectionScopeOpts.stream().map(Object::toString).collect(Collectors.joining(", "))); +// if (!options.has(groupOpt) && !options.has(allGroupsOpt)) +// CommandLineUtils.printUsageAndExit(parser, +// "Option " + resetOffsetsOpt + " takes one of these options: " + allGroupSelectionScopeOpts.stream().map(Object::toString).collect(Collectors.joining(", "))); if (options.has(resetOffsetsOpt)) { if (options.has(dryRunOpt) && options.has(executeOpt)) diff --git a/tools/src/test/java/org/apache/kafka/tools/streams/ResetStreamsGroupOffsetTest.java b/tools/src/test/java/org/apache/kafka/tools/streams/ResetStreamsGroupOffsetTest.java index 4f4956d446a29..3f4246d6e45be 100644 --- a/tools/src/test/java/org/apache/kafka/tools/streams/ResetStreamsGroupOffsetTest.java +++ b/tools/src/test/java/org/apache/kafka/tools/streams/ResetStreamsGroupOffsetTest.java @@ -115,9 +115,9 @@ public void testResetWithUnrecognizedOption() { @Test public void testResetOffset() throws Exception { - final String appId = "app2";//generateRandomAppId(); - final String topic1 = 
"test2-in";//generateRandomTopic(); - final String topic2 = "test2-out";//generateRandomTopic(); + final String appId = generateRandomAppId(); + final String topic1 = generateRandomTopic(); + final String topic2 = generateRandomTopic(); final int numOfPartitions = 2; String[] args; produceConsumeShutdown(appId, topic1, topic2, RECORD_TOTAL * numOfPartitions * 2); @@ -271,9 +271,9 @@ public void testResetOffset() throws Exception { @Test public void testTopicsWhenResettingOffset() throws Exception { - final String appId = "app3";//generateRandomAppId(); - final String topic1 = "test3-in";//generateRandomTopic(); - final String topic2 = "test3-out";//generateRandomTopic(); + final String appId = generateRandomAppId(); + final String topic1 = generateRandomTopic(); + final String topic2 = generateRandomTopic(); final int numOfPartitions = 2; String[] args; produceConsumeShutdown(appId, topic1, topic2, RECORD_TOTAL * numOfPartitions * 2); From 35d319ef4a385311db0c73181fceacc011003279 Mon Sep 17 00:00:00 2001 From: aliehsaeedii Date: Tue, 3 Jun 2025 10:06:24 +0200 Subject: [PATCH 11/26] fix utests --- .../tools/streams/StreamsGroupCommand.java | 11 +++------- .../streams/StreamsGroupCommandOptions.java | 14 +++++++----- .../streams/StreamsGroupCommandTest.java | 22 +++++++++++++++---- 3 files changed, 30 insertions(+), 17 deletions(-) diff --git a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java index cfe1dbaa20367..d90f20dad1443 100644 --- a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java +++ b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java @@ -502,19 +502,14 @@ Map> retrieveInternalTopics(List groupIds) { Map descriptionMap = adminClient.describeStreamsGroups(groupIds).all().get(); for (StreamsGroupDescription description : descriptionMap.values()) { - List nonInternalTopics = description.subtopologies().stream() - .flatMap(subtopology -> Stream.concat( - subtopology.sourceTopics().stream(), - subtopology.repartitionSinkTopics().stream())) - .distinct() - .toList(); - + List sourceTopics = description.subtopologies().stream() + .flatMap(subtopology -> subtopology.sourceTopics().stream()).toList(); List internalTopics = description.subtopologies().stream() .flatMap(subtopology -> Stream.concat( subtopology.repartitionSourceTopics().keySet().stream(), subtopology.stateChangelogTopics().keySet().stream())) - .filter(topic -> !nonInternalTopics.contains(topic)) + .filter(topic -> !sourceTopics.contains(topic)) .collect(Collectors.toList()); internalTopics.removeIf(topic -> { if (!isInferredInternalTopic(topic, description.groupId())) { diff --git a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java index 33034dd0ae543..3e1dfd6b429f7 100644 --- a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java +++ b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java @@ -207,10 +207,16 @@ public void checkArgs() { LOGGER.debug("Option " + timeoutMsOpt + " is applicable only when " + describeOpt + " is used."); } -// if (!options.has(groupOpt) && !options.has(allGroupsOpt)) -// CommandLineUtils.printUsageAndExit(parser, -// "Option " + resetOffsetsOpt + " takes one of these options: " + allGroupSelectionScopeOpts.stream().map(Object::toString).collect(Collectors.joining(", 
"))); + if (!options.has(groupOpt) && !options.has(allGroupsOpt)) + CommandLineUtils.printUsageAndExit(parser, + "Option " + resetOffsetsOpt + " takes one of these options: " + allGroupSelectionScopeOpts.stream().map(Object::toString).collect(Collectors.joining(", "))); + checkOffsetResetArgs(); + + CommandLineUtils.checkInvalidArgs(parser, options, listOpt, membersOpt, offsetsOpt); + } + + private void checkOffsetResetArgs() { if (options.has(resetOffsetsOpt)) { if (options.has(dryRunOpt) && options.has(executeOpt)) CommandLineUtils.printUsageAndExit(parser, "Option " + resetOffsetsOpt + " only accepts one of " + executeOpt + " and " + dryRunOpt); @@ -231,7 +237,5 @@ public void checkArgs() { CommandLineUtils.checkInvalidArgs(parser, options, resetShiftByOpt, minus(allResetOffsetScenarioOpts, resetShiftByOpt)); CommandLineUtils.checkInvalidArgs(parser, options, resetFromFileOpt, minus(allResetOffsetScenarioOpts, resetFromFileOpt)); } - - CommandLineUtils.checkInvalidArgs(parser, options, listOpt, membersOpt, offsetsOpt); } } diff --git a/tools/src/test/java/org/apache/kafka/tools/streams/StreamsGroupCommandTest.java b/tools/src/test/java/org/apache/kafka/tools/streams/StreamsGroupCommandTest.java index 3f499a0ba672b..984bcf423821d 100644 --- a/tools/src/test/java/org/apache/kafka/tools/streams/StreamsGroupCommandTest.java +++ b/tools/src/test/java/org/apache/kafka/tools/streams/StreamsGroupCommandTest.java @@ -174,6 +174,8 @@ public void testDescribeStreamsGroups() throws Exception { @Test public void testDescribeStreamsGroupsGetOffsets() throws Exception { + String groupId = "group1"; + ListOffsetsResult startOffset = mock(ListOffsetsResult.class); Map startOffsetResultMap = new HashMap<>(); startOffsetResultMap.put(new TopicPartition("topic1", 0), new ListOffsetsResult.ListOffsetsResultInfo(10, -1, Optional.empty())); @@ -194,13 +196,22 @@ public void testDescribeStreamsGroupsGetOffsets() throws Exception { when(ADMIN_CLIENT.listStreamsGroupOffsets(ArgumentMatchers.anyMap())).thenReturn(result); when(result.partitionsToOffsetAndMetadata(ArgumentMatchers.anyString())).thenReturn(KafkaFuture.completedFuture(committedOffsetsMap)); + // Java + DescribeStreamsGroupsResult describeResult = mock(DescribeStreamsGroupsResult.class); + StreamsGroupDescription groupDescription = mock(StreamsGroupDescription.class); + StreamsGroupSubtopologyDescription subtopology = mock(StreamsGroupSubtopologyDescription.class); + when(ADMIN_CLIENT.describeStreamsGroups(List.of(groupId))).thenReturn(describeResult); + when(describeResult.all()).thenReturn(KafkaFuture.completedFuture(Map.of(groupId, groupDescription))); + when(groupDescription.subtopologies()).thenReturn(List.of(subtopology)); + when(subtopology.sourceTopics()).thenReturn(List.of("topic1")); + StreamsGroupMemberDescription description = new StreamsGroupMemberDescription("foo", 0, Optional.empty(), Optional.empty(), "bar", "baz", 0, "qux", Optional.empty(), Map.of(), List.of(), List.of(), new StreamsGroupMemberAssignment(List.of(), List.of(), List.of()), new StreamsGroupMemberAssignment(List.of(), List.of(), List.of()), false); StreamsGroupDescription x = new StreamsGroupDescription( - "group1", + groupId, 0, 0, 0, @@ -270,7 +281,6 @@ public void testAdminRequestsForResetOffsets() { List args = new ArrayList<>(Arrays.asList("--bootstrap-server", "localhost:9092", "--group", groupId, "--reset-offsets", "--topic", "topic1", "--to-latest")); List topics = List.of("topic1"); - when(ADMIN_CLIENT.describeStreamsGroups(List.of(groupId))) 
.thenReturn(describeStreamsResult(groupId, GroupState.DEAD)); when(ADMIN_CLIENT.describeTopics(topics)) @@ -278,6 +288,11 @@ public void testAdminRequestsForResetOffsets() { when(ADMIN_CLIENT.listOffsets(any())) .thenReturn(listOffsetsResult()); when(ADMIN_CLIENT.listGroups(any())).thenReturn(listGroupResult(groupId)); + ListStreamsGroupOffsetsResult result = mock(ListStreamsGroupOffsetsResult.class); + Map committedOffsetsMap = new HashMap<>(); + committedOffsetsMap.put(new TopicPartition("topic1", 0), mock(OffsetAndMetadata.class)); + when(ADMIN_CLIENT.listStreamsGroupOffsets(ArgumentMatchers.anyMap())).thenReturn(result); + when(result.partitionsToOffsetAndMetadata(ArgumentMatchers.anyString())).thenReturn(KafkaFuture.completedFuture(committedOffsetsMap)); StreamsGroupCommand.StreamsGroupService service = getStreamsGroupService(args.toArray(new String[0])); Map> resetResult = service.resetOffsets(); @@ -289,8 +304,7 @@ public void testAdminRequestsForResetOffsets() { verify(ADMIN_CLIENT, times(1)).describeStreamsGroups(List.of(groupId)); verify(ADMIN_CLIENT, times(1)).describeTopics(topics); verify(ADMIN_CLIENT, times(1)).listOffsets(any()); - verify(ADMIN_CLIENT, times(1)).listGroups(any()); - + verify(ADMIN_CLIENT, times(1)).listStreamsGroupOffsets(any()); service.close(); } From cd88721c72f40114960f30531933c14f938087a7 Mon Sep 17 00:00:00 2001 From: aliehsaeedii Date: Tue, 3 Jun 2025 11:24:06 +0200 Subject: [PATCH 12/26] throw exception --- .../org/apache/kafka/tools/streams/StreamsGroupCommand.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java index d90f20dad1443..55266265ba20a 100644 --- a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java +++ b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java @@ -783,8 +783,7 @@ private Map> parseResetPlan(Strin isOldCsvFormat = true; } } catch (IOException e) { - e.printStackTrace(); - // Ignore. 
+ throw new RuntimeException("Invalid CSV format in reset plan file: " + e.getMessage()); } Map> dataMap = new HashMap<>(); From 125f33a69614ca7fb39170eb09642c11420d7e61 Mon Sep 17 00:00:00 2001 From: aliehsaeedii Date: Wed, 4 Jun 2025 18:11:16 +0200 Subject: [PATCH 13/26] topic to input-topic --- .../kafka/tools/streams/StreamsGroupCommand.java | 6 +++--- .../tools/streams/StreamsGroupCommandOptions.java | 14 +++++++------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java index 55266265ba20a..ec2188da701fc 100644 --- a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java +++ b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java @@ -559,10 +559,10 @@ private Map resetOffsetsForInactiveGroup(Stri } private Collection getPartitionsToReset(String groupId) throws ExecutionException, InterruptedException { - if (opts.options.has(opts.allTopicsOpt)) { + if (opts.options.has(opts.allInputTopicsOpt)) { return getCommittedOffsets(groupId).keySet(); - } else if (opts.options.has(opts.topicOpt)) { - List topics = opts.options.valuesOf(opts.topicOpt); + } else if (opts.options.has(opts.inputTopicOpt)) { + List topics = opts.options.valuesOf(opts.inputTopicOpt); List partitions = parseTopicPartitionsToReset(topics); // if the user specified topics that do not belong to this group, we filter them out diff --git a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java index 3e1dfd6b429f7..a2095b3a47f90 100644 --- a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java +++ b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java @@ -39,10 +39,10 @@ public class StreamsGroupCommandOptions extends CommandDefaultOptions { public static final String BOOTSTRAP_SERVER_DOC = "REQUIRED: The server(s) to connect to."; public static final String GROUP_DOC = "The streams group we wish to act on."; private static final String ALL_GROUPS_DOC = "Apply to all streams groups."; - private static final String TOPIC_DOC = "The topic whose streams group information should be deleted or topic whose should be included in the reset offset process. " + + private static final String INPUT_TOPIC_DOC = "The input topic whose streams group information should be deleted or topic whose should be included in the reset offset process. " + "In `reset-offsets` case, partitions can be specified using this format: `topic1:0,1,2`, where 0,1,2 are the partition to be included in the process. " + "Reset-offsets also supports multiple topic inputs."; - private static final String ALL_TOPICS_DOC = "Consider all topics assigned to a group in the `reset-offsets` process."; + private static final String ALL_INPUT_TOPICS_DOC = "Consider all input topics assigned to a group in the `reset-offsets` process."; public static final String LIST_DOC = "List all streams groups."; public static final String DESCRIBE_DOC = "Describe streams group and list offset lag related to given group."; public static final String TIMEOUT_MS_DOC = "The timeout that can be set for some use cases. 
For example, it can be used when describing the group " + @@ -76,8 +76,8 @@ public class StreamsGroupCommandOptions extends CommandDefaultOptions { public final OptionSpec bootstrapServerOpt; public final OptionSpec groupOpt; - final OptionSpec topicOpt; - final OptionSpec allTopicsOpt; + final OptionSpec inputTopicOpt; + final OptionSpec allInputTopicsOpt; public final OptionSpec listOpt; public final OptionSpec describeOpt; final OptionSpec allGroupsOpt; @@ -121,11 +121,11 @@ public StreamsGroupCommandOptions(String[] args) { .withRequiredArg() .describedAs("streams group") .ofType(String.class); - topicOpt = parser.accepts("topic", TOPIC_DOC) + inputTopicOpt = parser.accepts("topic", INPUT_TOPIC_DOC) .withRequiredArg() - .describedAs("topic") + .describedAs("input-topic") .ofType(String.class); - allTopicsOpt = parser.accepts("all-topics", ALL_TOPICS_DOC); + allInputTopicsOpt = parser.accepts("all-input-topics", ALL_INPUT_TOPICS_DOC); listOpt = parser.accepts("list", LIST_DOC); describeOpt = parser.accepts("describe", DESCRIBE_DOC); allGroupsOpt = parser.accepts("all-groups", ALL_GROUPS_DOC); From 626b0df537ec34a54c3ce867512f94bc4d093d98 Mon Sep 17 00:00:00 2001 From: aliehsaeedii Date: Wed, 4 Jun 2025 19:10:16 +0200 Subject: [PATCH 14/26] rebase --- .../tools/streams/StreamsGroupCommand.java | 164 +++++++++++++++++- .../streams/StreamsGroupCommandOptions.java | 35 ++-- .../streams/ResetStreamsGroupOffsetTest.java | 66 ++++--- .../streams/StreamsGroupCommandTest.java | 137 +++++++++++++-- 4 files changed, 352 insertions(+), 50 deletions(-) diff --git a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java index f5e7f92228370..05c87cd851861 100644 --- a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java +++ b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java @@ -17,7 +17,10 @@ package org.apache.kafka.tools.streams; import org.apache.kafka.clients.CommonClientConfigs; +import org.apache.kafka.clients.admin.AbstractOptions; import org.apache.kafka.clients.admin.Admin; +import org.apache.kafka.clients.admin.DeleteStreamsGroupsOptions; +import org.apache.kafka.clients.admin.DeleteTopicsResult; import org.apache.kafka.clients.admin.DescribeStreamsGroupsResult; import org.apache.kafka.clients.admin.GroupListing; import org.apache.kafka.clients.admin.ListGroupsOptions; @@ -37,6 +40,7 @@ import org.apache.kafka.common.KafkaFuture; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.errors.GroupIdNotFoundException; +import org.apache.kafka.common.errors.GroupNotEmptyException; import org.apache.kafka.common.errors.UnknownTopicOrPartitionException; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.requests.ListOffsetsResponse; @@ -58,6 +62,7 @@ import java.util.Collections; import java.util.HashMap; import java.util.HashSet; +import java.util.LinkedHashSet; import java.util.List; import java.util.Map; import java.util.Optional; @@ -83,9 +88,14 @@ public static void main(String[] args) { opts.checkArgs(); // should have exactly one action - long numberOfActions = Stream.of(opts.listOpt, opts.describeOpt, opts.deleteOpt, opts.resetOffsetsOpt).filter(opts.options::has).count(); + long numberOfActions = Stream.of( + opts.listOpt, + opts.describeOpt, + opts.resetOffsetsOpt, + opts.deleteOpt + ).filter(opts.options::has).count(); if (numberOfActions != 1) - 
CommandLineUtils.printUsageAndExit(opts.parser, "Command must include exactly one action: --list, --describe, --delete, or -reset-offsets."); + CommandLineUtils.printUsageAndExit(opts.parser, "Command must include exactly one action: --list, --describe, --delete, or --reset-offsets."); run(opts); } catch (OptionException e) { @@ -106,6 +116,8 @@ public static void run(StreamsGroupCommandOptions opts) { System.out.println(exported); } else printOffsetsToReset(offsetsToReset); + } else if (opts.options.has(opts.deleteOpt)) { + streamsGroupService.deleteGroups(); } else { throw new IllegalArgumentException("Unknown action!"); } @@ -491,6 +503,139 @@ Map> resetOffsets() { return result; } + Map deleteGroups() { + List groupIds = opts.options.has(opts.allGroupsOpt) + ? new ArrayList<>(listStreamsGroups()) + : new ArrayList<>(opts.options.valuesOf(opts.groupOpt)); + + // pre admin call checks + Map failed = preAdminCallChecks(groupIds); + + groupIds.removeAll(failed.keySet()); + Map success = new HashMap<>(); + Map> internalTopics = new HashMap<>(); + Map internalTopicsDeletionFailures = new HashMap<>(); + if (!groupIds.isEmpty()) { + // retrieve internal topics before deleting groups + internalTopics = retrieveInternalTopics(groupIds); + + // delete streams groups + Map> groupsToDelete = adminClient.deleteStreamsGroups( + groupIds, + withTimeoutMs(new DeleteStreamsGroupsOptions()) + ).deletedGroups(); + + groupsToDelete.forEach((g, f) -> { + try { + f.get(); + success.put(g, null); + } catch (InterruptedException ie) { + failed.put(g, ie); + } catch (ExecutionException e) { + failed.put(g, e.getCause()); + } + }); + + // delete internal topics + if (!success.isEmpty()) { + for (String groupId : success.keySet()) { + List internalTopicsToDelete = internalTopics.get(groupId); + if (internalTopicsToDelete != null && !internalTopicsToDelete.isEmpty()) { + DeleteTopicsResult deleteTopicsResult = null; + try { + deleteTopicsResult = adminClient.deleteTopics(internalTopicsToDelete); + deleteTopicsResult.all().get(); + } catch (InterruptedException | ExecutionException e) { + if (deleteTopicsResult != null) { + deleteTopicsResult.topicNameValues().forEach((topic, future) -> { + try { + future.get(); + } catch (Exception topicException) { + System.out.println("Failed to delete internal topic: " + topic); + } + }); + } + internalTopicsDeletionFailures.put(groupId, e.getCause()); + } + } + } + } + } + + // display outcome messages based on the results + if (failed.isEmpty()) { + System.out.println("Deletion of requested streams groups (" + "'" + success.keySet().stream().map(Object::toString).collect(Collectors.joining("', '")) + "') was successful."); + } else { + printError("Deletion of some streams groups failed:", Optional.empty()); + failed.forEach((group, error) -> System.out.println("* Group '" + group + "' could not be deleted due to: " + error)); + + if (!success.isEmpty()) { + System.out.println("\nThese streams groups were deleted successfully: " + "'" + success.keySet().stream().map(Object::toString).collect(Collectors.joining("', '")) + "'."); + } + } + if (!internalTopics.keySet().isEmpty()) { + printInternalTopicErrors(internalTopicsDeletionFailures, success.keySet(), internalTopics.keySet()); + } + // for testing purposes: return all outcomes, i.e. successes (mapped to null) and failures, including internal topic deletion failures + failed.putAll(success); + failed.putAll(internalTopicsDeletionFailures); + return failed; + } + + private Map preAdminCallChecks(List groupIds) { + List streamsGroupIds = listDetailedStreamsGroups(); + 
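// deletion pre-checks: each requested group must exist as a streams group and be in the EMPTY state; unknown, DEAD, or non-empty groups are recorded as failures up front so the remaining groups can still be deleted +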
LinkedHashSet groupIdSet = new LinkedHashSet<>(groupIds); + + Map failed = new HashMap<>(); + + for (String groupId : groupIdSet) { + Optional listing = streamsGroupIds.stream().filter(item -> item.groupId().equals(groupId)).findAny(); + if (listing.isEmpty()) { + failed.put(groupId, new IllegalArgumentException("Group '" + groupId + "' does not exist or is not a streams group.")); + } else { + Optional groupState = listing.get().groupState(); + groupState.ifPresent(state -> { + if (state == GroupState.DEAD) { + failed.put(groupId, new IllegalStateException("Streams group '" + groupId + "' group state is DEAD.")); + } else if (state != GroupState.EMPTY) { + failed.put(groupId, new GroupNotEmptyException("Streams group '" + groupId + "' is not EMPTY.")); + } + }); + } + } + return failed; + } + + List listDetailedStreamsGroups() { + try { + ListGroupsResult result = adminClient.listGroups(new ListGroupsOptions() + .timeoutMs(opts.options.valueOf(opts.timeoutMsOpt).intValue()) + .withTypes(Set.of(GroupType.STREAMS))); + Collection listings = result.all().get(); + return listings.stream().toList(); + } catch (InterruptedException | ExecutionException e) { + throw new RuntimeException(e); + } + } + + private void printInternalTopicErrors(Map internalTopicsDeletionFailures, + Set deletedGroupIds, + Set groupIdsWithInternalTopics) { + if (!deletedGroupIds.isEmpty()) { + if (internalTopicsDeletionFailures.isEmpty()) { + List successfulGroups = deletedGroupIds.stream() + .filter(groupIdsWithInternalTopics::contains) + .collect(Collectors.toList()); + System.out.println("Deletion of associated internal topics of the streams groups ('" + + String.join("', '", successfulGroups) + "') was successful."); + } else { + System.out.println("Deletion of some associated internal topics failed:"); + internalTopicsDeletionFailures.forEach((group, error) -> + System.out.println("* Internal topics of the streams group '" + group + "' could not be deleted due to: " + error)); + } + } + } + // Visibility for testing Map> retrieveInternalTopics(List groupIds) { Map> groupToInternalTopics = new HashMap<>(); @@ -930,6 +1075,19 @@ List collectAllTopics(String groupId) { } } + Collection collectGroupMembers(String groupId) throws Exception { + return getDescribeGroup(groupId).members(); + } + + GroupState collectGroupState(String groupId) throws Exception { + return getDescribeGroup(groupId).groupState(); + } + + private > T withTimeoutMs(T options) { + int t = opts.options.valueOf(opts.timeoutMsOpt).intValue(); + return options.timeoutMs(t); + } + interface LogOffsetResult { } private static class LogOffset implements LogOffsetResult { @@ -1009,4 +1167,4 @@ protected Admin createAdminClient(Map configOverrides) throws IO public record OffsetsInfo(Optional currentOffset, Optional leaderEpoch, Long logEndOffset, Long lag) { } -} +} \ No newline at end of file diff --git a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java index 69a2db9641594..144322fd9580a 100644 --- a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java +++ b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java @@ -39,13 +39,12 @@ public class StreamsGroupCommandOptions extends CommandDefaultOptions { public static final String BOOTSTRAP_SERVER_DOC = "REQUIRED: The server(s) to connect to."; public static final String GROUP_DOC = "The streams group we wish to act on."; private 
static final String ALL_GROUPS_DOC = "Apply to all streams groups."; - private static final String INPUT_TOPIC_DOC = "The input topic whose streams group information should be deleted or topic whose should be included in the reset offset process. " + + private static final String INPUT_TOPIC_DOC = "The input topic whose streams group information should be deleted or topic that should be included in the reset offset process. " + "In `reset-offsets` case, partitions can be specified using this format: `topic1:0,1,2`, where 0,1,2 are the partition to be included in the process. " + "Reset-offsets also supports multiple topic inputs."; - private static final String ALL_INPUT_TOPICS_DOC = "Consider all input topics assigned to a group in the `reset-offsets` process."; + private static final String ALL_INPUT_TOPICS_DOC = "Consider all topics assigned to a group in the `reset-offsets` process."; public static final String LIST_DOC = "List all streams groups."; public static final String DESCRIBE_DOC = "Describe streams group and list offset lag related to given group."; - private static final String ALL_GROUPS_DOC = "Apply to all streams groups."; private static final String DELETE_DOC = "Pass in groups to delete topic partition offsets and ownership information " + "over the entire streams group. For instance --group g1 --group g2"; public static final String TIMEOUT_MS_DOC = "The timeout that can be set for some use cases. For example, it can be used when describing the group " + @@ -79,12 +78,12 @@ public class StreamsGroupCommandOptions extends CommandDefaultOptions { public final OptionSpec bootstrapServerOpt; public final OptionSpec groupOpt; - final OptionSpec inputTopicOpt; - final OptionSpec allInputTopicsOpt; + public final OptionSpec inputTopicOpt; + public final OptionSpec allInputTopicsOpt; public final OptionSpec listOpt; public final OptionSpec describeOpt; + public final OptionSpec deleteOpt; final OptionSpec allGroupsOpt; - final OptionSpec deleteOpt; public final OptionSpec timeoutMsOpt; public final OptionSpec commandConfigOpt; public final OptionSpec stateOpt; @@ -104,6 +103,7 @@ public class StreamsGroupCommandOptions extends CommandDefaultOptions { public final OptionSpec exportOpt; public final OptionSpec verboseOpt; + final Set> allResetOffsetScenarioOpts; final Set> allGroupSelectionScopeOpts; final Set> allStreamsGroupLevelOpts; @@ -124,9 +124,9 @@ public StreamsGroupCommandOptions(String[] args) { .withRequiredArg() .describedAs("streams group") .ofType(String.class); - inputTopicOpt = parser.accepts("topic", INPUT_TOPIC_DOC) + inputTopicOpt = parser.accepts("input-topic", INPUT_TOPIC_DOC) .withRequiredArg() - .describedAs("input-topic") + .describedAs("topic") .ofType(String.class); allInputTopicsOpt = parser.accepts("all-input-topics", ALL_INPUT_TOPICS_DOC); listOpt = parser.accepts("list", LIST_DOC); @@ -143,6 +143,7 @@ public StreamsGroupCommandOptions(String[] args) { .withRequiredArg() .describedAs("command config property file") .ofType(String.class); + stateOpt = parser.accepts("state", STATE_DOC) .availableIf(listOpt, describeOpt) .withOptionalArg() @@ -178,14 +179,15 @@ public StreamsGroupCommandOptions(String[] args) { verboseOpt = parser.accepts("verbose", VERBOSE_DOC) .availableIf(describeOpt); - - allStreamsGroupLevelOpts = new HashSet<>(Arrays.asList(listOpt, describeOpt, deleteOpt)); - allGroupSelectionScopeOpts = new HashSet<>(Arrays.asList(groupOpt, allGroupsOpt)); + dryRunOpt = parser.accepts("dry-run", DRY_RUN_DOC); + executeOpt = 
parser.accepts("execute", EXECUTE_DOC); + exportOpt = parser.accepts("export", EXPORT_DOC); options = parser.parse(args); allResetOffsetScenarioOpts = new HashSet<>(Arrays.asList(resetToOffsetOpt, resetShiftByOpt, resetToDatetimeOpt, resetByDurationOpt, resetToEarliestOpt, resetToLatestOpt, resetToCurrentOpt, resetFromFileOpt)); allGroupSelectionScopeOpts = new HashSet<>(Arrays.asList(groupOpt, allGroupsOpt)); + allStreamsGroupLevelOpts = new HashSet<>(Arrays.asList(listOpt, describeOpt, deleteOpt)); } public void checkArgs() { @@ -210,13 +212,22 @@ public void checkArgs() { LOGGER.debug("Option " + timeoutMsOpt + " is applicable only when " + describeOpt + " is used."); } + if (options.has(resetOffsetsOpt)) { + if (!options.has(groupOpt) && !options.has(allGroupsOpt)) + CommandLineUtils.printUsageAndExit(parser, + "Option " + resetOffsetsOpt + " takes one of these options: " + allGroupSelectionScopeOpts.stream().map(Object::toString).collect(Collectors.joining(", "))); + } + if (options.has(deleteOpt)) { if (!options.has(groupOpt) && !options.has(allGroupsOpt)) CommandLineUtils.printUsageAndExit(parser, "Option " + deleteOpt + " takes one of these options: " + allGroupSelectionScopeOpts.stream().map(Object::toString).collect(Collectors.joining(", "))); } + checkOffsetResetArgs(); + CommandLineUtils.checkInvalidArgs(parser, options, listOpt, membersOpt, offsetsOpt); + CommandLineUtils.checkInvalidArgs(parser, options, groupOpt, minus(allStreamsGroupLevelOpts, describeOpt, deleteOpt)); } private void checkOffsetResetArgs() { @@ -241,4 +252,4 @@ private void checkOffsetResetArgs() { CommandLineUtils.checkInvalidArgs(parser, options, resetFromFileOpt, minus(allResetOffsetScenarioOpts, resetFromFileOpt)); } } -} +} \ No newline at end of file diff --git a/tools/src/test/java/org/apache/kafka/tools/streams/ResetStreamsGroupOffsetTest.java b/tools/src/test/java/org/apache/kafka/tools/streams/ResetStreamsGroupOffsetTest.java index 3f4246d6e45be..14d96f674c7bb 100644 --- a/tools/src/test/java/org/apache/kafka/tools/streams/ResetStreamsGroupOffsetTest.java +++ b/tools/src/test/java/org/apache/kafka/tools/streams/ResetStreamsGroupOffsetTest.java @@ -23,6 +23,7 @@ import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.serialization.Serdes; import org.apache.kafka.common.serialization.StringSerializer; +import org.apache.kafka.common.utils.Exit; import org.apache.kafka.coordinator.group.GroupCoordinatorConfig; import org.apache.kafka.streams.GroupProtocol; import org.apache.kafka.streams.KafkaStreams; @@ -58,6 +59,7 @@ import java.util.Optional; import java.util.Properties; import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; @@ -67,7 +69,9 @@ import static java.util.Arrays.asList; import static java.util.stream.Collectors.toMap; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; @Timeout(600) @Tag("integration") @@ -109,10 +113,26 @@ public static void closeCluster() { @Test public void testResetWithUnrecognizedOption() { - String[] args = new String[]{"--unrecognized-option", "--bootstrap-server", bootstrapServers, "--reset-offsets", "--all-group", "--all-topics", "--to-offset", "5"}; + String[] args = new String[]{"--unrecognized-option", 
"--bootstrap-server", bootstrapServers, "--reset-offsets", "--all-groups", "--all-input-topics", "--to-offset", "5"}; assertThrows(OptionException.class, () -> getStreamsGroupService(args)); } + @Test + public void testResetOffsetsWithoutGroupOption() { + final String[] args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--to-offset", "5"}; + AtomicBoolean exited = new AtomicBoolean(false); + Exit.setExitProcedure(((statusCode, message) -> { + assertNotEquals(0, statusCode); + assertTrue(message.contains("Option [reset-offsets] takes one of these options: [all-groups], [group]")); + exited.set(true); + })); + try { + getStreamsGroupService(args); + } finally { + assertTrue(exited.get()); + } + } + @Test public void testResetOffset() throws Exception { final String appId = generateRandomAppId(); @@ -125,17 +145,17 @@ public void testResetOffset() throws Exception { produceMessagesOnTwoPartitions(RECORD_TOTAL, topic2); /////////////////////////////////////////////// Specific topic (--topic topic1) //////////////////////////////////////////////// // reset to specific offset, offset already on 10 - args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--topic", topic1, "--to-offset", "5"}; + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--input-topic", topic1, "--to-offset", "5"}; resetOffsetsAndAssertForDryRunAndExecute(args, appId, topic1, 5L, 10L, 0, 1); resetForNextTest(appId, 10L, topic1); // reset to specific offset when after end offset, offset already on 10 - args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--topic", topic1, "--to-offset", "30"}; + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--input-topic", topic1, "--to-offset", "30"}; resetOffsetsAndAssertForDryRunAndExecute(args, appId, topic1, 20L, 10L, 0, 1); // reset to specific offset when before begin offset, offset already on 20 - args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--topic", topic1, "--to-offset", "-30"}; + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--input-topic", topic1, "--to-offset", "-30"}; resetOffsetsAndAssertForDryRunAndExecute(args, appId, topic1, 0L, 20L, 0, 1); resetForNextTest(appId, 10L, topic1); @@ -143,51 +163,51 @@ public void testResetOffset() throws Exception { // reset to specific date time DateTimeFormatter format = DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSS"); LocalDateTime dateTime = now().minusDays(1); - args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--topic", topic1, "--to-datetime", format.format(dateTime)}; + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--input-topic", topic1, "--to-datetime", format.format(dateTime)}; resetOffsetsAndAssertForDryRunAndExecute(args, appId, topic1, 0L, 10L, 0, 1); resetForNextTest(appId, 10L, topic1); // reset by duration to earliest - args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--topic", topic1, "--by-duration", "PT5M"}; + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--input-topic", topic1, "--by-duration", "PT5M"}; resetOffsetsAndAssertForDryRunAndExecute(args, appId, topic1, 0L, 10L, 0, 1); 
resetForNextTest(appId, 10L, topic1); // reset to earliest - args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--topic", topic1, "--to-earliest"}; + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--input-topic", topic1, "--to-earliest"}; resetOffsetsAndAssertForDryRunAndExecute(args, appId, topic1, 0L, 10L, 0, 1); resetForNextTest(appId, 10L, topic1); // reset to latest - args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--topic", topic1, "--to-latest"}; + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--input-topic", topic1, "--to-latest"}; resetOffsetsAndAssertForDryRunAndExecute(args, appId, topic1, 20L, 10L, 0, 1); resetForNextTest(appId, 5L, topic1); // reset to current - args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--topic", topic1, "--to-current"}; + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--input-topic", topic1, "--to-current"}; resetOffsetsAndAssertForDryRunAndExecute(args, appId, topic1, 5L, 5L, 0, 1); // reset offset shift+. The current offset is 5, as of the prev test is executed (by --execute) - args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--topic", topic1, "--shift-by", "3"}; + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--input-topic", topic1, "--shift-by", "3"}; resetOffsetsAndAssertForDryRunAndExecute(args, appId, topic1, 8L, 5L, 0, 1); // reset offset shift-. The current offset is 8, as of the prev test is executed (by --execute) - args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--topic", topic1, "--shift-by", "-3"}; + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--input-topic", topic1, "--shift-by", "-3"}; resetOffsetsAndAssertForDryRunAndExecute(args, appId, topic1, 5L, 8L, 0, 1); // reset offset shift by lower than earliest. The current offset is 5, as of the prev test is executed (by --execute) - args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--topic", topic1, "--shift-by", "-150"}; + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--input-topic", topic1, "--shift-by", "-150"}; resetOffsetsAndAssertForDryRunAndExecute(args, appId, topic1, 0L, 5L, 0, 1); // reset offset shift by higher than latest. 
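Shifting past the log end does not overshoot: the new offset is clamped to the latest offset, 20 in this setup.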
The current offset is 0, as of the prev test is executed (by --execute) - args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--topic", topic1, "--shift-by", "150"}; + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--input-topic", topic1, "--shift-by", "150"}; resetOffsetsAndAssertForDryRunAndExecute(args, appId, topic1, 20L, 0L, 0, 1); // export to file - args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--topic", topic1, "--to-offset", "5", "--export"}; + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--input-topic", topic1, "--to-offset", "5", "--export"}; File file = TestUtils.tempFile("reset", ".csv"); Map exp = Map.of(new TopicPartition(topic1, 0), 5L, new TopicPartition(topic1, 1), 5L); try (StreamsGroupCommand.StreamsGroupService service = getStreamsGroupService(args)) { @@ -196,7 +216,7 @@ public void testResetOffset() throws Exception { assertEquals(exp, toOffsetMap(exportedOffsets.get(appId))); } - args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--topic", topic1, "--from-file", file.getCanonicalPath()}; + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--input-topic", topic1, "--from-file", file.getCanonicalPath()}; try (StreamsGroupCommand.StreamsGroupService service = getStreamsGroupService(args)) { Map> importedOffsets = service.resetOffsets(); assertEquals(exp, toOffsetMap(importedOffsets.get(appId))); @@ -206,14 +226,14 @@ public void testResetOffset() throws Exception { resetForNextTest(appId, 10L, topic1); // reset to specific offset - args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--topic", topic1 + ":1", "--to-offset", "5"}; + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--input-topic", topic1 + ":1", "--to-offset", "5"}; resetOffsetsAndAssertForDryRunAndExecute(args, appId, topic1, 5L, 10L, 1); resetForNextTest(appId, 10L, topic1); // reset both partitions of topic1 and topic2:1 to specific offset args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, - "--topic", topic1, "--topic", topic2 + ":1", "--to-offset", "5"}; + "--input-topic", topic1, "--input-topic", topic2 + ":1", "--to-offset", "5"}; final Map expectedOffsets = Map.of( new TopicPartition(topic1, 0), 5L, new TopicPartition(topic1, 1), 5L, @@ -231,23 +251,23 @@ public void testResetOffset() throws Exception { new TopicPartition(topic2, 0), 10L, new TopicPartition(topic2, 1), 5L)); - ///////////////////////////////////////// All topics (--all-topics) ///////////////////////////////////////// + ///////////////////////////////////////// All topics (--all-input-topics) ///////////////////////////////////////// resetForNextTest(appId, 10L, topic1, topic2); // reset to specific offset - args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--all-topics", "--to-offset", "5"}; + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--all-input-topics", "--to-offset", "5"}; resetOffsetsAndAssertForDryRunAndExecute(args, appId, topic1, topic2, 5L, 10L); resetForNextTest(appId, 10L, topic1, topic2); // reset to specific offset with two --topic options - args = new String[]{"--bootstrap-server", 
bootstrapServers, "--reset-offsets", "--group", appId, "--topic", topic1, "--topic", topic2, "--to-offset", "5"}; + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--input-topic", topic1, "--input-topic", topic2, "--to-offset", "5"}; resetOffsetsAndAssertForDryRunAndExecute(args, appId, topic1, topic2, 5L, 10L); resetForNextTest(appId, 10L, topic1, topic2); // export to file - args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--all-topics", "--to-offset", "5", "--export"}; + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--all-input-topics", "--to-offset", "5", "--export"}; file = TestUtils.tempFile("reset-all", ".csv"); exp = Map.of(new TopicPartition(topic1, 0), 5L, new TopicPartition(topic1, 1), 5L, @@ -259,7 +279,7 @@ public void testResetOffset() throws Exception { assertEquals(exp, toOffsetMap(exportedOffsets.get(appId))); } - args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--topic", topic1, "--from-file", file.getCanonicalPath()}; + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--input-topic", topic1, "--from-file", file.getCanonicalPath()}; try (StreamsGroupCommand.StreamsGroupService service = getStreamsGroupService(args)) { Map> importedOffsets = service.resetOffsets(); @@ -280,7 +300,7 @@ public void testTopicsWhenResettingOffset() throws Exception { produceMessagesOnTwoPartitions(RECORD_TOTAL, topic1); produceMessagesOnTwoPartitions(RECORD_TOTAL, topic2); - args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--all-topics", "--to-offset", "5"}; + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--all-input-topics", "--to-offset", "5"}; resetOffsetsAndAssertInternalTopicDeletionForDryRunAndExecute(args, appId); adminClient.deleteTopics(List.of(topic1, topic2)).all().get(); diff --git a/tools/src/test/java/org/apache/kafka/tools/streams/StreamsGroupCommandTest.java b/tools/src/test/java/org/apache/kafka/tools/streams/StreamsGroupCommandTest.java index fdd434cf5b05d..acccf7ff6634a 100644 --- a/tools/src/test/java/org/apache/kafka/tools/streams/StreamsGroupCommandTest.java +++ b/tools/src/test/java/org/apache/kafka/tools/streams/StreamsGroupCommandTest.java @@ -17,6 +17,7 @@ package org.apache.kafka.tools.streams; import org.apache.kafka.clients.admin.Admin; +import org.apache.kafka.clients.admin.AdminClientTestUtils; import org.apache.kafka.clients.admin.DeleteStreamsGroupsOptions; import org.apache.kafka.clients.admin.DeleteStreamsGroupsResult; import org.apache.kafka.clients.admin.DeleteTopicsResult; @@ -151,6 +152,7 @@ public void testListStreamsGroupsWithStates() throws Exception { foundListing[0] = new HashSet<>(service.listStreamsGroupsInStates(Set.of(GroupState.STABLE))); return Objects.equals(expectedListingStable, foundListing[0]); }, "Expected to show groups " + expectedListingStable + ", but found " + foundListing[0]); + service.close(); } @@ -175,6 +177,7 @@ public void testDescribeStreamsGroups() throws Exception { when(ADMIN_CLIENT.describeStreamsGroups(ArgumentMatchers.anyCollection())).thenReturn(result); StreamsGroupCommand.StreamsGroupService service = new StreamsGroupCommand.StreamsGroupService(null, ADMIN_CLIENT); assertEquals(exp, service.getDescribeGroup(firstGroup)); + service.close(); } @@ -230,6 +233,7 @@ public void 
testDescribeStreamsGroupsGetOffsets() throws Exception { Map lags = service.getOffsets(x); assertEquals(1, lags.size()); assertEquals(new StreamsGroupCommand.OffsetsInfo(Optional.of(12L), Optional.of(0), 30L, 18L), lags.get(new TopicPartition("topic1", 0))); + service.close(); } @@ -283,34 +287,138 @@ public void testGroupStatesFromString() { @Test public void testAdminRequestsForResetOffsets() { + Admin adminClient = mock(KafkaAdminClient.class); String groupId = "foo-group"; - List args = new ArrayList<>(Arrays.asList("--bootstrap-server", "localhost:9092", "--group", groupId, "--reset-offsets", "--topic", "topic1", "--to-latest")); + List args = new ArrayList<>(Arrays.asList("--bootstrap-server", "localhost:9092", "--group", groupId, "--reset-offsets", "--input-topic", "topic1", "--to-latest")); List topics = List.of("topic1"); - when(ADMIN_CLIENT.describeStreamsGroups(List.of(groupId))) + when(adminClient.describeStreamsGroups(List.of(groupId))) .thenReturn(describeStreamsResult(groupId, GroupState.DEAD)); - when(ADMIN_CLIENT.describeTopics(topics)) + when(adminClient.describeTopics(topics)) .thenReturn(describeTopicsResult(topics, 1)); - when(ADMIN_CLIENT.listOffsets(any())) + when(adminClient.listOffsets(any())) .thenReturn(listOffsetsResult()); - when(ADMIN_CLIENT.listGroups(any())).thenReturn(listGroupResult(groupId)); + when(adminClient.listGroups(any())).thenReturn(listGroupResult(groupId)); ListStreamsGroupOffsetsResult result = mock(ListStreamsGroupOffsetsResult.class); Map committedOffsetsMap = new HashMap<>(); committedOffsetsMap.put(new TopicPartition("topic1", 0), mock(OffsetAndMetadata.class)); - when(ADMIN_CLIENT.listStreamsGroupOffsets(ArgumentMatchers.anyMap())).thenReturn(result); + when(adminClient.listStreamsGroupOffsets(ArgumentMatchers.anyMap())).thenReturn(result); when(result.partitionsToOffsetAndMetadata(ArgumentMatchers.anyString())).thenReturn(KafkaFuture.completedFuture(committedOffsetsMap)); - StreamsGroupCommand.StreamsGroupService service = getStreamsGroupService(args.toArray(new String[0])); + StreamsGroupCommand.StreamsGroupService service = getStreamsGroupService(args.toArray(new String[0]), adminClient); Map> resetResult = service.resetOffsets(); assertEquals(Collections.singleton(groupId), resetResult.keySet()); assertEquals(new HashSet<>(List.of(new TopicPartition(topics.get(0), 0))), resetResult.get(groupId).keySet()); - verify(ADMIN_CLIENT, times(1)).describeStreamsGroups(List.of(groupId)); - verify(ADMIN_CLIENT, times(1)).describeTopics(topics); - verify(ADMIN_CLIENT, times(1)).listOffsets(any()); - verify(ADMIN_CLIENT, times(1)).listStreamsGroupOffsets(any()); + verify(adminClient, times(1)).describeStreamsGroups(List.of(groupId)); + verify(adminClient, times(1)).describeTopics(topics); + verify(adminClient, times(1)).listOffsets(any()); + verify(adminClient, times(1)).listStreamsGroupOffsets(any()); + + service.close(); + } + + @Test + public void testRetrieveInternalTopics() { + String groupId = "foo-group"; + List args = new ArrayList<>(Arrays.asList("--bootstrap-server", "localhost:9092", "--group", groupId, "--delete")); + List sourceTopics = List.of("source-topic1", "source-topic2"); + List repartitionSinkTopics = List.of("rep-sink-topic1", "rep-sink-topic2"); + Map stateChangelogTopics = Map.of( + groupId + "-1-changelog", mock(StreamsGroupSubtopologyDescription.TopicInfo.class), + "some-pre-fix" + "-changelog", mock(StreamsGroupSubtopologyDescription.TopicInfo.class), + groupId + "-2-changelog", 
mock(StreamsGroupSubtopologyDescription.TopicInfo.class)); + Map repartitionSourceTopics = Map.of( + groupId + "-1-repartition", mock(StreamsGroupSubtopologyDescription.TopicInfo.class), + groupId + "-some-thing", mock(StreamsGroupSubtopologyDescription.TopicInfo.class), + groupId + "-2-repartition", mock(StreamsGroupSubtopologyDescription.TopicInfo.class)); + + + Map resultMap = new HashMap<>(); + resultMap.put(groupId, new StreamsGroupDescription( + groupId, + 0, + 0, + 0, + List.of(new StreamsGroupSubtopologyDescription("subtopology1", sourceTopics, repartitionSinkTopics, stateChangelogTopics, repartitionSourceTopics)), + List.of(), + GroupState.DEAD, + new Node(0, "localhost", 9092), + null)); + DescribeStreamsGroupsResult result = mock(DescribeStreamsGroupsResult.class); + when(result.all()).thenReturn(KafkaFuture.completedFuture(resultMap)); + when(ADMIN_CLIENT.describeStreamsGroups(ArgumentMatchers.anyCollection())).thenReturn(result); + + StreamsGroupCommand.StreamsGroupService service = getStreamsGroupService(args.toArray(new String[0])); + Map> internalTopics = service.retrieveInternalTopics(List.of(groupId)); + + assertNotNull(internalTopics.get(groupId)); + assertEquals(4, internalTopics.get(groupId).size()); + assertEquals(new HashSet<>(List.of(groupId + "-1-changelog", groupId + "-2-changelog", groupId + "-1-repartition", groupId + "-2-repartition")), + new HashSet<>(internalTopics.get(groupId))); + assertFalse(internalTopics.get(groupId).stream().anyMatch(List.of("some-pre-fix-changelog", groupId + "-some-thing")::contains)); + assertFalse(internalTopics.get(groupId).stream().anyMatch(sourceTopics::contains)); + assertFalse(internalTopics.get(groupId).stream().anyMatch(repartitionSinkTopics::contains)); + + service.close(); + } + + @Test + public void testDeleteStreamsGroup() { + Admin adminClient = mock(KafkaAdminClient.class); + String groupId = "foo-group"; + List args = new ArrayList<>(Arrays.asList("--bootstrap-server", "localhost:9092", "--group", groupId, "--delete")); + + DeleteStreamsGroupsResult deleteStreamsGroupsResult = mock(DeleteStreamsGroupsResult.class); + when(adminClient.deleteStreamsGroups(eq(List.of(groupId)), any(DeleteStreamsGroupsOptions.class))).thenReturn(deleteStreamsGroupsResult); + when(deleteStreamsGroupsResult.deletedGroups()).thenReturn(Map.of(groupId, KafkaFuture.completedFuture(null))); + DeleteTopicsResult deleteTopicsResult = mock(DeleteTopicsResult.class); + when(deleteTopicsResult.all()).thenReturn(KafkaFuture.completedFuture(null)); + when(adminClient.deleteTopics(ArgumentMatchers.anyCollection())).thenReturn(deleteTopicsResult); + DescribeStreamsGroupsResult describeStreamsGroupsResult = mock(DescribeStreamsGroupsResult.class); + when(describeStreamsGroupsResult.all()).thenReturn(KafkaFuture.completedFuture(Map.of(groupId, mock(StreamsGroupDescription.class)))); + when(adminClient.describeStreamsGroups(any())).thenReturn(describeStreamsGroupsResult); + ListGroupsResult listGroupsResult = mock(ListGroupsResult.class); + when(adminClient.listGroups(any())).thenReturn(listGroupsResult); + when(listGroupsResult.all()).thenReturn(KafkaFuture.completedFuture(List.of(new GroupListing(groupId, Optional.of(GroupType.STREAMS), "streams", Optional.of(GroupState.EMPTY))))); + + StreamsGroupCommand.StreamsGroupService service = getStreamsGroupService(args.toArray(new String[0]), adminClient); + service.deleteGroups(); + + verify(adminClient, times(1)).listGroups(any(ListGroupsOptions.class)); + verify(adminClient, 
times(1)).deleteStreamsGroups(eq(List.of(groupId)), any(DeleteStreamsGroupsOptions.class)); + verify(adminClient, times(1)).describeStreamsGroups(any()); + // because of having 0 internal topics, we do not expect deleteTopics to be called + verify(adminClient, times(0)).deleteTopics(ArgumentMatchers.anyCollection()); + + service.close(); + } + + @Test + public void testDeleteNonStreamsGroup() { + Admin adminClient = mock(KafkaAdminClient.class); + String groupId = "foo-group"; + List args = new ArrayList<>(Arrays.asList("--bootstrap-server", "localhost:9092", "--group", groupId, "--delete")); + + ListGroupsResult listGroupsResult = mock(ListGroupsResult.class); + when(adminClient.listGroups(any())).thenReturn(listGroupsResult); + when(listGroupsResult.all()).thenReturn(KafkaFuture.completedFuture(List.of())); + + StreamsGroupCommand.StreamsGroupService service = getStreamsGroupService(args.toArray(new String[0]), adminClient); + Map result = service.deleteGroups(); + + assertNotNull(result.get(groupId)); + assertEquals(result.get(groupId).getMessage(), + "Group '" + groupId + "' does not exist or is not a streams group."); + assertInstanceOf(IllegalArgumentException.class, result.get(groupId)); + verify(adminClient, times(1)).listGroups(any(ListGroupsOptions.class)); + // we do not expect any further API to be called + verify(adminClient, times(0)).deleteStreamsGroups(eq(List.of(groupId)), any(DeleteStreamsGroupsOptions.class)); + verify(adminClient, times(0)).describeStreamsGroups(any()); + verify(adminClient, times(0)).deleteTopics(ArgumentMatchers.anyCollection()); + service.close(); } @@ -326,6 +434,11 @@ StreamsGroupCommand.StreamsGroupService getStreamsGroupService(String[] args) { return new StreamsGroupCommand.StreamsGroupService(opts, ADMIN_CLIENT); } + StreamsGroupCommand.StreamsGroupService getStreamsGroupService(String[] args, Admin adminClient) { + StreamsGroupCommandOptions opts = new StreamsGroupCommandOptions(args); + return new StreamsGroupCommand.StreamsGroupService(opts, adminClient); + } + private static void assertThrow(final String wrongState) { final Set validStates = new HashSet<>(Arrays.asList("Assigning", "Dead", "Empty", "Reconciling", "Stable", "NotReady")); @@ -381,4 +494,4 @@ private ListOffsetsResult listOffsetsResult() { __ -> KafkaFuture.completedFuture(resultInfo))); return new ListOffsetsResult(futures); } -} +} \ No newline at end of file From b9be3eba35a26c4f8367613d3be6d800feb06b17 Mon Sep 17 00:00:00 2001 From: aliehsaeedii Date: Wed, 4 Jun 2025 19:16:45 +0200 Subject: [PATCH 15/26] fix cs issue --- .../streams/StreamsGroupCommandOptions.java | 51 +++++++++---------- 1 file changed, 24 insertions(+), 27 deletions(-) diff --git a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java index 144322fd9580a..161596f2c5c84 100644 --- a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java +++ b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java @@ -212,44 +212,41 @@ public void checkArgs() { LOGGER.debug("Option " + timeoutMsOpt + " is applicable only when " + describeOpt + " is used."); } - if (options.has(resetOffsetsOpt)) { - if (!options.has(groupOpt) && !options.has(allGroupsOpt)) - CommandLineUtils.printUsageAndExit(parser, - "Option " + resetOffsetsOpt + " takes one of these options: " + allGroupSelectionScopeOpts.stream().map(Object::toString).collect(Collectors.joining(", "))); 
- } - if (options.has(deleteOpt)) { if (!options.has(groupOpt) && !options.has(allGroupsOpt)) CommandLineUtils.printUsageAndExit(parser, "Option " + deleteOpt + " takes one of these options: " + allGroupSelectionScopeOpts.stream().map(Object::toString).collect(Collectors.joining(", "))); } - - checkOffsetResetArgs(); + if (options.has(resetOffsetsOpt)) { + checkOffsetResetArgs(); + } CommandLineUtils.checkInvalidArgs(parser, options, listOpt, membersOpt, offsetsOpt); CommandLineUtils.checkInvalidArgs(parser, options, groupOpt, minus(allStreamsGroupLevelOpts, describeOpt, deleteOpt)); } private void checkOffsetResetArgs() { - if (options.has(resetOffsetsOpt)) { - if (options.has(dryRunOpt) && options.has(executeOpt)) - CommandLineUtils.printUsageAndExit(parser, "Option " + resetOffsetsOpt + " only accepts one of " + executeOpt + " and " + dryRunOpt); - - if (!options.has(dryRunOpt) && !options.has(executeOpt)) { - System.err.println("WARN: No action will be performed as the --execute option is missing. " + - "In a future major release, the default behavior of this command will be to prompt the user before " + - "executing the reset rather than doing a dry run. You should add the --dry-run option explicitly " + - "if you are scripting this command and want to keep the current default behavior without prompting."); - } - - CommandLineUtils.checkInvalidArgs(parser, options, resetToOffsetOpt, minus(allResetOffsetScenarioOpts, resetToOffsetOpt)); - CommandLineUtils.checkInvalidArgs(parser, options, resetToDatetimeOpt, minus(allResetOffsetScenarioOpts, resetToDatetimeOpt)); - CommandLineUtils.checkInvalidArgs(parser, options, resetByDurationOpt, minus(allResetOffsetScenarioOpts, resetByDurationOpt)); - CommandLineUtils.checkInvalidArgs(parser, options, resetToEarliestOpt, minus(allResetOffsetScenarioOpts, resetToEarliestOpt)); - CommandLineUtils.checkInvalidArgs(parser, options, resetToLatestOpt, minus(allResetOffsetScenarioOpts, resetToLatestOpt)); - CommandLineUtils.checkInvalidArgs(parser, options, resetToCurrentOpt, minus(allResetOffsetScenarioOpts, resetToCurrentOpt)); - CommandLineUtils.checkInvalidArgs(parser, options, resetShiftByOpt, minus(allResetOffsetScenarioOpts, resetShiftByOpt)); - CommandLineUtils.checkInvalidArgs(parser, options, resetFromFileOpt, minus(allResetOffsetScenarioOpts, resetFromFileOpt)); + if (options.has(dryRunOpt) && options.has(executeOpt)) + CommandLineUtils.printUsageAndExit(parser, "Option " + resetOffsetsOpt + " only accepts one of " + executeOpt + " and " + dryRunOpt); + + if (!options.has(dryRunOpt) && !options.has(executeOpt)) { + System.err.println("WARN: No action will be performed as the --execute option is missing. " + + "In a future major release, the default behavior of this command will be to prompt the user before " + + "executing the reset rather than doing a dry run. 
You should add the --dry-run option explicitly " + + "if you are scripting this command and want to keep the current default behavior without prompting."); } + + if (!options.has(groupOpt) && !options.has(allGroupsOpt)) + CommandLineUtils.printUsageAndExit(parser, + "Option " + resetOffsetsOpt + " takes one of these options: " + allGroupSelectionScopeOpts.stream().map(Object::toString).collect(Collectors.joining(", "))); + + CommandLineUtils.checkInvalidArgs(parser, options, resetToOffsetOpt, minus(allResetOffsetScenarioOpts, resetToOffsetOpt)); + CommandLineUtils.checkInvalidArgs(parser, options, resetToDatetimeOpt, minus(allResetOffsetScenarioOpts, resetToDatetimeOpt)); + CommandLineUtils.checkInvalidArgs(parser, options, resetByDurationOpt, minus(allResetOffsetScenarioOpts, resetByDurationOpt)); + CommandLineUtils.checkInvalidArgs(parser, options, resetToEarliestOpt, minus(allResetOffsetScenarioOpts, resetToEarliestOpt)); + CommandLineUtils.checkInvalidArgs(parser, options, resetToLatestOpt, minus(allResetOffsetScenarioOpts, resetToLatestOpt)); + CommandLineUtils.checkInvalidArgs(parser, options, resetToCurrentOpt, minus(allResetOffsetScenarioOpts, resetToCurrentOpt)); + CommandLineUtils.checkInvalidArgs(parser, options, resetShiftByOpt, minus(allResetOffsetScenarioOpts, resetShiftByOpt)); + CommandLineUtils.checkInvalidArgs(parser, options, resetFromFileOpt, minus(allResetOffsetScenarioOpts, resetFromFileOpt)); } } \ No newline at end of file From 03240c5d19cdde1f5e72cdbda1fe1de72d449371 Mon Sep 17 00:00:00 2001 From: aliehsaeedii Date: Thu, 5 Jun 2025 08:23:46 +0200 Subject: [PATCH 16/26] backup --- .../streams/StreamsGroupCommandOptions.java | 29 ++++++++++++------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java index 161596f2c5c84..fc65f88e74ed6 100644 --- a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java +++ b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java @@ -196,17 +196,7 @@ public void checkArgs() { CommandLineUtils.checkRequiredArgs(parser, options, bootstrapServerOpt); if (options.has(describeOpt)) { - if (!options.has(groupOpt) && !options.has(allGroupsOpt)) - CommandLineUtils.printUsageAndExit(parser, - "Option " + describeOpt + " takes one of these options: " + allGroupSelectionScopeOpts.stream().map(Object::toString).collect(Collectors.joining(", "))); - List> mutuallyExclusiveOpts = Arrays.asList(membersOpt, offsetsOpt, stateOpt); - if (mutuallyExclusiveOpts.stream().mapToInt(o -> options.has(o) ? 
1 : 0).sum() > 1) { - CommandLineUtils.printUsageAndExit(parser, - "Option " + describeOpt + " takes at most one of these options: " + mutuallyExclusiveOpts.stream().map(Object::toString).collect(Collectors.joining(", "))); - } - if (options.has(stateOpt) && options.valueOf(stateOpt) != null) - CommandLineUtils.printUsageAndExit(parser, - "Option " + describeOpt + " does not take a value for " + stateOpt); + checkDescribeArgs(); } else { if (options.has(timeoutMsOpt)) LOGGER.debug("Option " + timeoutMsOpt + " is applicable only when " + describeOpt + " is used."); @@ -217,14 +207,31 @@ public void checkArgs() { CommandLineUtils.printUsageAndExit(parser, "Option " + deleteOpt + " takes one of these options: " + allGroupSelectionScopeOpts.stream().map(Object::toString).collect(Collectors.joining(", "))); } + if (options.has(resetOffsetsOpt)) { checkOffsetResetArgs(); } + if ((options.has(dryRunOpt) || options.has(executeOpt)) && !options.has(resetOffsetsOpt)) + CommandLineUtils.printUsageAndExit(parser, "Only Option " + resetOffsetsOpt + " accepts " + executeOpt + " or " + dryRunOpt); + CommandLineUtils.checkInvalidArgs(parser, options, listOpt, membersOpt, offsetsOpt); CommandLineUtils.checkInvalidArgs(parser, options, groupOpt, minus(allStreamsGroupLevelOpts, describeOpt, deleteOpt)); } + private void checkDescribeArgs() { + if (!options.has(groupOpt) && !options.has(allGroupsOpt)) + CommandLineUtils.printUsageAndExit(parser, + "Option " + describeOpt + " takes one of these options: " + allGroupSelectionScopeOpts.stream().map(Object::toString).collect(Collectors.joining(", "))); + List> mutuallyExclusiveOpts = Arrays.asList(membersOpt, offsetsOpt, stateOpt); + if (mutuallyExclusiveOpts.stream().mapToInt(o -> options.has(o) ? 1 : 0).sum() > 1) { + CommandLineUtils.printUsageAndExit(parser, + "Option " + describeOpt + " takes at most one of these options: " + mutuallyExclusiveOpts.stream().map(Object::toString).collect(Collectors.joining(", "))); + } + if (options.has(stateOpt) && options.valueOf(stateOpt) != null) + CommandLineUtils.printUsageAndExit(parser, + "Option " + describeOpt + " does not take a value for " + stateOpt); + } private void checkOffsetResetArgs() { if (options.has(dryRunOpt) && options.has(executeOpt)) CommandLineUtils.printUsageAndExit(parser, "Option " + resetOffsetsOpt + " only accepts one of " + executeOpt + " and " + dryRunOpt); From b0024008195eb009db00c139cd52359b1a007e7a Mon Sep 17 00:00:00 2001 From: aliehsaeedii Date: Fri, 6 Jun 2025 02:18:23 +0200 Subject: [PATCH 17/26] add input to options + rebase --- .../org/apache/kafka/tools/streams/StreamsGroupCommand.java | 1 - .../apache/kafka/tools/streams/StreamsGroupCommandOptions.java | 2 +- .../org/apache/kafka/tools/streams/DeleteStreamsGroupTest.java | 2 -- .../apache/kafka/tools/streams/ResetStreamsGroupOffsetTest.java | 2 -- 4 files changed, 1 insertion(+), 6 deletions(-) diff --git a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java index 05c87cd851861..0af7767bbbae3 100644 --- a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java +++ b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java @@ -164,7 +164,6 @@ public static void printError(String msg, Optional e) { static class StreamsGroupService implements AutoCloseable { final StreamsGroupCommandOptions opts; private final Admin adminClient; - public StreamsGroupService(StreamsGroupCommandOptions opts, Map 
configOverrides) { this.opts = opts; try { diff --git a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java index fc65f88e74ed6..ff0da36282f7f 100644 --- a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java +++ b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java @@ -83,7 +83,7 @@ public class StreamsGroupCommandOptions extends CommandDefaultOptions { public final OptionSpec listOpt; public final OptionSpec describeOpt; public final OptionSpec deleteOpt; - final OptionSpec allGroupsOpt; + public final OptionSpec allGroupsOpt; public final OptionSpec timeoutMsOpt; public final OptionSpec commandConfigOpt; public final OptionSpec stateOpt; diff --git a/tools/src/test/java/org/apache/kafka/tools/streams/DeleteStreamsGroupTest.java b/tools/src/test/java/org/apache/kafka/tools/streams/DeleteStreamsGroupTest.java index bb7fbe58677d5..394e4cf63d01a 100644 --- a/tools/src/test/java/org/apache/kafka/tools/streams/DeleteStreamsGroupTest.java +++ b/tools/src/test/java/org/apache/kafka/tools/streams/DeleteStreamsGroupTest.java @@ -28,7 +28,6 @@ import org.apache.kafka.common.serialization.StringSerializer; import org.apache.kafka.common.utils.Exit; import org.apache.kafka.common.utils.Utils; -import org.apache.kafka.coordinator.group.GroupCoordinatorConfig; import org.apache.kafka.streams.GroupProtocol; import org.apache.kafka.streams.KafkaStreams; import org.apache.kafka.streams.KeyValueTimestamp; @@ -89,7 +88,6 @@ public class DeleteStreamsGroupTest { @BeforeAll public static void startCluster() { final Properties props = new Properties(); - props.setProperty(GroupCoordinatorConfig.GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG, "classic,consumer,streams"); cluster = new EmbeddedKafkaCluster(2, props); cluster.start(); diff --git a/tools/src/test/java/org/apache/kafka/tools/streams/ResetStreamsGroupOffsetTest.java b/tools/src/test/java/org/apache/kafka/tools/streams/ResetStreamsGroupOffsetTest.java index 14d96f674c7bb..1fed51031d0b0 100644 --- a/tools/src/test/java/org/apache/kafka/tools/streams/ResetStreamsGroupOffsetTest.java +++ b/tools/src/test/java/org/apache/kafka/tools/streams/ResetStreamsGroupOffsetTest.java @@ -24,7 +24,6 @@ import org.apache.kafka.common.serialization.Serdes; import org.apache.kafka.common.serialization.StringSerializer; import org.apache.kafka.common.utils.Exit; -import org.apache.kafka.coordinator.group.GroupCoordinatorConfig; import org.apache.kafka.streams.GroupProtocol; import org.apache.kafka.streams.KafkaStreams; import org.apache.kafka.streams.KeyValueTimestamp; @@ -87,7 +86,6 @@ public class ResetStreamsGroupOffsetTest { @BeforeAll public static void startCluster() { final Properties props = new Properties(); - props.setProperty(GroupCoordinatorConfig.GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG, "classic,consumer,streams"); cluster = new EmbeddedKafkaCluster(2, props); cluster.start(); From abf8acb56b46b2f732b832ae720486438111be8a Mon Sep 17 00:00:00 2001 From: aliehsaeedii Date: Fri, 6 Jun 2025 14:12:15 +0200 Subject: [PATCH 18/26] refactor --- .../kafka/tools/streams/OffsetsUtils.java | 465 ++++++++++++++++++ .../tools/streams/StreamsGroupCommand.java | 359 +------------- 2 files changed, 479 insertions(+), 345 deletions(-) create mode 100644 tools/src/main/java/org/apache/kafka/tools/streams/OffsetsUtils.java diff --git 
a/tools/src/main/java/org/apache/kafka/tools/streams/OffsetsUtils.java b/tools/src/main/java/org/apache/kafka/tools/streams/OffsetsUtils.java new file mode 100644 index 0000000000000..09fb24578a5e7 --- /dev/null +++ b/tools/src/main/java/org/apache/kafka/tools/streams/OffsetsUtils.java @@ -0,0 +1,465 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.tools.streams; + +import org.apache.kafka.clients.admin.Admin; +import org.apache.kafka.clients.admin.ListOffsetsResult; +import org.apache.kafka.clients.admin.OffsetSpec; +import org.apache.kafka.clients.admin.TopicDescription; +import org.apache.kafka.clients.consumer.OffsetAndMetadata; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.requests.ListOffsetsResponse; +import org.apache.kafka.common.utils.Utils; +import org.apache.kafka.server.util.CommandLineUtils; +import org.apache.kafka.tools.consumer.group.CsvUtils; + +import com.fasterxml.jackson.databind.ObjectReader; + +import java.io.IOException; +import java.text.ParseException; +import java.time.Duration; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.ExecutionException; +import java.util.function.Function; +import java.util.function.ToIntFunction; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import joptsimple.OptionParser; + +import static org.apache.kafka.tools.streams.StreamsGroupCommandOptions.LOGGER; + +public class OffsetsUtils { + private static final String TOPIC_PARTITION_SEPARATOR = ":"; + private final Admin adminClient; + private final OffsetsUtilsOptions opts; + private final OptionParser parser; + public OffsetsUtils(Admin adminClient, StreamsGroupCommandOptions options, OptionParser parser) { + this.adminClient = adminClient; + this.opts = fromStreamsGroupCommandOptions(options); + this.parser = parser; + } + + private OffsetsUtilsOptions fromStreamsGroupCommandOptions(StreamsGroupCommandOptions opts) { + return new OffsetsUtilsOptions( + opts.options.has(opts.groupOpt) ? opts.options.valuesOf(opts.groupOpt) : null, + opts.options.has(opts.resetToOffsetOpt) ? opts.options.valuesOf(opts.resetToOffsetOpt) : null, + opts.options.has(opts.resetFromFileOpt) ? opts.options.valuesOf(opts.resetFromFileOpt) : null, + opts.options.has(opts.resetToDatetimeOpt) ? opts.options.valuesOf(opts.resetToDatetimeOpt) : null, + opts.options.has(opts.resetByDurationOpt) ? opts.options.valuesOf(opts.resetByDurationOpt) : null, + opts.options.has(opts.resetShiftByOpt) ? 
opts.options.valuesOf(opts.resetShiftByOpt) : null + ); + } + + Optional>> resetPlanFromFile() { + if (opts.resetFromFileOpt != null && !opts.resetFromFileOpt.isEmpty()) { + try { + String resetPlanPath = opts.resetFromFileOpt.get(0); + String resetPlanCsv = Utils.readFileAsString(resetPlanPath); + Map> resetPlan = parseResetPlan(resetPlanCsv); + return Optional.of(resetPlan); + } catch (IOException e) { + throw new RuntimeException(e); + } + } else return Optional.empty(); + } + + private Map> parseResetPlan(String resetPlanCsv) { + ObjectReader csvReader = CsvUtils.readerFor(CsvUtils.CsvRecordNoGroup.class); + String[] lines = resetPlanCsv.split("\n"); + boolean isSingleGroupQuery = opts.groupOpt.size() == 1; + boolean isOldCsvFormat = false; + try { + if (lines.length > 0) { + csvReader.readValue(lines[0], CsvUtils.CsvRecordNoGroup.class); + isOldCsvFormat = true; + } + } catch (IOException e) { + throw new RuntimeException("Invalid CSV format in reset plan file: " + e.getMessage()); + } + + Map> dataMap = new HashMap<>(); + + try { + // Single group CSV format: "topic,partition,offset" + if (isSingleGroupQuery && isOldCsvFormat) { + String group = opts.groupOpt.get(0); + for (String line : lines) { + CsvUtils.CsvRecordNoGroup rec = csvReader.readValue(line, CsvUtils.CsvRecordNoGroup.class); + dataMap.computeIfAbsent(group, k -> new HashMap<>()) + .put(new TopicPartition(rec.getTopic(), rec.getPartition()), new OffsetAndMetadata(rec.getOffset())); + } + } else { + csvReader = CsvUtils.readerFor(CsvUtils.CsvRecordWithGroup.class); + for (String line : lines) { + CsvUtils.CsvRecordWithGroup rec = csvReader.readValue(line, CsvUtils.CsvRecordWithGroup.class); + dataMap.computeIfAbsent(rec.getGroup(), k -> new HashMap<>()) + .put(new TopicPartition(rec.getTopic(), rec.getPartition()), new OffsetAndMetadata(rec.getOffset())); + } + } + } catch (IOException e) { + throw new RuntimeException(e); + } + + return dataMap; + } + + Map checkOffsetsRange(Map requestedOffsets) { + Map logStartOffsets = getLogStartOffsets(requestedOffsets.keySet()); + Map logEndOffsets = getLogEndOffsets(requestedOffsets.keySet()); + + Map res = new HashMap<>(); + + requestedOffsets.forEach((topicPartition, offset) -> { + LogOffsetResult logEndOffset = logEndOffsets.get(topicPartition); + + if (logEndOffset != null) { + if (logEndOffset instanceof LogOffset && offset > ((LogOffset) logEndOffset).value) { + long endOffset = ((LogOffset) logEndOffset).value; + LOGGER.warn("New offset (" + offset + ") is higher than latest offset for topic partition " + topicPartition + ". Value will be set to " + endOffset); + res.put(topicPartition, endOffset); + } else { + LogOffsetResult logStartOffset = logStartOffsets.get(topicPartition); + + if (logStartOffset instanceof LogOffset && offset < ((LogOffset) logStartOffset).value) { + long startOffset = ((LogOffset) logStartOffset).value; + LOGGER.warn("New offset (" + offset + ") is lower than earliest offset for topic partition " + topicPartition + ". 
Value will be set to " + startOffset); + res.put(topicPartition, startOffset); + } else + res.put(topicPartition, offset); + } + } else { + // the control should not reach here + throw new IllegalStateException("Unexpected non-existing offset value for topic partition " + topicPartition); + } + }); + + return res; + } + + Map getLogTimestampOffsets(Collection topicPartitions, long timestamp) { + try { + Map timestampOffsets = topicPartitions.stream() + .collect(Collectors.toMap(Function.identity(), tp -> OffsetSpec.forTimestamp(timestamp))); + + Map offsets = adminClient.listOffsets( + timestampOffsets).all().get(); + + Map successfulOffsetsForTimes = new HashMap<>(); + Map unsuccessfulOffsetsForTimes = new HashMap<>(); + + offsets.forEach((tp, offsetsResultInfo) -> { + if (offsetsResultInfo.offset() != ListOffsetsResponse.UNKNOWN_OFFSET) + successfulOffsetsForTimes.put(tp, offsetsResultInfo); + else + unsuccessfulOffsetsForTimes.put(tp, offsetsResultInfo); + }); + + Map successfulLogTimestampOffsets = successfulOffsetsForTimes.entrySet().stream() + .collect(Collectors.toMap(Map.Entry::getKey, e -> new LogOffset(e.getValue().offset()))); + + unsuccessfulOffsetsForTimes.forEach((tp, offsetResultInfo) -> + System.out.println("\nWarn: Partition " + tp.partition() + " from topic " + tp.topic() + + " is empty. Falling back to latest known offset.")); + + successfulLogTimestampOffsets.putAll(getLogEndOffsets(unsuccessfulOffsetsForTimes.keySet())); + + return successfulLogTimestampOffsets; + } catch (InterruptedException | ExecutionException e) { + throw new RuntimeException(e); + } + } + + Map getLogStartOffsets(Collection topicPartitions) { + return getLogOffsets(topicPartitions, OffsetSpec.earliest()); + } + + Map getLogEndOffsets(Collection topicPartitions) { + return getLogOffsets(topicPartitions, OffsetSpec.latest()); + } + + private Map getLogOffsets(Collection topicPartitions, OffsetSpec offsetSpec) { + try { + Map startOffsets = topicPartitions.stream() + .collect(Collectors.toMap(Function.identity(), tp -> offsetSpec)); + + Map offsets = adminClient.listOffsets( + startOffsets + ).all().get(); + + return topicPartitions.stream().collect(Collectors.toMap( + Function.identity(), + tp -> offsets.containsKey(tp) + ? 
new LogOffset(offsets.get(tp).offset()) + : new Unknown() + )); + } catch (InterruptedException | ExecutionException e) { + throw new RuntimeException(e); + } + } + + List parseTopicPartitionsToReset(List topicArgs) throws ExecutionException, InterruptedException { + List topicsWithPartitions = new ArrayList<>(); + List topics = new ArrayList<>(); + + topicArgs.forEach(topicArg -> { + if (topicArg.contains(TOPIC_PARTITION_SEPARATOR)) + topicsWithPartitions.add(topicArg); + else + topics.add(topicArg); + }); + + List specifiedPartitions = + topicsWithPartitions.stream().flatMap(this::parseTopicsWithPartitions).collect(Collectors.toList()); + + List unspecifiedPartitions = new ArrayList<>(); + + if (!topics.isEmpty()) { + Map descriptionMap = adminClient.describeTopics( + topics + ).allTopicNames().get(); + + descriptionMap.forEach((topic, description) -> + description.partitions().forEach(tpInfo -> unspecifiedPartitions.add(new TopicPartition(topic, tpInfo.partition()))) + ); + } + + specifiedPartitions.addAll(unspecifiedPartitions); + + return specifiedPartitions; + } + + private Stream parseTopicsWithPartitions(String topicArg) { + ToIntFunction partitionNum = partition -> { + try { + return Integer.parseInt(partition); + } catch (NumberFormatException e) { + throw new IllegalArgumentException("Invalid partition '" + partition + "' specified in topic arg '" + topicArg + "''"); + } + }; + + String[] arr = topicArg.split(":"); + + if (arr.length != 2) + throw new IllegalArgumentException("Invalid topic arg '" + topicArg + "', expected topic name and partitions"); + + String topic = arr[0]; + String partitions = arr[1]; + + return Arrays.stream(partitions.split(",")). + map(partition -> new TopicPartition(topic, partitionNum.applyAsInt(partition))); + } + + Map resetToOffset(Collection partitionsToReset) { + long offset = opts.resetToOffsetOpt != null && !opts.resetToOffsetOpt.isEmpty() + ? 
opts.resetToOffsetOpt.get(0)
+            : 0L;
+        return checkOffsetsRange(partitionsToReset.stream().collect(Collectors.toMap(Function.identity(), tp -> offset)))
+            .entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, e -> new OffsetAndMetadata(e.getValue())));
+    }
+
+    Map resetToEarliest(Collection partitionsToReset) {
+        Map logStartOffsets = getLogStartOffsets(partitionsToReset);
+        return partitionsToReset.stream().collect(Collectors.toMap(Function.identity(), topicPartition -> {
+            LogOffsetResult logOffsetResult = logStartOffsets.get(topicPartition);
+
+            if (!(logOffsetResult instanceof LogOffset)) {
+                CommandLineUtils.printUsageAndExit(parser, "Error getting starting offset of topic partition: " + topicPartition);
+            }
+
+            return new OffsetAndMetadata(((LogOffset) logOffsetResult).value);
+        }));
+    }
+
+    Map resetToLatest(Collection partitionsToReset) {
+        Map logEndOffsets = getLogEndOffsets(partitionsToReset);
+        return partitionsToReset.stream().collect(Collectors.toMap(Function.identity(), topicPartition -> {
+            LogOffsetResult logOffsetResult = logEndOffsets.get(topicPartition);
+
+            if (!(logOffsetResult instanceof LogOffset)) {
+                CommandLineUtils.printUsageAndExit(parser, "Error getting ending offset of topic partition: " + topicPartition);
+            }
+
+            return new OffsetAndMetadata(((LogOffset) logOffsetResult).value);
+        }));
+    }
+
+    Map resetByShiftBy(
+        Collection partitionsToReset,
+        Map currentCommittedOffsets) {
+
+        Map requestedOffsets = partitionsToReset.stream().collect(Collectors.toMap(Function.identity(), topicPartition -> {
+            long shiftBy = opts.resetShiftByOpt.get(0);
+            OffsetAndMetadata currentOffset = currentCommittedOffsets.get(topicPartition);
+
+            if (currentOffset == null) {
+                throw new IllegalArgumentException("Cannot shift offset for partition " + topicPartition + " since there is no current committed offset");
+            }
+
+            return currentOffset.offset() + shiftBy;
+        }));
+        return checkOffsetsRange(requestedOffsets).entrySet().stream()
+            .collect(Collectors.toMap(Map.Entry::getKey, e -> new OffsetAndMetadata(e.getValue())));
+    }
+
+    Map resetToDateTime(Collection partitionsToReset) {
+        try {
+            long timestamp = Utils.getDateTime(opts.resetToDatetimeOpt.get(0));
+            Map logTimestampOffsets =
+                getLogTimestampOffsets(partitionsToReset, timestamp);
+            return partitionsToReset.stream().collect(Collectors.toMap(Function.identity(), topicPartition -> {
+                LogOffsetResult logTimestampOffset = logTimestampOffsets.get(topicPartition);
+
+                if (!(logTimestampOffset instanceof LogOffset)) {
+                    CommandLineUtils.printUsageAndExit(parser, "Error getting offset by timestamp of topic partition: " + topicPartition);
+                }
+
+                return new OffsetAndMetadata(((LogOffset) logTimestampOffset).value);
+            }));
+        } catch (ParseException e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    Map resetByDuration(Collection partitionsToReset) {
+        String duration = opts.resetByDurationOpt.get(0);
+        Duration durationParsed = Duration.parse(duration);
+        Instant now = Instant.now();
+        long timestamp = now.minus(durationParsed).toEpochMilli();
+        Map logTimestampOffsets =
+            getLogTimestampOffsets(partitionsToReset, timestamp);
+        return partitionsToReset.stream().collect(Collectors.toMap(Function.identity(), topicPartition -> {
+            OffsetsUtils.LogOffsetResult logTimestampOffset = logTimestampOffsets.get(topicPartition);
+
+            if (!(logTimestampOffset instanceof OffsetsUtils.LogOffset)) {
+                CommandLineUtils.printUsageAndExit(parser, "Error getting offset by timestamp of topic partition: " +
topicPartition);
+            }
+
+            return new OffsetAndMetadata(((OffsetsUtils.LogOffset) logTimestampOffset).value);
+        }));
+    }
+
+    Map resetFromFile(String groupId) {
+        return resetPlanFromFile().map(resetPlan -> {
+            Map resetPlanForGroup = resetPlan.get(groupId);
+
+            if (resetPlanForGroup == null) {
+                printError("No reset plan for group " + groupId + " found", Optional.empty());
+                return Collections.emptyMap();
+            }
+
+            Map requestedOffsets = resetPlanForGroup.keySet().stream().collect(Collectors.toMap(
+                Function.identity(),
+                topicPartition -> resetPlanForGroup.get(topicPartition).offset()));
+
+            return checkOffsetsRange(requestedOffsets).entrySet().stream()
+                .collect(Collectors.toMap(Map.Entry::getKey, e -> new OffsetAndMetadata(e.getValue())));
+        }).orElseGet(Collections::emptyMap);
+    }
+
+    Map resetToCurrent(Collection partitionsToReset, Map currentCommittedOffsets) {
+        Collection partitionsToResetWithCommittedOffset = new ArrayList<>();
+        Collection partitionsToResetWithoutCommittedOffset = new ArrayList<>();
+
+        for (TopicPartition topicPartition : partitionsToReset) {
+            if (currentCommittedOffsets.containsKey(topicPartition))
+                partitionsToResetWithCommittedOffset.add(topicPartition);
+            else
+                partitionsToResetWithoutCommittedOffset.add(topicPartition);
+        }
+
+        Map preparedOffsetsForPartitionsWithCommittedOffset = partitionsToResetWithCommittedOffset.stream()
+            .collect(Collectors.toMap(Function.identity(), topicPartition -> {
+                OffsetAndMetadata committedOffset = currentCommittedOffsets.get(topicPartition);
+
+                if (committedOffset == null) {
+                    throw new IllegalStateException("Expected a valid current offset for topic partition: " + topicPartition);
+                }
+
+                return new OffsetAndMetadata(committedOffset.offset());
+            }));
+
+        Map preparedOffsetsForPartitionsWithoutCommittedOffset =
+            getLogEndOffsets(partitionsToResetWithoutCommittedOffset)
+                .entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, e -> {
+                    if (!(e.getValue() instanceof OffsetsUtils.LogOffset)) {
+                        CommandLineUtils.printUsageAndExit(parser, "Error getting ending offset of topic partition: " + e.getKey());
+                    }
+                    return new OffsetAndMetadata(((OffsetsUtils.LogOffset) e.getValue()).value);
+                }));
+
+        preparedOffsetsForPartitionsWithCommittedOffset.putAll(preparedOffsetsForPartitionsWithoutCommittedOffset);
+
+        return preparedOffsetsForPartitionsWithCommittedOffset;
+    }
+
+
+    public static void printError(String msg, Optional e) {
+        System.out.println("\nError: " + msg);
+        e.ifPresent(Throwable::printStackTrace);
+    }
+
+    interface LogOffsetResult { }
+
+    static class LogOffset implements LogOffsetResult {
+        final long value;
+
+        LogOffset(long value) {
+            this.value = value;
+        }
+    }
+
+    private static class Unknown implements LogOffsetResult { }
+
+    private static class Ignore implements LogOffsetResult { }
+
+
+    public static class OffsetsUtilsOptions {
+        List groupOpt;
+        List resetToOffsetOpt;
+        List resetFromFileOpt;
+        List resetToDatetimeOpt;
+        List resetByDurationOpt;
+        List resetShiftByOpt;
+
+        public OffsetsUtilsOptions(
+            List groupOpt,
+            List resetToOffsetOpt,
+            List resetFromFileOpt,
+            List resetToDatetimeOpt,
+            List resetByDurationOpt,
+            List resetShiftByOpt) {
+
+            this.groupOpt = groupOpt;
+            this.resetToOffsetOpt = resetToOffsetOpt;
+            this.resetFromFileOpt = resetFromFileOpt;
+            this.resetToDatetimeOpt = resetToDatetimeOpt;
+            this.resetByDurationOpt = resetByDurationOpt;
+            this.resetShiftByOpt = resetShiftByOpt;
+        }
+    }
+}
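The new file above centralizes the clamping contract of checkOffsetsRange: a requested target is pulled back into [log start offset, log end offset] with a warning. A standalone sketch of the --shift-by case (the class name and broker-state values are assumed for illustration; the real code logs through LOGGER before clamping):

public class ShiftByClampDemo {
    public static void main(String[] args) {
        long logStartOffset = 100L;                  // assumed earliest offset in the log
        long logEndOffset = 250L;                    // assumed latest offset in the log
        long committedOffset = 240L;                 // assumed current committed offset
        long shiftBy = 50L;                          // e.g. --shift-by 50
        long requested = committedOffset + shiftBy;  // 290, past the end of the log
        long clamped = Math.min(logEndOffset, Math.max(logStartOffset, requested));
        System.out.println(clamped);                 // prints 250, mirroring the warn-and-clamp behavior
    }
}

diff --git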
a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java index 0af7767bbbae3..768f8cbfdc880 100644 --- a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java +++ b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java @@ -32,7 +32,6 @@ import org.apache.kafka.clients.admin.StreamsGroupMemberAssignment; import org.apache.kafka.clients.admin.StreamsGroupMemberDescription; import org.apache.kafka.clients.admin.StreamsGroupSubtopologyDescription; -import org.apache.kafka.clients.admin.TopicDescription; import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.common.GroupState; import org.apache.kafka.common.GroupType; @@ -43,19 +42,14 @@ import org.apache.kafka.common.errors.GroupNotEmptyException; import org.apache.kafka.common.errors.UnknownTopicOrPartitionException; import org.apache.kafka.common.errors.UnsupportedVersionException; -import org.apache.kafka.common.requests.ListOffsetsResponse; import org.apache.kafka.common.utils.Utils; import org.apache.kafka.server.util.CommandLineUtils; import org.apache.kafka.tools.consumer.group.CsvUtils; import com.fasterxml.jackson.core.JsonProcessingException; -import com.fasterxml.jackson.databind.ObjectReader; import com.fasterxml.jackson.databind.ObjectWriter; import java.io.IOException; -import java.text.ParseException; -import java.time.Duration; -import java.time.Instant; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -69,24 +63,17 @@ import java.util.Properties; import java.util.Set; import java.util.concurrent.ExecutionException; -import java.util.function.Function; -import java.util.function.ToIntFunction; import java.util.stream.Collectors; import java.util.stream.Stream; import joptsimple.OptionException; -import static org.apache.kafka.tools.streams.StreamsGroupCommandOptions.LOGGER; - public class StreamsGroupCommand { - private static final String TOPIC_PARTITION_SEPARATOR = ":"; - public static void main(String[] args) { StreamsGroupCommandOptions opts = new StreamsGroupCommandOptions(args); try { opts.checkArgs(); - // should have exactly one action long numberOfActions = Stream.of( opts.listOpt, @@ -164,6 +151,7 @@ public static void printError(String msg, Optional e) { static class StreamsGroupService implements AutoCloseable { final StreamsGroupCommandOptions opts; private final Admin adminClient; + private OffsetsUtils offsetsUtils; public StreamsGroupService(StreamsGroupCommandOptions opts, Map configOverrides) { this.opts = opts; try { @@ -171,6 +159,7 @@ public StreamsGroupService(StreamsGroupCommandOptions opts, Map } catch (IOException e) { throw new RuntimeException(e); } + this.offsetsUtils = new OffsetsUtils(adminClient, opts, opts.parser); } public StreamsGroupService(StreamsGroupCommandOptions opts, Admin adminClient) { @@ -443,7 +432,6 @@ private List filterExistingGroupTopics(String groupId, List> resetOffsets() { // Dry-run is the default behavior if --execute is not specified boolean dryRun = opts.options.has(opts.dryRunOpt) || !opts.options.has(opts.executeOpt); @@ -517,7 +505,6 @@ Map deleteGroups() { if (!groupIds.isEmpty()) { // retrieve internal topics before deleting groups internalTopics = retrieveInternalTopics(groupIds); - // delete streams groups Map> groupsToDelete = adminClient.deleteStreamsGroups( groupIds, @@ -704,7 +691,7 @@ private Collection getPartitionsToReset(String groupId) throws E } else 
if (opts.options.has(opts.inputTopicOpt)) { List topics = opts.options.valuesOf(opts.inputTopicOpt); - List partitions = parseTopicPartitionsToReset(topics); + List partitions = offsetsUtils.parseTopicPartitionsToReset(topics); // if the user specified topics that do not belong to this group, we filter them out partitions = filterExistingGroupTopics(groupId, partitions); return partitions; @@ -716,336 +703,32 @@ private Collection getPartitionsToReset(String groupId) throws E } } - private List parseTopicPartitionsToReset(List topicArgs) throws ExecutionException, InterruptedException { - List topicsWithPartitions = new ArrayList<>(); - List topics = new ArrayList<>(); - - topicArgs.forEach(topicArg -> { - if (topicArg.contains(TOPIC_PARTITION_SEPARATOR)) - topicsWithPartitions.add(topicArg); - else - topics.add(topicArg); - }); - - List specifiedPartitions = - topicsWithPartitions.stream().flatMap(this::parseTopicsWithPartitions).collect(Collectors.toList()); - - List unspecifiedPartitions = new ArrayList<>(); - - if (!topics.isEmpty()) { - Map descriptionMap = adminClient.describeTopics( - topics - ).allTopicNames().get(); - - descriptionMap.forEach((topic, description) -> - description.partitions().forEach(tpInfo -> unspecifiedPartitions.add(new TopicPartition(topic, tpInfo.partition()))) - ); - } - - specifiedPartitions.addAll(unspecifiedPartitions); - - return specifiedPartitions; - } - - private Stream parseTopicsWithPartitions(String topicArg) { - ToIntFunction partitionNum = partition -> { - try { - return Integer.parseInt(partition); - } catch (NumberFormatException e) { - throw new IllegalArgumentException("Invalid partition '" + partition + "' specified in topic arg '" + topicArg + "''"); - } - }; - - String[] arr = topicArg.split(":"); - - if (arr.length != 2) - throw new IllegalArgumentException("Invalid topic arg '" + topicArg + "', expected topic name and partitions"); - - String topic = arr[0]; - String partitions = arr[1]; - - return Arrays.stream(partitions.split(",")). 
- map(partition -> new TopicPartition(topic, partitionNum.applyAsInt(partition))); - } - - @SuppressWarnings("CyclomaticComplexity") private Map prepareOffsetsToReset(String groupId, Collection partitionsToReset) { if (opts.options.has(opts.resetToOffsetOpt)) { - long offset = opts.options.valueOf(opts.resetToOffsetOpt); - return checkOffsetsRange(partitionsToReset.stream().collect(Collectors.toMap(Function.identity(), tp -> offset))) - .entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, e -> new OffsetAndMetadata(e.getValue()))); + return offsetsUtils.resetToOffset(partitionsToReset); } else if (opts.options.has(opts.resetToEarliestOpt)) { - Map logStartOffsets = getLogStartOffsets(partitionsToReset); - return partitionsToReset.stream().collect(Collectors.toMap(Function.identity(), topicPartition -> { - LogOffsetResult logOffsetResult = logStartOffsets.get(topicPartition); - - if (!(logOffsetResult instanceof LogOffset)) { - CommandLineUtils.printUsageAndExit(opts.parser, "Error getting starting offset of topic partition: " + topicPartition); - } - - return new OffsetAndMetadata(((LogOffset) logOffsetResult).value); - })); + return offsetsUtils.resetToEarliest(partitionsToReset); } else if (opts.options.has(opts.resetToLatestOpt)) { - Map logEndOffsets = getLogEndOffsets(partitionsToReset); - return partitionsToReset.stream().collect(Collectors.toMap(Function.identity(), topicPartition -> { - LogOffsetResult logOffsetResult = logEndOffsets.get(topicPartition); - - if (!(logOffsetResult instanceof LogOffset)) { - CommandLineUtils.printUsageAndExit(opts.parser, "Error getting ending offset of topic partition: " + topicPartition); - } - - return new OffsetAndMetadata(((LogOffset) logOffsetResult).value); - })); + return offsetsUtils.resetToLatest(partitionsToReset); } else if (opts.options.has(opts.resetShiftByOpt)) { Map currentCommittedOffsets = getCommittedOffsets(groupId); - Map requestedOffsets = partitionsToReset.stream().collect(Collectors.toMap(Function.identity(), topicPartition -> { - long shiftBy = opts.options.valueOf(opts.resetShiftByOpt); - OffsetAndMetadata currentOffset = currentCommittedOffsets.get(topicPartition); - - if (currentOffset == null) { - throw new IllegalArgumentException("Cannot shift offset for partition " + topicPartition + " since there is no current committed offset"); - } - - return currentOffset.offset() + shiftBy; - })); - return checkOffsetsRange(requestedOffsets).entrySet().stream() - .collect(Collectors.toMap(Map.Entry::getKey, e -> new OffsetAndMetadata(e.getValue()))); + return offsetsUtils.resetByShiftBy(partitionsToReset, currentCommittedOffsets); } else if (opts.options.has(opts.resetToDatetimeOpt)) { - try { - long timestamp = Utils.getDateTime(opts.options.valueOf(opts.resetToDatetimeOpt)); - Map logTimestampOffsets = getLogTimestampOffsets(partitionsToReset, timestamp); - return partitionsToReset.stream().collect(Collectors.toMap(Function.identity(), topicPartition -> { - LogOffsetResult logTimestampOffset = logTimestampOffsets.get(topicPartition); - - if (!(logTimestampOffset instanceof LogOffset)) { - CommandLineUtils.printUsageAndExit(opts.parser, "Error getting offset by timestamp of topic partition: " + topicPartition); - } - - return new OffsetAndMetadata(((LogOffset) logTimestampOffset).value); - })); - } catch (ParseException e) { - throw new RuntimeException(e); - } + return offsetsUtils.resetToDateTime(partitionsToReset); } else if (opts.options.has(opts.resetByDurationOpt)) { - String duration = 
opts.options.valueOf(opts.resetByDurationOpt); - Duration durationParsed = Duration.parse(duration); - Instant now = Instant.now(); - durationParsed.negated().addTo(now); - long timestamp = now.minus(durationParsed).toEpochMilli(); - Map logTimestampOffsets = getLogTimestampOffsets(partitionsToReset, timestamp); - return partitionsToReset.stream().collect(Collectors.toMap(Function.identity(), topicPartition -> { - LogOffsetResult logTimestampOffset = logTimestampOffsets.get(topicPartition); - - if (!(logTimestampOffset instanceof LogOffset)) { - CommandLineUtils.printUsageAndExit(opts.parser, "Error getting offset by timestamp of topic partition: " + topicPartition); - } - - return new OffsetAndMetadata(((LogOffset) logTimestampOffset).value); - })); - } else if (resetPlanFromFile().isPresent()) { - return resetPlanFromFile().map(resetPlan -> { - Map resetPlanForGroup = resetPlan.get(groupId); - - if (resetPlanForGroup == null) { - printError("No reset plan for group " + groupId + " found", Optional.empty()); - return Collections.emptyMap(); - } - - Map requestedOffsets = resetPlanForGroup.keySet().stream().collect(Collectors.toMap( - Function.identity(), - topicPartition -> resetPlanForGroup.get(topicPartition).offset())); - - return checkOffsetsRange(requestedOffsets).entrySet().stream() - .collect(Collectors.toMap(Map.Entry::getKey, e -> new OffsetAndMetadata(e.getValue()))); - }).orElseGet(Collections::emptyMap); + return offsetsUtils.resetByDuration(partitionsToReset); + } else if (offsetsUtils.resetPlanFromFile().isPresent()) { + return offsetsUtils.resetFromFile(groupId); } else if (opts.options.has(opts.resetToCurrentOpt)) { Map currentCommittedOffsets = getCommittedOffsets(groupId); - Collection partitionsToResetWithCommittedOffset = new ArrayList<>(); - Collection partitionsToResetWithoutCommittedOffset = new ArrayList<>(); - - for (TopicPartition topicPartition : partitionsToReset) { - if (currentCommittedOffsets.containsKey(topicPartition)) - partitionsToResetWithCommittedOffset.add(topicPartition); - else - partitionsToResetWithoutCommittedOffset.add(topicPartition); - } - - Map preparedOffsetsForPartitionsWithCommittedOffset = partitionsToResetWithCommittedOffset.stream() - .collect(Collectors.toMap(Function.identity(), topicPartition -> { - OffsetAndMetadata committedOffset = currentCommittedOffsets.get(topicPartition); - - if (committedOffset == null) { - throw new IllegalStateException("Expected a valid current offset for topic partition: " + topicPartition); - } - - return new OffsetAndMetadata(committedOffset.offset()); - })); - - Map preparedOffsetsForPartitionsWithoutCommittedOffset = getLogEndOffsets(partitionsToResetWithoutCommittedOffset) - .entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, e -> { - if (!(e.getValue() instanceof LogOffset)) { - CommandLineUtils.printUsageAndExit(opts.parser, "Error getting ending offset of topic partition: " + e.getKey()); - } - - return new OffsetAndMetadata(((LogOffset) e.getValue()).value); - })); - - preparedOffsetsForPartitionsWithCommittedOffset.putAll(preparedOffsetsForPartitionsWithoutCommittedOffset); - - return preparedOffsetsForPartitionsWithCommittedOffset; + return offsetsUtils.resetToCurrent(partitionsToReset, currentCommittedOffsets); } - CommandLineUtils.printUsageAndExit(opts.parser, String.format("Option '%s' requires one of the following scenarios: %s", opts.resetOffsetsOpt, opts.allResetOffsetScenarioOpts)); + CommandLineUtils + .printUsageAndExit(opts.parser, String.format("Option '%s' requires one 
of the following scenarios: %s", opts.resetOffsetsOpt, opts.allResetOffsetScenarioOpts)); return null; } - Optional>> resetPlanFromFile() { - if (opts.options.has(opts.resetFromFileOpt)) { - try { - String resetPlanPath = opts.options.valueOf(opts.resetFromFileOpt); - String resetPlanCsv = Utils.readFileAsString(resetPlanPath); - Map> resetPlan = parseResetPlan(resetPlanCsv); - return Optional.of(resetPlan); - } catch (IOException e) { - throw new RuntimeException(e); - } - } else return Optional.empty(); - } - - private Map> parseResetPlan(String resetPlanCsv) { - ObjectReader csvReader = CsvUtils.readerFor(CsvUtils.CsvRecordNoGroup.class); - String[] lines = resetPlanCsv.split("\n"); - boolean isSingleGroupQuery = opts.options.valuesOf(opts.groupOpt).size() == 1; - boolean isOldCsvFormat = false; - try { - if (lines.length > 0) { - csvReader.readValue(lines[0], CsvUtils.CsvRecordNoGroup.class); - isOldCsvFormat = true; - } - } catch (IOException e) { - throw new RuntimeException("Invalid CSV format in reset plan file: " + e.getMessage()); - } - - Map> dataMap = new HashMap<>(); - - try { - // Single group CSV format: "topic,partition,offset" - if (isSingleGroupQuery && isOldCsvFormat) { - String group = opts.options.valueOf(opts.groupOpt); - for (String line : lines) { - CsvUtils.CsvRecordNoGroup rec = csvReader.readValue(line, CsvUtils.CsvRecordNoGroup.class); - dataMap.computeIfAbsent(group, k -> new HashMap<>()) - .put(new TopicPartition(rec.getTopic(), rec.getPartition()), new OffsetAndMetadata(rec.getOffset())); - } - } else { - csvReader = CsvUtils.readerFor(CsvUtils.CsvRecordWithGroup.class); - for (String line : lines) { - CsvUtils.CsvRecordWithGroup rec = csvReader.readValue(line, CsvUtils.CsvRecordWithGroup.class); - dataMap.computeIfAbsent(rec.getGroup(), k -> new HashMap<>()) - .put(new TopicPartition(rec.getTopic(), rec.getPartition()), new OffsetAndMetadata(rec.getOffset())); - } - } - } catch (IOException e) { - throw new RuntimeException(e); - } - - return dataMap; - } - - private Map checkOffsetsRange(Map requestedOffsets) { - Map logStartOffsets = getLogStartOffsets(requestedOffsets.keySet()); - Map logEndOffsets = getLogEndOffsets(requestedOffsets.keySet()); - - Map res = new HashMap<>(); - - requestedOffsets.forEach((topicPartition, offset) -> { - LogOffsetResult logEndOffset = logEndOffsets.get(topicPartition); - - if (logEndOffset != null) { - if (logEndOffset instanceof LogOffset && offset > ((LogOffset) logEndOffset).value) { - long endOffset = ((LogOffset) logEndOffset).value; - LOGGER.warn("New offset (" + offset + ") is higher than latest offset for topic partition " + topicPartition + ". Value will be set to " + endOffset); - res.put(topicPartition, endOffset); - } else { - LogOffsetResult logStartOffset = logStartOffsets.get(topicPartition); - - if (logStartOffset instanceof LogOffset && offset < ((LogOffset) logStartOffset).value) { - long startOffset = ((LogOffset) logStartOffset).value; - LOGGER.warn("New offset (" + offset + ") is lower than earliest offset for topic partition " + topicPartition + ". 
Value will be set to " + startOffset); - res.put(topicPartition, startOffset); - } else - res.put(topicPartition, offset); - } - } else { - // the control should not reach here - throw new IllegalStateException("Unexpected non-existing offset value for topic partition " + topicPartition); - } - }); - - return res; - } - - private Map getLogTimestampOffsets(Collection topicPartitions, long timestamp) { - try { - Map timestampOffsets = topicPartitions.stream() - .collect(Collectors.toMap(Function.identity(), tp -> OffsetSpec.forTimestamp(timestamp))); - - Map offsets = adminClient.listOffsets( - timestampOffsets).all().get(); - - Map successfulOffsetsForTimes = new HashMap<>(); - Map unsuccessfulOffsetsForTimes = new HashMap<>(); - - offsets.forEach((tp, offsetsResultInfo) -> { - if (offsetsResultInfo.offset() != ListOffsetsResponse.UNKNOWN_OFFSET) - successfulOffsetsForTimes.put(tp, offsetsResultInfo); - else - unsuccessfulOffsetsForTimes.put(tp, offsetsResultInfo); - }); - - Map successfulLogTimestampOffsets = successfulOffsetsForTimes.entrySet().stream() - .collect(Collectors.toMap(Map.Entry::getKey, e -> new LogOffset(e.getValue().offset()))); - - unsuccessfulOffsetsForTimes.forEach((tp, offsetResultInfo) -> - System.out.println("\nWarn: Partition " + tp.partition() + " from topic " + tp.topic() + - " is empty. Falling back to latest known offset.")); - - successfulLogTimestampOffsets.putAll(getLogEndOffsets(unsuccessfulOffsetsForTimes.keySet())); - - return successfulLogTimestampOffsets; - } catch (InterruptedException | ExecutionException e) { - throw new RuntimeException(e); - } - } - - private Map getLogStartOffsets(Collection topicPartitions) { - return getLogOffsets(topicPartitions, OffsetSpec.earliest()); - } - - private Map getLogEndOffsets(Collection topicPartitions) { - return getLogOffsets(topicPartitions, OffsetSpec.latest()); - } - - private Map getLogOffsets(Collection topicPartitions, OffsetSpec offsetSpec) { - try { - Map startOffsets = topicPartitions.stream() - .collect(Collectors.toMap(Function.identity(), tp -> offsetSpec)); - - Map offsets = adminClient.listOffsets( - startOffsets - ).all().get(); - - return topicPartitions.stream().collect(Collectors.toMap( - Function.identity(), - tp -> offsets.containsKey(tp) - ? new LogOffset(offsets.get(tp).offset()) - : new Unknown() - )); - } catch (InterruptedException | ExecutionException e) { - throw new RuntimeException(e); - } - } - private boolean isInferredInternalTopic(final String topicName, final String applicationId) { return topicName.startsWith(applicationId + "-") && matchesInternalTopicFormat(topicName); } @@ -1087,20 +770,6 @@ private > T withTimeoutMs(T options) { return options.timeoutMs(t); } - interface LogOffsetResult { } - - private static class LogOffset implements LogOffsetResult { - final long value; - - LogOffset(long value) { - this.value = value; - } - } - - private static class Unknown implements LogOffsetResult { } - - private static class Ignore implements LogOffsetResult { } - /** * Prints an error message if the group state indicates that the group is either dead or empty. 
* From 024d3feb6b3f66b8f3a16f45fbadcb5165d1acd8 Mon Sep 17 00:00:00 2001 From: aliehsaeedii Date: Sat, 7 Jun 2025 20:07:09 +0200 Subject: [PATCH 19/26] ListGroupsResult should be package private --- .../apache/kafka/clients/admin/ListGroupsResult.java | 2 +- .../kafka/tools/streams/StreamsGroupCommand.java | 3 ++- .../kafka/tools/streams/StreamsGroupCommandTest.java | 12 +++++++----- 3 files changed, 10 insertions(+), 7 deletions(-) diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ListGroupsResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/ListGroupsResult.java index 795d8523d52cf..b19c3e38e9cf5 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/ListGroupsResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/ListGroupsResult.java @@ -37,7 +37,7 @@ public class ListGroupsResult { private final KafkaFutureImpl> valid; private final KafkaFutureImpl> errors; - public ListGroupsResult(KafkaFuture> future) { + ListGroupsResult(KafkaFuture> future) { this.all = new KafkaFutureImpl<>(); this.valid = new KafkaFutureImpl<>(); this.errors = new KafkaFutureImpl<>(); diff --git a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java index 768f8cbfdc880..34f90dfb8cfec 100644 --- a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java +++ b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java @@ -151,7 +151,7 @@ public static void printError(String msg, Optional e) { static class StreamsGroupService implements AutoCloseable { final StreamsGroupCommandOptions opts; private final Admin adminClient; - private OffsetsUtils offsetsUtils; + private final OffsetsUtils offsetsUtils; public StreamsGroupService(StreamsGroupCommandOptions opts, Map configOverrides) { this.opts = opts; try { @@ -165,6 +165,7 @@ public StreamsGroupService(StreamsGroupCommandOptions opts, Map public StreamsGroupService(StreamsGroupCommandOptions opts, Admin adminClient) { this.opts = opts; this.adminClient = adminClient; + this.offsetsUtils = new OffsetsUtils(adminClient, opts, opts.parser); } public void listGroups() throws ExecutionException, InterruptedException { diff --git a/tools/src/test/java/org/apache/kafka/tools/streams/StreamsGroupCommandTest.java b/tools/src/test/java/org/apache/kafka/tools/streams/StreamsGroupCommandTest.java index acccf7ff6634a..41aaafddb1632 100644 --- a/tools/src/test/java/org/apache/kafka/tools/streams/StreamsGroupCommandTest.java +++ b/tools/src/test/java/org/apache/kafka/tools/streams/StreamsGroupCommandTest.java @@ -298,7 +298,8 @@ public void testAdminRequestsForResetOffsets() { .thenReturn(describeTopicsResult(topics, 1)); when(adminClient.listOffsets(any())) .thenReturn(listOffsetsResult()); - when(adminClient.listGroups(any())).thenReturn(listGroupResult(groupId)); + ListGroupsResult listGroupsResult = listGroupResult(groupId); + when(adminClient.listGroups(any(ListGroupsOptions.class))).thenReturn(listGroupsResult); ListStreamsGroupOffsetsResult result = mock(ListStreamsGroupOffsetsResult.class); Map committedOffsetsMap = new HashMap<>(); committedOffsetsMap.put(new TopicPartition("topic1", 0), mock(OffsetAndMetadata.class)); @@ -423,10 +424,11 @@ public void testDeleteNonStreamsGroup() { } private ListGroupsResult listGroupResult(String groupId) { - KafkaFutureImpl> future = new KafkaFutureImpl<>(); - GroupListing groupListing = new GroupListing(groupId, Optional.of(GroupType.STREAMS), 
"streams", Optional.of(GroupState.DEAD)); - future.complete(List.of(groupListing)); - return new ListGroupsResult(future); + ListGroupsResult listGroupsResult = mock(ListGroupsResult.class); + when(listGroupsResult.all()).thenReturn(KafkaFuture.completedFuture(List.of( + new GroupListing(groupId, Optional.of(GroupType.STREAMS), "streams", Optional.of(GroupState.DEAD)) + ))); + return listGroupsResult; } StreamsGroupCommand.StreamsGroupService getStreamsGroupService(String[] args) { From def165f97e70a44a95942d72d8989090481208e6 Mon Sep 17 00:00:00 2001 From: aliehsaeedii Date: Wed, 11 Jun 2025 19:53:45 +0200 Subject: [PATCH 20/26] complete refactoring --- .../tools/{streams => }/OffsetsUtils.java | 109 ++--- .../consumer/group/ConsumerGroupCommand.java | 375 ++---------------- .../tools/streams/StreamsGroupCommand.java | 17 +- .../streams/StreamsGroupCommandOptions.java | 12 +- .../streams/DescribeStreamsGroupTest.java | 22 +- .../streams/StreamsGroupCommandTest.java | 22 +- 6 files changed, 136 insertions(+), 421 deletions(-) rename tools/src/main/java/org/apache/kafka/tools/{streams => }/OffsetsUtils.java (83%) diff --git a/tools/src/main/java/org/apache/kafka/tools/streams/OffsetsUtils.java b/tools/src/main/java/org/apache/kafka/tools/OffsetsUtils.java similarity index 83% rename from tools/src/main/java/org/apache/kafka/tools/streams/OffsetsUtils.java rename to tools/src/main/java/org/apache/kafka/tools/OffsetsUtils.java index 09fb24578a5e7..4cbb3329aa188 100644 --- a/tools/src/main/java/org/apache/kafka/tools/streams/OffsetsUtils.java +++ b/tools/src/main/java/org/apache/kafka/tools/OffsetsUtils.java @@ -14,9 +14,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.kafka.tools.streams; +package org.apache.kafka.tools; +import org.apache.kafka.clients.admin.AbstractOptions; import org.apache.kafka.clients.admin.Admin; +import org.apache.kafka.clients.admin.DescribeTopicsOptions; +import org.apache.kafka.clients.admin.ListOffsetsOptions; import org.apache.kafka.clients.admin.ListOffsetsResult; import org.apache.kafka.clients.admin.OffsetSpec; import org.apache.kafka.clients.admin.TopicDescription; @@ -29,6 +32,9 @@ import com.fasterxml.jackson.databind.ObjectReader; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import java.io.IOException; import java.text.ParseException; import java.time.Duration; @@ -49,31 +55,21 @@ import joptsimple.OptionParser; -import static org.apache.kafka.tools.streams.StreamsGroupCommandOptions.LOGGER; public class OffsetsUtils { + public static final Logger LOGGER = LoggerFactory.getLogger(OffsetsUtils.class); private static final String TOPIC_PARTITION_SEPARATOR = ":"; private final Admin adminClient; private final OffsetsUtilsOptions opts; private final OptionParser parser; - public OffsetsUtils(Admin adminClient, StreamsGroupCommandOptions options, OptionParser parser) { + + public OffsetsUtils(Admin adminClient, OptionParser parser, OffsetsUtilsOptions opts) { this.adminClient = adminClient; - this.opts = fromStreamsGroupCommandOptions(options); + this.opts = opts; this.parser = parser; } - private OffsetsUtilsOptions fromStreamsGroupCommandOptions(StreamsGroupCommandOptions opts) { - return new OffsetsUtilsOptions( - opts.options.has(opts.groupOpt) ? opts.options.valuesOf(opts.groupOpt) : null, - opts.options.has(opts.resetToOffsetOpt) ? opts.options.valuesOf(opts.resetToOffsetOpt) : null, - opts.options.has(opts.resetFromFileOpt) ? 
opts.options.valuesOf(opts.resetFromFileOpt) : null, - opts.options.has(opts.resetToDatetimeOpt) ? opts.options.valuesOf(opts.resetToDatetimeOpt) : null, - opts.options.has(opts.resetByDurationOpt) ? opts.options.valuesOf(opts.resetByDurationOpt) : null, - opts.options.has(opts.resetShiftByOpt) ? opts.options.valuesOf(opts.resetShiftByOpt) : null - ); - } - - Optional>> resetPlanFromFile() { + public Optional>> resetPlanFromFile() { if (opts.resetFromFileOpt != null && !opts.resetFromFileOpt.isEmpty()) { try { String resetPlanPath = opts.resetFromFileOpt.get(0); @@ -97,7 +93,8 @@ private Map> parseResetPlan(Strin isOldCsvFormat = true; } } catch (IOException e) { - throw new RuntimeException("Invalid CSV format in reset plan file: " + e.getMessage()); + e.printStackTrace(); + // Ignore. } Map> dataMap = new HashMap<>(); @@ -126,7 +123,7 @@ private Map> parseResetPlan(Strin return dataMap; } - Map checkOffsetsRange(Map requestedOffsets) { + private Map checkOffsetsRange(Map requestedOffsets) { Map logStartOffsets = getLogStartOffsets(requestedOffsets.keySet()); Map logEndOffsets = getLogEndOffsets(requestedOffsets.keySet()); @@ -159,13 +156,15 @@ Map checkOffsetsRange(Map requestedO return res; } - Map getLogTimestampOffsets(Collection topicPartitions, long timestamp) { + private Map getLogTimestampOffsets(Collection topicPartitions, long timestamp) { try { Map timestampOffsets = topicPartitions.stream() .collect(Collectors.toMap(Function.identity(), tp -> OffsetSpec.forTimestamp(timestamp))); Map offsets = adminClient.listOffsets( - timestampOffsets).all().get(); + timestampOffsets, + withTimeoutMs(new ListOffsetsOptions()) + ).all().get(); Map successfulOffsetsForTimes = new HashMap<>(); Map unsuccessfulOffsetsForTimes = new HashMap<>(); @@ -192,21 +191,22 @@ Map getLogTimestampOffsets(Collection getLogStartOffsets(Collection topicPartitions) { + private Map getLogStartOffsets(Collection topicPartitions) { return getLogOffsets(topicPartitions, OffsetSpec.earliest()); } - Map getLogEndOffsets(Collection topicPartitions) { + public Map getLogEndOffsets(Collection topicPartitions) { return getLogOffsets(topicPartitions, OffsetSpec.latest()); } - private Map getLogOffsets(Collection topicPartitions, OffsetSpec offsetSpec) { + public Map getLogOffsets(Collection topicPartitions, OffsetSpec offsetSpec) { try { Map startOffsets = topicPartitions.stream() .collect(Collectors.toMap(Function.identity(), tp -> offsetSpec)); Map offsets = adminClient.listOffsets( - startOffsets + startOffsets, + withTimeoutMs(new ListOffsetsOptions()) ).all().get(); return topicPartitions.stream().collect(Collectors.toMap( @@ -220,7 +220,7 @@ private Map getLogOffsets(Collection parseTopicPartitionsToReset(List topicArgs) throws ExecutionException, InterruptedException { + public List parseTopicPartitionsToReset(List topicArgs) throws ExecutionException, InterruptedException { List topicsWithPartitions = new ArrayList<>(); List topics = new ArrayList<>(); @@ -238,7 +238,8 @@ List parseTopicPartitionsToReset(List topicArgs) throws if (!topics.isEmpty()) { Map descriptionMap = adminClient.describeTopics( - topics + topics, + withTimeoutMs(new DescribeTopicsOptions()) ).allTopicNames().get(); descriptionMap.forEach((topic, description) -> @@ -251,7 +252,7 @@ List parseTopicPartitionsToReset(List topicArgs) throws return specifiedPartitions; } - private Stream parseTopicsWithPartitions(String topicArg) { + public Stream parseTopicsWithPartitions(String topicArg) { ToIntFunction partitionNum = partition -> { try { return 
Integer.parseInt(partition); @@ -272,7 +273,7 @@ private Stream parseTopicsWithPartitions(String topicArg) { map(partition -> new TopicPartition(topic, partitionNum.applyAsInt(partition))); } - Map resetToOffset(Collection partitionsToReset) { + public Map resetToOffset(Collection partitionsToReset) { long offset = opts.resetToOffsetOpt != null && !opts.resetToOffsetOpt.isEmpty() ? opts.resetToOffsetOpt.get(0) : 0L; @@ -280,7 +281,7 @@ Map resetToOffset(Collection .entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, e -> new OffsetAndMetadata(e.getValue()))); } - Map resetToEarliest(Collection partitionsToReset) { + public Map resetToEarliest(Collection partitionsToReset) { Map logStartOffsets = getLogStartOffsets(partitionsToReset); return partitionsToReset.stream().collect(Collectors.toMap(Function.identity(), topicPartition -> { LogOffsetResult logOffsetResult = logStartOffsets.get(topicPartition); @@ -293,7 +294,7 @@ Map resetToEarliest(Collection resetToLatest(Collection partitionsToReset) { + public Map resetToLatest(Collection partitionsToReset) { Map logEndOffsets = getLogEndOffsets(partitionsToReset); return partitionsToReset.stream().collect(Collectors.toMap(Function.identity(), topicPartition -> { LogOffsetResult logOffsetResult = logEndOffsets.get(topicPartition); @@ -306,12 +307,12 @@ Map resetToLatest(Collection })); } - Map resetByShiftBy( + public Map resetByShiftBy( Collection partitionsToReset, Map currentCommittedOffsets) { Map requestedOffsets = partitionsToReset.stream().collect(Collectors.toMap(Function.identity(), topicPartition -> { - long shiftBy = opts.resetShiftByOpt.get(0); + long shiftBy = opts.resetShiftByOpt; OffsetAndMetadata currentOffset = currentCommittedOffsets.get(topicPartition); if (currentOffset == null) { @@ -324,18 +325,16 @@ Map resetByShiftBy( .collect(Collectors.toMap(Map.Entry::getKey, e -> new OffsetAndMetadata(e.getValue()))); } - Map resetToDateTime(Collection partitionsToReset) { + public Map resetToDateTime(Collection partitionsToReset) { try { long timestamp = Utils.getDateTime(opts.resetToDatetimeOpt.get(0)); Map logTimestampOffsets = getLogTimestampOffsets(partitionsToReset, timestamp); return partitionsToReset.stream().collect(Collectors.toMap(Function.identity(), topicPartition -> { LogOffsetResult logTimestampOffset = logTimestampOffsets.get(topicPartition); - if (!(logTimestampOffset instanceof LogOffset)) { CommandLineUtils.printUsageAndExit(parser, "Error getting offset by timestamp of topic partition: " + topicPartition); } - return new OffsetAndMetadata(((LogOffset) logTimestampOffset).value); })); } catch (ParseException e) { @@ -343,8 +342,8 @@ Map resetToDateTime(Collection resetByDuration(Collection partitionsToReset) { - String duration = opts.resetByDurationOpt.get(0); + public Map resetByDuration(Collection partitionsToReset) { + String duration = opts.resetByDurationOpt; Duration durationParsed = Duration.parse(duration); Instant now = Instant.now(); durationParsed.negated().addTo(now); @@ -362,7 +361,7 @@ Map resetByDuration(Collection resetFromFile(String groupId) { + public Map resetFromFile(String groupId) { return resetPlanFromFile().map(resetPlan -> { Map resetPlanForGroup = resetPlan.get(groupId); @@ -380,7 +379,7 @@ Map resetFromFile(String groupId) { }).orElseGet(Collections::emptyMap); } - Map resetToCurrent(Collection partitionsToReset, Map currentCommittedOffsets) { + public Map resetToCurrent(Collection partitionsToReset, Map currentCommittedOffsets) { Collection 
partitionsToResetWithCommittedOffset = new ArrayList<>(); Collection partitionsToResetWithoutCommittedOffset = new ArrayList<>(); @@ -416,26 +415,33 @@ Map resetToCurrent(Collection return preparedOffsetsForPartitionsWithCommittedOffset; } + private > T withTimeoutMs(T options) { + int t = (int) opts.timeoutMsOpt; + return options.timeoutMs(t); + } - - public static void printError(String msg, Optional e) { + private static void printError(String msg, Optional e) { System.out.println("\nError: " + msg); e.ifPresent(Throwable::printStackTrace); } - interface LogOffsetResult { } + public interface LogOffsetResult { } - static class LogOffset implements LogOffsetResult { + public static class LogOffset implements LogOffsetResult { final long value; - LogOffset(long value) { + public LogOffset(long value) { this.value = value; } + + public long value() { + return value; + } } - private static class Unknown implements LogOffsetResult { } + public static class Unknown implements LogOffsetResult { } - private static class Ignore implements LogOffsetResult { } + public static class Ignore implements LogOffsetResult { } public static class OffsetsUtilsOptions { @@ -443,16 +449,18 @@ public static class OffsetsUtilsOptions { List resetToOffsetOpt; List resetFromFileOpt; List resetToDatetimeOpt; - List resetByDurationOpt; - List resetShiftByOpt; + String resetByDurationOpt; + Long resetShiftByOpt; + long timeoutMsOpt; public OffsetsUtilsOptions( List groupOpt, List resetToOffsetOpt, List resetFromFileOpt, List resetToDatetimeOpt, - List resetByDurationOpt, - List resetShiftByOpt) { + String resetByDurationOpt, + Long resetShiftByOpt, + long timeoutMsOpt) { this.groupOpt = groupOpt; this.resetToOffsetOpt = resetToOffsetOpt; @@ -460,6 +468,7 @@ public OffsetsUtilsOptions( this.resetToDatetimeOpt = resetToDatetimeOpt; this.resetByDurationOpt = resetByDurationOpt; this.resetShiftByOpt = resetShiftByOpt; + this.timeoutMsOpt = timeoutMsOpt; } } } diff --git a/tools/src/main/java/org/apache/kafka/tools/consumer/group/ConsumerGroupCommand.java b/tools/src/main/java/org/apache/kafka/tools/consumer/group/ConsumerGroupCommand.java index 51ba29974fa35..bd0f2dfb7f03f 100644 --- a/tools/src/main/java/org/apache/kafka/tools/consumer/group/ConsumerGroupCommand.java +++ b/tools/src/main/java/org/apache/kafka/tools/consumer/group/ConsumerGroupCommand.java @@ -32,11 +32,7 @@ import org.apache.kafka.clients.admin.ListConsumerGroupOffsetsSpec; import org.apache.kafka.clients.admin.ListGroupsOptions; import org.apache.kafka.clients.admin.ListGroupsResult; -import org.apache.kafka.clients.admin.ListOffsetsOptions; -import org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo; import org.apache.kafka.clients.admin.MemberDescription; -import org.apache.kafka.clients.admin.OffsetSpec; -import org.apache.kafka.clients.admin.TopicDescription; import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.common.GroupState; import org.apache.kafka.common.GroupType; @@ -46,12 +42,11 @@ import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.errors.GroupIdNotFoundException; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.requests.ListOffsetsResponse; import org.apache.kafka.common.utils.Utils; import org.apache.kafka.server.util.CommandLineUtils; +import org.apache.kafka.tools.OffsetsUtils; import com.fasterxml.jackson.core.JsonProcessingException; -import com.fasterxml.jackson.databind.ObjectReader; import 
com.fasterxml.jackson.databind.ObjectWriter; import com.google.re2j.Pattern; import com.google.re2j.PatternSyntaxException; @@ -60,9 +55,6 @@ import org.slf4j.LoggerFactory; import java.io.IOException; -import java.text.ParseException; -import java.time.Duration; -import java.time.Instant; import java.util.AbstractMap.SimpleImmutableEntry; import java.util.ArrayList; import java.util.Arrays; @@ -83,7 +75,6 @@ import java.util.concurrent.ExecutionException; import java.util.function.BiFunction; import java.util.function.Function; -import java.util.function.ToIntFunction; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -211,6 +202,7 @@ static class ConsumerGroupService implements AutoCloseable { final ConsumerGroupCommandOptions opts; final Map configOverrides; private final Admin adminClient; + private final OffsetsUtils offsetsUtils; ConsumerGroupService(ConsumerGroupCommandOptions opts, Map configOverrides) { this.opts = opts; @@ -220,19 +212,18 @@ static class ConsumerGroupService implements AutoCloseable { } catch (IOException e) { throw new RuntimeException(e); } + this.offsetsUtils = new OffsetsUtils(adminClient, opts.parser, getOffsetsUtilsOptions(opts)); } - Optional>> resetPlanFromFile() { - if (opts.options.has(opts.resetFromFileOpt)) { - try { - String resetPlanPath = opts.options.valueOf(opts.resetFromFileOpt); - String resetPlanCsv = Utils.readFileAsString(resetPlanPath); - Map> resetPlan = parseResetPlan(resetPlanCsv); - return Optional.of(resetPlan); - } catch (IOException e) { - throw new RuntimeException(e); - } - } else return Optional.empty(); + private OffsetsUtils.OffsetsUtilsOptions getOffsetsUtilsOptions(ConsumerGroupCommandOptions opts) { + return + new OffsetsUtils.OffsetsUtilsOptions(opts.options.valuesOf(opts.groupOpt), + opts.options.valuesOf(opts.resetToOffsetOpt), + opts.options.valuesOf(opts.resetFromFileOpt), + opts.options.valuesOf(opts.resetToDatetimeOpt), + opts.options.valueOf(opts.resetByDurationOpt), + opts.options.valueOf(opts.resetShiftByOpt), + opts.options.valueOf(opts.timeoutMsOpt)); } void listGroups() throws ExecutionException, InterruptedException { @@ -628,15 +619,15 @@ private Collection describePartitions( consumerIdOpt, hostOpt, clientIdOpt, logEndOffsetOpt, leaderEpoch); }; - return getLogEndOffsets(topicPartitions).entrySet().stream().map(logEndOffsetResult -> { - if (logEndOffsetResult.getValue() instanceof LogOffset) + return offsetsUtils.getLogEndOffsets(topicPartitions).entrySet().stream().map(logEndOffsetResult -> { + if (logEndOffsetResult.getValue() instanceof OffsetsUtils.LogOffset) return getDescribePartitionResult.apply( logEndOffsetResult.getKey(), - Optional.of(((LogOffset) logEndOffsetResult.getValue()).value) + Optional.of(((OffsetsUtils.LogOffset) logEndOffsetResult.getValue()).value()) ); - else if (logEndOffsetResult.getValue() instanceof Unknown) + else if (logEndOffsetResult.getValue() instanceof OffsetsUtils.Unknown) return getDescribePartitionResult.apply(logEndOffsetResult.getKey(), Optional.empty()); - else if (logEndOffsetResult.getValue() instanceof Ignore) + else if (logEndOffsetResult.getValue() instanceof OffsetsUtils.Ignore) return null; throw new IllegalStateException("Unknown LogOffset subclass: " + logEndOffsetResult.getValue()); @@ -721,7 +712,7 @@ Entry> deleteOffsets(String groupId, List topicWithoutPartitions.add(topic); } - List knownPartitions = topicWithPartitions.stream().flatMap(this::parseTopicsWithPartitions).collect(Collectors.toList()); + List knownPartitions = 
topicWithPartitions.stream().flatMap(offsetsUtils::parseTopicsWithPartitions).toList(); // Get the partitions of topics that the user did not explicitly specify the partitions DescribeTopicsResult describeTopicsResult = adminClient.describeTopics( @@ -939,70 +930,6 @@ TreeMap collectGroupsState(Collection groupIds return res; } - private Map getLogEndOffsets(Collection topicPartitions) { - return getLogOffsets(topicPartitions, OffsetSpec.latest()); - } - - private Map getLogStartOffsets(Collection topicPartitions) { - return getLogOffsets(topicPartitions, OffsetSpec.earliest()); - } - - private Map getLogOffsets(Collection topicPartitions, OffsetSpec offsetSpec) { - try { - Map startOffsets = topicPartitions.stream() - .collect(Collectors.toMap(Function.identity(), tp -> offsetSpec)); - - Map offsets = adminClient.listOffsets( - startOffsets, - withTimeoutMs(new ListOffsetsOptions()) - ).all().get(); - - return topicPartitions.stream().collect(Collectors.toMap( - Function.identity(), - tp -> offsets.containsKey(tp) - ? new LogOffset(offsets.get(tp).offset()) - : new Unknown() - )); - } catch (InterruptedException | ExecutionException e) { - throw new RuntimeException(e); - } - } - - private Map getLogTimestampOffsets(Collection topicPartitions, long timestamp) { - try { - Map timestampOffsets = topicPartitions.stream() - .collect(Collectors.toMap(Function.identity(), tp -> OffsetSpec.forTimestamp(timestamp))); - - Map offsets = adminClient.listOffsets( - timestampOffsets, - withTimeoutMs(new ListOffsetsOptions()) - ).all().get(); - - Map successfulOffsetsForTimes = new HashMap<>(); - Map unsuccessfulOffsetsForTimes = new HashMap<>(); - - offsets.forEach((tp, offsetsResultInfo) -> { - if (offsetsResultInfo.offset() != ListOffsetsResponse.UNKNOWN_OFFSET) - successfulOffsetsForTimes.put(tp, offsetsResultInfo); - else - unsuccessfulOffsetsForTimes.put(tp, offsetsResultInfo); - }); - - Map successfulLogTimestampOffsets = successfulOffsetsForTimes.entrySet().stream() - .collect(Collectors.toMap(Entry::getKey, e -> new LogOffset(e.getValue().offset()))); - - unsuccessfulOffsetsForTimes.forEach((tp, offsetResultInfo) -> - System.out.println("\nWarn: Partition " + tp.partition() + " from topic " + tp.topic() + - " is empty. Falling back to latest known offset.")); - - successfulLogTimestampOffsets.putAll(getLogEndOffsets(unsuccessfulOffsetsForTimes.keySet())); - - return successfulLogTimestampOffsets; - } catch (InterruptedException | ExecutionException e) { - throw new RuntimeException(e); - } - } - @Override public void close() { adminClient.close(); @@ -1021,64 +948,12 @@ private > T withTimeoutMs(T options) { return options.timeoutMs(t); } - private Stream parseTopicsWithPartitions(String topicArg) { - ToIntFunction partitionNum = partition -> { - try { - return Integer.parseInt(partition); - } catch (NumberFormatException e) { - throw new IllegalArgumentException("Invalid partition '" + partition + "' specified in topic arg '" + topicArg + "''"); - } - }; - - String[] arr = topicArg.split(":"); - - if (arr.length != 2) - throw new IllegalArgumentException("Invalid topic arg '" + topicArg + "', expected topic name and partitions"); - - String topic = arr[0]; - String partitions = arr[1]; - - return Arrays.stream(partitions.split(",")). 
- map(partition -> new TopicPartition(topic, partitionNum.applyAsInt(partition))); - } - - private List parseTopicPartitionsToReset(List topicArgs) throws ExecutionException, InterruptedException { - List topicsWithPartitions = new ArrayList<>(); - List topics = new ArrayList<>(); - - topicArgs.forEach(topicArg -> { - if (topicArg.contains(":")) - topicsWithPartitions.add(topicArg); - else - topics.add(topicArg); - }); - - List specifiedPartitions = topicsWithPartitions.stream().flatMap(this::parseTopicsWithPartitions).collect(Collectors.toList()); - - List unspecifiedPartitions = new ArrayList<>(); - - if (!topics.isEmpty()) { - Map descriptionMap = adminClient.describeTopics( - topics, - withTimeoutMs(new DescribeTopicsOptions()) - ).allTopicNames().get(); - - descriptionMap.forEach((topic, description) -> - description.partitions().forEach(tpInfo -> unspecifiedPartitions.add(new TopicPartition(topic, tpInfo.partition()))) - ); - } - - specifiedPartitions.addAll(unspecifiedPartitions); - - return specifiedPartitions; - } - private Collection getPartitionsToReset(String groupId) throws ExecutionException, InterruptedException { if (opts.options.has(opts.allTopicsOpt)) { return getCommittedOffsets(groupId).keySet(); } else if (opts.options.has(opts.topicOpt)) { List topics = opts.options.valuesOf(opts.topicOpt); - return parseTopicPartitionsToReset(topics); + return offsetsUtils.parseTopicPartitionsToReset(topics); } else { if (!opts.options.has(opts.resetFromFileOpt)) CommandLineUtils.printUsageAndExit(opts.parser, "One of the reset scopes should be defined: --all-topics, --topic."); @@ -1098,211 +973,31 @@ private Map getCommittedOffsets(String groupI } } - private Map> parseResetPlan(String resetPlanCsv) { - ObjectReader csvReader = CsvUtils.readerFor(CsvUtils.CsvRecordNoGroup.class); - String[] lines = resetPlanCsv.split("\n"); - boolean isSingleGroupQuery = opts.options.valuesOf(opts.groupOpt).size() == 1; - boolean isOldCsvFormat = false; - try { - if (lines.length > 0) { - csvReader.readValue(lines[0], CsvUtils.CsvRecordNoGroup.class); - isOldCsvFormat = true; - } - } catch (IOException e) { - e.printStackTrace(); - // Ignore. 
- } - - Map> dataMap = new HashMap<>(); - - try { - // Single group CSV format: "topic,partition,offset" - if (isSingleGroupQuery && isOldCsvFormat) { - String group = opts.options.valueOf(opts.groupOpt); - for (String line : lines) { - CsvUtils.CsvRecordNoGroup rec = csvReader.readValue(line, CsvUtils.CsvRecordNoGroup.class); - dataMap.computeIfAbsent(group, k -> new HashMap<>()) - .put(new TopicPartition(rec.getTopic(), rec.getPartition()), new OffsetAndMetadata(rec.getOffset())); - } - } else { - csvReader = CsvUtils.readerFor(CsvUtils.CsvRecordWithGroup.class); - for (String line : lines) { - CsvUtils.CsvRecordWithGroup rec = csvReader.readValue(line, CsvUtils.CsvRecordWithGroup.class); - dataMap.computeIfAbsent(rec.getGroup(), k -> new HashMap<>()) - .put(new TopicPartition(rec.getTopic(), rec.getPartition()), new OffsetAndMetadata(rec.getOffset())); - } - } - } catch (IOException e) { - throw new RuntimeException(e); - } - - return dataMap; - } - - @SuppressWarnings("CyclomaticComplexity") private Map prepareOffsetsToReset(String groupId, Collection partitionsToReset) { if (opts.options.has(opts.resetToOffsetOpt)) { - long offset = opts.options.valueOf(opts.resetToOffsetOpt); - return checkOffsetsRange(partitionsToReset.stream().collect(Collectors.toMap(Function.identity(), tp -> offset))) - .entrySet().stream().collect(Collectors.toMap(Entry::getKey, e -> new OffsetAndMetadata(e.getValue()))); + return offsetsUtils.resetToOffset(partitionsToReset); } else if (opts.options.has(opts.resetToEarliestOpt)) { - Map logStartOffsets = getLogStartOffsets(partitionsToReset); - return partitionsToReset.stream().collect(Collectors.toMap(Function.identity(), topicPartition -> { - LogOffsetResult logOffsetResult = logStartOffsets.get(topicPartition); - - if (!(logOffsetResult instanceof LogOffset)) { - CommandLineUtils.printUsageAndExit(opts.parser, "Error getting starting offset of topic partition: " + topicPartition); - } - - return new OffsetAndMetadata(((LogOffset) logOffsetResult).value); - })); + return offsetsUtils.resetToEarliest(partitionsToReset); } else if (opts.options.has(opts.resetToLatestOpt)) { - Map logEndOffsets = getLogEndOffsets(partitionsToReset); - return partitionsToReset.stream().collect(Collectors.toMap(Function.identity(), topicPartition -> { - LogOffsetResult logOffsetResult = logEndOffsets.get(topicPartition); - - if (!(logOffsetResult instanceof LogOffset)) { - CommandLineUtils.printUsageAndExit(opts.parser, "Error getting ending offset of topic partition: " + topicPartition); - } - - return new OffsetAndMetadata(((LogOffset) logOffsetResult).value); - })); + return offsetsUtils.resetToLatest(partitionsToReset); } else if (opts.options.has(opts.resetShiftByOpt)) { Map currentCommittedOffsets = getCommittedOffsets(groupId); - Map requestedOffsets = partitionsToReset.stream().collect(Collectors.toMap(Function.identity(), topicPartition -> { - long shiftBy = opts.options.valueOf(opts.resetShiftByOpt); - OffsetAndMetadata currentOffset = currentCommittedOffsets.get(topicPartition); - - if (currentOffset == null) { - throw new IllegalArgumentException("Cannot shift offset for partition " + topicPartition + " since there is no current committed offset"); - } - - return currentOffset.offset() + shiftBy; - })); - return checkOffsetsRange(requestedOffsets).entrySet().stream() - .collect(Collectors.toMap(Entry::getKey, e -> new OffsetAndMetadata(e.getValue()))); + return offsetsUtils.resetByShiftBy(partitionsToReset, currentCommittedOffsets); } else if 
(opts.options.has(opts.resetToDatetimeOpt)) { - try { - long timestamp = Utils.getDateTime(opts.options.valueOf(opts.resetToDatetimeOpt)); - Map logTimestampOffsets = getLogTimestampOffsets(partitionsToReset, timestamp); - return partitionsToReset.stream().collect(Collectors.toMap(Function.identity(), topicPartition -> { - LogOffsetResult logTimestampOffset = logTimestampOffsets.get(topicPartition); - - if (!(logTimestampOffset instanceof LogOffset)) { - CommandLineUtils.printUsageAndExit(opts.parser, "Error getting offset by timestamp of topic partition: " + topicPartition); - } - - return new OffsetAndMetadata(((LogOffset) logTimestampOffset).value); - })); - } catch (ParseException e) { - throw new RuntimeException(e); - } + return offsetsUtils.resetToDateTime(partitionsToReset); } else if (opts.options.has(opts.resetByDurationOpt)) { - String duration = opts.options.valueOf(opts.resetByDurationOpt); - Duration durationParsed = Duration.parse(duration); - Instant now = Instant.now(); - durationParsed.negated().addTo(now); - long timestamp = now.minus(durationParsed).toEpochMilli(); - Map logTimestampOffsets = getLogTimestampOffsets(partitionsToReset, timestamp); - return partitionsToReset.stream().collect(Collectors.toMap(Function.identity(), topicPartition -> { - LogOffsetResult logTimestampOffset = logTimestampOffsets.get(topicPartition); - - if (!(logTimestampOffset instanceof LogOffset)) { - CommandLineUtils.printUsageAndExit(opts.parser, "Error getting offset by timestamp of topic partition: " + topicPartition); - } - - return new OffsetAndMetadata(((LogOffset) logTimestampOffset).value); - })); - } else if (resetPlanFromFile().isPresent()) { - return resetPlanFromFile().map(resetPlan -> { - Map resetPlanForGroup = resetPlan.get(groupId); - - if (resetPlanForGroup == null) { - printError("No reset plan for group " + groupId + " found", Optional.empty()); - return Collections.emptyMap(); - } - - Map requestedOffsets = resetPlanForGroup.keySet().stream().collect(Collectors.toMap( - Function.identity(), - topicPartition -> resetPlanForGroup.get(topicPartition).offset())); - - return checkOffsetsRange(requestedOffsets).entrySet().stream() - .collect(Collectors.toMap(Entry::getKey, e -> new OffsetAndMetadata(e.getValue()))); - }).orElseGet(Collections::emptyMap); + return offsetsUtils.resetByDuration(partitionsToReset); + } else if (offsetsUtils.resetPlanFromFile().isPresent()) { + return offsetsUtils.resetFromFile(groupId); } else if (opts.options.has(opts.resetToCurrentOpt)) { Map currentCommittedOffsets = getCommittedOffsets(groupId); - Collection partitionsToResetWithCommittedOffset = new ArrayList<>(); - Collection partitionsToResetWithoutCommittedOffset = new ArrayList<>(); - - for (TopicPartition topicPartition : partitionsToReset) { - if (currentCommittedOffsets.containsKey(topicPartition)) - partitionsToResetWithCommittedOffset.add(topicPartition); - else - partitionsToResetWithoutCommittedOffset.add(topicPartition); - } - - Map preparedOffsetsForPartitionsWithCommittedOffset = partitionsToResetWithCommittedOffset.stream() - .collect(Collectors.toMap(Function.identity(), topicPartition -> { - OffsetAndMetadata committedOffset = currentCommittedOffsets.get(topicPartition); - - if (committedOffset == null) { - throw new IllegalStateException("Expected a valid current offset for topic partition: " + topicPartition); - } - - return new OffsetAndMetadata(committedOffset.offset()); - })); - - Map preparedOffsetsForPartitionsWithoutCommittedOffset = 
getLogEndOffsets(partitionsToResetWithoutCommittedOffset) - .entrySet().stream().collect(Collectors.toMap(Entry::getKey, e -> { - if (!(e.getValue() instanceof LogOffset)) { - CommandLineUtils.printUsageAndExit(opts.parser, "Error getting ending offset of topic partition: " + e.getKey()); - } - - return new OffsetAndMetadata(((LogOffset) e.getValue()).value); - })); - - preparedOffsetsForPartitionsWithCommittedOffset.putAll(preparedOffsetsForPartitionsWithoutCommittedOffset); - - return preparedOffsetsForPartitionsWithCommittedOffset; + return offsetsUtils.resetToCurrent(partitionsToReset, currentCommittedOffsets); } CommandLineUtils.printUsageAndExit(opts.parser, String.format("Option '%s' requires one of the following scenarios: %s", opts.resetOffsetsOpt, opts.allResetOffsetScenarioOpts)); return null; } - private Map checkOffsetsRange(Map requestedOffsets) { - Map logStartOffsets = getLogStartOffsets(requestedOffsets.keySet()); - Map logEndOffsets = getLogEndOffsets(requestedOffsets.keySet()); - - Map res = new HashMap<>(); - - requestedOffsets.forEach((topicPartition, offset) -> { - LogOffsetResult logEndOffset = logEndOffsets.get(topicPartition); - - if (logEndOffset != null) { - if (logEndOffset instanceof LogOffset && offset > ((LogOffset) logEndOffset).value) { - long endOffset = ((LogOffset) logEndOffset).value; - LOGGER.warn("New offset (" + offset + ") is higher than latest offset for topic partition " + topicPartition + ". Value will be set to " + endOffset); - res.put(topicPartition, endOffset); - } else { - LogOffsetResult logStartOffset = logStartOffsets.get(topicPartition); - - if (logStartOffset instanceof LogOffset && offset < ((LogOffset) logStartOffset).value) { - long startOffset = ((LogOffset) logStartOffset).value; - LOGGER.warn("New offset (" + offset + ") is lower than earliest offset for topic partition " + topicPartition + ". 
Value will be set to " + startOffset); - res.put(topicPartition, startOffset); - } else - res.put(topicPartition, offset); - } - } else { - // the control should not reach here - throw new IllegalStateException("Unexpected non-existing offset value for topic partition " + topicPartition); - } - }); - - return res; - } - String exportOffsetsToCsv(Map> assignments) { boolean isSingleGroupQuery = opts.options.valuesOf(opts.groupOpt).size() == 1; ObjectWriter csvWriter = isSingleGroupQuery @@ -1368,18 +1063,4 @@ Map deleteGroups() { return failed; } } - - interface LogOffsetResult { } - - private static class LogOffset implements LogOffsetResult { - final long value; - - LogOffset(long value) { - this.value = value; - } - } - - private static class Unknown implements LogOffsetResult { } - - private static class Ignore implements LogOffsetResult { } } diff --git a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java index 34f90dfb8cfec..d123279d7f162 100644 --- a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java +++ b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java @@ -44,6 +44,7 @@ import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.utils.Utils; import org.apache.kafka.server.util.CommandLineUtils; +import org.apache.kafka.tools.OffsetsUtils; import org.apache.kafka.tools.consumer.group.CsvUtils; import com.fasterxml.jackson.core.JsonProcessingException; @@ -152,6 +153,7 @@ static class StreamsGroupService implements AutoCloseable { final StreamsGroupCommandOptions opts; private final Admin adminClient; private final OffsetsUtils offsetsUtils; + public StreamsGroupService(StreamsGroupCommandOptions opts, Map configOverrides) { this.opts = opts; try { @@ -159,13 +161,24 @@ public StreamsGroupService(StreamsGroupCommandOptions opts, Map } catch (IOException e) { throw new RuntimeException(e); } - this.offsetsUtils = new OffsetsUtils(adminClient, opts, opts.parser); + this.offsetsUtils = new OffsetsUtils(adminClient, opts.parser, getOffsetsUtilsOptions(opts)); } public StreamsGroupService(StreamsGroupCommandOptions opts, Admin adminClient) { this.opts = opts; this.adminClient = adminClient; - this.offsetsUtils = new OffsetsUtils(adminClient, opts, opts.parser); + this.offsetsUtils = new OffsetsUtils(adminClient, opts.parser, getOffsetsUtilsOptions(opts)); + } + + private OffsetsUtils.OffsetsUtilsOptions getOffsetsUtilsOptions(StreamsGroupCommandOptions opts) { + return + new OffsetsUtils.OffsetsUtilsOptions(opts.options.valuesOf(opts.groupOpt), + opts.options.valuesOf(opts.resetToOffsetOpt), + opts.options.valuesOf(opts.resetFromFileOpt), + opts.options.valuesOf(opts.resetToDatetimeOpt), + opts.options.valueOf(opts.resetByDurationOpt), + opts.options.valueOf(opts.resetShiftByOpt), + opts.options.valueOf(opts.timeoutMsOpt)); } public void listGroups() throws ExecutionException, InterruptedException { diff --git a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java index ff0da36282f7f..f756a71e823ea 100644 --- a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java +++ b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java @@ -39,9 +39,9 @@ public class StreamsGroupCommandOptions extends CommandDefaultOptions { public static 
    final String BOOTSTRAP_SERVER_DOC = "REQUIRED: The server(s) to connect to.";
     public static final String GROUP_DOC = "The streams group we wish to act on.";
     private static final String ALL_GROUPS_DOC = "Apply to all streams groups.";
-    private static final String INPUT_TOPIC_DOC = "The input topic whose streams group information should be deleted or topic that should be included in the reset offset process. " +
+    private static final String INPUT_TOPIC_DOC = "The input topic whose committed offset should be deleted or reset. " +
         "In `reset-offsets` case, partitions can be specified using this format: `topic1:0,1,2`, where 0,1,2 are the partitions to be included in the process. " +
-        "Reset-offsets also supports multiple topic inputs.";
+        "Multiple input topics can be specified. Supported operations: delete-offsets, reset-offsets.";
     private static final String ALL_INPUT_TOPICS_DOC = "Consider all topics assigned to a group in the `reset-offsets` process.";
     public static final String LIST_DOC = "List all streams groups.";
     public static final String DESCRIBE_DOC = "Describe streams group and list offset lag related to given group.";
@@ -55,11 +55,12 @@ public class StreamsGroupCommandOptions extends CommandDefaultOptions {
     public static final String MEMBERS_DOC = "Describe members of the group. This option may be used with the '--describe' option only.";
     public static final String OFFSETS_DOC = "Describe the group and list all topic partitions in the group along with their offset information. " +
         "This is the default sub-action and may be used with the '--describe' option only.";
-    private static final String RESET_OFFSETS_DOC = "Reset offsets of streams group. The instances should be inactive" + NL +
+    private static final String RESET_OFFSETS_DOC = "Reset offsets of streams group. The instances should be inactive." + NL +
         "Has 2 execution options: --dry-run (the default) to plan which offsets to reset, and --execute to update the offsets." + NL +
+        "If you use --execute, all internal topics linked to the group will also be deleted." + NL +
         "You must choose one of the following reset specifications: --to-datetime, --by-duration, --to-earliest, " +
         "--to-latest, --shift-by, --from-file, --to-current, --to-offset." + NL +
-        "To define the scope use --all-topics or --topic. One scope must be specified unless you use '--from-file'.";
+        "To define the scope use --all-input-topics or --input-topic. One scope must be specified unless you use '--from-file'.";
     private static final String DRY_RUN_DOC = "Only show results without executing changes on streams group. Supported operations: reset-offsets.";
     private static final String EXECUTE_DOC = "Execute operation. Supported operations: reset-offsets.";
     private static final String EXPORT_DOC = "Export operation execution to a CSV file. Supported operations: reset-offsets.";
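For reference, the doc strings above describe invocations of the following shape. This is a sketch that assumes the command is exposed through a kafka-streams-groups.sh wrapper like the other group tools; any entry point that runs StreamsGroupCommand takes the same flags, and the group name is illustrative:

    $ bin/kafka-streams-groups.sh --bootstrap-server localhost:9092 \
        --group my-streams-app --reset-offsets \
        --input-topic topic1:0,1 --to-earliest --dry-run

Replacing --dry-run with --execute applies the planned offsets, and adding --export prints the plan in CSV form instead of the usual table output.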
Supported operations: reset-offsets."; @@ -206,6 +207,9 @@ public void checkArgs() { if (!options.has(groupOpt) && !options.has(allGroupsOpt)) CommandLineUtils.printUsageAndExit(parser, "Option " + deleteOpt + " takes one of these options: " + allGroupSelectionScopeOpts.stream().map(Object::toString).collect(Collectors.joining(", "))); + if (options.has(inputTopicOpt) || options.has(allInputTopicsOpt)) + CommandLineUtils.printUsageAndExit(parser, "Kafka Streams does not support topic-specific offset " + + "deletion from a streams group."); } if (options.has(resetOffsetsOpt)) { diff --git a/tools/src/test/java/org/apache/kafka/tools/streams/DescribeStreamsGroupTest.java b/tools/src/test/java/org/apache/kafka/tools/streams/DescribeStreamsGroupTest.java index 6af3232dd4c13..0f3515b552b88 100644 --- a/tools/src/test/java/org/apache/kafka/tools/streams/DescribeStreamsGroupTest.java +++ b/tools/src/test/java/org/apache/kafka/tools/streams/DescribeStreamsGroupTest.java @@ -100,10 +100,10 @@ public void testDescribeStreamsGroup() throws Exception { List.of(APP_ID, "streams-group-command-test-KSTREAM-AGGREGATE-STATE-STORE-0000000003-repartition", "1", "0")); validateDescribeOutput( - Arrays.asList("--bootstrap-server", cluster.bootstrapServers(), "--describe"), expectedHeader, expectedRows, List.of()); + Arrays.asList("--bootstrap-server", cluster.bootstrapServers(), "--describe", "--group", APP_ID), expectedHeader, expectedRows, List.of()); // --describe --offsets has the same output as --describe validateDescribeOutput( - Arrays.asList("--bootstrap-server", cluster.bootstrapServers(), "--describe", "--offsets"), expectedHeader, expectedRows, List.of()); + Arrays.asList("--bootstrap-server", cluster.bootstrapServers(), "--describe", "--offsets", "--group", APP_ID), expectedHeader, expectedRows, List.of()); } @Test @@ -116,12 +116,12 @@ public void testDescribeStreamsGroupWithVerboseOption() throws Exception { List.of(APP_ID, "streams-group-command-test-KSTREAM-AGGREGATE-STATE-STORE-0000000003-repartition", "1", "-", "-", "0", "0")); validateDescribeOutput( - Arrays.asList("--bootstrap-server", cluster.bootstrapServers(), "--describe", "--verbose"), expectedHeader, expectedRows, List.of()); + Arrays.asList("--bootstrap-server", cluster.bootstrapServers(), "--describe", "--verbose", "--group", APP_ID), expectedHeader, expectedRows, List.of()); // --describe --offsets has the same output as --describe validateDescribeOutput( - Arrays.asList("--bootstrap-server", cluster.bootstrapServers(), "--describe", "--offsets", "--verbose"), expectedHeader, expectedRows, List.of()); + Arrays.asList("--bootstrap-server", cluster.bootstrapServers(), "--describe", "--offsets", "--verbose", "--group", APP_ID), expectedHeader, expectedRows, List.of()); validateDescribeOutput( - Arrays.asList("--bootstrap-server", cluster.bootstrapServers(), "--describe", "--verbose", "--offsets"), expectedHeader, expectedRows, List.of()); + Arrays.asList("--bootstrap-server", cluster.bootstrapServers(), "--describe", "--verbose", "--offsets", "--group", APP_ID), expectedHeader, expectedRows, List.of()); } @Test @@ -131,7 +131,7 @@ public void testDescribeStreamsGroupWithStateOption() throws Exception { // The coordinator is not deterministic, so we don't care about it. 
final List dontCares = List.of(1, 2); validateDescribeOutput( - Arrays.asList("--bootstrap-server", cluster.bootstrapServers(), "--describe", "--state"), expectedHeader, expectedRows, dontCares); + Arrays.asList("--bootstrap-server", cluster.bootstrapServers(), "--describe", "--state", "--group", APP_ID), expectedHeader, expectedRows, dontCares); } @Test @@ -142,9 +142,9 @@ public void testDescribeStreamsGroupWithStateAndVerboseOptions() throws Exceptio final List dontCares = List.of(1, 2); validateDescribeOutput( - Arrays.asList("--bootstrap-server", cluster.bootstrapServers(), "--describe", "--state", "--verbose"), expectedHeader, expectedRows, dontCares); + Arrays.asList("--bootstrap-server", cluster.bootstrapServers(), "--describe", "--state", "--verbose", "--group", APP_ID), expectedHeader, expectedRows, dontCares); validateDescribeOutput( - Arrays.asList("--bootstrap-server", cluster.bootstrapServers(), "--describe", "--verbose", "--state"), expectedHeader, expectedRows, dontCares); + Arrays.asList("--bootstrap-server", cluster.bootstrapServers(), "--describe", "--verbose", "--state", "--group", APP_ID), expectedHeader, expectedRows, dontCares); } @Test @@ -157,7 +157,7 @@ public void testDescribeStreamsGroupWithMembersOption() throws Exception { final List dontCares = List.of(1, 2, 3); validateDescribeOutput( - Arrays.asList("--bootstrap-server", cluster.bootstrapServers(), "--describe", "--members"), expectedHeader, expectedRows, dontCares); + Arrays.asList("--bootstrap-server", cluster.bootstrapServers(), "--describe", "--members", "--group", APP_ID), expectedHeader, expectedRows, dontCares); } @Test @@ -170,9 +170,9 @@ public void testDescribeStreamsGroupWithMembersAndVerboseOptions() throws Except final List dontCares = List.of(3, 6, 7); validateDescribeOutput( - Arrays.asList("--bootstrap-server", cluster.bootstrapServers(), "--describe", "--members", "--verbose"), expectedHeader, expectedRows, dontCares); + Arrays.asList("--bootstrap-server", cluster.bootstrapServers(), "--describe", "--members", "--verbose", "--group", APP_ID), expectedHeader, expectedRows, dontCares); validateDescribeOutput( - Arrays.asList("--bootstrap-server", cluster.bootstrapServers(), "--describe", "--verbose", "--members"), expectedHeader, expectedRows, dontCares); + Arrays.asList("--bootstrap-server", cluster.bootstrapServers(), "--describe", "--verbose", "--members", "--group", APP_ID), expectedHeader, expectedRows, dontCares); } private static Topology topology() { diff --git a/tools/src/test/java/org/apache/kafka/tools/streams/StreamsGroupCommandTest.java b/tools/src/test/java/org/apache/kafka/tools/streams/StreamsGroupCommandTest.java index 41aaafddb1632..633701441e880 100644 --- a/tools/src/test/java/org/apache/kafka/tools/streams/StreamsGroupCommandTest.java +++ b/tools/src/test/java/org/apache/kafka/tools/streams/StreamsGroupCommandTest.java @@ -22,6 +22,7 @@ import org.apache.kafka.clients.admin.DeleteStreamsGroupsResult; import org.apache.kafka.clients.admin.DeleteTopicsResult; import org.apache.kafka.clients.admin.DescribeStreamsGroupsResult; +import org.apache.kafka.clients.admin.DescribeTopicsOptions; import org.apache.kafka.clients.admin.DescribeTopicsResult; import org.apache.kafka.clients.admin.GroupListing; import org.apache.kafka.clients.admin.KafkaAdminClient; @@ -172,10 +173,13 @@ public void testDescribeStreamsGroups() throws Exception { new Node(0, "bar", 0), null); resultMap.put(firstGroup, exp); - when(result.all()).thenReturn(KafkaFuture.completedFuture(resultMap)); 
        when(ADMIN_CLIENT.describeStreamsGroups(ArgumentMatchers.anyCollection())).thenReturn(result);
-        StreamsGroupCommand.StreamsGroupService service = new StreamsGroupCommand.StreamsGroupService(null, ADMIN_CLIENT);
+
+        StreamsGroupCommandOptions streamsGroupCommandOptions = new StreamsGroupCommandOptions(
+            new String[]{"--bootstrap-server", BOOTSTRAP_SERVERS, "--group", firstGroup, "--describe"});
+        StreamsGroupCommand.StreamsGroupService service = new StreamsGroupCommand.StreamsGroupService(streamsGroupCommandOptions, ADMIN_CLIENT);
+
         assertEquals(exp, service.getDescribeGroup(firstGroup));

         service.close();
@@ -229,8 +233,12 @@
             GroupState.STABLE,
             new Node(0, "host", 0),
             null);
-        StreamsGroupCommand.StreamsGroupService service = new StreamsGroupCommand.StreamsGroupService(null, ADMIN_CLIENT);
+        StreamsGroupCommandOptions streamsGroupCommandOptions = new StreamsGroupCommandOptions(
+            new String[]{"--bootstrap-server", BOOTSTRAP_SERVERS, "--group", groupId, "--describe"});
+
+        StreamsGroupCommand.StreamsGroupService service = new StreamsGroupCommand.StreamsGroupService(streamsGroupCommandOptions, ADMIN_CLIENT);
         Map<TopicPartition, StreamsGroupCommand.OffsetsInfo> lags = service.getOffsets(x);
+
         assertEquals(1, lags.size());
         assertEquals(new StreamsGroupCommand.OffsetsInfo(Optional.of(12L), Optional.of(0), 30L, 18L),
             lags.get(new TopicPartition("topic1", 0)));
@@ -294,9 +302,9 @@ public void testAdminRequestsForResetOffsets() {
         when(adminClient.describeStreamsGroups(List.of(groupId)))
             .thenReturn(describeStreamsResult(groupId, GroupState.DEAD));
-        when(adminClient.describeTopics(topics))
+        when(adminClient.describeTopics(eq(topics), any(DescribeTopicsOptions.class)))
             .thenReturn(describeTopicsResult(topics, 1));
-        when(adminClient.listOffsets(any()))
+        when(adminClient.listOffsets(any(), any()))
             .thenReturn(listOffsetsResult());
         ListGroupsResult listGroupsResult = listGroupResult(groupId);
         when(adminClient.listGroups(any(ListGroupsOptions.class))).thenReturn(listGroupsResult);
@@ -314,8 +322,8 @@
             resetResult.get(groupId).keySet());

         verify(adminClient, times(1)).describeStreamsGroups(List.of(groupId));
-        verify(adminClient, times(1)).describeTopics(topics);
-        verify(adminClient, times(1)).listOffsets(any());
+        verify(adminClient, times(1)).describeTopics(eq(topics), any(DescribeTopicsOptions.class));
+        verify(adminClient, times(1)).listOffsets(any(), any());
         verify(adminClient, times(1)).listStreamsGroupOffsets(any());

         service.close();

From cac273298a16167f2ba7cec72dbe165edc15399f Mon Sep 17 00:00:00 2001
From: aliehsaeedii
Date: Wed, 11 Jun 2025 22:40:33 +0200
Subject: [PATCH 21/26] fix checkstyle bug

---
 .../streams/StreamsGroupCommandOptions.java   |   6 +-
 .../streams/StreamsGroupCommandTest.java      | 245 +++++++++++++-----
 2 files changed, 183 insertions(+), 68 deletions(-)

diff --git a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java
index 4df2d0f5ce72d..d0c3849fb942b 100644
--- a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java
+++ b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java
@@ -228,10 +228,14 @@ public void checkArgs() {
                 CommandLineUtils.printUsageAndExit(parser, "Only Option " + resetOffsetsOpt + " accepts " + executeOpt + " or " + dryRunOpt);

         CommandLineUtils.checkInvalidArgs(parser, options, listOpt, membersOpt, offsetsOpt);
-
CommandLineUtils.checkInvalidArgs(parser, options, groupOpt, minus(allStreamsGroupLevelOpts, describeOpt, deleteOpt)); + CommandLineUtils.checkInvalidArgs(parser, options, groupOpt, minus(allGroupSelectionScopeOpts, groupOpt)); + CommandLineUtils.checkInvalidArgs(parser, options, groupOpt, minus(allStreamsGroupLevelOpts, describeOpt, deleteOpt, resetOffsetsOpt)); + CommandLineUtils.checkInvalidArgs(parser, options, inputTopicOpt, minus(allStreamsGroupLevelOpts, resetOffsetsOpt)); } private void checkDescribeArgs() { + CommandLineUtils.maybePrintHelpOrVersion(this, "This tool helps to list all streams groups, describe a streams group, delete streams group info, or reset streams group offsets."); + if (!options.has(groupOpt) && !options.has(allGroupsOpt)) CommandLineUtils.printUsageAndExit(parser, "Option " + describeOpt + " takes one of these options: " + allGroupSelectionScopeOpts.stream().map(Object::toString).collect(Collectors.joining(", "))); diff --git a/tools/src/test/java/org/apache/kafka/tools/streams/StreamsGroupCommandTest.java b/tools/src/test/java/org/apache/kafka/tools/streams/StreamsGroupCommandTest.java index 7a2b28d8e15c9..5160456781ec4 100644 --- a/tools/src/test/java/org/apache/kafka/tools/streams/StreamsGroupCommandTest.java +++ b/tools/src/test/java/org/apache/kafka/tools/streams/StreamsGroupCommandTest.java @@ -17,27 +17,32 @@ package org.apache.kafka.tools.streams; import org.apache.kafka.clients.admin.Admin; +import org.apache.kafka.clients.admin.AdminClientTestUtils; import org.apache.kafka.clients.admin.DeleteStreamsGroupsOptions; import org.apache.kafka.clients.admin.DeleteStreamsGroupsResult; import org.apache.kafka.clients.admin.DeleteTopicsResult; import org.apache.kafka.clients.admin.DescribeStreamsGroupsResult; +import org.apache.kafka.clients.admin.DescribeTopicsOptions; +import org.apache.kafka.clients.admin.DescribeTopicsResult; import org.apache.kafka.clients.admin.GroupListing; import org.apache.kafka.clients.admin.KafkaAdminClient; -import org.apache.kafka.clients.admin.ListConsumerGroupOffsetsResult; import org.apache.kafka.clients.admin.ListGroupsOptions; import org.apache.kafka.clients.admin.ListGroupsResult; import org.apache.kafka.clients.admin.ListOffsetsResult; -import org.apache.kafka.clients.admin.MockAdminClient; +import org.apache.kafka.clients.admin.ListStreamsGroupOffsetsResult; import org.apache.kafka.clients.admin.StreamsGroupDescription; import org.apache.kafka.clients.admin.StreamsGroupMemberAssignment; import org.apache.kafka.clients.admin.StreamsGroupMemberDescription; import org.apache.kafka.clients.admin.StreamsGroupSubtopologyDescription; +import org.apache.kafka.clients.admin.TopicDescription; import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.common.GroupState; import org.apache.kafka.common.GroupType; import org.apache.kafka.common.KafkaFuture; import org.apache.kafka.common.Node; import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.TopicPartitionInfo; +import org.apache.kafka.common.internals.KafkaFutureImpl; import org.apache.kafka.test.TestUtils; import org.junit.jupiter.api.Test; @@ -45,6 +50,8 @@ import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -52,7 +59,9 @@ import java.util.Objects; import java.util.Optional; import java.util.Set; +import java.util.function.Function; import java.util.stream.Collectors; +import 
java.util.stream.IntStream; import joptsimple.OptionException; @@ -71,21 +80,22 @@ public class StreamsGroupCommandTest { + private static final Admin ADMIN_CLIENT = mock(KafkaAdminClient.class); + private static final String BOOTSTRAP_SERVERS = "localhost:9092"; + @Test public void testListStreamsGroups() throws Exception { String firstGroup = "first-group"; String secondGroup = "second-group"; - String bootstrapServer = "localhost:9092"; - String[] cgcArgs = new String[]{"--bootstrap-server", bootstrapServer, "--list"}; - Admin adminClient = mock(KafkaAdminClient.class); + String[] cgcArgs = new String[]{"--bootstrap-server", BOOTSTRAP_SERVERS, "--list"}; ListGroupsResult result = mock(ListGroupsResult.class); when(result.all()).thenReturn(KafkaFuture.completedFuture(Arrays.asList( new GroupListing(firstGroup, Optional.of(GroupType.STREAMS), "streams", Optional.of(GroupState.STABLE)), new GroupListing(secondGroup, Optional.of(GroupType.STREAMS), "streams", Optional.of(GroupState.EMPTY)) ))); - when(adminClient.listGroups(any(ListGroupsOptions.class))).thenReturn(result); - StreamsGroupCommand.StreamsGroupService service = getStreamsGroupService(cgcArgs, adminClient); + when(ADMIN_CLIENT.listGroups(any(ListGroupsOptions.class))).thenReturn(result); + StreamsGroupCommand.StreamsGroupService service = getStreamsGroupService(cgcArgs); Set expectedGroups = new HashSet<>(Arrays.asList(firstGroup, secondGroup)); final Set[] foundGroups = new Set[]{Set.of()}; @@ -99,10 +109,9 @@ public void testListStreamsGroups() throws Exception { @Test public void testListWithUnrecognizedOption() { - String bootstrapServer = "localhost:9092"; - String[] cgcArgs = new String[]{"--frivolous-nonsense", "--bootstrap-server", bootstrapServer, "--list"}; + String[] cgcArgs = new String[]{"--frivolous-nonsense", "--bootstrap-server", BOOTSTRAP_SERVERS, "--list"}; final Exception exception = assertThrows(OptionException.class, () -> { - getStreamsGroupService(cgcArgs, new MockAdminClient()); + getStreamsGroupService(cgcArgs); }); assertEquals("frivolous-nonsense is not a recognized option", exception.getMessage()); } @@ -111,17 +120,15 @@ public void testListWithUnrecognizedOption() { public void testListStreamsGroupsWithStates() throws Exception { String firstGroup = "first-group"; String secondGroup = "second-group"; - String bootstrapServer = "localhost:9092"; - String[] cgcArgs = new String[]{"--bootstrap-server", bootstrapServer, "--list", "--state"}; - Admin adminClient = mock(KafkaAdminClient.class); + String[] cgcArgs = new String[]{"--bootstrap-server", BOOTSTRAP_SERVERS, "--list", "--state"}; ListGroupsResult resultWithAllStates = mock(ListGroupsResult.class); when(resultWithAllStates.all()).thenReturn(KafkaFuture.completedFuture(Arrays.asList( new GroupListing(firstGroup, Optional.of(GroupType.STREAMS), "streams", Optional.of(GroupState.STABLE)), new GroupListing(secondGroup, Optional.of(GroupType.STREAMS), "streams", Optional.of(GroupState.EMPTY)) ))); - when(adminClient.listGroups(any(ListGroupsOptions.class))).thenReturn(resultWithAllStates); - StreamsGroupCommand.StreamsGroupService service = getStreamsGroupService(cgcArgs, adminClient); + when(ADMIN_CLIENT.listGroups(any(ListGroupsOptions.class))).thenReturn(resultWithAllStates); + StreamsGroupCommand.StreamsGroupService service = getStreamsGroupService(cgcArgs); Set expectedListing = new HashSet<>(Arrays.asList( new GroupListing(firstGroup, Optional.of(GroupType.STREAMS), "streams", Optional.of(GroupState.STABLE)), new GroupListing(secondGroup, 
Optional.of(GroupType.STREAMS), "streams", Optional.of(GroupState.EMPTY)))); @@ -136,7 +143,7 @@ public void testListStreamsGroupsWithStates() throws Exception { when(resultWithStableState.all()).thenReturn(KafkaFuture.completedFuture(List.of( new GroupListing(firstGroup, Optional.of(GroupType.STREAMS), "streams", Optional.of(GroupState.STABLE)) ))); - when(adminClient.listGroups(any(ListGroupsOptions.class))).thenReturn(resultWithStableState); + when(ADMIN_CLIENT.listGroups(any(ListGroupsOptions.class))).thenReturn(resultWithStableState); Set expectedListingStable = Set.of( new GroupListing(firstGroup, Optional.of(GroupType.STREAMS), "streams", Optional.of(GroupState.STABLE))); @@ -152,8 +159,7 @@ public void testListStreamsGroupsWithStates() throws Exception { @Test public void testDescribeStreamsGroups() throws Exception { - String firstGroup = "group1"; - Admin adminClient = mock(KafkaAdminClient.class); + String firstGroup = "foo-group"; DescribeStreamsGroupsResult result = mock(DescribeStreamsGroupsResult.class); Map resultMap = new HashMap<>(); StreamsGroupDescription exp = new StreamsGroupDescription( @@ -167,10 +173,13 @@ public void testDescribeStreamsGroups() throws Exception { new Node(0, "bar", 0), null); resultMap.put(firstGroup, exp); - when(result.all()).thenReturn(KafkaFuture.completedFuture(resultMap)); - when(adminClient.describeStreamsGroups(ArgumentMatchers.anyCollection())).thenReturn(result); - StreamsGroupCommand.StreamsGroupService service = new StreamsGroupCommand.StreamsGroupService(null, adminClient); + when(ADMIN_CLIENT.describeStreamsGroups(ArgumentMatchers.anyCollection())).thenReturn(result); + + StreamsGroupCommandOptions streamsGroupCommandOptions = new StreamsGroupCommandOptions( + new String[]{"--bootstrap-server", BOOTSTRAP_SERVERS, "--group", firstGroup, "--describe"}); + StreamsGroupCommand.StreamsGroupService service = new StreamsGroupCommand.StreamsGroupService(streamsGroupCommandOptions, ADMIN_CLIENT); + assertEquals(exp, service.getDescribeGroup(firstGroup)); service.close(); @@ -178,7 +187,7 @@ public void testDescribeStreamsGroups() throws Exception { @Test public void testDescribeStreamsGroupsGetOffsets() throws Exception { - Admin adminClient = mock(KafkaAdminClient.class); + String groupId = "group1"; ListOffsetsResult startOffset = mock(ListOffsetsResult.class); Map startOffsetResultMap = new HashMap<>(); @@ -191,22 +200,30 @@ public void testDescribeStreamsGroupsGetOffsets() throws Exception { when(startOffset.all()).thenReturn(KafkaFuture.completedFuture(startOffsetResultMap)); when(endOffset.all()).thenReturn(KafkaFuture.completedFuture(endOffsetResultMap)); - when(adminClient.listOffsets(ArgumentMatchers.anyMap())).thenReturn(startOffset, endOffset); + when(ADMIN_CLIENT.listOffsets(ArgumentMatchers.anyMap())).thenReturn(startOffset, endOffset); - ListConsumerGroupOffsetsResult result = mock(ListConsumerGroupOffsetsResult.class); + ListStreamsGroupOffsetsResult result = mock(ListStreamsGroupOffsetsResult.class); Map committedOffsetsMap = new HashMap<>(); committedOffsetsMap.put(new TopicPartition("topic1", 0), new OffsetAndMetadata(12, Optional.of(0), "")); - when(adminClient.listConsumerGroupOffsets(ArgumentMatchers.anyMap())).thenReturn(result); + when(ADMIN_CLIENT.listStreamsGroupOffsets(ArgumentMatchers.anyMap())).thenReturn(result); when(result.partitionsToOffsetAndMetadata(ArgumentMatchers.anyString())).thenReturn(KafkaFuture.completedFuture(committedOffsetsMap)); + DescribeStreamsGroupsResult describeResult = 
mock(DescribeStreamsGroupsResult.class); + StreamsGroupDescription groupDescription = mock(StreamsGroupDescription.class); + StreamsGroupSubtopologyDescription subtopology = mock(StreamsGroupSubtopologyDescription.class); + when(ADMIN_CLIENT.describeStreamsGroups(List.of(groupId))).thenReturn(describeResult); + when(describeResult.all()).thenReturn(KafkaFuture.completedFuture(Map.of(groupId, groupDescription))); + when(groupDescription.subtopologies()).thenReturn(List.of(subtopology)); + when(subtopology.sourceTopics()).thenReturn(List.of("topic1")); + StreamsGroupMemberDescription description = new StreamsGroupMemberDescription("foo", 0, Optional.empty(), Optional.empty(), "bar", "baz", 0, "qux", Optional.empty(), Map.of(), List.of(), List.of(), new StreamsGroupMemberAssignment(List.of(), List.of(), List.of()), new StreamsGroupMemberAssignment(List.of(), List.of(), List.of()), false); StreamsGroupDescription x = new StreamsGroupDescription( - "group1", + groupId, 0, 0, 0, @@ -215,8 +232,12 @@ public void testDescribeStreamsGroupsGetOffsets() throws Exception { GroupState.STABLE, new Node(0, "host", 0), null); - StreamsGroupCommand.StreamsGroupService service = new StreamsGroupCommand.StreamsGroupService(null, adminClient); + StreamsGroupCommandOptions streamsGroupCommandOptions = new StreamsGroupCommandOptions( + new String[]{"--bootstrap-server", BOOTSTRAP_SERVERS, "--group", groupId, "--describe"}); + + StreamsGroupCommand.StreamsGroupService service = new StreamsGroupCommand.StreamsGroupService(streamsGroupCommandOptions, ADMIN_CLIENT); Map lags = service.getOffsets(x); + assertEquals(1, lags.size()); assertEquals(new StreamsGroupCommand.OffsetsInfo(Optional.of(12L), Optional.of(0), 30L, 18L), lags.get(new TopicPartition("topic1", 0))); @@ -233,8 +254,82 @@ public void testPrintEmptyGroupState() { } @Test - public void testRetrieveInternalTopics() { + public void testGroupStatesFromString() { + Set result = StreamsGroupCommand.groupStatesFromString("empty"); + assertEquals(new HashSet<>(List.of(GroupState.EMPTY)), result); + result = StreamsGroupCommand.groupStatesFromString("EMPTY"); + assertEquals(new HashSet<>(List.of(GroupState.EMPTY)), result); + + result = StreamsGroupCommand.groupStatesFromString("notready"); + assertEquals(new HashSet<>(List.of(GroupState.NOT_READY)), result); + result = StreamsGroupCommand.groupStatesFromString("notReady"); + assertEquals(new HashSet<>(List.of(GroupState.NOT_READY)), result); + + result = StreamsGroupCommand.groupStatesFromString("assigning"); + assertEquals(new HashSet<>(List.of(GroupState.ASSIGNING)), result); + result = StreamsGroupCommand.groupStatesFromString("ASSIGNING"); + assertEquals(new HashSet<>(List.of(GroupState.ASSIGNING)), result); + + result = StreamsGroupCommand.groupStatesFromString("RECONCILING"); + assertEquals(new HashSet<>(List.of(GroupState.RECONCILING)), result); + result = StreamsGroupCommand.groupStatesFromString("reconCILING"); + assertEquals(new HashSet<>(List.of(GroupState.RECONCILING)), result); + + result = StreamsGroupCommand.groupStatesFromString("STABLE"); + assertEquals(new HashSet<>(List.of(GroupState.STABLE)), result); + result = StreamsGroupCommand.groupStatesFromString("stable"); + assertEquals(new HashSet<>(List.of(GroupState.STABLE)), result); + + result = StreamsGroupCommand.groupStatesFromString("DEAD"); + assertEquals(new HashSet<>(List.of(GroupState.DEAD)), result); + result = StreamsGroupCommand.groupStatesFromString("dead"); + assertEquals(new HashSet<>(List.of(GroupState.DEAD)), result); + + 
assertThrow("preparingRebalance"); + assertThrow("completingRebalance"); + assertThrow("bad, wrong"); + assertThrow(" bad, Stable"); + assertThrow(" , ,"); + } + + @Test + public void testAdminRequestsForResetOffsets() { Admin adminClient = mock(KafkaAdminClient.class); + String groupId = "foo-group"; + List args = new ArrayList<>(Arrays.asList("--bootstrap-server", "localhost:9092", "--group", groupId, "--reset-offsets", "--input-topic", "topic1", "--to-latest")); + List topics = List.of("topic1"); + + when(adminClient.describeStreamsGroups(List.of(groupId))) + .thenReturn(describeStreamsResult(groupId, GroupState.DEAD)); + when(adminClient.describeTopics(eq(topics), any(DescribeTopicsOptions.class))) + .thenReturn(describeTopicsResult(topics, 1)); + when(adminClient.listOffsets(any(), any())) + .thenReturn(listOffsetsResult()); + ListGroupsResult listGroupsResult = listGroupResult(groupId); + when(adminClient.listGroups(any(ListGroupsOptions.class))).thenReturn(listGroupsResult); + ListStreamsGroupOffsetsResult result = mock(ListStreamsGroupOffsetsResult.class); + Map committedOffsetsMap = new HashMap<>(); + committedOffsetsMap.put(new TopicPartition("topic1", 0), mock(OffsetAndMetadata.class)); + when(adminClient.listStreamsGroupOffsets(ArgumentMatchers.anyMap())).thenReturn(result); + when(result.partitionsToOffsetAndMetadata(ArgumentMatchers.anyString())).thenReturn(KafkaFuture.completedFuture(committedOffsetsMap)); + + StreamsGroupCommand.StreamsGroupService service = getStreamsGroupService(args.toArray(new String[0]), adminClient); + Map> resetResult = service.resetOffsets(); + + assertEquals(Collections.singleton(groupId), resetResult.keySet()); + assertEquals(new HashSet<>(List.of(new TopicPartition(topics.get(0), 0))), + resetResult.get(groupId).keySet()); + + verify(adminClient, times(1)).describeStreamsGroups(List.of(groupId)); + verify(adminClient, times(1)).describeTopics(eq(topics), any(DescribeTopicsOptions.class)); + verify(adminClient, times(1)).listOffsets(any(), any()); + verify(adminClient, times(1)).listStreamsGroupOffsets(any()); + + service.close(); + } + + @Test + public void testRetrieveInternalTopics() { String groupId = "foo-group"; List args = new ArrayList<>(Arrays.asList("--bootstrap-server", "localhost:9092", "--group", groupId, "--delete")); List sourceTopics = List.of("source-topic1", "source-topic2"); @@ -262,9 +357,9 @@ public void testRetrieveInternalTopics() { null)); DescribeStreamsGroupsResult result = mock(DescribeStreamsGroupsResult.class); when(result.all()).thenReturn(KafkaFuture.completedFuture(resultMap)); - when(adminClient.describeStreamsGroups(ArgumentMatchers.anyCollection())).thenReturn(result); + when(ADMIN_CLIENT.describeStreamsGroups(ArgumentMatchers.anyCollection())).thenReturn(result); - StreamsGroupCommand.StreamsGroupService service = getStreamsGroupService(args.toArray(new String[0]), adminClient); + StreamsGroupCommand.StreamsGroupService service = getStreamsGroupService(args.toArray(new String[0])); Map> internalTopics = service.retrieveInternalTopics(List.of(groupId)); assertNotNull(internalTopics.get(groupId)); @@ -335,43 +430,17 @@ public void testDeleteNonStreamsGroup() { service.close(); } - @Test - public void testGroupStatesFromString() { - Set result = StreamsGroupCommand.groupStatesFromString("empty"); - assertEquals(new HashSet<>(List.of(GroupState.EMPTY)), result); - result = StreamsGroupCommand.groupStatesFromString("EMPTY"); - assertEquals(new HashSet<>(List.of(GroupState.EMPTY)), result); - - result = 
StreamsGroupCommand.groupStatesFromString("notready"); - assertEquals(new HashSet<>(List.of(GroupState.NOT_READY)), result); - result = StreamsGroupCommand.groupStatesFromString("notReady"); - assertEquals(new HashSet<>(List.of(GroupState.NOT_READY)), result); - - result = StreamsGroupCommand.groupStatesFromString("assigning"); - assertEquals(new HashSet<>(List.of(GroupState.ASSIGNING)), result); - result = StreamsGroupCommand.groupStatesFromString("ASSIGNING"); - assertEquals(new HashSet<>(List.of(GroupState.ASSIGNING)), result); - - result = StreamsGroupCommand.groupStatesFromString("RECONCILING"); - assertEquals(new HashSet<>(List.of(GroupState.RECONCILING)), result); - result = StreamsGroupCommand.groupStatesFromString("reconCILING"); - assertEquals(new HashSet<>(List.of(GroupState.RECONCILING)), result); - - result = StreamsGroupCommand.groupStatesFromString("STABLE"); - assertEquals(new HashSet<>(List.of(GroupState.STABLE)), result); - result = StreamsGroupCommand.groupStatesFromString("stable"); - assertEquals(new HashSet<>(List.of(GroupState.STABLE)), result); - - result = StreamsGroupCommand.groupStatesFromString("DEAD"); - assertEquals(new HashSet<>(List.of(GroupState.DEAD)), result); - result = StreamsGroupCommand.groupStatesFromString("dead"); - assertEquals(new HashSet<>(List.of(GroupState.DEAD)), result); + private ListGroupsResult listGroupResult(String groupId) { + ListGroupsResult listGroupsResult = mock(ListGroupsResult.class); + when(listGroupsResult.all()).thenReturn(KafkaFuture.completedFuture(List.of( + new GroupListing(groupId, Optional.of(GroupType.STREAMS), "streams", Optional.of(GroupState.DEAD)) + ))); + return listGroupsResult; + } - assertThrow("preparingRebalance"); - assertThrow("completingRebalance"); - assertThrow("bad, wrong"); - assertThrow(" bad, Stable"); - assertThrow(" , ,"); + StreamsGroupCommand.StreamsGroupService getStreamsGroupService(String[] args) { + StreamsGroupCommandOptions opts = new StreamsGroupCommandOptions(args); + return new StreamsGroupCommand.StreamsGroupService(opts, ADMIN_CLIENT); } StreamsGroupCommand.StreamsGroupService getStreamsGroupService(String[] args, Admin adminClient) { @@ -392,4 +461,46 @@ private static void assertThrow(final String wrongState) { .map(String::trim) .collect(Collectors.toSet()), validStates); } -} + + private DescribeStreamsGroupsResult describeStreamsResult(String groupId, GroupState groupState) { + StreamsGroupMemberDescription memberDescription = new StreamsGroupMemberDescription("foo", 0, Optional.empty(), + Optional.empty(), "bar", "baz", 0, "qux", + Optional.empty(), Map.of(), List.of(), List.of(), + new StreamsGroupMemberAssignment(List.of(), List.of(), List.of()), new StreamsGroupMemberAssignment(List.of(), List.of(), List.of()), + false); + StreamsGroupDescription description = new StreamsGroupDescription(groupId, + 0, + 0, + 0, + Collections.singletonList(new StreamsGroupSubtopologyDescription("subtopologyId", Collections.emptyList(), Collections.emptyList(), Map.of(), Map.of())), + List.of(memberDescription), + groupState, + new Node(1, "localhost", 9092), + Set.of()); + KafkaFutureImpl future = new KafkaFutureImpl<>(); + future.complete(description); + return new DescribeStreamsGroupsResult(Collections.singletonMap(groupId, future)); + } + + private DescribeTopicsResult describeTopicsResult(Collection topics, int numOfPartitions) { + Map topicDescriptions = new HashMap<>(); + + topics.forEach(topic -> { + List partitions = IntStream.range(0, numOfPartitions) + .mapToObj(i -> new 
TopicPartitionInfo(i, null, Collections.emptyList(), Collections.emptyList())) + .collect(Collectors.toList()); + topicDescriptions.put(topic, new TopicDescription(topic, false, partitions)); + }); + return AdminClientTestUtils.describeTopicsResult(topicDescriptions); + } + + private ListOffsetsResult listOffsetsResult() { + List topicPartitions = new ArrayList<>(); + topicPartitions.add(new TopicPartition("topic1", 0)); + ListOffsetsResult.ListOffsetsResultInfo resultInfo = new ListOffsetsResult.ListOffsetsResultInfo(100, System.currentTimeMillis(), Optional.of(1)); + Map> futures = topicPartitions.stream().collect(Collectors.toMap( + Function.identity(), + __ -> KafkaFuture.completedFuture(resultInfo))); + return new ListOffsetsResult(futures); + } +} \ No newline at end of file From a6e1925a4cee2469d322fab2965ddec847230a42 Mon Sep 17 00:00:00 2001 From: aliehsaeedii Date: Wed, 11 Jun 2025 22:44:01 +0200 Subject: [PATCH 22/26] fix doc --- .../apache/kafka/tools/streams/StreamsGroupCommandOptions.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java index d0c3849fb942b..b71751bd1e17e 100644 --- a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java +++ b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java @@ -42,7 +42,7 @@ public class StreamsGroupCommandOptions extends CommandDefaultOptions { private static final String INPUT_TOPIC_DOC = "The input topic whose committed offset should be deleted or reset. " + "In `reset-offsets` case, partitions can be specified using this format: `topic1:0,1,2`, where 0,1,2 are the partition to be included in the process. " + "Multiple input topics can be specified. Supported operations: delete-offsets, reset-offsets."; - private static final String ALL_INPUT_TOPICS_DOC = "Consider all topics assigned to a group in the `reset-offsets` process."; + private static final String ALL_INPUT_TOPICS_DOC = "Consider all source topics used in the topology of the group. 
Supported operations: delete-offsets, reset-offsets."; public static final String LIST_DOC = "List all streams groups."; public static final String DESCRIBE_DOC = "Describe streams group and list offset lag related to given group."; private static final String DELETE_DOC = "Pass in groups to delete topic partition offsets and ownership information " + From a7be157d04fa42a2b2d11fefb3a92cb786260341 Mon Sep 17 00:00:00 2001 From: aliehsaeedii Date: Wed, 11 Jun 2025 23:05:41 +0200 Subject: [PATCH 23/26] keep options package private --- .../streams/StreamsGroupCommandOptions.java | 78 +++++++++---------- 1 file changed, 39 insertions(+), 39 deletions(-) diff --git a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java index b71751bd1e17e..bd2051f3bd913 100644 --- a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java +++ b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java @@ -34,27 +34,27 @@ public class StreamsGroupCommandOptions extends CommandDefaultOptions { private static final String NL = System.lineSeparator(); - public static final Logger LOGGER = LoggerFactory.getLogger(StreamsGroupCommandOptions.class); + static final Logger LOGGER = LoggerFactory.getLogger(StreamsGroupCommandOptions.class); - public static final String BOOTSTRAP_SERVER_DOC = "REQUIRED: The server(s) to connect to."; - public static final String GROUP_DOC = "The streams group we wish to act on."; + private static final String BOOTSTRAP_SERVER_DOC = "REQUIRED: The server(s) to connect to."; + private static final String GROUP_DOC = "The streams group we wish to act on."; private static final String ALL_GROUPS_DOC = "Apply to all streams groups."; private static final String INPUT_TOPIC_DOC = "The input topic whose committed offset should be deleted or reset. " + "In `reset-offsets` case, partitions can be specified using this format: `topic1:0,1,2`, where 0,1,2 are the partition to be included in the process. " + "Multiple input topics can be specified. Supported operations: delete-offsets, reset-offsets."; private static final String ALL_INPUT_TOPICS_DOC = "Consider all source topics used in the topology of the group. Supported operations: delete-offsets, reset-offsets."; - public static final String LIST_DOC = "List all streams groups."; - public static final String DESCRIBE_DOC = "Describe streams group and list offset lag related to given group."; + private static final String LIST_DOC = "List all streams groups."; + private static final String DESCRIBE_DOC = "Describe streams group and list offset lag related to given group."; private static final String DELETE_DOC = "Pass in groups to delete topic partition offsets and ownership information " + "over the entire streams group. For instance --group g1 --group g2"; private static final String DELETE_OFFSETS_DOC = "Delete offsets of streams group. Supports one streams group at the time, and multiple topics."; - public static final String TIMEOUT_MS_DOC = "The timeout that can be set for some use cases. For example, it can be used when describing the group " + + private static final String TIMEOUT_MS_DOC = "The timeout that can be set for some use cases. 
For example, it can be used when describing the group " + "to specify the maximum amount of time in milliseconds to wait before the group stabilizes."; - public static final String COMMAND_CONFIG_DOC = "Property file containing configs to be passed to Admin Client."; - public static final String STATE_DOC = "When specified with '--list', it displays the state of all groups. It can also be used to list groups with specific states. " + + private static final String COMMAND_CONFIG_DOC = "Property file containing configs to be passed to Admin Client."; + private static final String STATE_DOC = "When specified with '--list', it displays the state of all groups. It can also be used to list groups with specific states. " + "Valid values are Empty, NotReady, Stable, Assigning, Reconciling, and Dead."; - public static final String MEMBERS_DOC = "Describe members of the group. This option may be used with the '--describe' option only."; - public static final String OFFSETS_DOC = "Describe the group and list all topic partitions in the group along with their offset information. " + + private static final String MEMBERS_DOC = "Describe members of the group. This option may be used with the '--describe' option only."; + private static final String OFFSETS_DOC = "Describe the group and list all topic partitions in the group along with their offset information. " + "This is the default sub-action and may be used with the '--describe' option only."; private static final String RESET_OFFSETS_DOC = "Reset offsets of streams group. The instances should be inactive." + NL + "Has 2 execution options: --dry-run (the default) to plan which offsets to reset, and --execute to update the offsets." + NL + @@ -73,38 +73,38 @@ public class StreamsGroupCommandOptions extends CommandDefaultOptions { private static final String RESET_TO_LATEST_DOC = "Reset offsets to latest offset."; private static final String RESET_TO_CURRENT_DOC = "Reset offsets to current offset."; private static final String RESET_SHIFT_BY_DOC = "Reset offsets shifting current offset by 'n', where 'n' can be positive or negative."; - public static final String VERBOSE_DOC = """ + private static final String VERBOSE_DOC = """ Use with --describe --state to show group epoch and target assignment epoch. Use with --describe --members to show for each member the member epoch, target assignment epoch, current assignment, target assignment, and whether member is still using the classic rebalance protocol. 
Use with --describe --offsets and --describe to show leader epochs for each partition."""; - public final OptionSpec bootstrapServerOpt; - public final OptionSpec groupOpt; - public final OptionSpec inputTopicOpt; - public final OptionSpec allInputTopicsOpt; - public final OptionSpec listOpt; - public final OptionSpec describeOpt; - public final OptionSpec deleteOpt; - public final OptionSpec deleteOffsetsOpt; - public final OptionSpec allGroupsOpt; - public final OptionSpec timeoutMsOpt; - public final OptionSpec commandConfigOpt; - public final OptionSpec stateOpt; - public final OptionSpec membersOpt; - public final OptionSpec offsetsOpt; - public final OptionSpec resetOffsetsOpt; - public final OptionSpec resetToOffsetOpt; - public final OptionSpec resetFromFileOpt; - public final OptionSpec resetToDatetimeOpt; - public final OptionSpec resetByDurationOpt; - public final OptionSpec resetToEarliestOpt; - public final OptionSpec resetToLatestOpt; - public final OptionSpec resetToCurrentOpt; - public final OptionSpec resetShiftByOpt; - public final OptionSpec dryRunOpt; - public final OptionSpec executeOpt; - public final OptionSpec exportOpt; - public final OptionSpec verboseOpt; + final OptionSpec bootstrapServerOpt; + final OptionSpec groupOpt; + final OptionSpec inputTopicOpt; + final OptionSpec allInputTopicsOpt; + final OptionSpec listOpt; + final OptionSpec describeOpt; + final OptionSpec deleteOpt; + final OptionSpec deleteOffsetsOpt; + final OptionSpec allGroupsOpt; + final OptionSpec timeoutMsOpt; + final OptionSpec commandConfigOpt; + final OptionSpec stateOpt; + final OptionSpec membersOpt; + final OptionSpec offsetsOpt; + final OptionSpec resetOffsetsOpt; + final OptionSpec resetToOffsetOpt; + final OptionSpec resetFromFileOpt; + final OptionSpec resetToDatetimeOpt; + final OptionSpec resetByDurationOpt; + final OptionSpec resetToEarliestOpt; + final OptionSpec resetToLatestOpt; + final OptionSpec resetToCurrentOpt; + final OptionSpec resetShiftByOpt; + final OptionSpec dryRunOpt; + final OptionSpec executeOpt; + final OptionSpec exportOpt; + final OptionSpec verboseOpt; final Set> allResetOffsetScenarioOpts; final Set> allGroupSelectionScopeOpts; @@ -196,7 +196,7 @@ public StreamsGroupCommandOptions(String[] args) { allDeleteOffsetsOpts = new HashSet<>(Arrays.asList(inputTopicOpt, allInputTopicsOpt)); } - public void checkArgs() { + void checkArgs() { CommandLineUtils.maybePrintHelpOrVersion(this, "This tool helps to list, or describe streams groups."); CommandLineUtils.checkRequiredArgs(parser, options, bootstrapServerOpt); From 33cf7d3c80cb8cc66165d42f628466b9f051a030 Mon Sep 17 00:00:00 2001 From: aliehsaeedii Date: Thu, 19 Jun 2025 08:32:12 +0200 Subject: [PATCH 24/26] introduce delete internal topics options --- .../tools/streams/StreamsGroupCommand.java | 74 +++++--- .../streams/StreamsGroupCommandOptions.java | 38 +++- .../streams/DeleteStreamsGroupOffsetTest.java | 2 +- .../tools/streams/DeleteStreamsGroupTest.java | 176 ++++++++++++++++-- .../streams/ResetStreamsGroupOffsetTest.java | 124 +++++++++--- 5 files changed, 348 insertions(+), 66 deletions(-) diff --git a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java index ee2d6935e1829..4b9b7eef2b80e 100644 --- a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java +++ b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java @@ -483,8 +483,8 @@ Map> resetOffsets() { 
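// A rough sketch of the flow this hunk lands in: resetOffsets() first computes the new
// offsets per group (resetOffsetsForInactiveGroup), and only a non-dry-run pass goes on
// to delete internal topics, which getInternalTopicsForGroup below resolves from the
// --delete-internal-topic / --delete-all-internal-topics options.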
result.put(groupId, resetOffsetsForInactiveGroup(groupId, dryRun)); // delete internal topics if (!dryRun) { - List<String> internalTopics = retrieveInternalTopics(List.of(groupId)).get(groupId); - if (internalTopics != null && !internalTopics.isEmpty()) { + List<String> internalTopics = getInternalTopicsForGroup(groupId); + if (!internalTopics.isEmpty()) { try { adminClient.deleteTopics(internalTopics).all().get(); } catch (InterruptedException | ExecutionException e) { @@ -518,6 +518,16 @@ Map<String, Map<TopicPartition, OffsetAndMetadata>> resetOffsets() { return result; } + private List<String> getInternalTopicsForGroup(String groupId) { + List<String> internalTopics = new ArrayList<>(); + if (opts.options.has(opts.deleteAllInternalTopicsOpt)) { + internalTopics = retrieveInternalTopics(List.of(groupId)).get(groupId); + } else if (opts.options.has(opts.deleteInternalTopicOpt)) { + internalTopics = opts.options.valuesOf(opts.deleteInternalTopicOpt); + } + return internalTopics; + } + private Map.Entry<Errors, Map<TopicPartition, Throwable>> deleteOffsets(String groupId, List<String> topics) { Map<TopicPartition, Throwable> partitionLevelResult = new HashMap<>(); Set<String> topicWithPartitions = new HashSet<>(); @@ -650,6 +660,8 @@ Map<String, Throwable> deleteGroups() { ? new ArrayList<>(listStreamsGroups()) : new ArrayList<>(opts.options.valuesOf(opts.groupOpt)); + final boolean deleteInternalTopics = opts.options.has(opts.deleteAllInternalTopicsOpt); + // pre admin call checks Map<String, Throwable> failed = preAdminCallChecks(groupIds); @@ -659,7 +671,9 @@ Map<String, Throwable> deleteGroups() { Map<String, Throwable> internalTopicsDeletionFailures = new HashMap<>(); if (!groupIds.isEmpty()) { // retrieve internal topics before deleting groups - internalTopics = retrieveInternalTopics(groupIds); + if (deleteInternalTopics) { + internalTopics = retrieveInternalTopics(groupIds); + } // delete streams groups Map<String, KafkaFuture<Void>> groupsToDelete = adminClient.deleteStreamsGroups( groupIds, @@ -678,29 +692,7 @@ Map<String, Throwable> deleteGroups() { }); // delete internal topics - if (!success.isEmpty()) { - for (String groupId : success.keySet()) { - List<String> internalTopicsToDelete = internalTopics.get(groupId); - if (internalTopicsToDelete != null && !internalTopicsToDelete.isEmpty()) { - DeleteTopicsResult deleteTopicsResult = null; - try { - deleteTopicsResult = adminClient.deleteTopics(internalTopicsToDelete); - deleteTopicsResult.all().get(); - } catch (InterruptedException | ExecutionException e) { - if (deleteTopicsResult != null) { - deleteTopicsResult.topicNameValues().forEach((topic, future) -> { - try { - future.get(); - } catch (Exception topicException) { - System.out.println("Failed to delete internal topic: " + topic); - } - }); - } - internalTopicsDeletionFailures.put(groupId, e.getCause()); - } - } - } - } + internalTopicsDeletionFailures = maybeDeleteInternalTopics(deleteInternalTopics, success, internalTopics); } // display outcome messages based on the results @@ -723,6 +715,36 @@ Map<String, Throwable> deleteGroups() { return failed; } + private Map<String, Throwable> maybeDeleteInternalTopics(boolean deleteInternalTopics, + Map<String, Throwable> success, + Map<String, List<String>> internalTopics) { + Map<String, Throwable> internalTopicsDeletionFailures = new HashMap<>(); + if (deleteInternalTopics && !success.isEmpty()) { + for (String groupId : success.keySet()) { + List<String> internalTopicsToDelete = internalTopics.get(groupId); + if (internalTopicsToDelete != null && !internalTopicsToDelete.isEmpty()) { + DeleteTopicsResult deleteTopicsResult = null; + try { + deleteTopicsResult = adminClient.deleteTopics(internalTopicsToDelete); + deleteTopicsResult.all().get(); + } catch (InterruptedException | ExecutionException e) { + if (deleteTopicsResult != null) { + deleteTopicsResult.topicNameValues().forEach((topic, future) -> { + try { 
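+ // Await each per-topic future individually: all().get() has already failed at
+ // this point, so probing the futures names exactly which topics were not deleted.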
+ future.get(); + } catch (Exception topicException) { + System.out.println("Failed to delete internal topic: " + topic); + } + }); + } + internalTopicsDeletionFailures.put(groupId, e.getCause()); + } + } + } + return internalTopicsDeletionFailures; + } + private Map<String, Throwable> preAdminCallChecks(List<String> groupIds) { List streamsGroupIds = listDetailedStreamsGroups(); LinkedHashSet<String> groupIdSet = new LinkedHashSet<>(groupIds); diff --git a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java index bd2051f3bd913..e74104d27ffea 100644 --- a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java +++ b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommandOptions.java @@ -73,6 +73,10 @@ public class StreamsGroupCommandOptions extends CommandDefaultOptions { private static final String RESET_TO_LATEST_DOC = "Reset offsets to latest offset."; private static final String RESET_TO_CURRENT_DOC = "Reset offsets to current offset."; private static final String RESET_SHIFT_BY_DOC = "Reset offsets shifting current offset by 'n', where 'n' can be positive or negative."; + private static final String DELETE_INTERNAL_TOPIC_DOC = "Delete the specified internal topic of the streams group. Supported operations: reset-offsets. " + + "This option is applicable only when --execute is used."; + private static final String DELETE_ALL_INTERNAL_TOPICS_DOC = "Delete all internal topics linked to the streams group. Supported operations: reset-offsets, delete. " + + "With reset-offsets, this option is applicable only when --execute is used."; private static final String VERBOSE_DOC = """ Use with --describe --state to show group epoch and target assignment epoch. Use with --describe --members to show for each member the member epoch, target assignment epoch, current assignment, target assignment, and whether member is still using the classic rebalance protocol. 
@@ -101,6 +105,8 @@ public class StreamsGroupCommandOptions extends CommandDefaultOptions { final OptionSpec resetToLatestOpt; final OptionSpec resetToCurrentOpt; final OptionSpec resetShiftByOpt; + final OptionSpec deleteInternalTopicOpt; + final OptionSpec deleteAllInternalTopicsOpt; final OptionSpec dryRunOpt; final OptionSpec executeOpt; final OptionSpec exportOpt; @@ -110,6 +116,7 @@ public class StreamsGroupCommandOptions extends CommandDefaultOptions { final Set> allGroupSelectionScopeOpts; final Set> allStreamsGroupLevelOpts; final Set> allDeleteOffsetsOpts; + final Set> allDeleteInternalGroupsOpts; public static StreamsGroupCommandOptions fromArgs(String[] args) { StreamsGroupCommandOptions opts = new StreamsGroupCommandOptions(args); @@ -181,6 +188,10 @@ public StreamsGroupCommandOptions(String[] args) { .withRequiredArg() .describedAs("number-of-offsets") .ofType(Long.class); + deleteInternalTopicOpt = parser.accepts("delete-internal-topic", DELETE_INTERNAL_TOPIC_DOC) + .withRequiredArg() + .ofType(String.class); + deleteAllInternalTopicsOpt = parser.accepts("delete-all-internal-topics", DELETE_ALL_INTERNAL_TOPICS_DOC); verboseOpt = parser.accepts("verbose", VERBOSE_DOC) .availableIf(describeOpt); @@ -194,8 +205,10 @@ public StreamsGroupCommandOptions(String[] args) { allGroupSelectionScopeOpts = new HashSet<>(Arrays.asList(groupOpt, allGroupsOpt)); allStreamsGroupLevelOpts = new HashSet<>(Arrays.asList(listOpt, describeOpt, deleteOpt)); allDeleteOffsetsOpts = new HashSet<>(Arrays.asList(inputTopicOpt, allInputTopicsOpt)); + allDeleteInternalGroupsOpts = new HashSet<>(Arrays.asList(resetOffsetsOpt, deleteOpt)); } + @SuppressWarnings("NPathComplexity") void checkArgs() { CommandLineUtils.maybePrintHelpOrVersion(this, "This tool helps to list, or describe streams groups."); @@ -224,8 +237,12 @@ void checkArgs() { checkOffsetResetArgs(); } + if (options.has(deleteAllInternalTopicsOpt) || options.has(deleteInternalTopicOpt)) { + checkDeleteInternalTopicsArgs(); + } + if ((options.has(dryRunOpt) || options.has(executeOpt)) && !options.has(resetOffsetsOpt)) - CommandLineUtils.printUsageAndExit(parser, "Only Option " + resetOffsetsOpt + "accepts " + executeOpt + " or " + dryRunOpt); + CommandLineUtils.printUsageAndExit(parser, "Only Option " + resetOffsetsOpt + " accepts " + executeOpt + " or " + dryRunOpt); CommandLineUtils.checkInvalidArgs(parser, options, listOpt, membersOpt, offsetsOpt); CommandLineUtils.checkInvalidArgs(parser, options, groupOpt, minus(allGroupSelectionScopeOpts, groupOpt)); @@ -282,4 +299,23 @@ private void checkOffsetResetArgs() { CommandLineUtils.checkInvalidArgs(parser, options, resetShiftByOpt, minus(allResetOffsetScenarioOpts, resetShiftByOpt)); CommandLineUtils.checkInvalidArgs(parser, options, resetFromFileOpt, minus(allResetOffsetScenarioOpts, resetFromFileOpt)); } + + private void checkDeleteAllInternalTopicsArgs() { + if (!options.has(resetOffsetsOpt) && !options.has(deleteOpt)) { + CommandLineUtils.printUsageAndExit(parser, + "Option " + deleteAllInternalTopicsOpt + " takes one of these options: " + allDeleteInternalGroupsOpts.stream().map(Object::toString).collect(Collectors.joining(", "))); + } else if (options.has(resetOffsetsOpt) && !options.has(executeOpt)) { + CommandLineUtils.printUsageAndExit(parser, + "Option " + deleteAllInternalTopicsOpt + " takes " + executeOpt + " when " + resetOffsetsOpt + " is used."); + } + } + + private void checkDeleteInternalTopicsArgs() { + if (options.has(deleteAllInternalTopicsOpt)) { + 
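+ // --delete-all-internal-topics is also legal with --delete, so it gets its own check;
+ // --delete-internal-topic is only meaningful for an executed reset-offsets run.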
checkDeleteAllInternalTopicsArgs(); + } else if (options.has(deleteInternalTopicOpt) && (!options.has(resetOffsetsOpt) || !options.has(executeOpt))) { + CommandLineUtils.printUsageAndExit(parser, + "Option " + deleteInternalTopicOpt + " takes " + resetOffsetsOpt + " when " + executeOpt + " is used."); + } + } } \ No newline at end of file diff --git a/tools/src/test/java/org/apache/kafka/tools/streams/DeleteStreamsGroupOffsetTest.java b/tools/src/test/java/org/apache/kafka/tools/streams/DeleteStreamsGroupOffsetTest.java index 74079548675c6..16e2770c87fe2 100644 --- a/tools/src/test/java/org/apache/kafka/tools/streams/DeleteStreamsGroupOffsetTest.java +++ b/tools/src/test/java/org/apache/kafka/tools/streams/DeleteStreamsGroupOffsetTest.java @@ -89,7 +89,7 @@ public static void startCluster() { } @AfterEach - public void deleteTopics() { + public void deleteTopicsAndGroups() { try (final Admin adminClient = cluster.createAdminClient()) { // delete all topics final Set topics = adminClient.listTopics().names().get(); diff --git a/tools/src/test/java/org/apache/kafka/tools/streams/DeleteStreamsGroupTest.java b/tools/src/test/java/org/apache/kafka/tools/streams/DeleteStreamsGroupTest.java index 394e4cf63d01a..5e00cd5dae385 100644 --- a/tools/src/test/java/org/apache/kafka/tools/streams/DeleteStreamsGroupTest.java +++ b/tools/src/test/java/org/apache/kafka/tools/streams/DeleteStreamsGroupTest.java @@ -19,10 +19,13 @@ import org.apache.kafka.clients.admin.Admin; import org.apache.kafka.clients.admin.AdminClientConfig; import org.apache.kafka.clients.admin.FeatureUpdate; +import org.apache.kafka.clients.admin.GroupListing; +import org.apache.kafka.clients.admin.ListGroupsOptions; import org.apache.kafka.clients.admin.UpdateFeaturesOptions; import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.common.GroupState; import org.apache.kafka.common.errors.GroupNotEmptyException; +import org.apache.kafka.common.errors.UnknownTopicOrPartitionException; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.serialization.Serdes; import org.apache.kafka.common.serialization.StringSerializer; @@ -44,6 +47,7 @@ import org.apache.kafka.tools.ToolsTestUtils; import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; @@ -59,9 +63,11 @@ import java.util.Objects; import java.util.Optional; import java.util.Properties; +import java.util.Set; import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; import joptsimple.OptionException; @@ -84,6 +90,7 @@ public class DeleteStreamsGroupTest { private static final int RECORD_TOTAL = 10; public static EmbeddedKafkaCluster cluster; private static String bootstrapServers; + private static Admin adminClient; @BeforeAll public static void startCluster() { @@ -92,6 +99,26 @@ public static void startCluster() { cluster.start(); bootstrapServers = cluster.bootstrapServers(); + adminClient = cluster.createAdminClient(); + } + + @AfterEach + public void deleteTopicsAndGroups() { + try (final Admin adminClient = cluster.createAdminClient()) { + // delete all topics + final Set topics = adminClient.listTopics().names().get(); + adminClient.deleteTopics(topics).all().get(); + // delete all groups + List groupIds = + 
adminClient.listGroups(ListGroupsOptions.forStreamsGroups().timeoutMs(1000)).all().get() + .stream().map(GroupListing::groupId).toList(); + adminClient.deleteStreamsGroups(groupIds).all().get(); + } catch (final UnknownTopicOrPartitionException ignored) { + } catch (final ExecutionException | InterruptedException e) { + if (!(e.getCause() instanceof UnknownTopicOrPartitionException)) { + throw new RuntimeException(e); + } + } } @AfterAll @@ -122,7 +149,23 @@ public void testDeleteWithoutGroupOption() { } @Test - public void testDeleteSingleGroup() throws Exception { + public void testDeleteWithDeleteInternalTopicOption() { + final String[] args = new String[]{"--bootstrap-server", bootstrapServers, "--delete", "--all-groups", "--delete-internal-topic", "foo"}; + AtomicBoolean exited = new AtomicBoolean(false); + Exit.setExitProcedure(((statusCode, message) -> { + assertNotEquals(0, statusCode); + assertTrue(message.contains("Option [delete-internal-topic] takes [reset-offsets] when [execute] is used.")); + exited.set(true); + })); + try { + getStreamsGroupService(args); + } finally { + assertTrue(exited.get()); + } + } + + @Test + public void testDeleteSingleGroupWithoutDeletingInternalTopics() throws Exception { final String appId = generateGroupAppId(); String[] args = new String[]{"--bootstrap-server", bootstrapServers, "--delete", "--group", appId}; @@ -144,19 +187,20 @@ public void testDeleteSingleGroup() throws Exception { result.get(appId), "The expected error (" + Errors.NON_EMPTY_GROUP + ") was not detected while deleting streams group. Result was:(" + result + ")"); - /* test 2: delete EMPTY streams group */ + /* test 2: delete EMPTY streams group without deleting internal topics */ stopKSApp(appId, streams, service); final Map emptyGrpRes = new HashMap<>(); output = ToolsTestUtils.grabConsoleOutput(() -> emptyGrpRes.putAll(service.deleteGroups())); assertTrue(output.contains("Deletion of requested streams groups ('" + appId + "') was successful."), "The streams group could not be deleted as expected"); - assertTrue(output.contains("Deletion of associated internal topics of the streams groups ('" + appId + "') was successful."), + assertFalse(output.contains("Deletion of associated internal topics of the streams groups ('" + appId + "') was successful."), "The internal topics could not be deleted as expected."); assertEquals(1, emptyGrpRes.size()); assertTrue(emptyGrpRes.containsKey(appId)); assertNull(emptyGrpRes.get(appId), "The streams group could not be deleted as expected"); - assertTrue(service.retrieveInternalTopics(List.of(appId)).isEmpty()); + assertEquals(3, getInternalTopics(appId).size(), + "The internal topics were deleted, but they shouldn't have been."); /* test 3: delete an already deleted streams group (non-existing group) */ result = service.deleteGroups(); @@ -169,7 +213,30 @@ public void testDeleteSingleGroup() throws Exception { } @Test - public void testDeleteMultipleGroup() throws Exception { + public void testDeleteSingleGroupWithDeletingInternalTopics() throws Exception { + final String appId = generateGroupAppId(); + String[] args = new String[]{"--bootstrap-server", bootstrapServers, "--delete", "--group", appId, "--delete-all-internal-topics"}; + + StreamsGroupCommand.StreamsGroupService service = getStreamsGroupService(args); + try (KafkaStreams streams = startKSApp(appId, service)) { + stopKSApp(appId, streams, service); + final Map emptyGrpRes = new HashMap<>(); + String output = ToolsTestUtils.grabConsoleOutput(() -> 
emptyGrpRes.putAll(service.deleteGroups())); + + assertTrue(output.contains("Deletion of requested streams groups ('" + appId + "') was successful."), + "The streams group could not be deleted as expected"); + assertTrue(output.contains("Deletion of associated internal topics of the streams groups ('" + appId + "') was successful."), + "The internal topics could not be deleted as expected."); + assertEquals(1, emptyGrpRes.size()); + assertTrue(emptyGrpRes.containsKey(appId)); + assertNull(emptyGrpRes.get(appId), "The streams group could not be deleted as expected"); + TestUtils.waitForCondition(() -> getInternalTopics(appId).isEmpty(), + "The internal topics of the streams group " + appId + " were not deleted as expected."); + } + } + + @Test + public void testDeleteMultipleGroupsWithoutDeletingInternalTopics() throws Exception { final String appId1 = generateGroupAppId(); final String appId2 = generateGroupAppId(); final String appId3 = generateGroupAppId(); @@ -222,20 +289,24 @@ public void testDeleteMultipleGroup() throws Exception { assertTrue(output.contains("Deletion of some streams groups failed:"), "The streams groups deletion did not work as expected"); assertTrue(output.contains("Group '" + appId2 + "' could not be deleted due to:") - && output.contains("Streams group '" + appId2 + "' is not EMPTY."), "The expected error (" + Errors.NON_EMPTY_GROUP + ") was not detected while deleting streams group. Result was:(" + result + ")"); + && output.contains("Streams group '" + appId2 + "' is not EMPTY."), "The expected error (" + Errors.NON_EMPTY_GROUP + ") was not detected while deleting streams group. Result was:(" + mixGrpsRes + ")"); assertTrue(output.contains("Group '" + appId3 + "' could not be deleted due to:") - && output.contains("Streams group '" + appId3 + "' is not EMPTY."), "The expected error (" + Errors.NON_EMPTY_GROUP + ") was not detected while deleting streams group. Result was:(" + result + ")"); + && output.contains("Streams group '" + appId3 + "' is not EMPTY."), "The expected error (" + Errors.NON_EMPTY_GROUP + ") was not detected while deleting streams group. 
Result was:(" + mixGrpsRes + ")"); assertTrue(output.contains("These streams groups were deleted successfully: '" + appId1 + "'"), "The streams groups deletion did not work as expected"); - assertTrue(output.contains("Deletion of associated internal topics of the streams groups ('" + appId1 + "') was successful."), + assertFalse(output.contains("Deletion of associated internal topics of the streams groups ('" + appId1 + "') was successful."), "The internal topics could not be deleted as expected"); assertEquals(3, mixGrpsRes.size()); assertNull(mixGrpsRes.get(appId1)); assertNotNull(mixGrpsRes.get(appId2)); assertNotNull(mixGrpsRes.get(appId3)); - assertTrue(service.retrieveInternalTopics(List.of(appId1)).isEmpty()); - assertFalse(service.retrieveInternalTopics(List.of(appId2, appId3)).isEmpty()); + assertEquals(3, getInternalTopics(appId1).size(), + "The internal topics were deleted, but they shouldn't have been."); + assertEquals(3, getInternalTopics(appId2).size(), + "The internal topics were deleted, but they shouldn't have been."); + assertEquals(3, getInternalTopics(appId3).size(), + "The internal topics were deleted, but they shouldn't have been."); /* test 3: delete all groups */ stopKSApp(appId2, streams2, service); @@ -244,6 +315,68 @@ public void testDeleteMultipleGroup() throws Exception { final Map allGrpsRes = new HashMap<>(); output = ToolsTestUtils.grabConsoleOutput(() -> allGrpsRes.putAll(service.deleteGroups())); + assertTrue(output.contains("Deletion of requested streams groups ('" + appId2 + "', '" + appId3 + "') was successful.") | + output.contains("Deletion of requested streams groups ('" + appId3 + "', '" + appId2 + "') was successful."), + "The streams groups deletion did not work as expected"); + assertFalse(output.contains("Deletion of associated internal topics of the streams groups ('" + appId2 + "', '" + appId3 + "') was successful.") | + output.contains("Deletion of associated internal topics of the streams groups ('" + appId3 + "', '" + appId2 + "') was successful."), + "The internal topics could not be deleted as expected"); + + assertEquals(2, allGrpsRes.size()); + assertNull(allGrpsRes.get(appId2)); + assertNull(allGrpsRes.get(appId3)); + assertEquals(3, getInternalTopics(appId1).size(), + "The internal topics were deleted, but they shouldn't have been."); + assertEquals(3, getInternalTopics(appId2).size(), + "The internal topics were deleted, but they shouldn't have been."); + assertEquals(3, getInternalTopics(appId3).size(), + "The internal topics were deleted, but they shouldn't have been."); + } + + @Test + public void testDeleteAllGroupsWithDeletingInternalTopics() throws Exception { + final String appId1 = generateGroupAppId(); + final String appId2 = generateGroupAppId(); + final String appId3 = generateGroupAppId(); + + String[] args = new String[]{"--bootstrap-server", bootstrapServers, "--delete", "--all-groups", "--delete-all-internal-topics"}; + + StreamsGroupCommand.StreamsGroupService service = getStreamsGroupService(args); + KafkaStreams streams1 = startKSApp(appId1, service); + KafkaStreams streams2 = startKSApp(appId2, service); + KafkaStreams streams3 = startKSApp(appId3, service); + + /* test 1: delete mix of EMPTY and NON_EMPTY streams group */ + stopKSApp(appId1, streams1, service); + final Map mixGrpsRes = new HashMap<>(); + String output = ToolsTestUtils.grabConsoleOutput(() -> mixGrpsRes.putAll(service.deleteGroups())); + + assertTrue(output.contains("Deletion of some streams groups failed:"), "The streams groups deletion did not 
work as expected"); + assertTrue(output.contains("Group '" + appId2 + "' could not be deleted due to:") + && output.contains("Streams group '" + appId2 + "' is not EMPTY."), "The expected error (" + Errors.NON_EMPTY_GROUP + ") was not detected while deleting streams group. Result was:(" + mixGrpsRes + ")"); + assertTrue(output.contains("Group '" + appId3 + "' could not be deleted due to:") + && output.contains("Streams group '" + appId3 + "' is not EMPTY."), "The expected error (" + Errors.NON_EMPTY_GROUP + ") was not detected while deleting streams group. Result was:(" + mixGrpsRes + ")"); + assertTrue(output.contains("These streams groups were deleted successfully: '" + appId1 + "'"), + "The streams groups deletion did not work as expected"); + assertTrue(output.contains("Deletion of associated internal topics of the streams groups ('" + appId1 + "') was successful."), + "The internal topics could not be deleted as expected"); + + assertEquals(3, mixGrpsRes.size()); + assertNull(mixGrpsRes.get(appId1)); + assertNotNull(mixGrpsRes.get(appId2)); + assertNotNull(mixGrpsRes.get(appId3)); + TestUtils.waitForCondition(() -> getInternalTopics(appId1).isEmpty(), + "The internal topics of the streams group " + appId1 + " were not deleted as expected."); + assertFalse(getInternalTopics(appId2).isEmpty()); + assertFalse(getInternalTopics(appId3).isEmpty()); + + /* test 2: delete all groups */ + stopKSApp(appId2, streams2, service); + stopKSApp(appId3, streams3, service); + + final Map allGrpsRes = new HashMap<>(); + output = ToolsTestUtils.grabConsoleOutput(() -> allGrpsRes.putAll(service.deleteGroups())); + assertTrue(output.contains("Deletion of requested streams groups ('" + appId2 + "', '" + appId3 + "') was successful.") | output.contains("Deletion of requested streams groups ('" + appId3 + "', '" + appId2 + "') was successful."), "The streams groups deletion did not work as expected"); @@ -254,13 +387,16 @@ public void testDeleteMultipleGroup() throws Exception { assertEquals(2, allGrpsRes.size()); assertNull(allGrpsRes.get(appId2)); assertNull(allGrpsRes.get(appId3)); - assertTrue(service.retrieveInternalTopics(List.of(appId1, appId2, appId3)).isEmpty()); + TestUtils.waitForCondition(() -> getInternalTopics(appId2).isEmpty(), + "The internal topics of the streams group " + appId2 + " were not deleted as expected."); + TestUtils.waitForCondition(() -> getInternalTopics(appId3).isEmpty(), + "The internal topics of the streams group " + appId3 + " were not deleted as expected."); } @Test public void testDeleteAllGroupsAfterVersionDowngrade() throws Exception { final String appId = generateGroupAppId(); - String[] args = new String[]{"--bootstrap-server", bootstrapServers, "--delete", "--all-groups"}; + String[] args = new String[]{"--bootstrap-server", bootstrapServers, "--delete", "--all-groups", "--delete-all-internal-topics"}; StreamsGroupCommand.StreamsGroupService service = getStreamsGroupService(args); try (KafkaStreams streams = startKSApp(appId, service)) { @@ -277,14 +413,26 @@ public void testDeleteAllGroupsAfterVersionDowngrade() throws Exception { assertEquals(1, result.size()); assertTrue(result.containsKey(appId)); assertNull(result.get(appId), "The streams group could not be deleted as expected"); - assertTrue(service.retrieveInternalTopics(List.of(appId)).isEmpty()); - + assertEquals(3, getInternalTopics(appId).size(), + "The internal topics were deleted, but they shouldn't have been."); } finally { // upgrade back the streams.version to 1 updateStreamsGroupProtocol((short) 1); 
} } + private Set getInternalTopics(String appId) { + try { + Set topics = adminClient.listTopics().names().get(); + return topics.stream() + .filter(topic -> topic.startsWith(appId + "-")) + .filter(topic -> topic.endsWith("-changelog") || topic.endsWith("-repartition")) + .collect(Collectors.toSet()); + } catch (InterruptedException | ExecutionException e) { + throw new RuntimeException(e); + } + } + private void updateStreamsGroupProtocol(short version) { try (Admin admin = cluster.createAdminClient()) { Map updates = Utils.mkMap( diff --git a/tools/src/test/java/org/apache/kafka/tools/streams/ResetStreamsGroupOffsetTest.java b/tools/src/test/java/org/apache/kafka/tools/streams/ResetStreamsGroupOffsetTest.java index 1fed51031d0b0..7552e7fff11c0 100644 --- a/tools/src/test/java/org/apache/kafka/tools/streams/ResetStreamsGroupOffsetTest.java +++ b/tools/src/test/java/org/apache/kafka/tools/streams/ResetStreamsGroupOffsetTest.java @@ -18,9 +18,12 @@ import org.apache.kafka.clients.admin.Admin; import org.apache.kafka.clients.admin.AdminClientConfig; +import org.apache.kafka.clients.admin.GroupListing; +import org.apache.kafka.clients.admin.ListGroupsOptions; import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.errors.UnknownTopicOrPartitionException; import org.apache.kafka.common.serialization.Serdes; import org.apache.kafka.common.serialization.StringSerializer; import org.apache.kafka.common.utils.Exit; @@ -38,6 +41,7 @@ import org.apache.kafka.test.TestUtils; import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; @@ -57,6 +61,7 @@ import java.util.Map; import java.util.Optional; import java.util.Properties; +import java.util.Set; import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; @@ -68,6 +73,7 @@ import static java.util.Arrays.asList; import static java.util.stream.Collectors.toMap; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -104,6 +110,25 @@ private static void createStreamsConfig(String bootstrapServers) { STREAMS_CONFIG.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE_V2); } + @AfterEach + public void deleteTopicsAndGroups() { + try (final Admin adminClient = cluster.createAdminClient()) { + // delete all topics + final Set topics = adminClient.listTopics().names().get(); + adminClient.deleteTopics(topics).all().get(); + // delete all groups + List groupIds = + adminClient.listGroups(ListGroupsOptions.forStreamsGroups().timeoutMs(1000)).all().get() + .stream().map(GroupListing::groupId).toList(); + adminClient.deleteStreamsGroups(groupIds).all().get(); + } catch (final UnknownTopicOrPartitionException ignored) { + } catch (final ExecutionException | InterruptedException e) { + if (!(e.getCause() instanceof UnknownTopicOrPartitionException)) { + throw new RuntimeException(e); + } + } + } + @AfterAll public static void closeCluster() { cluster.stop(); @@ -131,6 +156,22 @@ public void testResetOffsetsWithoutGroupOption() { } } + 
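+ // The tests below cover the new deletion options end to end. Assuming an equivalent
+ // CLI wrapper for this tool (script name and group id here are illustrative), the
+ // executed variant corresponds to:
+ //   kafka-streams-groups.sh --bootstrap-server localhost:9092 --reset-offsets \
+ //     --group my-app --all-input-topics --to-offset 5 --execute --delete-all-internal-topics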
@Test + public void testResetOffsetsWithDeleteInternalTopicsOption() { + final String[] args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--all-groups", "--all-input-topics", "--to-offset", "5", "--delete-all-internal-topics"}; + AtomicBoolean exited = new AtomicBoolean(false); + Exit.setExitProcedure(((statusCode, message) -> { + assertNotEquals(0, statusCode); + assertTrue(message.contains("Option [delete-all-internal-topics] takes [execute] when [reset-offsets] is used")); + exited.set(true); + })); + try { + getStreamsGroupService(args); + } finally { + assertTrue(exited.get()); + } + } + @Test public void testResetOffset() throws Exception { final String appId = generateRandomAppId(); @@ -284,12 +325,14 @@ public void testResetOffset() throws Exception { assertEquals(exp, toOffsetMap(importedOffsets.get(appId))); } - adminClient.deleteTopics(List.of(topic1, topic2)).all().get(); + // assert that the internal topics are not deleted + assertEquals(2, getInternalTopics(appId).size()); } @Test - public void testTopicsWhenResettingOffset() throws Exception { + public void testResetOffsetsWithDeleteSpecifiedInternalTopics() throws Exception { final String appId = generateRandomAppId(); + final String internalTopic = appId + "-aggregated_value-changelog"; final String topic1 = generateRandomTopic(); final String topic2 = generateRandomTopic(); final int numOfPartitions = 2; @@ -298,10 +341,26 @@ public void testTopicsWhenResettingOffset() throws Exception { produceMessagesOnTwoPartitions(RECORD_TOTAL, topic1); produceMessagesOnTwoPartitions(RECORD_TOTAL, topic2); - args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--all-input-topics", "--to-offset", "5"}; - resetOffsetsAndAssertInternalTopicDeletionForDryRunAndExecute(args, appId); + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--all-input-topics", "--execute", "--to-offset", "5", + "--delete-internal-topic", internalTopic + }; - adminClient.deleteTopics(List.of(topic1, topic2)).all().get(); + resetOffsetsAndAssertInternalTopicDeletion(args, appId, internalTopic); + } + + @Test + public void testResetOffsetsWithDeleteAllInternalTopics() throws Exception { + final String appId = generateRandomAppId(); + final String topic1 = generateRandomTopic(); + final String topic2 = generateRandomTopic(); + final int numOfPartitions = 2; + String[] args; + produceConsumeShutdown(appId, topic1, topic2, RECORD_TOTAL * numOfPartitions * 2); + produceMessagesOnTwoPartitions(RECORD_TOTAL, topic1); + produceMessagesOnTwoPartitions(RECORD_TOTAL, topic2); + + args = new String[]{"--bootstrap-server", bootstrapServers, "--reset-offsets", "--group", appId, "--all-input-topics", "--delete-all-internal-topics", "--execute", "--to-offset", "5"}; + resetOffsetsAndAssertInternalTopicDeletion(args, appId); } private void resetForNextTest(String appId, long desiredOffset, String... topics) throws ExecutionException, InterruptedException { @@ -389,23 +448,47 @@ private void resetOffsetsAndAssert(String[] args, AssertCommittedOffsets(appId, topic, expectedCommittedOffset, partitions); } - private void resetOffsetsAndAssertInternalTopicDeletion(String[] args, - String appId) throws InterruptedException { - final boolean executeMode = Arrays.asList(args).contains("--execute"); - List internalTopics; - List allTopics; + private void resetOffsetsAndAssertInternalTopicDeletion(String[] args, String appId, String... 
specifiedInternalTopics) throws InterruptedException { + List<String> specifiedInternalTopicsList = asList(specifiedInternalTopics); + Set<String> allInternalTopics = getInternalTopics(appId); + specifiedInternalTopicsList.forEach(allInternalTopics::remove); + try (StreamsGroupCommand.StreamsGroupService service = getStreamsGroupService(args)) { - internalTopics = service.retrieveInternalTopics(List.of(appId)).get(appId); - allTopics = service.collectAllTopics(appId); service.resetOffsets(); } - // assert that the internal topics are deleted in --execute mode and not in --dry-run mode - allTopics.addAll(List.of("__consumer_offsets", "__transaction_state")); - if (executeMode) { - allTopics.removeAll(internalTopics); + // assert that the internal topics are deleted + if (specifiedInternalTopics.length > 0) { + // re-read the topic list on each poll; a snapshot taken before waiting would never change + TestUtils.waitForCondition( + () -> getInternalTopics(appId).size() == allInternalTopics.size(), + 30_000, "Internal topics were not deleted as expected after reset" + ); + + Set<String> internalTopicsAfterReset = getInternalTopics(appId); + specifiedInternalTopicsList.forEach(topic -> { + assertFalse(internalTopicsAfterReset.contains(topic), + "Internal topic '" + topic + "' was not deleted as expected after reset"); + }); + + } else { + TestUtils.waitForCondition(() -> { + Set<String> internalTopicsAfterReset = getInternalTopics(appId); + return internalTopicsAfterReset.isEmpty(); + }, 30_000, "Internal topics were not deleted after reset"); + } + } + + private Set<String> getInternalTopics(String appId) { + try { + Set<String> topics = adminClient.listTopics().names().get(); + return topics.stream() + .filter(topic -> topic.startsWith(appId + "-")) + .filter(topic -> topic.endsWith("-changelog") || topic.endsWith("-repartition")) + .collect(Collectors.toSet()); + } catch (InterruptedException | ExecutionException e) { + throw new RuntimeException(e); } - cluster.waitForRemainingTopics(30000, allTopics.toArray(new String[0])); } /** @@ -499,13 +582,6 @@ private void resetOffsetsAndAssertForDryRunAndExecute(String[] args, String appId, String topic, expectedOffset, expectedOffset, partitions); } - private void resetOffsetsAndAssertInternalTopicDeletionForDryRunAndExecute(String[] args, - String appId) throws InterruptedException { - resetOffsetsAndAssertInternalTopicDeletion(args, appId); - resetOffsetsAndAssertInternalTopicDeletion(addTo(args, "--dry-run"), appId); - resetOffsetsAndAssertInternalTopicDeletion(addTo(args, "--execute"), appId); - } - private void resetOffsetsAndAssertForDryRunAndExecute(String[] args, String appId, String topic1, From 45fb98ccc7e0cf5f4a5a8153dd0803b70c6d5fbb Mon Sep 17 00:00:00 2001 From: aliehsaeedii Date: Thu, 19 Jun 2025 10:04:22 +0200 Subject: [PATCH 25/26] fix utest --- .../org/apache/kafka/tools/streams/StreamsGroupCommandTest.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/src/test/java/org/apache/kafka/tools/streams/StreamsGroupCommandTest.java b/tools/src/test/java/org/apache/kafka/tools/streams/StreamsGroupCommandTest.java index 5160456781ec4..6f7d199b3bd19 100644 --- a/tools/src/test/java/org/apache/kafka/tools/streams/StreamsGroupCommandTest.java +++ b/tools/src/test/java/org/apache/kafka/tools/streams/StreamsGroupCommandTest.java @@ -377,7 +377,7 @@ public void testRetrieveInternalTopics() { public void testDeleteStreamsGroup() { Admin adminClient = mock(KafkaAdminClient.class); String groupId = "foo-group"; - List args = new ArrayList<>(Arrays.asList("--bootstrap-server", "localhost:9092", "--group", groupId, 
"--delete")); + List args = new ArrayList<>(Arrays.asList("--bootstrap-server", "localhost:9092", "--group", groupId, "--delete", "--delete-all-internal-topics")); DeleteStreamsGroupsResult deleteStreamsGroupsResult = mock(DeleteStreamsGroupsResult.class); when(adminClient.deleteStreamsGroups(eq(List.of(groupId)), any(DeleteStreamsGroupsOptions.class))).thenReturn(deleteStreamsGroupsResult); From 73b54b3150d1102e244c2b067f74384012b91ae4 Mon Sep 17 00:00:00 2001 From: aliehsaeedii Date: Fri, 20 Jun 2025 11:40:16 +0200 Subject: [PATCH 26/26] address reviews --- .../tools/streams/StreamsGroupCommand.java | 26 ++++++++----------- 1 file changed, 11 insertions(+), 15 deletions(-) diff --git a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java index 4b9b7eef2b80e..323f93c8813d7 100644 --- a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java +++ b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java @@ -483,7 +483,7 @@ Map> resetOffsets() { result.put(groupId, resetOffsetsForInactiveGroup(groupId, dryRun)); // delete internal topics if (!dryRun) { - List internalTopics = getInternalTopicsForGroup(groupId); + List internalTopics = getInternalTopicsToBeDeleted(groupId); if (!internalTopics.isEmpty()) { try { adminClient.deleteTopics(internalTopics).all().get(); @@ -518,7 +518,7 @@ Map> resetOffsets() { return result; } - private List getInternalTopicsForGroup(String groupId) { + private List getInternalTopicsToBeDeleted(String groupId) { List internalTopics = new ArrayList<>(); if (opts.options.has(opts.deleteAllInternalTopicsOpt)) { internalTopics = retrieveInternalTopics(List.of(groupId)).get(groupId); @@ -660,19 +660,17 @@ Map deleteGroups() { ? 
new ArrayList<>(listStreamsGroups()) : new ArrayList<>(opts.options.valuesOf(opts.groupOpt)); - final boolean deleteInternalTopics = opts.options.has(opts.deleteAllInternalTopicsOpt); - // pre admin call checks Map failed = preAdminCallChecks(groupIds); groupIds.removeAll(failed.keySet()); Map success = new HashMap<>(); - Map> internalTopics = new HashMap<>(); + Map> internalTopicsToBeDeleted = new HashMap<>(); Map internalTopicsDeletionFailures = new HashMap<>(); if (!groupIds.isEmpty()) { - // retrieve internal topics before deleting groups - if (deleteInternalTopics) { - internalTopics = retrieveInternalTopics(groupIds); + // if needed, retrieve internal topics before deleting groups + if (opts.options.has(opts.deleteAllInternalTopicsOpt)) { + internalTopicsToBeDeleted = retrieveInternalTopics(groupIds); } // delete streams groups Map> groupsToDelete = adminClient.deleteStreamsGroups( @@ -692,7 +690,7 @@ Map deleteGroups() { }); // delete internal topics - internalTopicsDeletionFailures = maybeDeleteInternalTopics(deleteInternalTopics, success, internalTopics); + internalTopicsDeletionFailures = maybeDeleteInternalTopics(success, internalTopicsToBeDeleted); } // display outcome messages based on the results @@ -706,8 +704,8 @@ Map deleteGroups() { System.out.println("\nThese streams groups were deleted successfully: " + "'" + success.keySet().stream().map(Object::toString).collect(Collectors.joining("', '")) + "'."); } } - if (!internalTopics.keySet().isEmpty()) { - printInternalTopicErrors(internalTopicsDeletionFailures, success.keySet(), internalTopics.keySet()); + if (!internalTopicsToBeDeleted.keySet().isEmpty()) { + printInternalTopicErrors(internalTopicsDeletionFailures, success.keySet(), internalTopicsToBeDeleted.keySet()); } // for testing purpose: return all failures, including internal topics deletion failures failed.putAll(success); @@ -715,11 +713,9 @@ Map deleteGroups() { return failed; } - private Map maybeDeleteInternalTopics(boolean deleteInternalTopics, - Map success, - Map> internalTopics) { + private Map maybeDeleteInternalTopics(Map success, Map> internalTopics) { Map internalTopicsDeletionFailures = new HashMap<>(); - if (deleteInternalTopics && !success.isEmpty()) { + if (!internalTopics.isEmpty() && !success.isEmpty()) { for (String groupId : success.keySet()) { List internalTopicsToDelete = internalTopics.get(groupId); if (internalTopicsToDelete != null && !internalTopicsToDelete.isEmpty()) {