Skip to content
This repository was archived by the owner on Nov 8, 2023. It is now read-only.

Commit 7a29c4c

Browse files
committed
Revert to streaming consumer
1 parent 16194f6 commit 7a29c4c

File tree

2 files changed

+57
-32
lines changed

2 files changed

+57
-32
lines changed

src/bin/consumer/asyncnoop.rs

+45-32
Original file line numberDiff line numberDiff line change
@@ -1,18 +1,25 @@
11
use clap::{App, Arg};
22
use log::{debug, error, info};
3-
use rdkafka::config::ClientConfig;
3+
use rdkafka::config::{ClientConfig, RDKafkaLogLevel};
4+
use rdkafka::consumer::stream_consumer::StreamConsumer;
5+
use rdkafka::consumer::CommitMode;
6+
use rdkafka::consumer::Consumer;
47
use rdkafka::producer::FutureProducer;
58
use rdkafka::util::get_rdkafka_version;
6-
use rust_arroyo::backends::kafka::config::KafkaConfig;
7-
use rust_arroyo::backends::kafka::KafkaConsumer;
8-
use rust_arroyo::backends::{AssignmentCallbacks, Consumer};
9+
use rust_arroyo::backends::kafka::create_kafka_message;
10+
use rust_arroyo::backends::AssignmentCallbacks;
11+
use rust_arroyo::processing::strategies::async_noop::build_topic_partitions;
912
use rust_arroyo::processing::strategies::async_noop::AsyncNoopCommit;
13+
use rust_arroyo::processing::strategies::async_noop::CustomContext;
1014
use rust_arroyo::types::{Partition, Topic};
1115
use std::collections::HashMap;
1216
use std::time::Duration;
1317
use std::time::SystemTime;
1418
use tokio::time::timeout;
1519

20+
// A type alias with your custom consumer can be created for convenience.
21+
type LoggingConsumer = StreamConsumer<CustomContext>;
22+
1623
struct EmptyCallbacks {}
1724
impl AssignmentCallbacks for EmptyCallbacks {
1825
fn on_assign(&mut self, _: HashMap<Partition, u64>) {
@@ -30,19 +37,28 @@ async fn consume_and_produce(
3037
dest_topic: &str,
3138
batch_size: usize,
3239
) {
33-
let config = KafkaConfig::new_consumer_config(
34-
vec![brokers.to_string()],
35-
group_id.to_string(),
36-
"earliest".to_string(),
37-
false,
38-
None,
39-
);
40-
let mut consumer = KafkaConsumer::new(config);
40+
let context = CustomContext {};
41+
42+
let consumer: LoggingConsumer = ClientConfig::new()
43+
.set("group.id", group_id)
44+
.set("bootstrap.servers", brokers)
45+
.set("enable.partition.eof", "false")
46+
.set("session.timeout.ms", "6000")
47+
.set("enable.auto.commit", "false")
48+
//.set("statistics.interval.ms", "30000")
49+
.set("auto.offset.reset", "earliest")
50+
.set_log_level(RDKafkaLogLevel::Warning)
51+
.create_with_context(context)
52+
.expect("Consumer creation failed");
53+
54+
consumer
55+
.subscribe(&[source_topic])
56+
.expect("Can't subscribe to specified topics");
57+
4158
let topic = Topic {
4259
name: source_topic.to_string(),
4360
};
4461
let topic_clone = topic.clone();
45-
let _ = consumer.subscribe(&[topic], Box::new(EmptyCallbacks {}));
4662

4763
let producer: FutureProducer = ClientConfig::new()
4864
.set("bootstrap.servers", brokers)
@@ -66,31 +82,28 @@ async fn consume_and_produce(
6682
};
6783
loop {
6884
match timeout(Duration::from_secs(2), consumer.recv()).await {
69-
Ok(result) => {
70-
match result {
71-
Ok(message) => {
72-
match strategy.poll().await {
73-
Some(request) => {
74-
consumer.stage_positions(request.positions).await.unwrap();
75-
consumer.commit_positions().await.unwrap();
76-
//info!("Committed: {:?}", request);
77-
}
78-
None => {}
85+
Ok(result) => match result {
86+
Err(e) => panic!("Kafka error: {}", e),
87+
Ok(m) => {
88+
match strategy.poll().await {
89+
Some(partition_list) => {
90+
let part_list = build_topic_partitions(partition_list);
91+
consumer.commit(&part_list, CommitMode::Sync).unwrap();
92+
info!("Committed: {:?}", part_list);
7993
}
80-
strategy.submit(message).await;
81-
}
82-
Err(e) => {
83-
panic!("Kafka error: {}", e)
94+
None => {}
8495
}
96+
97+
strategy.submit(create_kafka_message(m)).await;
8598
}
86-
}
99+
},
87100
Err(_) => {
88101
error!("timeoout, flushing batch");
89102
match strategy.poll().await {
90-
Some(request) => {
91-
consumer.stage_positions(request.positions).await.unwrap();
92-
consumer.commit_positions().await.unwrap();
93-
//info!("Committed: {:?}", request);
103+
Some(partition_list) => {
104+
let part_list = build_topic_partitions(partition_list);
105+
consumer.commit(&part_list, CommitMode::Sync).unwrap();
106+
info!("Committed: {:?}", part_list);
94107
}
95108
None => {}
96109
}

src/processing/strategies/async_noop.rs

+12
Original file line numberDiff line numberDiff line change
@@ -59,6 +59,18 @@ fn build_commit_request(partitions: TopicPartitionList, topic: Topic) -> CommitR
5959
CommitRequest { positions: ret }
6060
}
6161

62+
pub fn build_topic_partitions(commit_request: CommitRequest) -> TopicPartitionList {
63+
let mut topic_map = HashMap::new();
64+
for (partition, position) in commit_request.positions.iter() {
65+
topic_map.insert(
66+
(partition.topic.name.clone(), partition.index as i32),
67+
Offset::from_raw(position.offset as i64),
68+
);
69+
}
70+
71+
TopicPartitionList::from_topic_map(&topic_map).unwrap()
72+
}
73+
6274
pub struct AsyncNoopCommit {
6375
pub topic: Topic,
6476
pub producer: FutureProducer,

0 commit comments

Comments
 (0)