Skip to content

Commit 4a764d1

Browse files
committed
fix(coprocessor): listener, typos
1 parent 4b0d537 commit 4a764d1

File tree

11 files changed

+35
-32
lines changed

11 files changed

+35
-32
lines changed

listener/crates/listener_core/src/blockchain/evm/sem_evm_rpc_provider.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -92,7 +92,7 @@ impl SemEvmRpcProvider {
9292
// Network policy blackholes (TCP keepalive detects silent drops)
9393
// Load balancer idle timeout alignment (10 s < any cloud LB timeout)
9494
let http_client = alloy::transports::http::Client::builder()
95-
.timeout(Duration::from_secs(25)) // Must accomodate slow providers with high response time
95+
.timeout(Duration::from_secs(25)) // Must accommodate slow providers with high response time
9696
.connect_timeout(Duration::from_secs(3)) // fast failure if the host is unreachable
9797
.pool_idle_timeout(Duration::from_secs(10)) // aggressive eviction of unused request.
9898
.pool_max_idle_per_host(max_concurrent_requests) // Match the semaphore size setting

listener/crates/listener_core/src/core/evm_listener.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -733,7 +733,7 @@ impl EvmListener {
733733
&self.repositories,
734734
&block_n,
735735
chain_id_u64,
736-
// This block is comming from the live flow, and detect the reorg processing, but was issued from the live flow.
736+
// This block is coming from the live flow, and detect the reorg processing, but was issued from the live flow.
737737
BlockFlow::Live,
738738
&self.broker,
739739
&self.event_publisher,

listener/crates/listener_core/src/core/evm_listener.rs.orig

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -439,10 +439,10 @@ impl EvmListener {
439439
/// to `EvmListenerError::InvariantViolation`.
440440
pub async fn fetch_blocks_and_run_cursor(&self) -> Result<CursorResult, EvmListenerError> {
441441
// Pre verification:
442-
// The prefetch count setted up to 1 allows us to simply verify emptiness of the queue/stream, to ensure there is no possibility of overlaping
442+
// The prefetch count set up to 1 allows us to simply verify emptiness of the queue/stream, to ensure there is no possibility of overlapping
443443
// messages.
444-
// If there is another message sitting in the queue, we can basically skip the message and avoid consuming it, by directly acknoledging.
445-
// TODO: This is a first security, but not complient with horizontal scaling and needs to be locked.
444+
// If there is another message sitting in the queue, we can basically skip the message and avoid consuming it, by directly acknowledging.
445+
// TODO: This is a first security, but not compliant with horizontal scaling and needs to be locked.
446446
let fetch_topic = Topic::new(routing::FETCH_NEW_BLOCKS)
447447
.with_namespace(chain_id_to_namespace(self.chain_id));
448448
let task_lock = self
@@ -744,7 +744,7 @@ impl EvmListener {
744744
let chain_id_u64 = self.repositories.chain_id() as u64;
745745

746746
// pre-check: Skipping duplicate messages.
747-
// TODO: This is a first security, but not complient with horizontal scaling and needs to be locked.
747+
// TODO: This is a first security, but not compliant with horizontal scaling and needs to be locked.
748748
let reorg_topic = Topic::new(routing::BACKTRACK_REORG)
749749
.with_namespace(chain_id_to_namespace(self.chain_id));
750750
let task_lock = self
@@ -786,7 +786,7 @@ impl EvmListener {
786786
&self.repositories,
787787
&block_n,
788788
chain_id_u64,
789-
// This block is comming from the live flow, and detect the reorg processing, but was issued from the live flow.
789+
// This block is coming from the live flow, and detect the reorg processing, but was issued from the live flow.
790790
BlockFlow::Live,
791791
&self.broker,
792792
&self.event_publisher,

listener/crates/listener_core/src/main.rs

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -140,7 +140,7 @@ async fn main() {
140140
) {
141141
Ok(provider) => provider,
142142
Err(e) => {
143-
error!("Could not instanciate the semaphore provider: {}", e);
143+
error!("Could not instantiate the semaphore provider: {}", e);
144144
process::exit(1);
145145
}
146146
};
@@ -475,7 +475,7 @@ async fn main() {
475475

476476
// let consumer_lib_handler_test =
477477
// AsyncHandlerPayloadOnly::new(move |event: BlockPayload| async move {
478-
// // println!("Consumer recieved event: {}", event);
478+
// // println!("Consumer received event: {}", event);
479479
// info!("GETTING EVENT FROM BLOCK: {}", event.block_number);
480480
// Ok::<(), EvmListenerError>(())
481481
// });

listener/crates/listener_core/src/main.rs.orig

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -114,7 +114,7 @@ async fn main() {
114114
) {
115115
Ok(provider) => provider,
116116
Err(e) => {
117-
error!("Could not instanciate the semaphore provider: {}", e);
117+
error!("Could not instantiate the semaphore provider: {}", e);
118118
process::exit(1);
119119
}
120120
};
@@ -446,7 +446,7 @@ async fn main() {
446446

447447
// let consumer_lib_handler_test =
448448
// AsyncHandlerPayloadOnly::new(move |event: BlockPayload| async move {
449-
// println!("Consumer recieved event: {}", event);
449+
// println!("Consumer received event: {}", event);
450450
// Ok::<(), EvmListenerError>(())
451451
// });
452452

listener/crates/shared/broker/src/amqp/consumer.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -290,7 +290,7 @@ impl RmqConsumer {
290290
}
291291
HandlerOutcome::Nack => {
292292
if let Some(b) = cb { b.record_success(); }
293-
debug!("Handler voluntarily yielded, requeueing at tail of main queue");
293+
debug!("Handler voluntarily yielded, requeuing at tail of main queue");
294294
result.delivery
295295
.nack(BasicNackOptions { requeue: true, ..Default::default() })
296296
.await

listener/crates/shared/broker/src/redis/consumer.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1101,7 +1101,7 @@ impl RedisConsumer {
11011101
///
11021102
/// Non-blocking: does NOT use Redis BLOCK argument. BLOCK commands on
11031103
/// `ConnectionManager` (which wraps `MultiplexedConnection`) are
1104-
/// architecturally broken — a blocking call monopolises the shared TCP
1104+
/// architecturally broken — a blocking call monopolizes the shared TCP
11051105
/// connection and hangs indefinitely on dead sockets (redis-rs #1236).
11061106
///
11071107
/// Callers use client-side `sleep()` as the polling interval instead.

listener/crates/shared/broker/tests/redis_e2e.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -684,7 +684,7 @@ async fn test_circuit_breaker_halts_consumption() {
684684
// Without a circuit breaker, the message remains in retry circulation.
685685
//
686686
// With CB (threshold=2, cooldown=5 s) the consumer pauses after 2 failures,
687-
// preventing any further XREADGROUP reads. The ClaimSweeper is neutralised by a
687+
// preventing any further XREADGROUP reads. The ClaimSweeper is neutralized by a
688688
// very long claim_min_idle (60 s) so it cannot accumulate delivery counts within
689689
// the 3 s observation window — keeping the dead stream empty.
690690
#[tokio::test]

listener/docs/library_notifier.md

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@ Integrate event consuming for specific logs and filters for zama components, and
66

77
## Logic:
88

9-
This component consume blocks, transasctions, receipts from the different queues declared on rabbitmq, checks into its table to see if there is relevent filters or abi to watch, or even "from" or "to" sources if we need to watch for some transactions, register logs in a table, and forward to the internal logic of the components that need logs.
9+
This component consumes blocks, transactions, and receipts from the different queues declared on rabbitmq, checks its table to see if there are relevant filters or ABIs to watch (or even "from" or "to" sources if we need to watch for some transactions), registers logs in a table, and forwards them to the internal logic of the components that need logs.
1010

1111
Basically, the library is a receipt parser.
1212
You register a watcher, and match the current watcher against the receipt to decide whether we need to process an event.
@@ -37,7 +37,7 @@ For inspiration regarding this library, There is an existing implementation for
3737
#### Minimal features:
3838

3939
- Persist block height and resilient to failure mode.
40-
- Multichain by design (consuming multuple queues (blocks, transactions with receipts -> e.g logs) for each networks).
40+
- Multichain by design (consuming multiple queues (blocks, transactions with receipts -> e.g logs) for each networks).
4141
- Should consume all events even if they are not used (or rmq memory will grow).
4242
- Declare notifiers for dynamical events ABI
4343
- Store log watchers types into a postgres.
@@ -49,8 +49,8 @@ For inspiration regarding this library, There is an existing implementation for
4949
- Declare multiple watcher with a number of block confirmations if a block confirmation number is required (RPC url could be required for this). (e.g finality, safe or n confirmations blocks) based on events.
5050
- Ability to be aware of new available chain from rabbitmq.
5151
- different types of watchers (logs, tx)
52-
- OPTIONNAL: Cancel reorged events.
53-
- OPTIONNAL: Replay past blocks (should not need this with rmq, since its queueing messages).
52+
- OPTIONAL: Cancel reorged events.
53+
- OPTIONAL: Replay past blocks (should not need this with rmq, since its queuing messages).
5454
- Check altogether if problems with duplicate logs, and how to manage them in the zama internals (could be handled optionally) To get a unicity regarding logs and handle deduplication, we can if needed apply a semantic hash regarding log.
5555
- Metrics
5656
- Alerting.
@@ -64,7 +64,7 @@ For inspiration regarding this library, There is an existing implementation for
6464
- table 1: watcher
6565
- uuid, chainId, number of conf block ?, ABI, watcher type (tx, contract)
6666
- table 2: logs
67-
- uuid, watcher_uuid, block_number, released (TRUE, FALSE), log (deserialised or not), UNCLE? (Not mendatory if leverage on block confirmations)
67+
- uuid, watcher_uuid, block_number, released (TRUE, FALSE), log (deserialized or not), UNCLE? (Not mandatory if leveraging block confirmations)
6868

6969
## Schemes:
7070

listener/docs/listener_core.md

Lines changed: 13 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -14,18 +14,18 @@
1414
- Counter-intuitively, it is always the fresher information that will bring us the truth, especially regarding the past.
1515
- Transaction receipts contains all the logs
1616
- ReceiptRoot calculation, and block hash calculation ensure there is no missing logs for a given block.
17-
- Zero websocket, not resilent.
17+
- Zero websocket, not resilient.
1818

1919
## Goal
2020

21-
The goal of this core algorithm is to fetch (http polling), blocks, transactions, receipt, by polling, handle reorogs properly by checking hash and parent hash, fetch new informations if needed to be consistent and aware of canonical chain and broadcast blocks, and transaction to the message broker for the library to be aware of new events.
21+
The goal of this core algorithm is to fetch blocks, transactions, and receipts by http polling, handle reorgs properly by checking hash and parent hash, fetch new information if needed to stay consistent with and aware of the canonical chain, and broadcast blocks and transactions to the message broker so the library is aware of new events.
2222

2323
## Logic / Algorithms
2424

2525
### Algorithm v1: Sequential poller and reorg checker
2626

2727
This is a descriptive of a basic algorithm, which could be sufficient with chains that produces blocks in more time than an http call duration.
28-
This algorithm is sequential, and is just refered here for knowledge.
28+
This algorithm is sequential, and is just referred here for knowledge.
2929

3030
If you need access to an existing implementation of this algorithm, ask and I will share you the implementation.
3131

@@ -35,8 +35,8 @@ This algorithm leverages mostly on database, to perform checks, states updates,
3535
2. we register block, transactions of this block, and associated receipt.
3636
1. The receipt contains all the logs.
3737
2. We broadcast the block, and the transactions with receipts to given queues with chainId, to get almost real time performance, for being consumed and filtered by the library notifier over abi filter and contract address
38-
3. we compare current block parent hash, and previous block parent hash to detect if a reorg occured.
39-
1. if it matches, we go back to the begining of the algorithm.
38+
3. we compare current block parent hash, and previous block parent hash to detect if a reorg occurred.
39+
1. if it matches, we go back to the beginning of the algorithm.
4040
2. if it didn't match: Reorg is detected.
4141
1. we fetch one by one all the previous blocks by hash, we broadcast events in the same fashion we did previously. (BACKTRACKING)
4242
2. pass the other ones to UNCLES status.
@@ -45,7 +45,7 @@ This algorithm leverages mostly on database, to perform checks, states updates,
4545

4646
### Algorithm v2: Cursor Algorithm
4747

48-
The major flaw with the v1 iterative poller algorithm, is the block production time for faster chains, such as Arbitrum, Monad, or even Solana later could be fater than a single http response call, database operations, and network time calls if levraging on rabbitmq to trigger block fetch and polling operations, cumulated operations could be more than 100/200ms only in average time. It does not keep up with chain with a smaller block time duration.
48+
The major flaw with the v1 iterative poller algorithm is block production time: for faster chains such as Arbitrum, Monad, or even Solana later, blocks could be produced faster than a single http response call. Database operations and network time calls — if leveraging rabbitmq to trigger block fetch and polling operations — could cumulatively exceed 100/200ms on average. It does not keep up with chains with a smaller block time duration.
4949
Also, if later a full chain indexer is needed, this is impossible to leverage on the first algorithm.
5050

5151
Here is the proposed algorithm to address this flaw.
@@ -55,10 +55,10 @@ Here is the proposed algorithm to address this flaw.
5555
Resolving the http latency, and ensure no event is missed.
5656

5757
1. We calculate a block range:
58-
1. `min(blockHeight - currentRegisterdBlock, maxParallelBlockFetch + currentRegisterdBlock)`
58+
1. `min(blockHeight - currentRegisteredBlock, maxParallelBlockFetch + currentRegisteredBlock)`
5959
2. or range given from an order to fetch the next block.
60-
2. We spawn parallel task to fetch blocks (http polling), and register them in an in memory datastructure (slots for new blocks). And we fetch receipt for thoses blocks (strategy pattern could be required for diffrernt chains implems (`eth_getBlockReceipts` or `eth_getTransactionReceipt`for each trasnaction))
61-
3. Optionnal: we recompute block hash: The rationale behind this, calculate receiptRoot and then block hash from receipt root and all other headers: this ensure that there is no inconsistency in receipts, hence logs contained in the receipts.
60+
2. We spawn parallel tasks to fetch blocks (http polling), and register them in an in-memory data structure (slots for new blocks). And we fetch receipts for those blocks (a strategy pattern could be required for different chain implementations (`eth_getBlockReceipts`, or `eth_getTransactionReceipt` for each transaction))
61+
3. Optional: we recompute block hash: The rationale behind this, calculate receiptRoot and then block hash from receipt root and all other headers: this ensure that there is no inconsistency in receipts, hence logs contained in the receipts.
6262

6363
#### task two: cursor, reorg check and event broadcaster
6464

@@ -78,14 +78,14 @@ Resolving the http latency, and ensure no event is missed.
7878
- Event driven system to react to multiple events for the algorithm described above.
7979
- strategy pattern (handling chains that doesn't support `eth_getBlockReceipts` method, and solana later)
8080
- algorithm v2 implementation with eth_getBlockReceipt first.
81-
- tables to store minimal metadata (blocks, transactions and receipts) with status (CANNONICAL, UNCLE).
81+
- tables to store minimal metadata (blocks, transactions and receipts) with status (CANONICAL, UNCLE).
8282
- Sql cleaner feature.
83-
- OPTIONNAL: Finalized status.
83+
- OPTIONAL: Finalized status.
8484
- Block computer.
85-
- OPTIONNAL: data storage layer no-sql or S3.
85+
- OPTIONAL: data storage layer no-sql or S3.
8686
- push rabbitmq messages.
8787

88-
## Additionnal:
88+
## Additional:
8989

9090

9191

0 commit comments

Comments
 (0)