@@ -24,7 +24,6 @@ use tycho_block_util::state::{RefMcStateHandle, ShardStateStuff};
24
24
use tycho_core:: global_config:: MempoolGlobalConfig ;
25
25
use tycho_executor:: { AccountMeta , PublicLibraryChange , TransactionMeta } ;
26
26
use tycho_network:: PeerId ;
27
- use tycho_util:: metrics:: HistogramGuard ;
28
27
use tycho_util:: { DashMapEntry , FastDashMap , FastHashMap , FastHashSet } ;
29
28
30
29
use super :: do_collate:: work_units:: PrepareMsgGroupsWu ;
@@ -1281,6 +1280,9 @@ pub struct QueueStatisticsWithRemaning {
1281
1280
}
1282
1281
1283
1282
pub struct CumulativeStatistics {
1283
+ /// Cumulative statistics created for this shard. When this shard reads statistics, it decrements `remaining messages`
1284
+ /// Another shard can decrement the remaining statistics only by using `update_processed_to_by_partitions`
1285
+ for_shard : ShardIdent ,
1284
1286
/// Actual processed to info for master and all shards
1285
1287
all_shards_processed_to_by_partitions : FastHashMap < ShardIdent , ( bool , ProcessedToByPartitions ) > ,
1286
1288
@@ -1290,38 +1292,35 @@ pub struct CumulativeStatistics {
1290
1292
1291
1293
/// The final aggregated statistics (across all shards) by partitions.
1292
1294
result : FastHashMap < QueuePartitionIdx , QueueStatisticsWithRemaning > ,
1293
-
1294
- /// A flag indicating that data has changed, and we need to recalculate before returning `result`.
1295
- dirty : bool ,
1296
1295
}
1297
1296
1298
1297
impl CumulativeStatistics {
1299
1298
pub fn new (
1299
+ for_shard : ShardIdent ,
1300
1300
all_shards_processed_to_by_partitions : FastHashMap <
1301
1301
ShardIdent ,
1302
1302
( bool , ProcessedToByPartitions ) ,
1303
1303
> ,
1304
1304
) -> Self {
1305
1305
Self {
1306
+ for_shard,
1306
1307
all_shards_processed_to_by_partitions,
1307
1308
shards_stats_by_partitions : Default :: default ( ) ,
1308
1309
result : Default :: default ( ) ,
1309
- dirty : false ,
1310
1310
}
1311
1311
}
1312
1312
1313
1313
/// Create range and full load statistics and store it
1314
1314
pub fn load < V : InternalMessageValue > (
1315
1315
& mut self ,
1316
1316
mq_adapter : Arc < dyn MessageQueueAdapter < V > > ,
1317
- current_shard : & ShardIdent ,
1318
1317
partitions : & FastHashSet < QueuePartitionIdx > ,
1319
1318
prev_state_gen_lt : Lt ,
1320
1319
mc_state_gen_lt : Lt ,
1321
1320
mc_top_shards_end_lts : & FastHashMap < ShardIdent , Lt > ,
1322
1321
) -> Result < ( ) > {
1323
1322
let ranges = Self :: compute_cumulative_stats_ranges (
1324
- current_shard ,
1323
+ & self . for_shard ,
1325
1324
& self . all_shards_processed_to_by_partitions ,
1326
1325
prev_state_gen_lt,
1327
1326
mc_state_gen_lt,
@@ -1345,7 +1344,6 @@ impl CumulativeStatistics {
1345
1344
partitions : & FastHashSet < QueuePartitionIdx > ,
1346
1345
ranges : Vec < QueueShardBoundedRange > ,
1347
1346
) -> Result < ( ) > {
1348
- self . dirty = true ;
1349
1347
tracing:: trace!(
1350
1348
target: tracing_targets:: COLLATOR ,
1351
1349
"cumulative_stats_partial_ranges: {:?}" ,
@@ -1407,7 +1405,6 @@ impl CumulativeStatistics {
1407
1405
} ) ;
1408
1406
1409
1407
self . all_shards_processed_to_by_partitions = new_pt;
1410
- self . dirty = true ;
1411
1408
}
1412
1409
1413
1410
fn compute_cumulative_stats_ranges (
@@ -1469,15 +1466,23 @@ impl CumulativeStatistics {
1469
1466
}
1470
1467
}
1471
1468
1469
+ let entry = self
1470
+ . result
1471
+ . entry ( partition)
1472
+ . or_insert_with ( || QueueStatisticsWithRemaning {
1473
+ initial_stats : QueueStatistics :: default ( ) ,
1474
+ remaning_stats : ConcurrentQueueStatistics :: default ( ) ,
1475
+ } ) ;
1476
+ entry. initial_stats . append ( & diff_partition_stats) ;
1477
+ entry. remaning_stats . append ( & diff_partition_stats) ;
1478
+
1472
1479
// finally add weeded stats
1473
1480
self . add_diff_partition_stats (
1474
1481
partition,
1475
1482
diff_shard,
1476
1483
diff_max_message,
1477
1484
diff_partition_stats,
1478
1485
) ;
1479
-
1480
- self . dirty = true ;
1481
1486
}
1482
1487
1483
1488
fn add_diff_partition_stats (
@@ -1541,6 +1546,12 @@ impl CumulativeStatistics {
1541
1546
cumulative_stats
1542
1547
. initial_stats
1543
1548
. decrement_for_account ( dst_acc. clone ( ) , * count) ;
1549
+
1550
+ if self . for_shard != dst_shard {
1551
+ cumulative_stats
1552
+ . remaning_stats
1553
+ . decrement_for_account ( dst_acc. clone ( ) , * count) ;
1554
+ }
1544
1555
false
1545
1556
} else {
1546
1557
true
@@ -1571,15 +1582,12 @@ impl CumulativeStatistics {
1571
1582
/// Returns a reference to the aggregated stats by partitions.
1572
1583
/// Statistics are kept up to date incrementally, so no recalculation is needed here.
1573
1584
pub fn result ( & mut self ) -> & FastHashMap < QueuePartitionIdx , QueueStatisticsWithRemaning > {
1574
- self . ensure_finalized ( ) ;
1575
1585
& self . result
1576
1586
}
1577
1587
1578
1588
/// Calc aggregated stats among all partitions.
1579
1589
/// Statistics are kept up to date incrementally, so no recalculation is needed here.
1580
1590
pub fn get_aggregated_result ( & mut self ) -> QueueStatistics {
1581
- self . ensure_finalized ( ) ;
1582
-
1583
1591
let mut res: Option < QueueStatistics > = None ;
1584
1592
for stats in self . result . values ( ) {
1585
1593
if let Some ( aggregated) = res. as_mut ( ) {
@@ -1590,42 +1598,6 @@ impl CumulativeStatistics {
1590
1598
}
1591
1599
res. unwrap_or_default ( )
1592
1600
}
1593
-
1594
- /// A helper function to trigger a recalculation if `dirty` is set.
1595
- fn ensure_finalized ( & mut self ) {
1596
- if self . dirty {
1597
- self . recalculate ( ) ;
1598
- }
1599
- }
1600
-
1601
- /// Clears the existing result and aggregates all data from `shards_statistics`.
1602
- fn recalculate ( & mut self ) {
1603
- let _histogram = HistogramGuard :: begin ( "tycho_do_collate_recalculate_statistics_time" ) ;
1604
- self . result . clear ( ) ;
1605
-
1606
- for shard_stats_by_partitions in self . shards_stats_by_partitions . values ( ) {
1607
- for ( & partition, diffs) in shard_stats_by_partitions {
1608
- let mut partition_stats = AccountStatistics :: new ( ) ;
1609
- for diff_stats in diffs. values ( ) {
1610
- for ( account, & count) in diff_stats {
1611
- * partition_stats. entry ( account. clone ( ) ) . or_default ( ) += count;
1612
- }
1613
- }
1614
- self . result
1615
- . entry ( partition)
1616
- . and_modify ( |stats| {
1617
- stats. initial_stats . append ( & partition_stats) ;
1618
- stats. remaning_stats . append ( & partition_stats) ;
1619
- } )
1620
- . or_insert ( QueueStatisticsWithRemaning {
1621
- initial_stats : QueueStatistics :: with_statistics ( partition_stats. clone ( ) ) ,
1622
- remaning_stats : ConcurrentQueueStatistics :: with_statistics ( partition_stats) ,
1623
- } ) ;
1624
- }
1625
- }
1626
-
1627
- self . dirty = false ;
1628
- }
1629
1601
}
1630
1602
1631
1603
#[ derive( Debug , Default , Clone ) ]
@@ -1634,12 +1606,6 @@ pub struct ConcurrentQueueStatistics {
1634
1606
}
1635
1607
1636
1608
impl ConcurrentQueueStatistics {
1637
- pub fn with_statistics ( statistics : AccountStatistics ) -> Self {
1638
- Self {
1639
- statistics : Arc :: new ( statistics. into_iter ( ) . collect ( ) ) ,
1640
- }
1641
- }
1642
-
1643
1609
pub fn statistics ( & self ) -> & FastDashMap < IntAddr , u64 > {
1644
1610
& self . statistics
1645
1611
}
@@ -1694,7 +1660,6 @@ impl fmt::Debug for CumulativeStatistics {
1694
1660
)
1695
1661
. field ( "shards_stats_by_partitions" , & shards_summary)
1696
1662
. field ( "result" , & format ! ( "{} partitions" , self . result. len( ) ) )
1697
- . field ( "dirty" , & self . dirty )
1698
1663
. finish ( )
1699
1664
}
1700
1665
}
0 commit comments