|
1 | 1 | use super::{ |
2 | 2 | block_reward_hbbft::BlockRewardContract, |
3 | | - hbbft_early_epoch_end_manager::HbbftEarlyEpochEndManager, |
| 3 | + hbbft_early_epoch_end_manager::HbbftEarlyEpochEndManager, hbbft_engine_cache::HbbftEngineCache, |
4 | 4 | }; |
5 | 5 | use crate::{ |
6 | 6 | client::BlockChainClient, |
@@ -92,6 +92,7 @@ pub struct HoneyBadgerBFT { |
92 | 92 | peers_management: Mutex<HbbftPeersManagement>, |
93 | 93 | current_minimum_gas_price: Mutex<Option<U256>>, |
94 | 94 | early_epoch_manager: Mutex<Option<HbbftEarlyEpochEndManager>>, |
| 95 | + hbbft_engine_cache: Mutex<HbbftEngineCache>, |
95 | 96 | } |
96 | 97 |
|
97 | 98 | struct TransitionHandler { |
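The new hbbft_engine_cache field keeps the contract-derived staking and availability state behind a Mutex, so the hot paths further down no longer query the contracts themselves. The HbbftEngineCache module is not part of this diff; judging only from its call sites here (new(), refresh_cache(..), is_staked(), is_available()), its public surface plausibly looks like the minimal sketch below. Field names, import paths, and the String error type are assumptions, not the actual implementation; refresh_cache is sketched separately at its call site further down.

// Hypothetical sketch only: the real hbbft_engine_cache.rs is not shown in
// this diff. Import paths and field names are guesses made to keep the
// sketch self-contained.
use crate::client::traits::EngineClient;
use ethereum_types::Address;

pub struct HbbftEngineCache {
    is_staked: bool,
    is_available: bool,
}

impl HbbftEngineCache {
    pub fn new() -> Self {
        HbbftEngineCache {
            is_staked: false,
            is_available: false,
        }
    }

    /// Last cached result of the "is the signer's pool staked?" lookup.
    pub fn is_staked(&self) -> bool {
        self.is_staked
    }

    /// Last cached result of the "is the signer flagged as available?" lookup.
    pub fn is_available(&self) -> bool {
        self.is_available
    }
}

Caching these flags also removes two signer/client read locks from the timer path, which the removed TODO further down in this diff called out as a deadlock risk.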
@@ -360,45 +361,31 @@ impl IoHandler<()> for TransitionHandler { |
360 | 361 |
|
361 | 362 | debug!(target: "consensus", "Honey Badger check for unavailability shutdown."); |
362 | 363 |
|
363 | | - match self.engine.is_staked() { |
364 | | - Ok(is_stacked) => { |
365 | | - if is_stacked { |
366 | | - debug!(target: "consensus", "is_staked: {}", is_stacked); |
367 | | - match self.engine.is_available() { |
368 | | - Ok(is_available) => { |
369 | | - if !is_available { |
370 | | - warn!(target: "consensus", "Initiating Shutdown: Honey Badger Consensus detected that this Node has been flagged as unavailable, while it should be available."); |
371 | | - |
372 | | - if let Some(ref weak) = *self.client.read() { |
373 | | - if let Some(c) = weak.upgrade() { |
374 | | - if let Some(id) = c.block_number(BlockId::Latest) { |
375 | | - warn!(target: "consensus", "BlockID: {id}"); |
376 | | - } |
377 | | - } |
378 | | - } |
| 364 | + let is_staked = self.engine.is_staked(); |
| 365 | + if is_staked { |
| 366 | + debug!(target: "consensus", "We are staked!"); |
| 367 | + let is_available = self.engine.is_available(); |
| 368 | + if !is_available { |
| 369 | + warn!(target: "consensus", "Initiating Shutdown: Honey Badger Consensus detected that this Node has been flagged as unavailable, while it should be available."); |
| 370 | + |
| 371 | + if let Some(ref weak) = *self.client.read() { |
| 372 | + if let Some(c) = weak.upgrade() { |
| 373 | + if let Some(id) = c.block_number(BlockId::Latest) { |
| 374 | + warn!(target: "consensus", "BlockID: {id}"); |
| 375 | + } |
| 376 | + } |
| 377 | + } |
379 | 378 |
|
380 | | - let id: usize = std::process::id() as usize; |
381 | | - let thread_id = std::thread::current().id(); |
382 | | - info!(target: "engine", "Waiting for Signaling shutdown to process ID: {id} thread: {:?}", thread_id); |
| 379 | + let id: usize = std::process::id() as usize; |
| 380 | + let thread_id = std::thread::current().id(); |
| 381 | + info!(target: "engine", "Waiting for Signaling shutdown to process ID: {id} thread: {:?}", thread_id); |
383 | 382 |
|
384 | | - if let Some(ref weak) = *self.client.read() { |
385 | | - if let Some(client) = weak.upgrade() { |
386 | | - info!(target: "engine", "demanding shutdown from hbbft engine."); |
387 | | - client.demand_shutdown(); |
388 | | - } |
389 | | - } |
390 | | - } |
391 | | - // if the node is available, everythign is fine! |
392 | | - } |
393 | | - Err(error) => { |
394 | | - warn!(target: "consensus", "Could not query Honey Badger check for unavailability shutdown. {:?}", error); |
395 | | - } |
| 383 | + if let Some(ref weak) = *self.client.read() { |
| 384 | + if let Some(client) = weak.upgrade() { |
| 385 | + info!(target: "engine", "demanding shutdown from hbbft engine."); |
| 386 | + client.demand_shutdown(); |
396 | 387 | } |
397 | 388 | } |
398 | | - // else: just a regular node. |
399 | | - } |
400 | | - Err(error) => { |
401 | | - warn!(target: "consensus", "Could not query Honey Badger check if validator is staked. {:?}", error); |
402 | 389 | } |
403 | 390 | } |
404 | 391 | } else if timer == ENGINE_DELAYED_UNITL_SYNCED_TOKEN { |
@@ -451,6 +438,7 @@ impl HoneyBadgerBFT { |
451 | 438 | peers_management: Mutex::new(HbbftPeersManagement::new()), |
452 | 439 | current_minimum_gas_price: Mutex::new(None), |
453 | 440 | early_epoch_manager: Mutex::new(None), |
| 441 | + hbbft_engine_cache: Mutex::new(HbbftEngineCache::new()), |
454 | 442 | }); |
455 | 443 |
|
456 | 444 | if !engine.params.is_unit_test.unwrap_or(false) { |
@@ -1014,6 +1002,15 @@ impl HoneyBadgerBFT { |
1014 | 1002 | } |
1015 | 1003 | }; |
1016 | 1004 |
|
| 1005 | + let engine_client = client_arc.as_ref(); |
| 1006 | + if let Err(err) = self |
| 1007 | + .hbbft_engine_cache |
| 1008 | + .lock() |
| 1009 | + .refresh_cache(mining_address, engine_client) |
| 1010 | + { |
| 1011 | + trace!(target: "engine", "do_validator_engine_actions: data could not be updated, follow-up tasks might fail: {:?}", err); |
| 1012 | + } |
| 1013 | + |
1017 | 1014 | let engine_client = client_arc.deref(); |
1018 | 1015 |
|
1019 | 1016 | let block_chain_client = match engine_client.as_full_client() { |
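The refresh above is best-effort: a failure is only traced and the previously cached values keep being served. A plausible refresh_cache body can be pieced together from the contract helpers that the removed is_staked()/is_available() implementations further down used to call on every invocation. The following continues the sketch from the struct definition above and is an assumption, not the real module.

impl HbbftEngineCache {
    // Sketch continued from above; errors are flattened into String so the
    // caller can just log them with {:?}, as the call site in this diff does.
    pub fn refresh_cache(
        &mut self,
        mining_address: Address,
        client: &dyn EngineClient,
    ) -> Result<(), String> {
        if mining_address.is_zero() {
            // No signer configured: neither available nor staked.
            self.is_staked = false;
            self.is_available = false;
            return Ok(());
        }

        // Availability: a non-zero "available since" value means available.
        let available_since =
            super::contracts::validator_set::get_validator_available_since(
                client,
                &mining_address,
            )
            .map_err(|e| format!("get_validator_available_since: {:?}", e))?;
        self.is_available = !available_since.is_zero();

        // Staking: resolve the pool, then compare its stake with the minimum.
        let staking_address =
            super::contracts::validator_set::staking_by_mining_address(
                client,
                &mining_address,
            )
            .map_err(|e| format!("staking_by_mining_address: {:?}", e))?;

        self.is_staked = if staking_address.is_zero() {
            // No pool registered for this mining address.
            false
        } else {
            let stake_amount = super::contracts::staking::stake_amount(
                client,
                &staking_address,
                &staking_address,
            )
            .map_err(|e| format!("stake_amount: {:?}", e))?;
            let min_stake = super::contracts::staking::candidate_min_stake(client)
                .map_err(|e| format!("candidate_min_stake: {:?}", e))?;
            stake_amount >= min_stake
        };

        Ok(())
    }
}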
@@ -1261,114 +1258,14 @@ impl HoneyBadgerBFT { |
1261 | 1258 | } |
1262 | 1259 | } |
1263 | 1260 |
|
1264 | | - /** returns if the signer of hbbft is tracked as available in the hbbft contracts. NOTE:Low Performance.*/ |
1265 | | - pub fn is_available(&self) -> Result<bool, Error> { |
1266 | | - match self.signer.read().as_ref() { |
1267 | | - Some(signer) => { |
1268 | | - match self.client_arc() { |
1269 | | - Some(client) => { |
1270 | | - let engine_client = client.deref(); |
1271 | | - let mining_address = signer.address(); |
1272 | | - |
1273 | | - if mining_address.is_zero() { |
1274 | | - debug!(target: "consensus", "is_available: not available because mining address is zero: "); |
1275 | | - return Ok(false); |
1276 | | - } |
1277 | | - match super::contracts::validator_set::get_validator_available_since( |
1278 | | - engine_client, |
1279 | | - &mining_address, |
1280 | | - ) { |
1281 | | - Ok(available_since) => { |
1282 | | - debug!(target: "consensus", "available_since: {}", available_since); |
1283 | | - return Ok(!available_since.is_zero()); |
1284 | | - } |
1285 | | - Err(err) => { |
1286 | | - warn!(target: "consensus", "Error get get_validator_available_since: ! {:?}", err); |
1287 | | - } |
1288 | | - } |
1289 | | - } |
1290 | | - None => { |
1291 | | - // warn!("Could not retrieve address for writing availability transaction."); |
1292 | | - warn!(target: "consensus", "is_available: could not get engine client"); |
1293 | | - } |
1294 | | - } |
1295 | | - } |
1296 | | - None => {} |
1297 | | - } |
1298 | | - return Ok(false); |
| 1261 | + /** Returns whether the hbbft signer is tracked as available in the hbbft contracts. */ |
| 1262 | + pub fn is_available(&self) -> bool { |
| 1263 | + self.hbbft_engine_cache.lock().is_available() |
1299 | 1264 | } |
1300 | 1265 |
|
1301 | 1266 | /** Returns whether the hbbft signer is staked. */ |
1302 | | - pub fn is_staked(&self) -> Result<bool, Error> { |
1303 | | - // is the configured validator stacked ?? |
1304 | | - |
1305 | | - // TODO: improvement: |
1306 | | - // since a signer address can not change after boot, |
1307 | | - // we can just cash the value |
1308 | | - // so we don't need a read lock here, |
1309 | | - // getting the numbers of required read locks down (deadlock risk) |
1310 | | - // and improving the performance. |
1311 | | - |
1312 | | - match self.signer.read().as_ref() { |
1313 | | - Some(signer) => { |
1314 | | - match self.client_arc() { |
1315 | | - Some(client) => { |
1316 | | - let engine_client = client.deref(); |
1317 | | - let mining_address = signer.address(); |
1318 | | - |
1319 | | - if mining_address.is_zero() { |
1320 | | - return Ok(false); |
1321 | | - } |
1322 | | - |
1323 | | - match super::contracts::validator_set::staking_by_mining_address( |
1324 | | - engine_client, |
1325 | | - &mining_address, |
1326 | | - ) { |
1327 | | - Ok(staking_address) => { |
1328 | | - // if there is no pool for this validator defined, we know that |
1329 | | - if staking_address.is_zero() { |
1330 | | - return Ok(false); |
1331 | | - } |
1332 | | - match super::contracts::staking::stake_amount( |
1333 | | - engine_client, |
1334 | | - &staking_address, |
1335 | | - &staking_address, |
1336 | | - ) { |
1337 | | - Ok(stake_amount) => { |
1338 | | - debug!(target: "consensus", "stake_amount: {}", stake_amount); |
1339 | | - |
1340 | | - // we need to check if the pool stake amount is >= minimum stake |
1341 | | - match super::contracts::staking::candidate_min_stake( |
1342 | | - engine_client, |
1343 | | - ) { |
1344 | | - Ok(min_stake) => { |
1345 | | - debug!(target: "consensus", "min_stake: {}", min_stake); |
1346 | | - return Ok(stake_amount.ge(&min_stake)); |
1347 | | - } |
1348 | | - Err(err) => { |
1349 | | - error!(target: "consensus", "Error get candidate_min_stake: ! {:?}", err); |
1350 | | - } |
1351 | | - } |
1352 | | - } |
1353 | | - Err(err) => { |
1354 | | - warn!(target: "consensus", "Error get stake_amount: ! {:?}", err); |
1355 | | - } |
1356 | | - } |
1357 | | - } |
1358 | | - Err(err) => { |
1359 | | - warn!(target: "consensus", "Error get staking_by_mining_address: ! {:?}", err); |
1360 | | - } |
1361 | | - } |
1362 | | - } |
1363 | | - None => { |
1364 | | - // warn!("Could not retrieve address for writing availability transaction."); |
1365 | | - warn!(target: "consensus", "could not get engine client"); |
1366 | | - } |
1367 | | - } |
1368 | | - } |
1369 | | - None => {} |
1370 | | - } |
1371 | | - return Ok(false); |
| 1267 | + pub fn is_staked(&self) -> bool { |
| 1268 | + self.hbbft_engine_cache.lock().is_staked() |
1372 | 1269 | } |
1373 | 1270 |
|
1374 | 1271 | fn start_hbbft_epoch_if_ready(&self) { |
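Behaviourally, the removed methods fell back to Ok(false) whenever a contract lookup failed; with the cache, the same "assume false until proven otherwise" default would live in new(), and errors surface only as the trace at refresh time. A tiny hypothetical test for the sketched cache above, under that assumption:

#[cfg(test)]
mod hbbft_engine_cache_sketch_tests {
    use super::HbbftEngineCache;

    #[test]
    fn defaults_to_not_staked_and_not_available() {
        // Assumption: before the first successful refresh_cache() the cache
        // reports false for both flags, mirroring the Ok(false) fallback of
        // the removed is_staked()/is_available() implementations.
        let cache = HbbftEngineCache::new();
        assert!(!cache.is_staked());
        assert!(!cache.is_available());
    }
}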
@@ -1788,6 +1685,7 @@ impl Engine<EthereumMachine> for HoneyBadgerBFT { |
1788 | 1685 | // note: this is by design not part of the PrometheusMetrics trait, |
1789 | 1686 | // it is part of the Engine trait and does nothing by default. |
1790 | 1687 | fn prometheus_metrics(&self, registry: &mut stats::PrometheusRegistry) { |
| 1688 | + let is_staked = self.is_staked(); |
1791 | 1689 | self.hbbft_message_dispatcher.prometheus_metrics(registry); |
1792 | 1690 | if let Some(early_epoch_manager_option) = self |
1793 | 1691 | .early_epoch_manager |
|
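The diff is cut off here, so the use of the freshly read is_staked flag inside prometheus_metrics is not visible. Presumably it is exported next to the existing hbbft metrics; a purely hypothetical export, assuming stats::PrometheusRegistry exposes a register_gauge(name, help, value) helper and with a made-up metric name, might look roughly like this:

// Hypothetical continuation, not taken from the actual commit.
registry.register_gauge(
    "hbbft_validator_staked",
    "1 if the configured hbbft signer currently meets the minimum stake",
    if is_staked { 1 } else { 0 },
);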