Commit eda3ac5

refactored fetching of data for decision making for validator actions:
instead of fetching the data one by one, we store it in a Mutex-guarded cache and hold only short locks. This makes it feasible to access the (computationally expensive) data more often, for example from the Grafana interface.
1 parent 6e111d5 commit eda3ac5
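
The change boils down to the pattern sketched below: compute the expensive values without holding any lock, then take the Mutex only long enough to copy a flag out or to swap fresh data in. This is a minimal, self-contained sketch; the names CachedData and Cache are illustrative stand-ins, not the types added by this commit (those are HbbftEngineCacheData and HbbftEngineCache in the diff below).

    use parking_lot::Mutex;

    // Illustrative stand-in for the cached decision data.
    #[derive(Clone, Default)]
    struct CachedData {
        is_staked: bool,
        is_available: bool,
    }

    struct Cache {
        data: Mutex<CachedData>,
    }

    impl Cache {
        // Readers hold the lock only long enough to copy out a bool.
        fn is_staked(&self) -> bool {
            self.data.lock().is_staked
        }

        // The expensive work (e.g. contract queries) happens before this call,
        // outside the lock; the lock is held only for the final copy.
        fn publish(&self, fresh: CachedData) {
            self.data.lock().clone_from(&fresh);
        }
    }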

File tree

3 files changed: +196 -141 lines changed


crates/ethcore/src/engines/hbbft/hbbft_engine.rs

Lines changed: 39 additions & 141 deletions
@@ -1,6 +1,6 @@
 use super::{
     block_reward_hbbft::BlockRewardContract,
-    hbbft_early_epoch_end_manager::HbbftEarlyEpochEndManager,
+    hbbft_early_epoch_end_manager::HbbftEarlyEpochEndManager, hbbft_engine_cache::HbbftEngineCache,
 };
 use crate::{
     client::BlockChainClient,
@@ -92,6 +92,7 @@ pub struct HoneyBadgerBFT {
     peers_management: Mutex<HbbftPeersManagement>,
     current_minimum_gas_price: Mutex<Option<U256>>,
     early_epoch_manager: Mutex<Option<HbbftEarlyEpochEndManager>>,
+    hbbft_engine_cache: Mutex<HbbftEngineCache>,
 }

 struct TransitionHandler {
@@ -360,45 +361,31 @@ impl IoHandler<()> for TransitionHandler {

             debug!(target: "consensus", "Honey Badger check for unavailability shutdown.");

-            match self.engine.is_staked() {
-                Ok(is_stacked) => {
-                    if is_stacked {
-                        debug!(target: "consensus", "is_staked: {}", is_stacked);
-                        match self.engine.is_available() {
-                            Ok(is_available) => {
-                                if !is_available {
-                                    warn!(target: "consensus", "Initiating Shutdown: Honey Badger Consensus detected that this Node has been flagged as unavailable, while it should be available.");
-
-                                    if let Some(ref weak) = *self.client.read() {
-                                        if let Some(c) = weak.upgrade() {
-                                            if let Some(id) = c.block_number(BlockId::Latest) {
-                                                warn!(target: "consensus", "BlockID: {id}");
-                                            }
-                                        }
-                                    }
+            let is_staked = self.engine.is_staked();
+            if is_staked {
+                debug!(target: "consensus", "We are staked!");
+                let is_available = self.engine.is_available();
+                if !is_available {
+                    warn!(target: "consensus", "Initiating Shutdown: Honey Badger Consensus detected that this Node has been flagged as unavailable, while it should be available.");
+
+                    if let Some(ref weak) = *self.client.read() {
+                        if let Some(c) = weak.upgrade() {
+                            if let Some(id) = c.block_number(BlockId::Latest) {
+                                warn!(target: "consensus", "BlockID: {id}");
+                            }
+                        }
+                    }

-                                    let id: usize = std::process::id() as usize;
-                                    let thread_id = std::thread::current().id();
-                                    info!(target: "engine", "Waiting for Signaling shutdown to process ID: {id} thread: {:?}", thread_id);
+                    let id: usize = std::process::id() as usize;
+                    let thread_id = std::thread::current().id();
+                    info!(target: "engine", "Waiting for Signaling shutdown to process ID: {id} thread: {:?}", thread_id);

-                                    if let Some(ref weak) = *self.client.read() {
-                                        if let Some(client) = weak.upgrade() {
-                                            info!(target: "engine", "demanding shutdown from hbbft engine.");
-                                            client.demand_shutdown();
-                                        }
-                                    }
-                                }
-                                // if the node is available, everythign is fine!
-                            }
-                            Err(error) => {
-                                warn!(target: "consensus", "Could not query Honey Badger check for unavailability shutdown. {:?}", error);
-                            }
+                    if let Some(ref weak) = *self.client.read() {
+                        if let Some(client) = weak.upgrade() {
+                            info!(target: "engine", "demanding shutdown from hbbft engine.");
+                            client.demand_shutdown();
                         }
                     }
-                    // else: just a regular node.
-                }
-                Err(error) => {
-                    warn!(target: "consensus", "Could not query Honey Badger check if validator is staked. {:?}", error);
                 }
             }
         } else if timer == ENGINE_DELAYED_UNITL_SYNCED_TOKEN {
@@ -451,6 +438,7 @@ impl HoneyBadgerBFT {
             peers_management: Mutex::new(HbbftPeersManagement::new()),
             current_minimum_gas_price: Mutex::new(None),
             early_epoch_manager: Mutex::new(None),
+            hbbft_engine_cache: Mutex::new(HbbftEngineCache::new()),
         });

         if !engine.params.is_unit_test.unwrap_or(false) {
@@ -1014,6 +1002,15 @@ impl HoneyBadgerBFT {
             }
         };

+        let engine_client = client_arc.as_ref();
+        if let Err(err) = self
+            .hbbft_engine_cache
+            .lock()
+            .refresh_cache(mining_address, engine_client)
+        {
+            trace!(target: "engine", "do_validator_engine_actions: data could not get updated, follow up tasks might fail: {:?}", err);
+        }
+
         let engine_client = client_arc.deref();

         let block_chain_client = match engine_client.as_full_client() {
@@ -1261,114 +1258,14 @@ impl HoneyBadgerBFT {
         }
     }

-    /** returns if the signer of hbbft is tracked as available in the hbbft contracts. NOTE:Low Performance.*/
-    pub fn is_available(&self) -> Result<bool, Error> {
-        match self.signer.read().as_ref() {
-            Some(signer) => {
-                match self.client_arc() {
-                    Some(client) => {
-                        let engine_client = client.deref();
-                        let mining_address = signer.address();
-
-                        if mining_address.is_zero() {
-                            debug!(target: "consensus", "is_available: not available because mining address is zero: ");
-                            return Ok(false);
-                        }
-                        match super::contracts::validator_set::get_validator_available_since(
-                            engine_client,
-                            &mining_address,
-                        ) {
-                            Ok(available_since) => {
-                                debug!(target: "consensus", "available_since: {}", available_since);
-                                return Ok(!available_since.is_zero());
-                            }
-                            Err(err) => {
-                                warn!(target: "consensus", "Error get get_validator_available_since: ! {:?}", err);
-                            }
-                        }
-                    }
-                    None => {
-                        // warn!("Could not retrieve address for writing availability transaction.");
-                        warn!(target: "consensus", "is_available: could not get engine client");
-                    }
-                }
-            }
-            None => {}
-        }
-        return Ok(false);
+    /** returns if the signer of hbbft is tracked as available in the hbbft contracts..*/
+    pub fn is_available(&self) -> bool {
+        self.hbbft_engine_cache.lock().is_available()
     }

     /** returns if the signer of hbbft is stacked. */
-    pub fn is_staked(&self) -> Result<bool, Error> {
-        // is the configured validator stacked ??
-
-        // TODO: improvement:
-        // since a signer address can not change after boot,
-        // we can just cash the value
-        // so we don't need a read lock here,
-        // getting the numbers of required read locks down (deadlock risk)
-        // and improving the performance.
-
-        match self.signer.read().as_ref() {
-            Some(signer) => {
-                match self.client_arc() {
-                    Some(client) => {
-                        let engine_client = client.deref();
-                        let mining_address = signer.address();
-
-                        if mining_address.is_zero() {
-                            return Ok(false);
-                        }
-
-                        match super::contracts::validator_set::staking_by_mining_address(
-                            engine_client,
-                            &mining_address,
-                        ) {
-                            Ok(staking_address) => {
-                                // if there is no pool for this validator defined, we know that
-                                if staking_address.is_zero() {
-                                    return Ok(false);
-                                }
-                                match super::contracts::staking::stake_amount(
-                                    engine_client,
-                                    &staking_address,
-                                    &staking_address,
-                                ) {
-                                    Ok(stake_amount) => {
-                                        debug!(target: "consensus", "stake_amount: {}", stake_amount);
-
-                                        // we need to check if the pool stake amount is >= minimum stake
-                                        match super::contracts::staking::candidate_min_stake(
-                                            engine_client,
-                                        ) {
-                                            Ok(min_stake) => {
-                                                debug!(target: "consensus", "min_stake: {}", min_stake);
-                                                return Ok(stake_amount.ge(&min_stake));
-                                            }
-                                            Err(err) => {
-                                                error!(target: "consensus", "Error get candidate_min_stake: ! {:?}", err);
-                                            }
-                                        }
-                                    }
-                                    Err(err) => {
-                                        warn!(target: "consensus", "Error get stake_amount: ! {:?}", err);
-                                    }
-                                }
-                            }
-                            Err(err) => {
-                                warn!(target: "consensus", "Error get staking_by_mining_address: ! {:?}", err);
-                            }
-                        }
-                    }
-                    None => {
-                        // warn!("Could not retrieve address for writing availability transaction.");
-                        warn!(target: "consensus", "could not get engine client");
-                    }
-                }
-            }
-            None => {}
-        }
-        return Ok(false);
+    pub fn is_staked(&self) -> bool {
+        self.hbbft_engine_cache.lock().is_staked()
     }

     fn start_hbbft_epoch_if_ready(&self) {
@@ -1788,6 +1685,7 @@ impl Engine<EthereumMachine> for HoneyBadgerBFT {
     // note: this is by design not part of the PrometheusMetrics trait,
     // it is part of the Engine trait and does nothing by default.
     fn prometheus_metrics(&self, registry: &mut stats::PrometheusRegistry) {
+        let is_staked = self.is_staked();
         self.hbbft_message_dispatcher.prometheus_metrics(registry);
         if let Some(early_epoch_manager_option) = self
             .early_epoch_manager

crates/ethcore/src/engines/hbbft/hbbft_engine_cache.rs

Lines changed: 156 additions & 0 deletions
@@ -0,0 +1,156 @@
+use crate::client::EngineClient;
+use error::Error;
+use ethereum_types::Address;
+use parking_lot::Mutex;
+
+#[derive(Debug, Clone)]
+pub struct HbbftEngineCacheData {
+    pub signer_address: Address,
+
+    pub is_staked: bool,
+
+    pub is_available: bool,
+}
+
+impl HbbftEngineCacheData {
+    pub fn new() -> Self {
+        HbbftEngineCacheData {
+            signer_address: Address::zero(),
+            is_staked: false,
+            is_available: false,
+        }
+    }
+}
+
+pub struct HbbftEngineCache {
+    data: Mutex<HbbftEngineCacheData>,
+}
+
+impl HbbftEngineCache {
+    pub fn new() -> Self {
+        HbbftEngineCache {
+            data: Mutex::new(HbbftEngineCacheData::new()),
+        }
+    }
+
+    pub fn is_staked(&self) -> bool {
+        self.data.lock().is_staked
+    }
+
+    pub fn signer_address(&self) -> Address {
+        self.data.lock().signer_address
+    }
+
+    pub fn is_available(&self) -> bool {
+        self.data.lock().is_available
+    }
+
+    /// Refresh the cache values.
+    pub fn refresh_cache(
+        &mut self,
+        signer_address: Address,
+        engine_client: &dyn EngineClient,
+    ) -> Result<(), Error> {
+        //self.is_staked = false;
+
+        let mut new_data = HbbftEngineCacheData::new();
+        new_data.signer_address = signer_address;
+        let is_available = self.calc_is_available(signer_address, engine_client)?;
+        new_data.is_available = is_available;
+        new_data.is_staked = self.calc_is_staked(signer_address, engine_client)?;
+
+        self.data.lock().clone_from(&new_data);
+
+        return Ok(());
+    }
+
+    fn calc_is_available(
+        &mut self,
+        signer_address: Address,
+        engine_client: &dyn EngineClient,
+    ) -> Result<bool, Error> {
+        // match self.signer.read().as_ref() {
+        //     Some(signer) => {
+        //         match self.client_arc() {
+        //             Some(client) => {
+        let engine_client = engine_client;
+        // let mining_address = signer.address();
+
+        if signer_address.is_zero() {
+            // debug!(target: "consensus", "is_available: not available because mining address is zero: ");
+            return Ok(false);
+        }
+        match super::contracts::validator_set::get_validator_available_since(
+            engine_client,
+            &signer_address,
+        ) {
+            Ok(available_since) => {
+                debug!(target: "consensus", "available_since: {}", available_since);
+                return Ok(!available_since.is_zero());
+            }
+            Err(err) => {
+                warn!(target: "consensus", "Error get get_validator_available_since: ! {:?}", err);
+            }
+        }
+        //}
+        // None => {
+        //     // warn!("Could not retrieve address for writing availability transaction.");
+        //     warn!(target: "consensus", "is_available: could not get engine client");
+        // }
+        // }
+        // }
+        // None => {}
+        // }
+        return Ok(false);
+    }
+
+    /// refreshes cache, if node is staked.
+    fn calc_is_staked(
+        &self,
+        mining_address: Address,
+        engine_client: &dyn EngineClient,
+    ) -> Result<bool, Error> {
+        // is the configured validator stacked ??
+        match super::contracts::validator_set::staking_by_mining_address(
+            engine_client,
+            &mining_address,
+        ) {
+            Ok(staking_address) => {
+                // if there is no pool for this validator defined, we know that
+                if staking_address.is_zero() {
+                    return Ok(false);
+                }
+                match super::contracts::staking::stake_amount(
+                    engine_client,
+                    &staking_address,
+                    &staking_address,
+                ) {
+                    Ok(stake_amount) => {
+                        debug!(target: "consensus", "stake_amount: {}", stake_amount);
+
+                        // we need to check if the pool stake amount is >= minimum stake
+                        match super::contracts::staking::candidate_min_stake(engine_client) {
+                            Ok(min_stake) => {
+                                debug!(target: "consensus", "min_stake: {}", min_stake);
+                                return Ok(stake_amount.ge(&min_stake));
+                            }
+                            Err(err) => {
+                                error!(target: "consensus", "Error get candidate_min_stake: ! {:?}", err);
+                                return Ok(false);
+                                //return Err(err.into());
+                            }
+                        }
+                    }
+                    Err(err) => {
+                        warn!(target: "consensus", "Error get stake_amount: ! {:?}", err);
+                        return Ok(false);
+                    }
+                }
+            }
+            Err(err) => {
+                warn!(target: "consensus", "Error get staking_by_mining_address: ! {:?}", err);
+                return Ok(false);
+            }
+        }
+    }
+}
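
For context, a caller-side sketch of how this cache is meant to be used (illustrative code, not part of the commit; it assumes the types added above, HbbftEngineCache, EngineClient and Address, plus the crate's logging macros): one refresh_cache call batches the expensive contract reads, after which is_staked() and is_available() are cheap, short-lock reads.

    // Illustrative caller sketch (not part of this commit); assumes the types
    // added above (HbbftEngineCache, EngineClient, Address) and the crate's
    // logging macros.
    fn validator_engine_tick(
        cache: &mut HbbftEngineCache,
        signer_address: Address,
        client: &dyn EngineClient,
    ) {
        // One batch of expensive contract queries; the data Mutex is locked
        // only briefly at the end to publish the fresh values.
        if let Err(err) = cache.refresh_cache(signer_address, client) {
            // On failure the cache keeps its previous values, so follow-up
            // decisions may run on slightly stale data.
            warn!(target: "engine", "cache refresh failed: {:?}", err);
        }

        // These reads only copy a bool out from behind the Mutex, cheap enough
        // to also serve the Grafana/Prometheus metrics path.
        let _is_staked = cache.is_staked();
        let _is_available = cache.is_available();
    }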

crates/ethcore/src/engines/hbbft/mod.rs

Lines changed: 1 addition & 0 deletions
@@ -3,6 +3,7 @@ mod contracts;
 mod contribution;
 mod hbbft_early_epoch_end_manager;
 mod hbbft_engine;
+mod hbbft_engine_cache;
 mod hbbft_message_memorium;
 mod hbbft_network_fork_manager;
 mod hbbft_peers_management;
