diff --git a/xelis_common/src/account/balance.rs b/xelis_common/src/account/balance.rs index 716b7128..b1d29380 100644 --- a/xelis_common/src/account/balance.rs +++ b/xelis_common/src/account/balance.rs @@ -8,8 +8,8 @@ use super::CiphertextCache; #[derive(Debug, Clone, Copy, Deserialize, Serialize, PartialEq, Eq)] #[serde(rename_all = "snake_case")] pub enum BalanceType { - // Only incoming funds were added - // By default, a balance is considered as input + // Only incoming funds were added. + // By default, a balance is considered as input. Input, // Only a spending was made from this Output, @@ -42,17 +42,17 @@ impl Serializer for BalanceType { #[derive(Clone, Deserialize, Serialize, PartialEq, Eq, Debug)] pub struct VersionedBalance { - // Output balance is used in case of multi TXs not in same block + // Output balance is used in case of multi TXs not in same block. // If you build several TXs at same time but are not in the same block, - // and a incoming tx happen we need to keep track of the output balance + // and a incoming tx happen we need to keep track of the output balance. output_balance: Option, - // Final user balance that contains outputs and inputs balance - // This is the balance shown to a user and used to build TXs + // Final user balance that contains outputs and inputs balance. + // This is the balance shown to a user and used to build TXs. final_balance: CiphertextCache, // Determine if there was any output made in this version balance_type: BalanceType, - // Topoheight of the previous versioned balance - // If its none, that means it's the first version available + // Topoheight of the previous versioned balance. + // If its none, that means it's the first version available. previous_topoheight: Option, } @@ -211,11 +211,11 @@ impl Serializer for Balance { #[derive(Debug)] pub struct AccountSummary { - // last output balance stored on chain + // Last output balance stored on chain. 
// It can be None if the account has no output balance - // or if the output balance is already in stable_version + // or if the output balance is already in stable_version. pub output_version: Option, - // last balance stored on chain below or equal to stable topoheight + // Last balance stored on chain below or equal to stable topoheight. pub stable_version: Balance } diff --git a/xelis_common/src/api/daemon.rs b/xelis_common/src/api/daemon.rs index e19ca91e..a80c3a82 100644 --- a/xelis_common/src/api/daemon.rs +++ b/xelis_common/src/api/daemon.rs @@ -134,16 +134,16 @@ pub struct GetBlockTemplateResult { #[derive(Serialize, Deserialize, PartialEq)] pub struct GetMinerWorkResult { - // algorithm to use + // Algorithm to use pub algorithm: Algorithm, - // template is miner job in hex format + // Template is miner job in hex format pub miner_work: String, - // block height + // Block height pub height: u64, - // difficulty required for valid block POW + // Difficulty required for valid block POW pub difficulty: Difficulty, - // topoheight of the daemon - // this is for visual purposes only + // Topoheight of the daemon + // This is for visual purposes only pub topoheight: u64, } @@ -252,7 +252,7 @@ pub struct GetInfoResult { pub block_reward: u64, pub dev_reward: u64, pub miner_reward: u64, - // count how many transactions are present in mempool + // Count how many transactions are present in mempool pub mempool_size: usize, // software version on which the daemon is running pub version: String, @@ -262,7 +262,7 @@ pub struct GetInfoResult { #[derive(Serialize, Deserialize)] pub struct SubmitTransactionParams { - pub data: String // should be in hex format + pub data: String // Should be in hex format } #[derive(Serialize, Deserialize)] @@ -371,13 +371,13 @@ pub struct GetTransactionsParams { #[derive(Serialize, Deserialize)] pub struct TransactionResponse<'a> { - // in which blocks it was included + // In which blocks it was included pub blocks: Option>, - // in 
which blocks it was executed + // In which blocks it was executed pub executed_in_block: Option, - // if it is in mempool + // If it is in mempool pub in_mempool: bool, - // if its a mempool tx, we add the timestamp when it was added + // If its a mempool tx, we add the timestamp when it was added #[serde(skip_serializing_if = "Option::is_none")] #[serde(default)] pub first_seen: Option, @@ -470,16 +470,16 @@ pub struct IsTxExecutedInBlockParams<'a> { // Struct to define dev fee threshold #[derive(serde::Serialize, serde::Deserialize)] pub struct DevFeeThreshold { - // block height to start dev fee + // Block height to start dev fee pub height: u64, - // percentage of dev fee, example 10 = 10% + // Percentage of dev fee, example 10 = 10% pub fee_percentage: u64 } // Struct to define hard fork #[derive(Debug, serde::Serialize, serde::Deserialize)] pub struct HardFork { - // block height to start hard fork + // Block height to start hard fork pub height: u64, // Block version to use pub version: BlockVersion, @@ -504,11 +504,11 @@ pub struct GetMempoolCacheParams<'a> { #[derive(Serialize, Deserialize)] pub struct GetMempoolCacheResult { - // lowest nonce used + // Lowest nonce used min: u64, - // highest nonce used + // Highest nonce used max: u64, - // all txs ordered by nonce + // All TXs ordered by nonce txs: Vec, // All "final" cached balances used balances: HashMap @@ -559,29 +559,29 @@ pub struct MakeIntegratedAddressParams<'a> { #[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] #[serde(rename_all = "snake_case")] pub enum NotifyEvent { - // When a new block is accepted by chain - // it contains NewBlockEvent as value + // When a new block is accepted by chain, + // it contains NewBlockEvent as value. NewBlock, - // When a block (already in chain or not) is ordered (new topoheight) - // it contains BlockOrderedEvent as value + // When a block (already in chain or not) is ordered (new topoheight), + // it contains BlockOrderedEvent as value. 
BlockOrdered, - // When a block that was ordered is not in the new DAG order - // it contains BlockOrphanedEvent that got orphaned + // When a block that was ordered is not in the new DAG order, + // it contains BlockOrphanedEvent that got orphaned. BlockOrphaned, - // When stable height has changed (different than the previous one) - // it contains StableHeightChangedEvent struct as value + // When stable height has changed (different than the previous one), + // it contains StableHeightChangedEvent struct as value. StableHeightChanged, - // When stable topoheight has changed (different than the previous one) - // it contains StableTopoHeightChangedEvent struct as value + // When stable topoheight has changed (different than the previous one), + // it contains StableTopoHeightChangedEvent struct as value. StableTopoHeightChanged, - // When a transaction that was executed in a block is not reintroduced in mempool - // It contains TransactionOrphanedEvent as value + // When a transaction that was executed in a block is not reintroduced in mempool, + // It contains TransactionOrphanedEvent as value. TransactionOrphaned, - // When a new transaction is added in mempool - // it contains TransactionAddedInMempoolEvent struct as value + // When a new transaction is added in mempool, + // it contains TransactionAddedInMempoolEvent struct as value. TransactionAddedInMempool, - // When a transaction has been included in a valid block & executed on chain - // it contains TransactionExecutedEvent struct as value + // When a transaction has been included in a valid block & executed on chain, + // it contains TransactionExecutedEvent struct as value. 
TransactionExecuted, // When a registered TX SC Call hash has been executed by chain // TODO: Smart Contracts @@ -589,21 +589,21 @@ pub enum NotifyEvent { // When a new asset has been registered // TODO: Smart Contracts NewAsset, - // When a new peer has connected to us - // It contains PeerConnectedEvent struct as value + // When a new peer has connected to us, + // it contains PeerConnectedEvent struct as value. PeerConnected, - // When a peer has disconnected from us - // It contains PeerDisconnectedEvent struct as value + // When a peer has disconnected from us, + // it contains PeerDisconnectedEvent struct as value. PeerDisconnected, - // Peer peerlist updated, its all its connected peers - // It contains PeerPeerListUpdatedEvent as value + // Peer peerlist updated, its all its connected peers, + // it contains PeerPeerListUpdatedEvent as value. PeerPeerListUpdated, - // Peer has been updated through a ping packet - // Contains PeerStateUpdatedEvent as value + // Peer has been updated through a ping packet, + // contains PeerStateUpdatedEvent as value. PeerStateUpdated, // When a peer of a peer has disconnected - // and that he notified us - // It contains PeerPeerDisconnectedEvent as value + // and he notified us, + // it contains PeerPeerDisconnectedEvent as value. 
PeerPeerDisconnected, } @@ -613,10 +613,10 @@ pub type NewBlockEvent = BlockResponse; // Value of NotifyEvent::BlockOrdered #[derive(Serialize, Deserialize)] pub struct BlockOrderedEvent<'a> { - // block hash in which this event was triggered + // Block hash in which this event was triggered pub block_hash: Cow<'a, Hash>, pub block_type: BlockType, - // the new topoheight of the block + // The new topoheight of the block pub topoheight: u64, } @@ -624,7 +624,7 @@ pub struct BlockOrderedEvent<'a> { #[derive(Serialize, Deserialize)] pub struct BlockOrphanedEvent<'a> { pub block_hash: Cow<'a, Hash>, - // Tpoheight of the block before being orphaned + // Topoheight of the block before being orphaned pub old_topoheight: u64 } diff --git a/xelis_common/src/api/data.rs b/xelis_common/src/api/data.rs index 174aef3d..74435067 100644 --- a/xelis_common/src/api/data.rs +++ b/xelis_common/src/api/data.rs @@ -22,7 +22,7 @@ pub enum DataConversionError { UnexpectedValue(ValueType), } -// All types availables +// All types available #[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Hash, Clone, Copy)] pub enum ValueType { Bool, @@ -189,9 +189,9 @@ impl DataElement { } impl Serializer for DataElement { - // Don't do any pre-allocation because of infinite depth + // Don't do any pre-allocation because of infinite depth. // Otherwise an attacker could generate big depth with high size until max limit - // which can create OOM on low devices + // which can create OOM on low devices. fn read(reader: &mut Reader) -> Result { Ok(match reader.read_u8()? 
{ 0 => Self::Value(DataValue::read(reader)?), @@ -225,7 +225,7 @@ impl Serializer for DataElement { } Self::Array(values) => { writer.write_u8(1); - writer.write_u8(values.len() as u8); // we accept up to 255 values + writer.write_u8(values.len() as u8); // We accept up to 255 values for value in values { value.write(writer); } @@ -273,9 +273,9 @@ pub enum DataValue { U64(u64), U128(u128), Hash(Hash), - // This is a specific type for optimized size of binary data - // Because above variants rewrite for each element the byte of the element and of each value - // It supports up to 65535 bytes (u16::MAX) + // This is a specific type for optimized size of binary data. + // Because the above variants rewrite for each element the byte of the element and of each value + // It supports up to 65535 bytes (u16::MAX). Blob(Vec), } diff --git a/xelis_common/src/api/mod.rs b/xelis_common/src/api/mod.rs index 43ae2998..d9103fad 100644 --- a/xelis_common/src/api/mod.rs +++ b/xelis_common/src/api/mod.rs @@ -116,21 +116,21 @@ impl From> for TransactionType { // This is exactly the same as the one in xelis_common/src/transaction/mod.rs // We use this one for serde (de)serialization -// So we have addresses displayed as strings and not Public Key as bytes -// This is much more easier for developers relying on the API +// So we have addresses displayed as strings and not Public Key as bytes. +// This is much easier for developers relying on the API. 
#[derive(Serialize, Deserialize, Clone)] pub struct RPCTransaction<'a> { pub hash: Cow<'a, Hash>, /// Version of the transaction pub version: TxVersion, - // Source of the transaction + /// Source of the transaction pub source: Address, /// Type of the transaction pub data: RPCTransactionType<'a>, /// Fees in XELIS pub fee: u64, - /// nonce must be equal to the one on chain account - /// used to prevent replay attacks and have ordered transactions + /// Nonce must be equal to the one on chain account + /// Used to prevent replay attacks and have ordered transactions pub nonce: u64, /// We have one source commitment and equality proof per asset used in the tx. pub source_commitments: Cow<'a, Vec>, @@ -178,12 +178,12 @@ impl<'a> From> for Transaction { } // We create a type above it so for deserialize we can use this type directly -// and not have to specify the lifetime +// and not have to specify the lifetime. pub type TransactionResponse = RPCTransaction<'static>; #[derive(Serialize, Deserialize)] pub struct SplitAddressParams { - // address which must be in integrated form + // Address which must be in integrated form pub address: Address } @@ -203,7 +203,7 @@ fn default_true_value() -> bool { true } -// same here +// Same here fn default_false_value() -> bool { false } \ No newline at end of file diff --git a/xelis_common/src/api/wallet.rs b/xelis_common/src/api/wallet.rs index f42805b6..885c8ef0 100644 --- a/xelis_common/src/api/wallet.rs +++ b/xelis_common/src/api/wallet.rs @@ -94,8 +94,8 @@ pub struct GetAssetPrecisionParams<'a> { #[derive(Serialize, Deserialize)] pub struct GetAddressParams { - // Data to use for creating an integrated address - // Returned address will contains all the data provided here + // Data to use for creating an integrated address. + // Returned address will contain all the data provided here. 
pub integrated_data: Option } @@ -179,21 +179,21 @@ pub struct QueryDBParams { #[derive(Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] #[serde(rename_all = "snake_case")] pub enum NotifyEvent { - // When a new topoheight is detected by wallet - // it contains the topoheight (u64) as value - // It may be lower than the previous one, based on how the DAG reacts + // When a new topoheight is detected by the wallet, + // it contains the topoheight (u64) as value. + // It may be lower than the previous one, based on how the DAG reacts. NewTopoHeight, - // When a new asset is added to wallet - // Contains a Hash as value + // When a new asset is added to the wallet, + // Contains a Hash as value. NewAsset, - // When a new transaction is added to wallet - // Contains TransactionEntry struct as value + // When a new transaction is added to the wallet, + // Contains TransactionEntry struct as value. NewTransaction, - // When a balance is changed - // Contains a BalanceChanged as value + // When a balance is changed, + // Contains a BalanceChanged as value. BalanceChanged, - // When a rescan happened on the wallet - // Contains a topoheight as value to indicate until which topoheight transactions got deleted + // When a rescan happens on the wallet, + // Contains a topoheight as value to indicate until which topoheight transactions got deleted. Rescan, // When network state changed Online, @@ -209,7 +209,7 @@ pub struct TransferOut { pub asset: Hash, // Plaintext amount pub amount: u64, - // extra data + // Extra data pub extra_data: Option } @@ -219,7 +219,7 @@ pub struct TransferIn { pub asset: Hash, // Plaintext amount pub amount: u64, - // extra data + // Extra data pub extra_data: Option } @@ -250,7 +250,7 @@ pub enum EntryType { } // This struct is used to represent a transaction entry like in wallet -// But we replace every PublicKey to use Address instead +// but we replace every PublicKey to use Address instead. 
#[derive(Debug, Clone, Serialize, Deserialize)] pub struct TransactionEntry { pub hash: Hash, diff --git a/xelis_common/src/block/header.rs b/xelis_common/src/block/header.rs index c3972d60..c49d67ee 100644 --- a/xelis_common/src/block/header.rs +++ b/xelis_common/src/block/header.rs @@ -150,9 +150,9 @@ impl BlockHeader { self.txs_hashes.len() } - // Build the header work (immutable part in mining process) - // This is the part that will be used to compute the header work hash - // See get_work_hash function and get_serialized_header for final hash computation + // Build the header work (immutable part in mining process). + // This is the part that will be used to compute the header work hash. + // See get_work_hash function and get_serialized_header for final hash computation. pub fn get_work(&self) -> Vec { let mut bytes: Vec = Vec::with_capacity(HEADER_WORK_SIZE); @@ -166,7 +166,7 @@ impl BlockHeader { bytes } - // compute the header work hash (immutable part in mining process) + // Compute the header work hash (immutable part in mining process) pub fn get_work_hash(&self) -> Hash { hash(&self.get_work()) } @@ -185,7 +185,7 @@ impl BlockHeader { bytes } - // compute the block POW hash + // Compute the block POW hash pub fn get_pow_hash(&self, algorithm: Algorithm) -> Result { pow_hash(&self.get_serialized_header(), algorithm) } @@ -261,7 +261,7 @@ impl Serializer for BlockHeader { } fn size(&self) -> usize { - // additional byte for tips count + // Additional byte for tips count let tips_size = 1 + self.tips.len() * HASH_SIZE; // 2 bytes for txs count (u16) let txs_size = 2 + self.txs_hashes.len() * HASH_SIZE; @@ -277,8 +277,8 @@ impl Serializer for BlockHeader { } impl Hashable for BlockHeader { - // this function has the same behavior as the get_pow_hash function - // but we use a fast algorithm here + // This function has the same behavior as the get_pow_hash function + // but we use a fast algorithm here. 
fn hash(&self) -> Hash { hash(&self.get_serialized_header()) } diff --git a/xelis_common/src/block/miner.rs b/xelis_common/src/block/miner.rs index 6e4e55bd..02cac8d2 100644 --- a/xelis_common/src/block/miner.rs +++ b/xelis_common/src/block/miner.rs @@ -83,18 +83,18 @@ impl fmt::Display for Algorithm { // This structure is used by xelis-miner which allow to compute a valid block POW hash #[derive(Clone, Debug, Serialize, Deserialize)] pub struct MinerWork<'a> { - header_work_hash: Hash, // include merkle tree of tips, txs, and height (immutable) - timestamp: TimestampMillis, // miners can update timestamp to keep it up-to-date + header_work_hash: Hash, // Include merkle tree of tips, txs, and height (immutable) + timestamp: TimestampMillis, // Miners can update timestamp to keep it up-to-date nonce: u64, miner: Option>, - // Extra nonce so miner can write anything - // Can also be used to spread more the work job and increase its work capacity + // Extra nonce so miner can write anything. + // Can also be used to spread more the work job and increase its work capacity. extra_nonce: [u8; EXTRA_NONCE_SIZE] } -// Worker is used to store the current work and its variant -// Based on the variant, the worker can compute the POW hash -// It is used by the miner to efficiently switch context in case of algorithm change +// Worker is used to store the current work and its variant. +// Based on the variant, the worker can compute the POW hash. +// It is used by the miner to efficiently switch context in case of algorithm change. pub struct Worker<'a> { work: Option<(MinerWork<'a>, [u8; BLOCK_WORK_SIZE])>, variant: WorkVariant @@ -198,8 +198,8 @@ impl<'a> Worker<'a> { Ok(hash) } - // Compute the block hash based on the current work - // This is used to get the expected block hash + // Compute the block hash based on the current work. + // This is used to get the expected block hash. 
pub fn get_block_hash(&self) -> Result { match self.work.as_ref() { Some((_, cache)) => Ok(hash(cache)), @@ -332,8 +332,8 @@ impl<'a> Serializer for MinerWork<'a> { } } -// no need to override hash() as its already serialized in good format -// This is used to get the expected block hash +// No need to override hash() as its already serialized in good format. +// This is used to get the expected block hash. impl Hashable for MinerWork<'_> {} #[cfg(test)] diff --git a/xelis_common/src/block/mod.rs b/xelis_common/src/block/mod.rs index b79cb32e..1c288488 100644 --- a/xelis_common/src/block/mod.rs +++ b/xelis_common/src/block/mod.rs @@ -14,8 +14,8 @@ pub const EXTRA_NONCE_SIZE: usize = 32; pub const HEADER_WORK_SIZE: usize = 73; pub const BLOCK_WORK_SIZE: usize = 112; // 32 + 8 + 8 + 32 + 32 = 112 -// Get combined hash for tips -// This is used to get a hash that is unique for a set of tips +// Get combined hash for tips. +// This is used to get a hash that is unique for a set of tips. pub fn get_combined_hash_for_tips<'a, I: Iterator>(tips: I) -> Hash { let mut bytes = [0u8; HASH_SIZE]; for tip in tips { diff --git a/xelis_common/src/crypto/address.rs b/xelis_common/src/crypto/address.rs index 60dbbddc..05d0e113 100644 --- a/xelis_common/src/crypto/address.rs +++ b/xelis_common/src/crypto/address.rs @@ -21,8 +21,8 @@ use anyhow::Error; #[derive(Clone, Debug, PartialEq, Eq)] pub enum AddressType { Normal, - // Data variant allow to integrate data in address for easier communication / data transfered - // those data are directly integrated in the data part and can be transfered in the transaction directly + // Data variant allows embedding data in an address for easier communication and transfer. + // This data is directly integrated and can be transferred within a transaction. 
Data(DataElement) } @@ -104,8 +104,8 @@ impl Address { self.mainnet } - // Compress the address to a byte array - // We don't use Serializer trait to avoid storing mainnet bool + // Compress the address to a byte array. + // We don't use Serializer trait to avoid storing mainnet bool. fn compress(&self) -> Vec { let mut writer = Writer::new(); self.key.write(&mut writer); @@ -113,8 +113,8 @@ impl Address { writer.bytes() } - // Read the address from a byte array - // Hrp validity isn't checked here, it should be done before calling this function + // Read the address from a byte array. + // Hrp validity isn't checked here, it should be done before calling this function. fn decompress(bytes: &[u8], hrp: &str) -> Result { let mut reader = Reader::new(bytes); let mainnet = hrp == PREFIX_ADDRESS; @@ -147,7 +147,7 @@ impl Address { // Parse an address from a string (human readable format) pub fn from_string(address: &String) -> Result { let (hrp, decoded) = decode(address)?; - // check that hrp is valid one + // Check that hrp is valid one if hrp != PREFIX_ADDRESS && hrp != TESTNET_PREFIX_ADDRESS { return Err(Bech32Error::InvalidPrefix(hrp, format!("{} or {}", PREFIX_ADDRESS, TESTNET_PREFIX_ADDRESS)).into()) } @@ -155,7 +155,7 @@ impl Address { let bits = convert_bits(&decoded, 5, 8, false)?; let addr = Address::decompress(&bits, hrp.as_str())?; - // now check that the hrp decoded is the one for the network state + // Now check that the hrp decoded is the one for the network state if (addr.is_mainnet() && hrp != PREFIX_ADDRESS) || (!addr.is_mainnet() && hrp != TESTNET_PREFIX_ADDRESS) { let expected = if addr.is_mainnet() { PREFIX_ADDRESS diff --git a/xelis_common/src/crypto/elgamal/ciphertext.rs b/xelis_common/src/crypto/elgamal/ciphertext.rs index c17ba897..c39c0d90 100644 --- a/xelis_common/src/crypto/elgamal/ciphertext.rs +++ b/xelis_common/src/crypto/elgamal/ciphertext.rs @@ -4,9 +4,9 @@ use curve25519_dalek::{traits::Identity, RistrettoPoint, Scalar}; use 
serde::{Deserialize, Deserializer, Serialize}; use super::{pedersen::{DecryptHandle, PedersenCommitment}, CompressedCiphertext, CompressedCommitment, CompressedHandle}; -// Represents a twisted ElGamal Ciphertext -// One part is a Pedersen commitment to be bulletproofs compatible -// The other part is a handle to be used for decryption +// Represents a twisted ElGamal Ciphertext. +// One part is a Pedersen commitment to be bulletproofs compatible. +// The other part is a handle to be used for decryption. #[derive(Clone, Debug, PartialEq, Eq)] pub struct Ciphertext { commitment: PedersenCommitment, diff --git a/xelis_common/src/crypto/elgamal/key.rs b/xelis_common/src/crypto/elgamal/key.rs index 30e46b07..0bdf809a 100644 --- a/xelis_common/src/crypto/elgamal/key.rs +++ b/xelis_common/src/crypto/elgamal/key.rs @@ -43,9 +43,9 @@ impl PublicKey { Self(p) } - // Create a new public key from a private key - // The public key is H^(-1) * H - // Private key must not be zero + // Create a new public key from a private key. + // The public key is H^(-1) * H. + // Private key must not be zero. pub fn new(secret: &PrivateKey) -> Self { let s = &secret.0; assert!(s != &Scalar::ZERO); @@ -96,8 +96,8 @@ impl PublicKey { } impl PrivateKey { - // Create a new private key from a scalar - // The scalar must not be zero + // Create a new private key from a scalar. + // The scalar must not be zero. pub fn from_scalar(scalar: Scalar) -> Self { assert!(scalar != Scalar::ZERO); diff --git a/xelis_common/src/crypto/elgamal/mod.rs b/xelis_common/src/crypto/elgamal/mod.rs index 82584a08..0acbd9b3 100644 --- a/xelis_common/src/crypto/elgamal/mod.rs +++ b/xelis_common/src/crypto/elgamal/mod.rs @@ -20,7 +20,7 @@ pub use signature::*; pub use curve25519_dalek::constants::RISTRETTO_BASEPOINT_POINT as G; lazy_static! 
{ - // base point for encoding the commitments opening + // Base point for encoding the commitments opening pub static ref H: RistrettoPoint = { let mut hasher = sha3::Sha3_512::default(); hasher.update(RISTRETTO_BASEPOINT_COMPRESSED.as_bytes()); diff --git a/xelis_common/src/crypto/proofs.rs b/xelis_common/src/crypto/proofs.rs index af7d4242..af9ae7cf 100644 --- a/xelis_common/src/crypto/proofs.rs +++ b/xelis_common/src/crypto/proofs.rs @@ -33,8 +33,8 @@ use zeroize::Zeroize; pub const BULLET_PROOF_SIZE: usize = 64; lazy_static! { - // Bulletproof generators: party size is max transfers * 2 + 1 - // * 2 in case each transfer use a unique asset + 1 for xelis asset as fee and + 1 to be a power of 2 + // Bulletproof generators: party size is max transfers * 2 + 1. + // * 2 in case each transfer use a unique asset + 1 for xelis asset as fee and + 1 to be a power of 2. pub static ref BP_GENS: BulletproofGens = BulletproofGens::new(BULLET_PROOF_SIZE, MAX_TRANSFER_COUNT * 2 + 2); pub static ref PC_GENS: PedersenGens = PedersenGens::default(); } @@ -117,7 +117,7 @@ impl BatchCollector { #[allow(non_snake_case)] impl CommitmentEqProof { - // warning: caller must make sure not to forget to hash the public key, ciphertext, commitment in the transcript as it is not done here + // Warning: Caller must make sure not to forget to hash the public key, ciphertext, commitment in the transcript as it is not done here pub fn new( source_keypair: &KeyPair, source_ciphertext: &Ciphertext, @@ -127,7 +127,7 @@ impl CommitmentEqProof { ) -> Self { transcript.equality_proof_domain_separator(); - // extract the relevant scalar and Ristretto points from the inputs + // Extract the relevant scalar and Ristretto points from the inputs let P_source = source_keypair.get_public_key().as_point(); let D_source = source_ciphertext.handle().as_point(); @@ -135,7 +135,7 @@ impl CommitmentEqProof { let x = Scalar::from(amount); let r = opening.as_scalar(); - // generate random masking factors that also 
serves as nonces + // Generate random masking factors that also serves as nonces let mut y_s = Scalar::random(&mut OsRng); let mut y_x = Scalar::random(&mut OsRng); let mut y_r = Scalar::random(&mut OsRng); @@ -145,7 +145,7 @@ impl CommitmentEqProof { RistrettoPoint::multiscalar_mul(vec![&y_x, &y_s], vec![&(G), D_source]).compress(); let Y_2 = RistrettoPoint::multiscalar_mul(vec![&y_x, &y_r], vec![&(G), &(*H)]).compress(); - // record masking factors in the transcript + // Record masking factors in the transcript transcript.append_point(b"Y_0", &Y_0); transcript.append_point(b"Y_1", &Y_1); transcript.append_point(b"Y_2", &Y_2); @@ -153,12 +153,12 @@ impl CommitmentEqProof { let c = transcript.challenge_scalar(b"c"); transcript.challenge_scalar(b"w"); - // compute the masked values + // Compute the masked values let z_s = &(&c * s) + &y_s; let z_x = &(&c * &x) + &y_x; let z_r = &(&c * r) + &y_r; - // zeroize random scalars + // Zeroize random scalars y_s.zeroize(); y_x.zeroize(); y_r.zeroize(); @@ -183,13 +183,13 @@ impl CommitmentEqProof { ) -> Result<(), ProofVerificationError> { transcript.equality_proof_domain_separator(); - // extract the relevant scalar and Ristretto points from the inputs + // Extract the relevant scalar and Ristretto points from the inputs let P_source = source_pubkey.as_point(); let C_source = source_ciphertext.commitment().as_point(); let D_source = source_ciphertext.handle().as_point(); let C_destination = destination_commitment.as_point(); - // include Y_0, Y_1, Y_2 to transcript and extract challenges + // Include Y_0, Y_1, Y_2 to transcript and extract challenges transcript.validate_and_append_point(b"Y_0", &self.Y_0)?; transcript.validate_and_append_point(b"Y_1", &self.Y_1)?; transcript.validate_and_append_point(b"Y_2", &self.Y_2)?; @@ -201,7 +201,7 @@ impl CommitmentEqProof { let w_negated = -&w; let ww_negated = -&ww; - // check that the required algebraic condition holds + // Check that the required algebraic condition holds let 
Y_0 = self .Y_0 .decompress() @@ -284,7 +284,7 @@ impl CiphertextValidityProof { let c = transcript.challenge_scalar(b"c"); transcript.challenge_scalar(b"w"); - // masked message and opening + // Masked message and opening let z_r = &(&c * r) + &y_r; let z_x = &(&c * &x) + &y_x; diff --git a/xelis_common/src/crypto/transcript.rs b/xelis_common/src/crypto/transcript.rs index fedd2978..477e93da 100644 --- a/xelis_common/src/crypto/transcript.rs +++ b/xelis_common/src/crypto/transcript.rs @@ -74,7 +74,7 @@ impl ProtocolTranscript for Transcript { } } - // domain separators + // Domain separators fn new_commitment_eq_proof_domain_separator(&mut self) { self.append_message(b"dom-sep", b"new-commitment-proof"); diff --git a/xelis_common/src/difficulty.rs b/xelis_common/src/difficulty.rs index d7e3d2d6..296c0063 100644 --- a/xelis_common/src/difficulty.rs +++ b/xelis_common/src/difficulty.rs @@ -3,11 +3,11 @@ use primitive_types::U256; use thiserror::Error; // This type is used to easily switch between u64 and u128 as example -// And its easier to see where we use the block difficulty -// Difficulty is a value that represents the amount of work required to mine a block -// On XELIS, each difficulty point is a hash per second +// and its easier to see where we use the block difficulty. +// Difficulty is a value that represents the amount of work required to mine a block. +// On XELIS, each difficulty point is a hash per second. pub type Difficulty = VarUint; -// Cumulative difficulty is the sum of all difficulties of all blocks in the chain +// Cumulative difficulty is the sum of all difficulties of all blocks in the chain. // It is used to determine which branch is the main chain in BlockDAG merging. 
pub type CumulativeDifficulty = VarUint; @@ -19,15 +19,15 @@ pub enum DifficultyError { ErrorOnConversionBigUint } -// Verify the validity of a block difficulty against the current network difficulty -// All operations are done on U256 to avoid overflow +// Verify the validity of a block difficulty against the current network difficulty. +// All operations are done on U256 to avoid overflow. pub fn check_difficulty(hash: &Hash, difficulty: &Difficulty) -> Result { let target = compute_difficulty_target(difficulty)?; Ok(check_difficulty_against_target(hash, &target)) } -// Compute the difficulty target from the difficulty value -// This can be used to keep the target in cache instead of recomputing it each time +// Compute the difficulty target from the difficulty value. +// This can be used to keep the target in cache instead of recomputing it each time. pub fn compute_difficulty_target(difficulty: &Difficulty) -> Result { let diff = difficulty.as_ref(); if diff.is_zero() { @@ -43,8 +43,8 @@ pub fn check_difficulty_against_target(hash: &Hash, target: &U256) -> bool { hash_work <= *target } -// Convert a hash to a difficulty value -// This is only used by miner +// Convert a hash to a difficulty value. +// This is only used by miner. #[inline(always)] pub fn difficulty_from_hash(hash: &Hash) -> Difficulty { (U256::max_value() / U256::from_big_endian(hash.as_bytes())).into() diff --git a/xelis_common/src/json_rpc/websocket.rs b/xelis_common/src/json_rpc/websocket.rs index c88527b8..f72ad6bb 100644 --- a/xelis_common/src/json_rpc/websocket.rs +++ b/xelis_common/src/json_rpc/websocket.rs @@ -50,10 +50,10 @@ impl EventReceiver { } } - // Get the next event - // if we lagged behind, we will catch up + // Get the next event. + // If we lagged behind, we will catch up. // If you don't want to miss any event, you should create a queue to store them - // or an unbounded channel + // or an unbounded channel. 
pub async fn next(&mut self) -> Result { let mut res = self.inner.recv().await; // If we lagged behind, we need to catch up @@ -72,8 +72,8 @@ impl EventReceiver { } } -// It is around a Arc to be shareable easily -// it has a tokio task running in background to handle all incoming messages +// It is around an Arc to be shareable easily. +// It has a tokio task running in background to handle all incoming messages. pub type WebSocketJsonRPCClient = Arc>; enum InternalMessage { @@ -81,8 +81,8 @@ enum InternalMessage { Close, } -// A JSON-RPC Client over WebSocket protocol to support events -// It can be used in multi-thread safely because each request/response are linked using the id attribute. +// A JSON-RPC Client over WebSocket protocol to support events. +// It can be used in multi-thread safely because each request/response are linked using the id attribute. pub struct WebSocketJsonRPCClientImpl { sender: Mutex>, // This is the ID for the next request @@ -97,9 +97,9 @@ pub struct WebSocketJsonRPCClientImpl>, // websocket server address target: String, - // delay auto reconnect duration + // Delay auto reconnect duration delay_auto_reconnect: Mutex>, - // is the client online + // Is the client online online: AtomicBool, // This channel is called when the connection is lost offline_channel: Mutex>>, @@ -209,7 +209,7 @@ impl self.online.load(Ordering::SeqCst) } - // resubscribe to all events because of a reconnection + // Resubscribe to all events because of a reconnection async fn resubscribe_events(self: Arc) -> Result<(), JsonRPCError> { let events = { let events = self.events_to_id.lock().await; @@ -354,7 +354,7 @@ impl zelf.set_online(false).await; - // retry to connect until we are online or that it got disabled + // Retry to connect until we are online or that it got disabled while let Some(auto_reconnect) = { zelf.delay_auto_reconnect.lock().await.as_ref().cloned() } { debug!("Reconnecting to the server in {} seconds...", auto_reconnect.as_secs());
sleep(auto_reconnect).await; @@ -415,7 +415,7 @@ impl Message::Text(text) => { let response: JsonRPCResponse = serde_json::from_str(&text)?; if let Some(id) = response.id { - // send the response to the requester if it matches the ID + // Send the response to the requester if it matches the ID { let mut requests = self.requests.lock().await; if let Some(sender) = requests.remove(&id) { @@ -466,8 +466,8 @@ impl events.contains_key(&event) } - // Subscribe to an event - // Capacity represents the number of events that can be stored in the channel + // Subscribe to an event. + // Capacity represents the number of events that can be stored in the channel. pub async fn subscribe_event(&self, event: E, capacity: usize) -> JsonRPCResult> { // Returns a Receiver for this event if already registered { @@ -515,7 +515,7 @@ impl // Send the unsubscribe rpc method self.send::("unsubscribe", None, event).await?; - // delete it from events list + // Delete it from events list { let mut handlers = self.handler_by_id.lock().await; handlers.remove(&id); diff --git a/xelis_common/src/prompt/command.rs b/xelis_common/src/prompt/command.rs index ca148252..eee1c2e4 100644 --- a/xelis_common/src/prompt/command.rs +++ b/xelis_common/src/prompt/command.rs @@ -14,7 +14,7 @@ pub enum CommandError { #[error("Command was not found")] CommandNotFound, #[error("Expected required argument {}", _0)] - ExpectedRequiredArg(String), // arg name + ExpectedRequiredArg(String), // Arg name #[error("Too many arguments")] TooManyArguments, #[error(transparent)] @@ -232,7 +232,7 @@ impl CommandManager { arguments.insert(arg.get_name().clone(), arg.get_type().to_value(arg_value)?); } - // include all options args available + // Include all options args available for optional_arg in command.get_optional_args() { if let Some(arg_value) = command_split.next() { arguments.insert(optional_arg.get_name().clone(), optional_arg.get_type().to_value(arg_value)?); diff --git a/xelis_common/src/prompt/mod.rs 
b/xelis_common/src/prompt/mod.rs index 9cf85967..f31a3536 100644 --- a/xelis_common/src/prompt/mod.rs +++ b/xelis_common/src/prompt/mod.rs @@ -48,7 +48,7 @@ use regex::Regex; use log::{info, error, Level, debug, LevelFilter, warn}; use thiserror::Error; -// used for launch param +// Used for launch param #[derive(Debug, Clone, Copy, PartialEq, Eq)] #[cfg_attr(feature = "clap", derive(clap::ValueEnum))] pub enum LogLevel { @@ -178,8 +178,8 @@ struct State { impl State { fn new(allow_interactive: bool) -> Self { - // enable the raw mode for terminal - // so we can read each event/action + // Enable the raw mode for terminal + // so we can read each event/action. let interactive = if allow_interactive { !crossterminal::enable_raw_mode().is_err() } else { false }; Self { @@ -223,9 +223,9 @@ impl State { fn ioloop(self: &Arc, sender: UnboundedSender) -> Result<(), PromptError> { debug!("ioloop started"); - // all the history of commands + // All the history of commands let mut history: VecDeque = VecDeque::new(); - // current index in history in case we use arrows to move in history + // Current index in history in case we use arrows to move in history let mut history_index = 0; let mut is_in_history = false; loop { @@ -286,7 +286,7 @@ impl State { }, KeyCode::Char(c) => { is_in_history = false; - // handle CTRL+C + // Handle CTRL+C if key.modifiers == KeyModifiers::CONTROL && c == 'c' { break; } @@ -306,7 +306,7 @@ impl State { is_in_history = false; let mut buffer = self.user_input.lock()?; - // clone the buffer to send it to the command handler + // Clone the buffer to send it to the command handler let cloned_buffer = buffer.clone(); buffer.clear(); self.show_input(&buffer)?; @@ -390,7 +390,7 @@ impl State { } fn show_with_prompt_and_input(&self, prompt: &String, input: &String) -> Result<(), PromptError> { - // if not interactive, we don't need to show anything + // If not interactive, we don't need to show anything if !self.is_interactive() { return Ok(()) } @@ 
-508,7 +508,7 @@ impl Prompt { if prompt.state.is_interactive() { let (input_sender, input_receiver) = mpsc::unbounded_channel::(); let state = Arc::clone(&prompt.state); - // spawn a thread to prevent IO blocking - https://github.com/tokio-rs/tokio/issues/2466 + // Spawn a thread to prevent IO blocking - https://github.com/tokio-rs/tokio/issues/2466 std::thread::spawn(move || { if let Err(e) = state.ioloop(input_sender) { error!("Error in ioloop: {}", e); @@ -533,10 +533,10 @@ impl Prompt { } // Start the thread to read stdin and handle events - // Execute commands if a commande manager is present + // Execute commands if a command manager is present pub async fn start<'a>(&'a self, update_every: Duration, fn_message: AsyncF<'a, Self, Option<&'a CommandManager>, Result>, command_manager: Option<&'a CommandManager>) -> Result<(), PromptError> { - // setup the exit channel + // Setup the exit channel let mut exit_receiver = { let (sender, receiver) = oneshot::channel(); self.state.set_exit_channel(sender)?; @@ -578,8 +578,8 @@ impl Prompt { } _ = interval.tick() => { { - // verify that we don't have any reader - // as they may have changed the prompt + // Verify that we don't have any reader + // as they may have changed the prompt. if self.state.prompt_sender.lock()?.is_some() { continue; } @@ -608,8 +608,8 @@ impl Prompt { Ok(()) } - // Stop the prompt running - // can only be called when it was already started + // Stop the prompt running. + // Can only be called when it was already started. 
pub fn stop(&self) -> Result<(), PromptError> { self.state.stop() } @@ -634,7 +634,7 @@ impl Prompt { Ok(()) } - // get the current prompt displayed + // Get the current prompt displayed pub fn get_prompt(&self) -> Result, PromptError> { let prompt = self.state.prompt.lock()?; Ok(prompt.clone()) @@ -684,9 +684,9 @@ impl Prompt { Ok(()) } - // read a message from the user and apply the input mask if necessary + // Read a message from the user and apply the input mask if necessary pub async fn read_input(&self, prompt: S, apply_mask: bool) -> Result { - // This is also used as a sempahore to have only one call at a time + // This is also used as a semaphore to have only one call at a time let mut canceler = self.read_input_receiver.lock().await; // Verify that during the time it hasn't exited @@ -694,7 +694,7 @@ impl Prompt { return Err(PromptError::NotRunning) } - // register our reader + // Register our reader let receiver = { let mut prompt_sender = self.state.prompt_sender.lock()?; let (sender, receiver) = oneshot::channel(); @@ -702,7 +702,7 @@ impl Prompt { receiver }; - // keep in memory the previous prompt + // Keep in memory the previous prompt let old_prompt = self.get_prompt()?; let old_user_input = { let mut user_input = self.state.user_input.lock()?; @@ -715,7 +715,7 @@ impl Prompt { self.set_mask_input(true); } - // update the prompt to the requested one and keep blocking on the receiver + // Update the prompt to the requested one and keep blocking on the receiver self.update_prompt(prompt.to_string())?; let input = { let input = tokio::select! { @@ -732,7 +732,7 @@ impl Prompt { self.set_mask_input(false); } - // set the old user input + // Set the old user input { let mut user_input = self.state.user_input.lock()?; *user_input = old_user_input; @@ -743,17 +743,17 @@ impl Prompt { input } - // should we replace user input by * ? + // Should we replace user input by * ? 
pub fn should_mask_input(&self) -> bool { self.state.should_mask_input() } - // set the value to replace user input by * chars or not + // Set the value to replace user input by * chars or not pub fn set_mask_input(&self, value: bool) { self.state.mask_input.store(value, Ordering::SeqCst); } - // configure fern and print prompt message after each new output + // Configure fern and print prompt message after each new output fn setup_logger( &self, level: LogLevel, @@ -848,7 +848,7 @@ impl Prompt { } // Default log level modules - // It can be overriden by the user below + // It can be overridden by the user below base = base.level_for("sled", log::LevelFilter::Warn) .level_for("actix_server", log::LevelFilter::Warn) .level_for("actix_web", log::LevelFilter::Off) @@ -869,8 +869,8 @@ impl Prompt { Ok(()) } - // colorize a string with a specific color - // if colors are disabled, the message is returned as is + // Colorize a string with a specific color. + // If colors are disabled, the message is returned as is. pub fn colorize_string(&self, color: Color, message: &String) -> String { if self.disable_colors { return message.to_string(); @@ -879,8 +879,8 @@ impl Prompt { format!("\x1B[{}m{}\x1B[0m", color.to_fg_str(), message) } - // colorize a string with a specific color - // No color is set if colors are disabled + // Colorize a string with a specific color. + // No color is set if colors are disabled. 
pub fn colorize_str(&self, color: Color, message: &str) -> String { if self.disable_colors { return message.to_string(); diff --git a/xelis_common/src/prompt/terminal.rs b/xelis_common/src/prompt/terminal.rs index db71b7ff..92553e14 100644 --- a/xelis_common/src/prompt/terminal.rs +++ b/xelis_common/src/prompt/terminal.rs @@ -3,7 +3,7 @@ use std::mem; use indexmap::IndexSet; pub struct Terminal { - // all commands used during its running time + // All commands used during its running time history: IndexSet, // Index when navigating in the history history_index: Option, @@ -61,7 +61,7 @@ impl Terminal { } } - // advance by one if possible the cursor + // Advance by one if possible the cursor pub fn next_cursor(&mut self) { let next = self.buffer.len() > self.cursor_index; if next { diff --git a/xelis_common/src/queue.rs b/xelis_common/src/queue.rs index fd2dffc4..d192ff06 100644 --- a/xelis_common/src/queue.rs +++ b/xelis_common/src/queue.rs @@ -1,10 +1,10 @@ use std::{hash::Hash, collections::{VecDeque, HashSet}, fmt::Debug, sync::Arc}; -// A queue that allows for O(1) lookup of elements -// The queue is backed by a VecDeque and a HashSet -// The HashSet is used to check if an element is already in the queue -// The VecDeque is used to keep track of the order of the elements -// This can be shared between threads +// A queue that allows for O(1) lookup of elements. +// The queue is backed by a VecDeque and a HashSet. +// The HashSet is used to check if an element is already in the queue. +// The VecDeque is used to keep track of the order of the elements. +// This can be shared between threads. pub struct Queue { keys: HashSet>, order: VecDeque<(Arc, V)> @@ -18,8 +18,8 @@ impl Queue { } } - // Pushes a new element to the back of the queue - // Returns true if the element was added, false if it already exists + // Pushes a new element to the back of the queue. + // Returns true if the element was added, false if it already exists. 
pub fn push(&mut self, key: K, value: V) -> bool { if self.keys.contains(&key) { return false; diff --git a/xelis_common/src/rpc_server/mod.rs b/xelis_common/src/rpc_server/mod.rs index e17f1418..7ee97ab3 100644 --- a/xelis_common/src/rpc_server/mod.rs +++ b/xelis_common/src/rpc_server/mod.rs @@ -48,7 +48,7 @@ impl<'a> RpcResponse<'a> { } } -// trait to retrieve easily a JSON RPC handler for registered route +// Trait to retrieve easily a JSON RPC handler for registered route pub trait RPCServerHandler { fn get_rpc_handler(&self) -> &RPCHandler; } @@ -63,7 +63,7 @@ where Ok(HttpResponse::Ok().json(result)) } -// trait to retrieve easily a websocket handler for registered route +// Trait to retrieve easily a websocket handler for registered route pub trait WebSocketServerHandler { fn get_websocket(&self) -> &WebSocketServerShared; } diff --git a/xelis_common/src/rpc_server/rpc_handler.rs b/xelis_common/src/rpc_server/rpc_handler.rs index 0105120f..009f32f8 100644 --- a/xelis_common/src/rpc_server/rpc_handler.rs +++ b/xelis_common/src/rpc_server/rpc_handler.rs @@ -9,7 +9,7 @@ use log::{error, trace}; pub type Handler = fn(&'_ Context, Value) -> Pin> + Send + '_>>; pub struct RPCHandler { - methods: HashMap, // all RPC methods registered + methods: HashMap, // All RPC methods registered data: T } @@ -96,7 +96,7 @@ where }) } - // register a new RPC method handler + // Register a new RPC method handler pub fn register_method(&mut self, name: &str, handler: Handler) { if self.methods.insert(name.into(), handler).is_some() { error!("The method '{}' was already registered !", name); diff --git a/xelis_common/src/rpc_server/websocket/handler.rs b/xelis_common/src/rpc_server/websocket/handler.rs index 30d0439c..10a9e6ad 100644 --- a/xelis_common/src/rpc_server/websocket/handler.rs +++ b/xelis_common/src/rpc_server/websocket/handler.rs @@ -19,7 +19,7 @@ use crate::{ }; use super::{WebSocketSessionShared, WebSocketHandler}; -// generic websocket handler supporting event 
subscriptions +// Generic websocket handler supporting event subscriptions pub struct EventWebSocketHandler { events: RwLock, HashMap>>>, handler: RPCHandler diff --git a/xelis_common/src/rpc_server/websocket/mod.rs b/xelis_common/src/rpc_server/websocket/mod.rs index 5540cfb1..fa6a5deb 100644 --- a/xelis_common/src/rpc_server/websocket/mod.rs +++ b/xelis_common/src/rpc_server/websocket/mod.rs @@ -53,11 +53,11 @@ pub type WebSocketServerShared = Arc>; pub type WebSocketSessionShared = Arc>; // Constants -// timeout in seconds for sending a message +// Timeout in seconds for sending a message const MESSAGE_TIME_OUT: Duration = Duration::from_secs(1); -// interval in seconds to send a ping message +// Interval in seconds to send a ping message const KEEP_ALIVE_INTERVAL: Duration = Duration::from_secs(5); -// timeout in seconds to receive a pong message +// Timeout in seconds to receive a pong message const KEEP_ALIVE_TIME_OUT: Duration = Duration::from_secs(30); #[derive(Debug, thiserror::Error)] @@ -98,8 +98,8 @@ where Ok(()) } - // Send a ping message to the session - // this must be called from the task handling the session only + // Send a ping message to the session. + // This must be called from the task handling the session only. async fn ping(&self) -> Result<(), WebSocketError> { let mut inner = self.inner.lock().await; let session = inner.as_mut().ok_or(WebSocketError::SessionAlreadyClosed)?; @@ -107,8 +107,8 @@ where Ok(()) } - // Send a pong message to the session - // this must be called from the task handling the session only + // Send a pong message to the session. + // This must be called from the task handling the session only. 
async fn pong(&self) -> Result<(), WebSocketError> { let mut inner = self.inner.lock().await; let session = inner.as_mut().ok_or(WebSocketError::SessionAlreadyClosed)?; @@ -116,7 +116,7 @@ where Ok(()) } - // this must be called from the task handling the session only + // This must be called from the task handling the session only async fn send_text_internal>(&self, value: S) -> Result<(), WebSocketError> { let mut inner = self.inner.lock().await; let session = inner.as_mut().ok_or(WebSocketError::SessionAlreadyClosed)?; @@ -132,7 +132,7 @@ where Ok(()) } - // this must be called from the task handling the session only + // This must be called from the task handling the session only async fn close_internal(&self, reason: Option) -> Result<(), WebSocketError> { let mut inner = self.inner.lock().await; let session = inner.take().ok_or(WebSocketError::SessionAlreadyClosed)?; @@ -178,18 +178,18 @@ where #[async_trait] pub trait WebSocketHandler: Sized + Sync + Send { - // called when a new Session is added in websocket server - // if an error is returned, maintaining the session is aborted + // Called when a new Session is added in websocket server. + // If an error is returned, maintaining the session is aborted. 
async fn on_connection(&self, _: &WebSocketSessionShared) -> Result<(), anyhow::Error> { Ok(()) } - // called when a new message is received + // Called when a new message is received async fn on_message(&self, _: &WebSocketSessionShared, _: Bytes) -> Result<(), anyhow::Error> { Ok(()) } - // called when a Session is closed + // Called when a Session is closed async fn on_close(&self, _: &WebSocketSessionShared) -> Result<(), anyhow::Error> { Ok(()) } @@ -281,7 +281,7 @@ impl WebSocketServer where H: WebSocketHandler + 'static { // Delete a session from the server pub async fn delete_session(self: &Arc, session: &WebSocketSessionShared, reason: Option) { trace!("deleting session #{}", session.id); - // close session + // Close session if let Err(e) = session.close_internal(reason).await { debug!("Error while closing session: {}", e); } @@ -295,7 +295,7 @@ impl WebSocketServer where H: WebSocketHandler + 'static { if deleted { debug!("deleted session #{}", session.id); - // call on_close + // Call on_close if let Err(e) = self.handler.on_close(&session).await { debug!("Error while calling on_close: {}", e); } @@ -303,11 +303,11 @@ impl WebSocketServer where H: WebSocketHandler + 'static { trace!("sessions unlocked"); } - // Internal function to handle a WebSocket connection - // This will send a ping every 5 seconds and close the connection if no pong is received within 30 seconds - // It will also translate all messages to the handler + // Internal function to handle a WebSocket connection. + // This will send a ping every 5 seconds and close the connection if no pong is received within 30 seconds. + // It will also translate all messages to the handler. 
async fn handle_ws_internal(self: Arc, session: WebSocketSessionShared, mut stream: AggregatedMessageStream, mut rx: UnboundedReceiver) { - // call on_connection + // Call on_connection if let Err(e) = self.handler.on_connection(&session).await { debug!("Error while calling on_connection: {}", e); self.delete_session(&session, Some(CloseReason::from(CloseCode::Error))).await; @@ -318,7 +318,7 @@ impl WebSocketServer where H: WebSocketHandler + 'static { let mut last_pong_received = Instant::now(); let reason = loop { select! { - // heartbeat + // Heartbeat _ = interval.tick() => { trace!("Sending ping to session #{}", session.id); if last_pong_received.elapsed() > KEEP_ALIVE_TIME_OUT { @@ -351,7 +351,7 @@ impl WebSocketServer where H: WebSocketHandler + 'static { } } }, - // wait for next message + // Wait for next message res = stream.next() => { trace!("Received stream message for session #{}", session.id); let msg = match res { @@ -368,7 +368,7 @@ impl WebSocketServer where H: WebSocketHandler + 'static { }, }; - // handle message + // Handle message match msg { AggregatedMessage::Text(text) => { trace!("Received text message for session #{}: {}", session.id, text); @@ -404,7 +404,7 @@ impl WebSocketServer where H: WebSocketHandler + 'static { }; debug!("Session #{} is closing", session.id); - // attempt to close connection gracefully + // Attempt to close connection gracefully self.delete_session(&session, reason).await; debug!("Session #{} has been closed", session.id); } diff --git a/xelis_common/src/serializer/reader.rs b/xelis_common/src/serializer/reader.rs index 2a191c21..8131a984 100644 --- a/xelis_common/src/serializer/reader.rs +++ b/xelis_common/src/serializer/reader.rs @@ -24,8 +24,8 @@ pub enum ReaderError { // Reader help us to read safely from bytes // Mostly used when de-serializing an object from Serializer trait pub struct Reader<'a> { - bytes: &'a[u8], // bytes to read - total: usize // total read bytes + bytes: &'a[u8], // Bytes to read + 
total: usize // Total read bytes } impl<'a> Reader<'a> { diff --git a/xelis_common/src/time.rs b/xelis_common/src/time.rs index 16ed2a02..65afc22a 100644 --- a/xelis_common/src/time.rs +++ b/xelis_common/src/time.rs @@ -15,7 +15,7 @@ pub fn get_current_time() -> Duration { time } -// return timestamp in seconds +// Return timestamp in seconds pub fn get_current_time_in_seconds() -> TimestampSeconds { get_current_time().as_secs() } diff --git a/xelis_common/src/tokio/mod.rs b/xelis_common/src/tokio/mod.rs index 595e43c5..6f5e18c0 100644 --- a/xelis_common/src/tokio/mod.rs +++ b/xelis_common/src/tokio/mod.rs @@ -17,8 +17,8 @@ pub use tokio_with_wasm::*; pub use tokio::*; -// Spawn a new task with a name -// If the tokio_unstable feature is enabled, the task will be named +// Spawn a new task with a name. +// If the tokio_unstable feature is enabled, the task will be named. #[inline(always)] #[cfg(not(all( target_arch = "wasm32", @@ -43,8 +43,8 @@ where } } -// Spawn a new task with a name -// Send trait is not required for wasm32 +// Spawn a new task with a name. +// Send trait is not required for wasm32. 
#[cfg(all( target_arch = "wasm32", target_vendor = "unknown", diff --git a/xelis_common/src/transaction/builder.rs b/xelis_common/src/transaction/builder.rs index 018e9ae4..f0b6ac8d 100644 --- a/xelis_common/src/transaction/builder.rs +++ b/xelis_common/src/transaction/builder.rs @@ -74,7 +74,7 @@ pub enum GenerationError { EncryptedExtraDataTooLarge(usize, usize), #[error("Address is not on the same network as us")] InvalidNetwork, - #[error("Extra data was provied with an integrated address")] + #[error("Extra data was provided with an integrated address")] ExtraDataAndIntegratedAddress, #[error("Proof generation error: {0}")] Proof(#[from] ProofGenerationError), @@ -83,9 +83,9 @@ pub enum GenerationError { #[derive(Serialize, Deserialize, Clone, Debug)] #[serde(rename_all = "snake_case")] pub enum FeeBuilder { - // calculate tx fees based on its size and multiply by this value + // Calculate tx fees based on its size and multiply by this value Multiplier(f64), - Value(u64) // set a direct value of how much fees you want to pay + Value(u64) // Set a direct value of how much fees you want to pay } impl Default for FeeBuilder { diff --git a/xelis_common/src/transaction/extra_data.rs b/xelis_common/src/transaction/extra_data.rs index a2e8e9e0..cdb53753 100644 --- a/xelis_common/src/transaction/extra_data.rs +++ b/xelis_common/src/transaction/extra_data.rs @@ -43,11 +43,11 @@ pub const TAG_SIZE: usize = 16; // This error is thrown when the ciphertext is not in the expected format. #[derive(Error, Clone, Debug, Eq, PartialEq)] -#[error("malformated ciphertext")] +#[error("malformatted ciphertext")] pub struct CipherFormatError; /// Every transfer has its associated secret key, derived from the shared secret. -/// We never use a key twice, then. We can reuse the same nonce everytime. +/// We never use a key twice, then. We can reuse the same nonce every time. 
const NONCE: &[u8; 12] = b"xelis-crypto"; /// This is the encrypted data, which is the result of the encryption process. @@ -77,7 +77,7 @@ pub struct UnknownExtraDataFormat(pub Vec); // New version of Extra Data due to the issue of commitment randomness reuse // https://gist.github.com/kayabaNerve/b754e9ed9fa4cc2c607f38a83aa3df2a -// We create a new opening to be independant of the amount opening. +// We create a new opening to be independent of the amount opening. // This is more secure and prevent bruteforce attack from the above link. // We need to store 64 bytes more than previous version due to the exclusive handles created. pub struct ExtraData { @@ -280,7 +280,7 @@ impl PlaintextData { pub fn encrypt_in_place_with_aead(mut self, key: &SharedKey) -> AEADCipher { let c = ChaCha20Poly1305::new(&key); c.encrypt_in_place(NONCE.into(), &[], &mut self.0) - .expect("unreachable (unsufficient capacity on a vec)"); + .expect("unreachable (insufficient capacity on a vec)"); AEADCipher(self.0) } diff --git a/xelis_common/src/transaction/mod.rs b/xelis_common/src/transaction/mod.rs index 0b5bcd25..d38c9589 100644 --- a/xelis_common/src/transaction/mod.rs +++ b/xelis_common/src/transaction/mod.rs @@ -64,7 +64,7 @@ pub struct SourceCommitment { pub struct TransferPayload { asset: Hash, destination: CompressedPublicKey, - // we can put whatever we want up to EXTRA_DATA_LIMIT_SIZE bytes + // We can put whatever we want up to EXTRA_DATA_LIMIT_SIZE bytes extra_data: Option, /// Represents the ciphertext along with `sender_handle` and `receiver_handle`. /// The opening is reused for both of the sender and receiver commitments. 
@@ -81,7 +81,7 @@ pub struct BurnPayload { pub amount: u64 } -// this enum represent all types of transaction available on XELIS Network +// This enum represents all types of transaction available on XELIS Network #[derive(Serialize, Deserialize, Clone, Debug)] #[serde(rename_all = "snake_case")] pub enum TransactionType { @@ -100,8 +100,8 @@ pub struct Transaction { data: TransactionType, /// Fees in XELIS fee: u64, - /// nonce must be equal to the one on chain account - /// used to prevent replay attacks and have ordered transactions + /// Nonce must be equal to the one on chain account + /// Used to prevent replay attacks and have ordered transactions nonce: u64, /// We have one source commitment and equality proof per asset used in the tx. source_commitments: Vec, diff --git a/xelis_common/src/transaction/verify.rs b/xelis_common/src/transaction/verify.rs index fd16a347..9405bd36 100644 --- a/xelis_common/src/transaction/verify.rs +++ b/xelis_common/src/transaction/verify.rs @@ -147,7 +147,7 @@ impl DecompressedTransferCt { impl Transaction { /// Get the new output ciphertext - // This is used to substract the amount from the sender's balance + // This is used to subtract the amount from the sender's balance fn get_sender_output_ct( &self, asset: &Hash, @@ -229,8 +229,8 @@ impl Transaction { } } - // internal, does not verify the range proof - // returns (transcript, commitments for range proof) + // Internal, does not verify the range proof. + // Returns (transcript, commitments for range proof). async fn pre_verify<'a, E, B: BlockchainVerificationState<'a, E>>( &'a self, state: &mut B, @@ -576,9 +576,9 @@ impl Transaction { Ok(()) } - /// Verify only that the final sender balance is the expected one for each commitment - /// Then apply ciphertexts to the state - /// Checks done are: commitment eq proofs only + /// Verify only that the final sender balance is the expected one for each commitment. + /// Then apply ciphertexts to the state.
+ /// Checks done are: commitment eq proofs only. pub async fn apply_with_partial_verify<'a, E, B: BlockchainVerificationState<'a, E>>(&'a self, state: &mut B) -> Result<(), VerificationError> { trace!("apply with partial verify"); let mut sigma_batch_collector = BatchCollector::default(); diff --git a/xelis_common/src/utils.rs b/xelis_common/src/utils.rs index 69f94ce2..b8b69336 100644 --- a/xelis_common/src/utils.rs +++ b/xelis_common/src/utils.rs @@ -45,14 +45,14 @@ pub fn from_coin(value: impl Into, coin_decimals: u8) -> Option { Some(value * 10u64.pow(coin_decimals as u32) + decimals_value) } -// return the fee for a transaction based on its size in bytes -// the fee is calculated in atomic units for XEL -// Sending to a newly created address will increase the fee -// Each transfers output will also increase the fee +// Return the fee for a transaction based on its size in bytes. +// The fee is calculated in atomic units for XEL. +// Sending to a newly created address will increase the fee. +// Each transfer output will also increase the fee. pub fn calculate_tx_fee(tx_size: usize, output_count: usize, new_addresses: usize) -> u64 { let mut size_in_kb = tx_size as u64 / 1024; - if tx_size % 1024 != 0 { // we consume a full kb for fee + if tx_size % 1024 != 0 { // We consume a full kb for fee size_in_kb += 1; } @@ -98,8 +98,8 @@ pub fn format_difficulty(mut difficulty: Difficulty) -> String { return format!("{}{}{}", difficulty, left_str, DIFFICULTY_FORMATS[count]); } -// Sanitize a daemon address to make sure it's a valid websocket address -// By default, will use ws:// if no protocol is specified +// Sanitize a daemon address to make sure it's a valid websocket address. +// By default, will use ws:// if no protocol is specified.
pub fn sanitize_daemon_address(target: &str) -> String { let mut target = target.to_lowercase(); if target.starts_with("https://") { diff --git a/xelis_common/src/varuint.rs b/xelis_common/src/varuint.rs index 13b96c0a..c6796aa3 100644 --- a/xelis_common/src/varuint.rs +++ b/xelis_common/src/varuint.rs @@ -7,11 +7,11 @@ use primitive_types::U256; use serde::{Deserialize, Serialize}; use crate::serializer::{Reader, ReaderError, Serializer, Writer}; -// This is like a variable length integer but up to U256 -// It is mostly used to save difficulty and cumulative difficulty on disk -// In memory, it keeps using U256 (32 bytes) -// On disk it can be as small as 1 byte and as big as 33 bytes -// First byte written is the VarUint length (1 to 32) +// This is like a variable length integer but up to U256. +// It is mostly used to save difficulty and cumulative difficulty on disk. +// In memory, it keeps using U256 (32 bytes). +// On disk it can be as small as 1 byte and as big as 33 bytes. +// First byte written is the VarUint length (1 to 32). 
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Default)] pub struct VarUint(U256); @@ -85,7 +85,7 @@ impl Serializer for VarUint { Ok(Self(U256::from_big_endian(&buffer))) } - // no fast size impl as it's same as writing it + // No fast size impl as it's same as writing it } impl AsRef for VarUint { diff --git a/xelis_daemon/src/config.rs b/xelis_daemon/src/config.rs index 6737ce53..70cca121 100644 --- a/xelis_daemon/src/config.rs +++ b/xelis_daemon/src/config.rs @@ -13,11 +13,11 @@ use xelis_common::{ time::TimestampSeconds }; -// In case of potential forks, have a unique network id to not connect to others compatible chains +// In case of potential forks, have a unique network id to not connect to other compatible chains pub const NETWORK_ID_SIZE: usize = 16; pub const NETWORK_ID: [u8; NETWORK_ID_SIZE] = [0x73, 0x6c, 0x69, 0x78, 0x65, 0x5f, 0x78, 0x65, 0x6c, 0x69, 0x73, 0x5f, 0x62, 0x6c, 0x6f, 0x63]; -// bind addresses +// Bind addresses pub const DEFAULT_P2P_BIND_ADDRESS: &str = "0.0.0.0:2125"; pub const DEFAULT_RPC_BIND_ADDRESS: &str = "0.0.0.0:8080"; @@ -25,36 +25,37 @@ pub const DEFAULT_RPC_BIND_ADDRESS: &str = "0.0.0.0:8080"; pub const DEFAULT_CACHE_SIZE: usize = 1024; // Block rules -// Millis per second, it is used to prevent having random 1000 values anywhere +// Millis per second, it is used to prevent having random 1000 values anywhere. pub const MILLIS_PER_SECOND: u64 = 1000; // Block Time in milliseconds pub const BLOCK_TIME_MILLIS: u64 = 15 * MILLIS_PER_SECOND; // 15s block time // Minimum difficulty (each difficulty point is in H/s) // Current: BLOCK TIME in millis * 20 = 20 KH/s minimum -// This is to prevent spamming the network with low difficulty blocks -// This is active only on mainnet mode +// This is to prevent spamming the network with low difficulty blocks. +// This is active only on mainnet mode. 
pub const MAINNET_MINIMUM_DIFFICULTY: Difficulty = Difficulty::from_u64(BLOCK_TIME_MILLIS * 20); // Testnet & Devnet minimum difficulty pub const OTHER_MINIMUM_DIFFICULTY: Difficulty = Difficulty::from_u64(BLOCK_TIME_MILLIS * 2); // This is also used as testnet and devnet minimum difficulty pub const GENESIS_BLOCK_DIFFICULTY: Difficulty = Difficulty::from_u64(1); -// 2 seconds maximum in future (prevent any attack on reducing difficulty but keep margin for unsynced devices) +// Maximum timestamp allowed in the future is 2 seconds. +// This helps prevent attacks that exploit difficulty reduction while allowing a margin for unsynced devices. pub const TIMESTAMP_IN_FUTURE_LIMIT: TimestampSeconds = 2 * 1000; -// keep at least last N blocks until top topoheight when pruning the chain -// WARNING: This must be at least 50 blocks for difficulty adjustement +// Keep at least last N blocks until top topoheight when pruning the chain. +// WARNING: This must be at least 50 blocks for difficulty adjustment. pub const PRUNE_SAFETY_LIMIT: u64 = STABLE_LIMIT * 10; // BlockDAG rules pub const STABLE_LIMIT: u64 = 8; // in how many height we consider the block stable // Emission rules -// 15% (6 months), 10% (6 months), 5% per block going to dev address +// 15% (6 months), 10% (6 months), 5% per block going to dev address. // NOTE: The explained emission above was the expected one -// But due to a bug in the function to calculate the dev fee reward, +// but due to a bug in the function to calculate the dev fee reward, // the actual emission was directly set to 10% per block -// New emission rules are: 10% during 1.5 years, then 5% for the rest -// This is the same for the project but reduce a bit the mining cost as they earn 5% more +// New emission rules are: 10% during 1.5 years, then 5% for the rest. +// This is functionally the same for the project but is slightly better for miners as they receive 5% more. 
pub const DEV_FEES: [DevFeeThreshold; 2] = [ // Activated for 3M blocks DevFeeThreshold { @@ -69,29 +70,29 @@ pub const DEV_FEES: [DevFeeThreshold; 2] = [ fee_percentage: 5 } ]; -// only 30% of reward for side block +// Only 30% of reward for side block. // This is to prevent spamming side blocks -// and also give rewards for miners with valid work on main chain +// and also give rewards for miners with valid work on main chain. pub const SIDE_BLOCK_REWARD_PERCENT: u64 = 30; -// maximum 3 blocks for side block reward -// Each side block reward will be divided by the number of side blocks * 2 +// Maximum 3 blocks for side block reward. +// Each side block reward will be divided by the number of side blocks * 2. // With a configuration of 3 blocks, we have the following percents: // 1 block: 30% // 2 blocks: 15% // 3 blocks: 7% // 4 blocks: minimum percentage set below pub const SIDE_BLOCK_REWARD_MAX_BLOCKS: u64 = 3; -// minimum 5% of block reward for side block -// This is the minimum given for all others valid side blocks +// Minimum 5% of block reward for side block. +// This is the minimum given for all others valid side blocks. pub const SIDE_BLOCK_REWARD_MIN_PERCENT: u64 = 5; -// Emission speed factor for the emission curve -// It is used to calculate based on the supply the block reward +// Emission speed factor for the emission curve. +// It is used to calculate based on the supply the block reward. pub const EMISSION_SPEED_FACTOR: u64 = 20; -// 30% of the transaction fee is burned +// 30% of the transaction fee is burned. // This is to reduce the supply over time // and also to prevent spamming the network with low fee transactions -// or free tx from miners -// This should be enabled once Smart Contracts are released +// or free tx from miners. +// This should be enabled once Smart Contracts are released. 
pub const TRANSACTION_FEE_BURN_PERCENT: u64 = 30; // Developer address for paying dev fees until Smart Contracts integration @@ -99,33 +100,33 @@ pub const TRANSACTION_FEE_BURN_PERCENT: u64 = 30; pub const DEV_ADDRESS: &str = "xel:vs3mfyywt0fjys0rgslue7mm4wr23xdgejsjk0ld7f2kxng4d4nqqnkdufz"; // Chain sync config -// minimum X seconds between each chain sync request per peer +// Minimum X seconds between each chain sync request per peer pub const CHAIN_SYNC_DELAY: u64 = 5; -// wait maximum between each chain sync request to peers +// Wait maximum between each chain sync request to peers pub const CHAIN_SYNC_TIMEOUT_SECS: u64 = CHAIN_SYNC_DELAY * 3; -// first 30 blocks are sent in linear way, then it's exponential +// First 30 blocks are sent in linear way, then it's exponential pub const CHAIN_SYNC_REQUEST_EXPONENTIAL_INDEX_START: usize = 30; -// allows up to X blocks id (hash + height) sent for request +// Allows up to X blocks id (hash + height) sent for request pub const CHAIN_SYNC_REQUEST_MAX_BLOCKS: usize = 64; -// minimum X blocks hashes sent for response +// Minimum X blocks hashes sent for response pub const CHAIN_SYNC_RESPONSE_MIN_BLOCKS: usize = 512; // Default response blocks sent/accepted pub const CHAIN_SYNC_DEFAULT_RESPONSE_BLOCKS: usize = 4096; -// allows up to X blocks hashes sent for response +// Allows up to X blocks hashes sent for response pub const CHAIN_SYNC_RESPONSE_MAX_BLOCKS: usize = 16384; -// send last 10 heights +// Send last 10 heights pub const CHAIN_SYNC_TOP_BLOCKS: usize = 10; // P2p rules -// time between each ping +// Time between each ping pub const P2P_PING_DELAY: u64 = 10; -// time in seconds between each update of peerlist +// Time in seconds between each update of peerlist pub const P2P_PING_PEER_LIST_DELAY: u64 = 60 * 5; -// maximum number of addresses to be send +// Maximum number of addresses to be send pub const P2P_PING_PEER_LIST_LIMIT: usize = 16; -// default number of maximum peers +// Default number of maximum peers pub 
const P2P_DEFAULT_MAX_PEERS: usize = 32; -// time in seconds between each time we try to connect to a new peer +// Time in seconds between each time we try to connect to a new peer pub const P2P_EXTEND_PEERLIST_DELAY: u64 = 60; // Peer wait on error accept new p2p connections in seconds pub const P2P_PEER_WAIT_ON_ERROR: u64 = 15; @@ -136,37 +137,37 @@ pub const P2P_DEFAULT_CONCURRENCY_TASK_COUNT_LIMIT: usize = 4; // Heartbeat interval in seconds to check if peer is still alive pub const P2P_HEARTBEAT_INTERVAL: u64 = P2P_PING_DELAY / 2; // Timeout in seconds -// If we didn't receive any packet from a peer during this time, we disconnect it +// If we didn't receive any packets from a peer during this time, we disconnect it pub const P2P_PING_TIMEOUT: u64 = P2P_PING_DELAY * 6; // Peer rules -// number of seconds to reset the counter -// Set to 30 minutes +// Number of seconds to reset the counter. +// Set to 30 minutes. pub const PEER_FAIL_TIME_RESET: u64 = 30 * 60; -// number of fail to disconnect the peer +// Number of fail to disconnect the peer pub const PEER_FAIL_LIMIT: u8 = 50; -// number of fail during handshake before temp ban +// Number of fail during handshake before temp ban pub const PEER_FAIL_TO_CONNECT_LIMIT: u8 = 3; -// number of seconds to temp ban the peer in case of fail reached -// Set to 15 minutes +// Number of seconds to temp ban the peer in case of fail reached. +// Set to 15 minutes. pub const PEER_TEMP_BAN_TIME: u64 = 15 * 60; -// number of seconds to temp ban the peer in case of fail reached during handshake -// Set to 1 minute +// Number of seconds to temp ban the peer in case of fail reached during handshake. +// Set to 1 minute. 
pub const PEER_TEMP_BAN_TIME_ON_CONNECT: u64 = 60; -// millis until we timeout +// Millis until we timeout pub const PEER_TIMEOUT_REQUEST_OBJECT: u64 = 15_000; -// millis until we timeout during a bootstrap request +// Millis until we timeout during a bootstrap request pub const PEER_TIMEOUT_BOOTSTRAP_STEP: u64 = 60_000; -// millis until we timeout during a handshake +// Millis until we timeout during a handshake pub const PEER_TIMEOUT_INIT_CONNECTION: u64 = 5_000; -// millis until we timeout during outgoing connection try +// Millis until we timeout during outgoing connection try pub const PEER_TIMEOUT_INIT_OUTGOING_CONNECTION: u64 = 30_000; -// millis until we timeout during a handshake +// Millis until we timeout during a disconnect pub const PEER_TIMEOUT_DISCONNECT: u64 = 1_500; // 16 additional bytes are for AEAD from ChaCha20Poly1305 pub const PEER_MAX_PACKET_SIZE: u32 = MAX_BLOCK_SIZE as u32 + 16; -// Peer TX cache size -// This is how many elements are stored in the LRU cache at maximum +// Peer TX cache size. +// This is how many elements are stored in the LRU cache at maximum. 
pub const PEER_TX_CACHE_SIZE: usize = 10240; // Peer Block cache size pub const PEER_BLOCK_CACHE_SIZE: usize = 1024; @@ -243,7 +244,7 @@ const MAINNET_GENESIS_BLOCK_HASH: Hash = Hash::new([175, 118, 37, 203, 175, 200, const TESTNET_GENESIS_BLOCK_HASH: Hash = Hash::new([171, 50, 219, 186, 28, 164, 189, 225, 197, 167, 187, 143, 213, 59, 217, 238, 51, 242, 133, 181, 188, 235, 151, 50, 110, 33, 185, 188, 100, 146, 23, 132]); // Genesis block getter -// This is necessary to prevent having the same Genesis Block for differents network +// This is necessary to prevent having the same Genesis Block for different networks // Dev returns none to generate a new genesis block each time it starts a chain pub fn get_hex_genesis_block(network: &Network) -> Option<&str> { match network { @@ -276,9 +277,9 @@ pub const fn get_seed_nodes(network: &Network) -> &[&str] { } } -// Get minimum difficulty based on the network -// Mainnet has a minimum difficulty to prevent spamming the network -// Testnet has a lower difficulty to allow faster block generation +// Get minimum difficulty based on the network. +// Mainnet has a higher minimum difficulty to prevent spamming the network. +// Testnet has a lower difficulty to allow faster block generation. 
pub const fn get_minimum_difficulty(network: &Network) -> Difficulty { match network { Network::Mainnet => MAINNET_MINIMUM_DIFFICULTY, diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index ec6e9391..831e1d4b 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -206,42 +206,42 @@ pub struct Config { } pub struct Blockchain { - // current block height + // Current block height height: AtomicU64, - // current topo height + // Current topo height topoheight: AtomicU64, - // current stable height + // Current stable height stable_height: AtomicU64, - // Determine which last block is stable - // It is used mostly for chain rewind limit + // Determine which last block is stable. + // It is used mostly for chain rewind limit. stable_topoheight: AtomicU64, - // mempool to retrieve/add all txs + // Mempool to retrieve/add all txs mempool: RwLock, - // storage to retrieve/add blocks + // Storage to retrieve/add blocks storage: RwLock, // P2p module p2p: RwLock>>>, // RPC module rpc: RwLock>>, - // current difficulty at tips - // its used as cache to display current network hashrate + // Current difficulty at tips. + // It's used as a cache to display current network hashrate. 
difficulty: Mutex, - // if a simulator is set + // If a simulator is set simulator: Option, - // if we should skip PoW verification + // If we should skip PoW verification skip_pow_verification: bool, // Should we skip block template TXs verification skip_block_template_txs_verification: bool, - // current network type on which one we're using/connected to + // Current network type on which one we're using/connected to network: Network, - // this cache is used to avoid to recompute the common base for each block and is mandatory - // key is (tip hash, tip height) while value is (base hash, base height) + // This cache is used to avoid recomputing the common base for each block and is mandatory // key is (tip hash, tip height) while value is (base hash, base height). tip_base_cache: Mutex>, - // tip work score is used to determine the best tip based on a block, tip base ands a base height + // Tip work score is used to determine the best tip based on a block, tip base and a base height tip_work_score_cache: Mutex, CumulativeDifficulty)>>, - // using base hash, current tip hash and base height, this cache is used to store the DAG order + // Using base hash, current tip hash and base height, this cache is used to store the DAG order full_order_cache: Mutex>>, - // auto prune mode if enabled, will delete all blocks every N and keep only N top blocks (topoheight based) + // Auto prune mode if enabled, will delete all blocks every N and keep only N top blocks (topoheight based) auto_prune_keep_n_blocks: Option }
let storage = blockchain.get_storage().read().await; @@ -332,10 +332,10 @@ impl Blockchain { } let arc = Arc::new(blockchain); - // create P2P Server + // Create P2P Server if !config.disable_p2p_server { info!("Starting P2p server..."); - // setup exclusive nodes + // Setup exclusive nodes let mut exclusive_nodes: Vec = Vec::with_capacity(config.exclusive_nodes.len()); for peer in config.exclusive_nodes { for peer in peer.split(",") { @@ -352,7 +352,7 @@ impl Blockchain { match P2pServer::new(config.p2p_concurrency_task_count_limit, config.dir_path, config.tag, config.max_peers, config.p2p_bind_address, Arc::clone(&arc), exclusive_nodes.is_empty(), exclusive_nodes, config.allow_fast_sync, config.allow_boost_sync, config.max_chain_response_size, !config.disable_ip_sharing, config.disable_p2p_outgoing_connections) { Ok(p2p) => { - // connect to priority nodes + // Connect to priority nodes for addr in config.priority_nodes { for addr in addr.split(",") { let addr: SocketAddr = match addr.parse() { @@ -383,7 +383,7 @@ impl Blockchain { }; } - // create RPC Server + // Create RPC Server if !config.disable_rpc_server { info!("RPC Server will listen on: {}", config.rpc_bind_address); match DaemonRpcServer::new(config.rpc_bind_address, Arc::clone(&arc), config.disable_getwork_server).await { @@ -414,9 +414,9 @@ impl Blockchain { self.skip_pow_verification } - // Stop all blockchain modules + // Stop all blockchain modules. // Each module is stopped in its own context - // So no deadlock occurs in case they are linked + // so no deadlock occurs in case they are linked. pub async fn stop(&self) { info!("Stopping modules..."); { @@ -448,8 +448,8 @@ impl Blockchain { info!("All modules are now stopped!"); } - // Reload the storage and update all cache values - // Clear the mempool also in case of not being up-to-date + // Reload the storage and update all cache values. + // Also clear the mempool if it's not up-to-date. 
pub async fn reload_from_disk(&self) -> Result<(), BlockchainError> { trace!("Reloading chain from disk"); let storage = self.storage.write().await; @@ -480,11 +480,11 @@ impl Blockchain { Ok(()) } - // function to include the genesis block and register the public dev key. + // Function to include the genesis block and register the public dev key. async fn create_genesis_block(&self) -> Result<(), BlockchainError> { let mut storage = self.storage.write().await; - // register XELIS asset + // Register XELIS asset debug!("Registering XELIS asset: {} at topoheight 0", XELIS_ASSET); storage.add_asset(&XELIS_ASSET, AssetData::new(0, COIN_DECIMALS)).await?; @@ -515,7 +515,7 @@ impl Blockchain { debug!("Adding genesis block '{}' to chain", genesis_hash); - // hardcode genesis block topoheight + // Hardcode genesis block topoheight storage.set_topo_height_for_block(&genesis_block.hash(), 0).await?; storage.set_top_height(0)?; @@ -524,9 +524,9 @@ impl Blockchain { Ok(()) } - // mine a block for current difficulty + // Mine a block for current difficulty. // This is for testing purpose and shouldn't be directly used as it will mine on async threads - // which will reduce performance of the daemon and can take forever if difficulty is high + // which will reduce performance of the daemon and can take forever if difficulty is high. pub async fn mine_block(&self, key: &PublicKey) -> Result { let (mut header, difficulty) = { let storage = self.storage.read().await; @@ -553,17 +553,17 @@ impl Blockchain { Ok(block) } - // Prune the chain until topoheight - // This will delete all blocks / versioned balances / txs until topoheight in param + // Prune the chain until topoheight. + // This will delete all blocks / versioned balances / txs until topoheight in param. 
pub async fn prune_until_topoheight(&self, topoheight: u64) -> Result { let mut storage = self.storage.write().await; self.prune_until_topoheight_for_storage(topoheight, &mut storage).await } - // delete all blocks / versioned balances / txs until topoheight in param - // for this, we have to locate the nearest Sync block for DAG under the limit topoheight - // and then delete all blocks before it - // keep a marge of PRUNE_SAFETY_LIMIT + // Delete all blocks / versioned balances / txs until topoheight in parameter. + // For this, we have to locate the nearest Sync block for DAG under the limit topoheight + // and then delete all blocks before it. + // Maintain a margin of PRUNE_SAFETY_LIMIT. pub async fn prune_until_topoheight_for_storage(&self, topoheight: u64, storage: &mut S) -> Result { if topoheight == 0 { return Err(BlockchainError::PruneZero) @@ -580,28 +580,28 @@ impl Blockchain { return Err(BlockchainError::PruneLowerThanLastPruned) } - // find new stable point based on a sync block under the limit topoheight + // Find new stable point based on a sync block under the limit topoheight let located_sync_topoheight = self.locate_nearest_sync_block_for_topoheight::(&storage, topoheight, self.get_height()).await?; debug!("Located sync topoheight found: {}", located_sync_topoheight); if located_sync_topoheight > last_pruned_topoheight { - // create snapshots of balances to located_sync_topoheight + // Create snapshots of balances to located_sync_topoheight storage.create_snapshot_balances_at_topoheight(located_sync_topoheight).await?; storage.create_snapshot_nonces_at_topoheight(located_sync_topoheight).await?; storage.create_snapshot_registrations_at_topoheight(located_sync_topoheight).await?; - // delete all blocks until the new topoheight + // Delete all blocks until the new topoheight for topoheight in last_pruned_topoheight..located_sync_topoheight { trace!("Pruning block at topoheight {}", topoheight); - // delete block + // Delete block let _ = 
storage.delete_block_at_topoheight(topoheight).await?; } - // delete balances for all assets + // Delete balances for all assets storage.delete_versioned_balances_below_topoheight(located_sync_topoheight).await?; - // delete nonces versions + // Delete nonces versions storage.delete_versioned_nonces_below_topoheight(located_sync_topoheight).await?; - // Also delete registrations + // Delete registrations storage.delete_registrations_below_topoheight(located_sync_topoheight).await?; // Update the pruned topoheight @@ -613,7 +613,7 @@ impl Blockchain { } } - // determine the topoheight of the nearest sync block until limit topoheight + // Determine the topoheight of the nearest sync block until limit topoheight pub async fn locate_nearest_sync_block_for_topoheight

(&self, provider: &P, mut topoheight: u64, current_height: u64) -> Result where P: DifficultyProvider + DagOrderProvider + BlocksAtHeightProvider + PrunedTopoheightProvider @@ -628,16 +628,16 @@ impl Blockchain { topoheight -= 1; } - // genesis block is always a sync block + // Genesis block is always a sync block Ok(0) } - // returns the highest (unstable) height on the chain + // Returns the highest (unstable) height on the chain pub fn get_height(&self) -> u64 { self.height.load(Ordering::Acquire) } - // returns the highest topological height + // Returns the highest topological height pub fn get_topo_height(&self) -> u64 { self.topoheight.load(Ordering::Acquire) } @@ -648,9 +648,9 @@ impl Blockchain { self.stable_height.load(Ordering::Acquire) } - // Get the stable topoheight + // Get the stable topoheight. // It is used to determine at which DAG topological height - // the block is in case of rewind + // the block is in case of rewind. pub fn get_stable_topoheight(&self) -> u64 { self.stable_topoheight.load(Ordering::Acquire) } @@ -676,9 +676,8 @@ impl Blockchain { self.get_top_block_hash_for_storage(&storage).await } - // because we are in chain, we already now the highest topoheight - // we call the get_hash_at_topo_height instead of get_top_block_hash to avoid reading value - // that we already know + // Since we are in the chain, we already know the highest topoheight. + // Call `get_hash_at_topo_height` instead of `get_top_block_hash` to avoid reading values we already know. 
pub async fn get_top_block_hash_for_storage(&self, storage: &S) -> Result { storage.get_hash_at_topo_height(self.get_topo_height()).await } @@ -695,20 +694,20 @@ impl Blockchain { self.is_sync_block_at_height::(storage, hash, current_height).await } - // Verify if the block is a sync block - // A sync block is a block that is ordered and has the highest cumulative difficulty at its height - // It is used to determine if the block is a stable block or not + // Verify if the block is a sync block. + // A sync block is a block that is ordered and has the highest cumulative difficulty at its height. + // It is used to determine if the block is a stable block or not. async fn is_sync_block_at_height

(&self, provider: &P, hash: &Hash, height: u64) -> Result where P: DifficultyProvider + DagOrderProvider + BlocksAtHeightProvider + PrunedTopoheightProvider { trace!("is sync block {} at height {}", hash, height); let block_height = provider.get_height_for_block_hash(hash).await?; - if block_height == 0 { // genesis block is a sync block + if block_height == 0 { // Genesis block is a sync block return Ok(true) } - // block must be ordered and in stable height + // Block must be ordered and in stable height if block_height + STABLE_LIMIT > height || !provider.is_block_topological_ordered(hash).await { return Ok(false) } @@ -721,14 +720,14 @@ impl Blockchain { } } - // if block is alone at its height, it is a sync block + // If block is alone at its height, it is a sync block let tips_at_height = provider.get_blocks_at_height(block_height).await?; // This may be an issue with orphaned blocks, we can't rely on this // if tips_at_height.len() == 1 { // return Ok(true) // } - // if block is not alone at its height and they are ordered (not orphaned), it can't be a sync block + // If block is not alone at its height and they are ordered (not orphaned), it can't be a sync block let mut blocks_in_main_chain = 0; for hash in tips_at_height { if provider.is_block_topological_ordered(&hash).await { @@ -739,7 +738,7 @@ impl Blockchain { } } - // now lets check all blocks until STABLE_LIMIT height before the block + // Now lets check all blocks until STABLE_LIMIT height before the block let stable_point = if block_height >= STABLE_LIMIT { block_height - STABLE_LIMIT } else { @@ -754,10 +753,10 @@ impl Blockchain { } let sync_block_cumulative_difficulty = provider.get_cumulative_difficulty_for_block_hash(hash).await?; - // if potential sync block has lower cumulative difficulty than one of past blocks, it is not a sync block + // If potential sync block has lower cumulative difficulty than one of past blocks, it is not a sync block for pre_hash in pre_blocks { - // We compare 
only against block ordered otherwise we can have desync between node which could lead to fork - // This is rare event but can happen + // We compare only against block ordered otherwise we can have desync between node which could lead to fork. + // This is rare event but can happen. if provider.is_block_topological_ordered(&pre_hash).await { let cumulative_difficulty = provider.get_cumulative_difficulty_for_block_hash(&pre_hash).await?; if cumulative_difficulty >= sync_block_cumulative_difficulty { @@ -797,7 +796,7 @@ impl Blockchain { } } - // first, check if we have it in cache + // First, check if we have it in cache if let Some((base_hash, base_height)) = cache.get(&(current_hash.clone(), height)) { trace!("Tip Base for {} at height {} found in cache: {} for height {}", current_hash, height, base_hash, base_height); bases.insert((base_hash.clone(), *base_height)); @@ -806,8 +805,8 @@ impl Blockchain { let tips = provider.get_past_blocks_for_block_hash(¤t_hash).await?; let tips_count = tips.len(); - if tips_count == 0 { // only genesis block can have 0 tips saved - // save in cache + if tips_count == 0 { // Only genesis block can have 0 tips saved + // Save in cache cache.put((hash.clone(), height), (current_hash.clone(), height)); bases.insert((current_hash.clone(), 0)); continue 'main; @@ -825,10 +824,10 @@ impl Blockchain { } } - // if block is sync, it is a tip base + // If block is sync, it is a tip base if self.is_sync_block_at_height(provider, &tip_hash, height).await? 
{ let block_height = provider.get_height_for_block_hash(&tip_hash).await?; - // save in cache + // Save in cache cache.put((hash.clone(), height), (tip_hash.clone(), block_height)); bases.insert((tip_hash.clone(), block_height)); continue 'main; @@ -846,20 +845,20 @@ impl Blockchain { return Err(BlockchainError::ExpectedTips) } - // now we sort descending by height and return the last element deleted + // Now we sort descending by height and return the last element deleted bases.sort_by(|(_, a), (_, b)| b.cmp(a)); debug_assert!(bases[0].1 >= bases[bases.len() - 1].1); let (base_hash, base_height) = bases.pop().ok_or(BlockchainError::ExpectedTips)?; - // save in cache + // Save in cache cache.put((hash.clone(), height), (base_hash.clone(), base_height)); trace!("Tip Base for {} at height {} found: {} for height {}", hash, height, base_hash, base_height); Ok((base_hash, base_height)) } - // find the common base (block hash and block height) of all tips + // Find the common base (block hash and block height) of all tips pub async fn find_common_base<'a, P, I>(&self, provider: &P, tips: I) -> Result<(Hash, u64), BlockchainError> where P: DifficultyProvider + DagOrderProvider + BlocksAtHeightProvider + PrunedTopoheightProvider, @@ -867,7 +866,7 @@ impl Blockchain { { debug!("Searching for common base for tips {}", tips.into_iter().map(|h| h.to_string()).collect::>().join(", ")); let mut best_height = 0; - // first, we check the best (highest) height of all tips + // First, we check the best (highest) height of all tips for hash in tips.into_iter() { let height = provider.get_height_for_block_hash(hash).await?; if height > best_height { @@ -882,20 +881,20 @@ impl Blockchain { bases.push(self.find_tip_base(provider, hash, best_height, pruned_topoheight).await?); } - // check that we have at least one value + // Check that we have at least one value if bases.is_empty() { error!("bases list is empty"); return Err(BlockchainError::ExpectedTips) } - // sort it descending by 
height + // Sort it descending by height // a = 5, b = 6, b.cmp(a) -> Ordering::Greater bases.sort_by(|(_, a), (_, b)| b.cmp(a)); debug_assert!(bases[0].1 >= bases[bases.len() - 1].1); - // retrieve the first block hash with its height - // we delete the last element because we sorted it descending - // and we want the lowest height + // Retrieve the first block hash with its height. + // We delete the last element because we sorted it descending + // and we want the lowest height. let (base_hash, base_height) = bases.remove(bases.len() - 1); debug!("Common base {} with height {} on {}", base_hash, base_height, bases.len() + 1); Ok((base_hash, base_height)) @@ -925,7 +924,7 @@ impl Blockchain { Ok(set) } - // this function check that a TIP cannot be refered as past block in another TIP + // This function ensures that a TIP cannot be referenced as a past block in another TIP. async fn verify_non_reachability(&self, storage: &S, tips: &IndexSet) -> Result { trace!("Verifying non reachability for block"); let tips_count = tips.len(); @@ -937,7 +936,7 @@ impl Blockchain { for i in 0..tips_count { for j in 0..tips_count { - // if a tip can be referenced as another's past block, its not a tip + // If a tip can be referenced as another's past block, its not a tip if i != j && reach[j].contains(&tips[i]) { debug!("Tip {} (index {}) is reachable from tip {} (index {})", tips[i], i, tips[j], j); trace!("reach: {}", reach[j].iter().map(|x| x.to_string()).collect::>().join(", ")); @@ -948,8 +947,8 @@ impl Blockchain { Ok(true) } - // Search the lowest height available from the tips of a block hash - // We go through all tips and their tips until we have no unordered block left + // Search the lowest height available from the tips of a block hash. + // We go through all tips and their tips until we have no unordered block left. async fn find_lowest_height_from_mainchain

(&self, provider: &P, hash: Hash) -> Result where P: DifficultyProvider + DagOrderProvider @@ -959,7 +958,7 @@ impl Blockchain { // Current stack of blocks to process let mut stack: VecDeque = VecDeque::new(); // Because several blocks can have the same tips, - // prevent to process a block twice + // prevent to process a block twice. let mut processed = HashSet::new(); stack.push_back(hash); @@ -986,10 +985,10 @@ impl Blockchain { Ok(lowest_height) } - // Search the lowest height available from this block hash - // This function is used to calculate the distance from mainchain - // It will recursively search all tips and their height - // If a tip is not ordered, we will search its tips until we find an ordered block + // Search for the lowest height available starting from this block hash. + // This function calculates the distance from the mainchain by recursively + // searching all tips and their heights. If a tip is unordered, it will + // search its child tips until an ordered block is found. async fn calculate_distance_from_mainchain

(&self, provider: &P, hash: &Hash) -> Result where P: DifficultyProvider + DagOrderProvider @@ -1006,8 +1005,8 @@ impl Blockchain { Ok(lowest_height) } - // Verify if the block is not too far from mainchain - // We calculate the distance from mainchain and compare it to the height + // Verify if the block is not too far from mainchain. + // We calculate the distance from mainchain and compare it to the height. async fn verify_distance_from_mainchain

(&self, provider: &P, hash: &Hash, height: u64) -> Result where P: DifficultyProvider + DagOrderProvider @@ -1016,8 +1015,8 @@ impl Blockchain { Ok(!(distance <= height && height - distance >= STABLE_LIMIT)) } - // Find tip work score internal for a block hash - // this will recursively find all tips and their difficulty + // Find tip work score internal for a block hash. + // This will recursively find all tips and their difficulty. async fn find_tip_work_score_internal<'a, P>(&self, provider: &P, map: &mut HashMap, hash: &'a Hash, base_topoheight: u64) -> Result<(), BlockchainError> where P: DifficultyProvider + DagOrderProvider @@ -1047,7 +1046,7 @@ impl Blockchain { Ok(()) } - // find the sum of work done + // Find the sum of work done pub async fn find_tip_work_score

(&self, provider: &P, hash: &Hash, base: &Hash, base_height: u64) -> Result<(HashSet, CumulativeDifficulty), BlockchainError> where P: DifficultyProvider + DagOrderProvider @@ -1082,14 +1081,14 @@ impl Blockchain { score += value; } - // save this result in cache + // Save this result in cache cache.put((hash.clone(), base.clone(), base_height), (set.clone(), score)); Ok((set, score)) } - // find the best tip (highest cumulative difficulty) - // We get their cumulative difficulty and sort them then take the first one + // Find the best tip (highest cumulative difficulty). + // We get their cumulative difficulty and sort them then take the first one. async fn find_best_tip<'a>(&self, storage: &S, tips: &'a HashSet, base: &Hash, base_height: u64) -> Result<&'a Hash, BlockchainError> { if tips.len() == 0 { return Err(BlockchainError::ExpectedTips) @@ -1106,12 +1105,12 @@ impl Blockchain { Ok(best_tip) } - // this function generate a DAG paritial order into a full order using recursive calls. - // hash represents the best tip (biggest cumulative difficulty) - // base represents the block hash of a block already ordered and in stable height - // the full order is re generated each time a new block is added based on new TIPS - // first hash in order is the base hash - // base_height is only used for the cache key + // This function converts a DAG partial order into a full order using recursion. + // "hash" represents the best tip (the block with the highest cumulative difficulty). + // "base" represents the hash of a block that is already ordered and at a stable height. + // The full order is regenerated each time a new block is added, based on the new tips. + // The first hash in the order is the base hash. + // "base_height" is used only for the cache key. async fn generate_full_order

(&self, provider: &P, hash: &Hash, base: &Hash, base_height: u64, base_topo_height: u64) -> Result, BlockchainError> where P: DifficultyProvider + DagOrderProvider @@ -1129,9 +1128,9 @@ impl Blockchain { let mut processed = IndexSet::new(); 'main: while let Some(current_hash) = stack.pop_back() { - // If it is processed and got reinjected, its to maintains right order + // If it is processed and got reinjected, its to maintains right order. // We just need to insert current hash as it the "final hash" that got processed - // after all tips + // after all tips. if processed.contains(¤t_hash) { full_order.insert(current_hash); continue 'main; @@ -1147,7 +1146,7 @@ impl Blockchain { // Retrieve block tips let block_tips = provider.get_past_blocks_for_block_hash(¤t_hash).await?; - // if the block is genesis or its the base block, we can add it to the full order + // If the block is genesis or its the base block, we can add it to the full order if block_tips.is_empty() || current_hash == *base { let mut order = IndexSet::new(); order.insert(current_hash.clone()); @@ -1170,7 +1169,7 @@ impl Blockchain { // We sort by ascending cumulative difficulty because it is faster // than doing a .reverse() on scores and give correct order for tips processing - // using our stack impl + // using our stack impl. 
blockdag::sort_ascending_by_cumulative_difficulty(&mut scores); processed.insert(current_hash.clone()); @@ -1186,7 +1185,7 @@ impl Blockchain { Ok(full_order) } - // confirms whether the actual tip difficulty is withing 9% deviation with best tip (reference) + // Confirms whether the actual tip difficulty is within 9% deviation with best tip (reference) async fn validate_tips(&self, provider: &P, best_tip: &Hash, tip: &Hash) -> Result { const MAX_DEVIATION: Difficulty = Difficulty::from_u64(91); const PERCENTAGE: Difficulty = Difficulty::from_u64(100); @@ -1197,12 +1196,14 @@ impl Blockchain { Ok(best_difficulty * MAX_DEVIATION / PERCENTAGE < block_difficulty) } - // Get difficulty at tips - // If tips is empty, returns genesis difficulty - // Find the best tip (highest cumulative difficulty), then its difficulty, timestamp and its own tips - // Same for its parent, then calculate the difficulty between the two timestamps - // For Block C, take the timestamp and difficulty from parent block B, and then from parent of B, take the timestamp - // We take the difficulty from the biggest tip, but compute the solve time from the newest tips + // Get the difficulty at the tips. + // If no tips are available, return the genesis difficulty. + // Find the best tip (the one with the highest cumulative difficulty) + // and get its difficulty, timestamp, and its own tips. + // Repeat the process for its parent tip. + // Calculate the difficulty based on the timestamps and difficulties of these tips. + // For Block C, use the difficulty and timestamp from its parent, Block B, and then from B's parent use the timestamp. + // We use the highest difficulty from the tips but compute the solve time based on the newest tips. 
pub async fn get_difficulty_at_tips<'a, P, I>(&self, provider: &P, tips: I) -> Result<(Difficulty, VarUint), BlockchainError> where P: DifficultyProvider + DagOrderProvider + PrunedTopoheightProvider, @@ -1255,9 +1256,9 @@ impl Blockchain { *self.difficulty.lock().await } - // pass in params the already computed block hash and its tips - // check the difficulty calculated at tips - // if the difficulty is valid, returns it (prevent to re-compute it) + // Pass in params the already computed block hash and its tips. + // Check the difficulty calculated at tips. + // If the difficulty is valid, return it (prevents re-computing it). pub async fn verify_proof_of_work<'a, P, I>(&self, provider: &P, hash: &Hash, tips: I) -> Result<(Difficulty, VarUint), BlockchainError> where P: DifficultyProvider + DagOrderProvider + PrunedTopoheightProvider, @@ -1307,7 +1308,7 @@ impl Blockchain { } // Add a tx to the mempool with the given hash, it will verify the TX and check that it is not already in mempool or in blockchain - // and its validity (nonce, balance, etc...) + // and its validity (nonce, balance, etc...). pub async fn add_tx_to_mempool_with_storage_and_hash<'a>(&'a self, storage: &S, tx: Arc, hash: Hash, broadcast: bool) -> Result<(), BlockchainError> { let tx_size = tx.size(); if tx_size > MAX_TRANSACTION_SIZE { @@ -1321,23 +1322,23 @@ impl Blockchain { return Err(BlockchainError::TxAlreadyInMempool(hash)) } - // check that the TX is not already in blockchain + // Check that the TX is not already in blockchain if storage.is_tx_executed_in_a_block(&hash)? { return Err(BlockchainError::TxAlreadyInBlockchain(hash)) } let stable_topoheight = self.get_stable_topoheight(); let current_topoheight = self.get_topo_height(); - // get the highest nonce available - // if presents, it means we have at least one tx from this owner in mempool + // Get the highest nonce available. + // If present, it means we have at least one TX from this owner in mempool. 
if let Some(cache) = mempool.get_cache_for(tx.get_source()) { - // we accept to delete a tx from mempool if the new one has a higher fee + // We accept deleting a TX from mempool if the new one has a higher fee if let Some(hash) = cache.has_tx_with_same_nonce(tx.get_nonce()) { // A TX with the same nonce is already in mempool return Err(BlockchainError::TxNonceAlreadyUsed(tx.get_nonce(), hash.as_ref().clone())) } - // check that the nonce is in the range + // Check that the nonce is in the range if !(tx.get_nonce() <= cache.get_max() + 1 && tx.get_nonce() >= cache.get_min()) { debug!("TX {} nonce is not in the range of the pending TXs for this owner, received: {}, expected between {} and {}", hash, tx.get_nonce(), cache.get_min(), cache.get_max()); return Err(BlockchainError::InvalidTxNonceMempoolCache(tx.get_nonce(), cache.get_min(), cache.get_max())) @@ -1358,7 +1359,7 @@ impl Blockchain { }); } - // broadcast to websocket this tx + // Broadcast to websocket this TX if let Some(rpc) = self.rpc.read().await.as_ref() { // Notify miners if getwork is enabled if let Some(getwork) = rpc.getwork_server() { @@ -1400,10 +1401,10 @@ impl Blockchain { self.get_block_template_for_storage(&storage, address).await } - // check that the TX Hash is present in mempool or in chain disk + // Check that the TX Hash is present in mempool or in chain disk pub async fn has_tx(&self, hash: &Hash) -> Result { - // check in mempool first - // if its present, returns it + // Check in mempool first. + // If it's present, return it. 
{ let mempool = self.mempool.read().await; if mempool.contains_tx(hash) { @@ -1411,16 +1412,16 @@ impl Blockchain { } } - // check in storage now + // Check in storage now let storage = self.storage.read().await; storage.has_transaction(hash).await } - // retrieve the TX based on its hash by searching in mempool then on disk + // Retrieve the TX based on its hash by searching in mempool then on disk pub async fn get_tx(&self, hash: &Hash) -> Result, BlockchainError> { trace!("get tx {} from blockchain", hash); - // check in mempool first - // if its present, returns it + // Check in mempool first. + // If its present, returns it. { trace!("Locking mempool for get tx {}", hash); let mempool = self.mempool.read().await; @@ -1430,7 +1431,7 @@ impl Blockchain { } } - // check in storage now + // Check in storage now let storage = self.storage.read().await; storage.get_transaction(hash).await } @@ -1443,7 +1444,7 @@ impl Blockchain { // Generate a block header template without transactions pub async fn get_block_header_template_for_storage(&self, storage: &S, address: PublicKey) -> Result { trace!("get block header template"); - let extra_nonce: [u8; EXTRA_NONCE_SIZE] = rand::thread_rng().gen::<[u8; EXTRA_NONCE_SIZE]>(); // generate random bytes + let extra_nonce: [u8; EXTRA_NONCE_SIZE] = rand::thread_rng().gen::<[u8; EXTRA_NONCE_SIZE]>(); // Generate random bytes let tips_set = storage.get_tips().await?; let mut tips = Vec::with_capacity(tips_set.len()); for hash in tips_set { @@ -1480,11 +1481,11 @@ impl Blockchain { let mut sorted_tips = blockdag::sort_tips(storage, tips.into_iter()).await?; if sorted_tips.len() > TIPS_LIMIT { - let dropped_tips = sorted_tips.drain(TIPS_LIMIT..); // keep only first 3 heavier tips + let dropped_tips = sorted_tips.drain(TIPS_LIMIT..); // Keep only first 3 heavier tips debug!("Dropping tips {} because they are not in the first 3 heavier tips", dropped_tips.map(|h| h.to_string()).collect::>().join(", ")); } - // find the newest timestamp 
+ // Find the newest timestamp let mut timestamp = 0; for tip in sorted_tips.iter() { let tip_timestamp = storage.get_timestamp_for_block_hash(tip).await?; @@ -1507,9 +1508,9 @@ impl Blockchain { Ok(block) } - // Get the mining block template for miners - // This function is called when a miner request a new block template - // We create a block candidate with selected TXs from mempool + // Get the mining block template for miners. + // This function is called when a miner request a new block template. + // We create a block candidate with selected TXs from mempool. pub async fn get_block_template_for_storage(&self, storage: &S, address: PublicKey) -> Result { let mut block = self.get_block_header_template_for_storage(storage, address).await?; @@ -1517,7 +1518,7 @@ impl Blockchain { let mempool = self.mempool.read().await; trace!("Mempool locked for building block template"); - // use the mempool cache to get all availables txs grouped by account + // Use the mempool cache to get all available txs grouped by account let caches = mempool.get_caches(); let mut entries: Vec> = Vec::with_capacity(caches.len()); for cache in caches.values() { @@ -1534,11 +1535,11 @@ impl Blockchain { // Build the tx selector using the mempool let mut tx_selector = TxSelector::grouped(entries.into_iter()); - // size of block + // Size of block let mut block_size = block.size(); let mut total_txs_size = 0; - // data used to verify txs + // Data used to verify txs let stable_topoheight = self.get_stable_topoheight(); let topoheight = self.get_topo_height(); trace!("build chain state for block template"); @@ -1568,9 +1569,9 @@ impl Blockchain { } trace!("Selected {} (nonce: {}, fees: {}) for mining", hash, tx.get_nonce(), format_xelis(tx.get_fee())); - // TODO no clone + // TODO: no clone block.txs_hashes.insert(hash.as_ref().clone()); - block_size += HASH_SIZE; // add the hash size + block_size += HASH_SIZE; // Add the hash size total_txs_size += size; } @@ -1587,7 +1588,7 @@ impl 
Blockchain { trace!("Mempool lock acquired for building block from header"); for hash in header.get_txs_hashes() { trace!("Searching TX {} for building block", hash); - // at this point, we don't want to lose/remove any tx, we clone it only + // At this point, we don't want to lose/remove any TX, we clone it only let tx = if mempool.contains_tx(hash) { mempool.get_tx(hash)? } else { @@ -1627,14 +1628,14 @@ impl Blockchain { debug!("Block {} is not in chain, processing it", block_hash); let current_timestamp = get_current_time_in_millis(); - if block.get_timestamp() > current_timestamp + TIMESTAMP_IN_FUTURE_LIMIT { // accept 2s in future + if block.get_timestamp() > current_timestamp + TIMESTAMP_IN_FUTURE_LIMIT { // Accept 2s in future debug!("Block timestamp is too much in future!"); return Err(BlockchainError::TimestampIsInFuture(current_timestamp, block.get_timestamp())); } let tips_count = block.get_tips().len(); debug!("Tips count for this new {}: {}", block, tips_count); - // only 3 tips are allowed + // Only 3 tips are allowed if tips_count > TIPS_LIMIT { debug!("Invalid tips count, got {} but maximum allowed is {}", tips_count, TIPS_LIMIT); return Err(BlockchainError::InvalidTipsCount(block_hash, tips_count)) @@ -1656,7 +1657,7 @@ impl Blockchain { return Err(BlockchainError::InvalidTipsCount(block_hash, tips_count)) } - // block contains header and full TXs + // Block contains header and full TXs let block_size = block.size(); if block_size > MAX_BLOCK_SIZE { debug!("Block size ({} bytes) is greater than the limit ({} bytes)", block.size(), MAX_BLOCK_SIZE); @@ -1693,7 +1694,7 @@ impl Blockchain { for hash in block.get_tips() { let previous_timestamp = storage.get_timestamp_for_block_hash(hash).await?; - // block timestamp can't be less than previous block. + // Block timestamp can't be less than previous block. 
if block.get_timestamp() < previous_timestamp { debug!("Invalid block timestamp, parent ({}) is less than new block {}", hash, block_hash); return Err(BlockchainError::TimestampIsLessThanParent(block.get_timestamp())); @@ -1719,7 +1720,7 @@ impl Blockchain { } } - // verify PoW and get difficulty for this block based on tips + // Verify PoW and get difficulty for this block based on tips let skip_pow = self.skip_pow_verification(); let pow_hash = if skip_pow { // Simulator is enabled, we don't need to compute the PoW hash @@ -1734,11 +1735,11 @@ impl Blockchain { let mut current_topoheight = self.get_topo_height(); // Transaction verification - // Here we are going to verify all TXs in the block - // For this, we must select TXs that are not doing collisions with other TXs in block - // TX already added in the same DAG branch (block tips) are rejected because miner should be aware of it - // TXs that are already executed in stable height are also rejected whatever DAG branch it is - // If the TX is executed by another branch, we skip the verification because DAG will choose which branch will execute the TX + // Here we are going to verify all TXs in the block. + // Select TXs that do not collide with other TXs in the block. + // Reject TXs already added in the same DAG branch (block tips) because the miner should be aware of them. + // Reject TXs that have already been executed at stable height, regardless of the DAG branch. + // Skip verification for TXs executed by another branch since the DAG will determine which branch executes the TX. 
{ let hashes_len = block.get_txs_hashes().len(); let txs_len = block.get_transactions().len(); @@ -1758,7 +1759,7 @@ impl Blockchain { return Err(BlockchainError::TxTooBig(tx_size, MAX_TRANSACTION_SIZE)) } - // verification that the real TX Hash is the same as in block header (and also check the correct order) + // Verification that the real TX Hash is the same as in block header (and also check the correct order) let tx_hash = tx.hash(); if tx_hash != *hash { debug!("Invalid tx {} vs {} in block header", tx_hash, hash); @@ -1766,42 +1767,42 @@ impl Blockchain { } debug!("Verifying TX {}", tx_hash); - // check that the TX included is not executed in stable height or in block TIPS + // Check that the TX included is not executed in stable height or in block TIPS if chain_state.get_storage().is_tx_executed_in_a_block(hash)? { let block_executor = chain_state.get_storage().get_block_executor_for_tx(hash)?; debug!("Tx {} was executed in {}", hash, block_executor); let block_executor_height = chain_state.get_storage().get_height_for_block_hash(&block_executor).await?; - // if the tx was executed below stable height, reject whole block! + // If the tx was executed below stable height, reject whole block! if block_executor_height <= stable_height { debug!("Block {} contains a dead tx {} from stable height {}", block_hash, tx_hash, stable_height); return Err(BlockchainError::DeadTxFromStableHeight(block_hash, tx_hash, stable_height, block_executor)) } else { debug!("Tx {} was executed in block {} at height {} (stable height: {})", tx_hash, block, block_executor_height, stable_height); - // now we should check that the TX was not executed in our TIP branch - // because that mean the miner was aware of the TX execution and still include it + // Now we should check that the TX was not executed in our TIP branch + // because that means the miner was aware of the TX execution and still include it. 
if all_parents_txs.is_none() { - // load it only one time + // Load it only one time all_parents_txs = Some(self.get_all_executed_txs_until_height(chain_state.get_storage(), stable_height, block.get_tips().iter().map(Hash::clone)).await?); } - // if its the case, we should reject the block + // If its the case, we should reject the block if let Some(txs) = all_parents_txs.as_ref() { - // miner knows this tx was already executed because its present in block tips - // reject the whole block + // Miner knows this TX was already executed because its present in block tips. + // Reject the whole block. if txs.contains(&tx_hash) { debug!("Malicious Block {} formed, contains a dead tx {}", block_hash, tx_hash); return Err(BlockchainError::DeadTxFromTips(block_hash, tx_hash)) } else { - // otherwise, all looks good but because the TX was executed in another branch, we skip verification - // DAG will choose which branch will execute the TX + // Otherwise, all looks good but because the TX was executed in another branch, we skip verification. + // DAG will choose which branch will execute the TX. debug!("TX {} was executed in another branch, skipping verification", tx_hash); - // because TX was already validated & executed and is not in block tips - // we can safely skip the verification of this TX + // because TX was already validated & executed and is not in block tips. + // We can safely skip the verification of this TX. continue; } } else { - // impossible to happens because we compute it if value is None + // Impossible to happens because we compute it if value is None error!("FATAL ERROR! 
Unable to load all TXs until height {}", stable_height); return Err(BlockchainError::Unknown) } @@ -1855,11 +1856,11 @@ impl Blockchain { debug!("Best tip selected: {}", best_tip); let base_topo_height = storage.get_topo_height_for_hash(&base_hash).await?; - // generate a full order until base_topo_height + // Generate a full order until base_topo_height let mut full_order = self.generate_full_order(storage, &best_tip, &base_hash, base_height, base_topo_height).await?; debug!("Generated full order size: {}, with base ({}) topo height: {}", full_order.len(), base_hash, base_topo_height); - // rpc server lock + // RPC server lock let rpc_server = self.rpc.read().await; let should_track_events = if let Some(rpc) = rpc_server.as_ref() { rpc.get_tracked_events().await @@ -1867,19 +1868,19 @@ impl Blockchain { HashSet::new() }; - // track all events to notify websocket + // Track all events to notify websocket let mut events: HashMap> = HashMap::new(); - // Track all orphaned tranasctions + // Track all orphaned transactions let mut orphaned_transactions = HashSet::new(); - // order the DAG (up to TOP_HEIGHT - STABLE_LIMIT) + // Order the DAG (up to TOP_HEIGHT - STABLE_LIMIT) let mut highest_topo = 0; // Tells if the new block added is ordered in DAG or not let block_is_ordered = full_order.contains(&block_hash); { let mut is_written = base_topo_height == 0; let mut skipped = 0; - // detect which part of DAG reorg stay, for other part, undo all executed txs + // Detect which part of DAG reorg stay, for other part, undo all executed txs debug!("Detecting stable point of DAG and cleaning txs above it"); { let mut topoheight = base_topo_height; @@ -1891,14 +1892,14 @@ impl Blockchain { // Verify that the block is still at the same topoheight if storage.is_block_topological_ordered(order).await && *order == hash_at_topo { trace!("Hash {} at topo {} stay the same, skipping cleaning", hash_at_topo, topoheight); - // remove the hash from the order because we don't need to 
recompute it + // Remove the hash from the order because we don't need to recompute it full_order.shift_remove_index(0); topoheight += 1; skipped += 1; continue; } } - // if we are here, it means that the block was re-ordered + // If we are here, it means that the block was re-ordered is_written = true; } @@ -1917,7 +1918,7 @@ impl Blockchain { events.entry(NotifyEvent::BlockOrphaned).or_insert_with(Vec::new).push(value); } - // mark txs as unexecuted if it was executed in this block + // Mark txs as unexecuted if it was executed in this block for tx_hash in block.get_txs_hashes() { if storage.is_tx_executed_in_block(tx_hash, &hash_at_topo)? { trace!("Removing execution of {}", tx_hash); @@ -1942,12 +1943,12 @@ impl Blockchain { let mut nonce_checker = NonceChecker::new(); // Side blocks counter per height let mut side_blocks: HashMap = HashMap::new(); - // time to order the DAG that is moving + // Time to order the DAG that is moving debug!("Ordering blocks based on generated DAG order ({} blocks)", full_order.len()); for (i, hash) in full_order.into_iter().enumerate() { highest_topo = base_topo_height + skipped + i as u64; - // if block is not re-ordered and it's not genesis block + // If block is not re-ordered and it's not genesis block // because we don't need to recompute everything as it's still good in chain if !is_written && tips_count != 0 && storage.is_block_topological_ordered(&hash).await && storage.get_topo_height_for_hash(&hash).await? == highest_topo { trace!("Block ordered {} stay at topoheight {}. Skipping...", hash, highest_topo); @@ -1967,8 +1968,8 @@ impl Blockchain { // Block for this hash let block = storage.get_block_by_hash(&hash).await?; - // Reward the miner of this block - // We have a decreasing block reward if there is too much side block + // Reward the miner of this block. + // We have a decreasing block reward if there is too much side block. 
let is_side_block = self.is_side_block_internal(storage, &hash, highest_topo).await?; let height = block.get_height(); let side_blocks_count = match side_blocks.entry(height) { @@ -2005,19 +2006,19 @@ impl Blockchain { trace!("building chain state to execute TXs in block {}", block_hash); let mut chain_state = ApplicableChainState::new(storage, base_topo_height, highest_topo, version); - // compute rewards & execute txs + // Compute rewards & execute TXs for (tx, tx_hash) in block.get_transactions().iter().zip(block.get_txs_hashes()) { // execute all txs // Link the transaction hash to this block if !chain_state.get_mut_storage().add_block_linked_to_tx_if_not_present(&tx_hash, &hash)? { trace!("Block {} is now linked to tx {}", hash, tx_hash); } - // check that the tx was not yet executed in another tip branch + // Check that the TX was not yet executed in another tip branch if chain_state.get_storage().is_tx_executed_in_a_block(tx_hash)? { trace!("Tx {} was already executed in a previous block, skipping...", tx_hash); } else { - // tx was not executed, but lets check that it is not a potential double spending - // check that the nonce is not already used + // TX was not executed, but lets check that it is not a potential double spending. + // Check that the nonce is not already used. if !nonce_checker.use_nonce(chain_state.get_storage(), tx.get_source(), tx.get_nonce(), highest_topo).await? { warn!("Malicious TX {}, it is a potential double spending with same nonce {}, skipping...", tx_hash, tx.get_nonce()); // TX will be orphaned @@ -2034,12 +2035,12 @@ impl Blockchain { continue; } - // Calculate the new nonce - // This has to be done in case of side blocks where TX B would be before TX A + // Calculate the new nonce. + // This has to be done in case of side blocks where TX B would be before TX A. 
let next_nonce = nonce_checker.get_new_nonce(tx.get_source(), self.network.is_mainnet())?; chain_state.as_mut().update_account_nonce(tx.get_source(), next_nonce).await?; - // mark tx as executed + // Mark tx as executed chain_state.get_mut_storage().set_tx_executed_in_block(tx_hash, &hash)?; // Delete the transaction from the list if it was marked as orphaned @@ -2047,7 +2048,7 @@ impl Blockchain { trace!("Transaction {} was marked as orphaned, but got executed again", tx_hash); } - // if the rpc_server is enable, track events + // If the rpc_server is enable, track events if should_track_events.contains(&NotifyEvent::TransactionExecuted) { let value = json!(TransactionExecutedEvent { tx_hash: Cow::Borrowed(&tx_hash), @@ -2071,10 +2072,10 @@ impl Blockchain { block_reward -= dev_fee_part; } - // reward the miner + // Reward the miner chain_state.reward_miner(block.get_miner(), block_reward + total_fees).await?; - // apply changes from Chain State + // Apply changes from Chain State chain_state.apply_changes().await?; if should_track_events.contains(&NotifyEvent::BlockOrdered) { @@ -2114,7 +2115,7 @@ impl Blockchain { } tips.insert(best_tip); - // save highest topo height + // Save highest topo height debug!("Highest topo height found: {}", highest_topo); let extended = highest_topo > current_topoheight; if current_height == 0 || extended { @@ -2124,8 +2125,8 @@ impl Blockchain { current_topoheight = highest_topo; } - // If block is directly orphaned - // Mark all TXs ourself as linked to it + // If block is directly orphaned, + // mark all TXs ourself as linked to it. 
if !block_is_ordered { trace!("Block {} is orphaned, marking all TXs as linked to it", block_hash); for tx_hash in block.get_txs_hashes() { @@ -2133,11 +2134,11 @@ impl Blockchain { } } - // auto prune mode + // Auto prune mode if extended { if let Some(keep_only) = self.auto_prune_keep_n_blocks { - // check that the topoheight is greater than the safety limit - // and that we can prune the chain using the config while respecting the safety limit + // Check that the topoheight is greater than the safety limit + // and that we can prune the chain using the config while respecting the safety limit. if current_topoheight % keep_only == 0 && current_topoheight - keep_only > 0 { info!("Auto pruning chain until topoheight {} (keep only {} blocks)", current_topoheight - keep_only, keep_only); if let Err(e) = self.prune_until_topoheight_for_storage(current_topoheight - keep_only, storage).await { @@ -2157,10 +2158,10 @@ impl Blockchain { current_height = block.get_height(); } - // update stable height and difficulty in cache + // Update stable height and difficulty in cache { if should_track_events.contains(&NotifyEvent::StableHeightChanged) { - // detect the change in stable height + // Detect the change in stable height let previous_stable_height = self.get_stable_height(); if base_height != previous_stable_height { let value = json!(StableHeightChangedEvent { @@ -2172,7 +2173,7 @@ impl Blockchain { } if should_track_events.contains(&NotifyEvent::StableTopoHeightChanged) { - // detect the change in stable topoheight + // Detect the change in stable topoheight let previous_stable_topoheight = self.get_stable_topoheight(); if base_topo_height != previous_stable_topoheight { let value = json!(StableTopoHeightChangedEvent { @@ -2195,7 +2196,7 @@ impl Blockchain { // Check if the event is tracked let orphan_event_tracked = should_track_events.contains(&NotifyEvent::TransactionOrphaned); - // Clean mempool from old txs if the DAG has been updated + // Clean mempool from old 
TXs if the DAG has been updated let mempool_deleted_txs = if highest_topo >= current_topoheight { debug!("Locking mempool write mode"); let mut mempool = self.mempool.write().await; @@ -2208,9 +2209,9 @@ impl Blockchain { if orphan_event_tracked { for (tx_hash, sorted_tx) in mempool_deleted_txs { - // Delete it from our orphaned transactions list - // This save some performances as it will not try to add it back and - // consume resources for verifying the ZK Proof if we already know the answer + // Remove the transaction from our orphaned list. + // This improves performance by avoiding re-addition and + // unnecessary verification of the ZK Proof if the outcome is already known. if orphaned_transactions.remove(&tx_hash) { trace!("Transaction {} was marked as orphaned, but got deleted from mempool. Prevent adding it back", tx_hash); } @@ -2234,8 +2235,8 @@ impl Blockchain { // Now we can try to add back all transactions for tx_hash in orphaned_transactions { debug!("Adding back orphaned tx {}", tx_hash); - // It is verified in add_tx_to_mempool function too - // But to prevent loading the TX from storage and to fire wrong event + // It is verified in add_tx_to_mempool function too, + // but to prevent loading the TX from storage and triggering incorrect events. if !storage.is_tx_executed_in_a_block(&tx_hash)? 
{ let tx = match storage.get_transaction(&tx_hash).await { Ok(tx) => tx, @@ -2285,9 +2286,9 @@ impl Blockchain { } } - // broadcast to websocket new block + // Broadcast to websocket new block if let Some(rpc) = rpc_server.as_ref() { - // if we have a getwork server, and that its not from syncing, notify miners + // If we have a getwork server, and that its not from syncing, notify miners if broadcast { if let Some(getwork) = rpc.getwork_server() { let getwork = getwork.clone(); @@ -2299,7 +2300,7 @@ impl Blockchain { } } - // atm, we always notify websocket clients + // ATM, we always notify websocket clients trace!("Notifying websocket clients"); if should_track_events.contains(&NotifyEvent::NewBlock) { match get_block_response(self, storage, &block_hash, &Block::new(Immutable::Arc(block), txs), block_size).await { @@ -2313,7 +2314,7 @@ impl Blockchain { } let rpc = rpc.clone(); - // don't block mutex/lock more than necessary, we move it in another task + // Don't block mutex/lock more than necessary, we move it in another task spawn_task("rpc-notify-events", async move { for (event, values) in events { for value in values { @@ -2328,8 +2329,8 @@ impl Blockchain { Ok(()) } - // Get block reward based on the type of the block - // Block shouldn't be orphaned + // Get block reward based on the type of the block. + // Block shouldn't be orphaned. pub async fn internal_get_block_reward(&self, past_supply: u64, is_side_block: bool, side_blocks_count: u64) -> Result { trace!("internal get block reward"); let block_reward = if is_side_block { @@ -2344,13 +2345,13 @@ impl Blockchain { Ok(block_reward) } - // Get the block reward for a block - // This will search all blocks at same height and verify which one are side blocks + // Get the block reward for a block. + // This will search all blocks at same height and verify which one are side blocks. 
pub async fn get_block_reward(&self, storage: &S, hash: &Hash, past_supply: u64, current_topoheight: u64) -> Result { let is_side_block = self.is_side_block(storage, hash).await?; let mut side_blocks_count = 0; if is_side_block { - // get the block height for this hash + // Get the block height for this hash let height = storage.get_height_for_block_hash(hash).await?; let blocks_at_height = storage.get_blocks_at_height(height).await?; for block in blocks_at_height { @@ -2363,8 +2364,8 @@ impl Blockchain { self.internal_get_block_reward(past_supply, is_side_block, side_blocks_count).await } - // retrieve all txs hashes until height or until genesis block that were executed in a block - // for this we get all tips and recursively retrieve all txs from tips until we reach height + // Retrieve all txs hashes until height or until genesis block that were executed in a block. + // For this we get all tips and recursively retrieve all txs from tips until we reach height. async fn get_all_executed_txs_until_height

(&self, provider: &P, until_height: u64, tips: impl Iterator) -> Result, BlockchainError> where P: DifficultyProvider + ClientProtocolProvider @@ -2378,25 +2379,25 @@ impl Blockchain { let mut processed = IndexSet::new(); queue.extend(tips); - // get last element from queue (order doesn't matter and its faster than moving all elements) + // Get last element from queue (order doesn't matter and its faster than moving all elements) while let Some(hash) = queue.pop() { let block = provider.get_block_header_by_hash(&hash).await?; - // check that the block height is higher than the height passed in param + // Check that the block height is higher than the height passed in param if until_height < block.get_height() { - // add all txs from block + // Add all TXs from block for tx in block.get_txs_hashes() { // Check that we don't have it yet if !hashes.contains(tx) { // Then check that it's executed in this block if provider.is_tx_executed_in_block(tx, &hash)? { - // add it to the list + // Add it to the list hashes.insert(tx.clone()); } } } - // add all tips from block (but check that we didn't already added it) + // Add all tips from block (but check that we didn't already added it) for tip in block.get_tips() { if !processed.contains(tip) { processed.insert(tip.clone()); @@ -2409,7 +2410,7 @@ impl Blockchain { Ok(hashes) } - // if a block is not ordered, it's an orphaned block and its transactions are not honoured + // If a block is not ordered, it's an orphaned block and its transactions are not honoured pub async fn is_block_orphaned_for_storage(&self, storage: &S, hash: &Hash) -> bool { trace!("is block {} orphaned", hash); !storage.is_block_topological_ordered(hash).await @@ -2419,7 +2420,7 @@ impl Blockchain { self.is_side_block_internal(storage, hash, self.get_topo_height()).await } - // a block is a side block if its ordered and its block height is less than or equal to height of past 8 topographical blocks + // A block is a side block if its ordered and its 
block height is less than or equal to height of past 8 topological blocks pub async fn is_side_block_internal

(&self, provider: &P, hash: &Hash, current_topoheight: u64) -> Result where P: DifficultyProvider + DagOrderProvider @@ -2430,14 +2431,14 @@ impl Blockchain { } let topoheight = provider.get_topo_height_for_hash(hash).await?; - // genesis block can't be a side block + // Genesis block can't be a side block if topoheight == 0 || topoheight > current_topoheight { return Ok(false) } let height = provider.get_height_for_block_hash(hash).await?; - // verify if there is a block with height higher than this block in past 8 topo blocks + // Verify that there is a block with height higher than this block in past 8 topo blocks let mut counter = 0; let mut i = topoheight - 1; while counter < STABLE_LIMIT && i > 0 { @@ -2454,7 +2455,7 @@ impl Blockchain { Ok(false) } - // to have stable order: it must be ordered, and be under the stable height limit + // For a stable order, the block must be both ordered and below the stable height limit. pub async fn has_block_stable_order

(&self, provider: &P, hash: &Hash, topoheight: u64) -> Result where P: DagOrderProvider @@ -2493,27 +2494,27 @@ impl Blockchain { txs.extend(mempool.drain()); } - // Try to add all txs back to mempool if possible - // We try to prevent lost/to be orphaned - // We try to add back all txs already in mempool just in case + // Try to add all txs back to mempool if possible. + // We try to prevent lost/to be orphaned. + // We try to add back all txs already in mempool just in case. { for (hash, tx) in txs { debug!("Trying to add TX {} to mempool again", hash); if let Err(e) = self.add_tx_to_mempool_with_storage_and_hash(storage, tx, hash, false).await { - debug!("TX rewinded is not compatible anymore: {}", e); + debug!("TX rewind is not compatible anymore: {}", e); } } } self.height.store(new_height, Ordering::Release); self.topoheight.store(new_topoheight, Ordering::Release); - // update stable height if it's allowed + // Update stable height if it's allowed if !stop_at_stable_height { let tips = storage.get_tips().await?; let (stable_hash, stable_height) = self.find_common_base::(&storage, &tips).await?; let stable_topoheight = storage.get_topo_height_for_hash(&stable_hash).await?; - // if we have a RPC server, propagate the StableHeightChanged if necessary + // If we have a RPC server, propagate the StableHeightChanged if necessary if let Some(rpc) = self.rpc.read().await.as_ref() { let previous_stable_height = self.get_stable_height(); let previous_stable_topoheight = self.get_stable_topoheight(); @@ -2557,21 +2558,21 @@ impl Blockchain { Ok(new_topoheight) } - // Calculate the average block time on the last 50 blocks - // It will return the target block time if we don't have enough blocks - // We calculate it by taking the timestamp of the block at topoheight - 50 and the timestamp of the block at topoheight - // It is the same as computing the average time between the last 50 blocks but much faster - // Genesis block timestamp isn't take in count for this 
calculation + // Calculate the average block time on the last 50 blocks. + // If fewer than 50 blocks are available, return the target block time. + // The calculation uses timestamps of the block at topoheight - 50 and at topoheight. + // This is the same as computing the average time between the last 50 blocks but much faster. + // The genesis block's timestamp is not included in this calculation. pub async fn get_average_block_time

(&self, provider: &P) -> Result where P: DifficultyProvider + PrunedTopoheightProvider + DagOrderProvider { - // current topoheight + // Current topoheight let topoheight = self.get_topo_height(); - // we need to get the block hash at topoheight - 50 to compare - // if topoheight is 0, returns the target as we don't have any block - // otherwise returns topoheight + // We need to get the block hash at topoheight - 50 to compare. + // If topoheight is 0, returns the target as we don't have any block. + // Otherwise returns topoheight. let mut count = if topoheight > 50 { 50 } else if topoheight <= 1 { @@ -2580,7 +2581,7 @@ impl Blockchain { topoheight - 1 }; - // check that we are not under the pruned topoheight + // Check that we are not under the pruned topoheight if let Some(pruned_topoheight) = provider.get_pruned_topoheight().await? { if topoheight - count < pruned_topoheight { count = pruned_topoheight @@ -2598,8 +2599,8 @@ impl Blockchain { } } -// Estimate the required fees for a transaction -// For V1, new keys are only counted one time for creation fee instead of N transfers to it +// Estimate the required fees for a transaction. +// For V1, new keys are only counted one time for creation fee instead of N transfers to it. pub async fn estimate_required_tx_fees(provider: &P, current_topoheight: u64, tx: &Transaction, version: BlockVersion) -> Result { let mut output_count = 0; let mut new_addresses = 0; @@ -2627,7 +2628,7 @@ pub fn side_block_reward_percentage(side_blocks: u64) -> u64 { side_block_percent = SIDE_BLOCK_REWARD_PERCENT / (side_blocks * 2); } else { // If we have more than 3 side blocks at same height - // we reduce the reward to 5% + // we reduce the reward to 5%. 
side_block_percent = SIDE_BLOCK_REWARD_MIN_PERCENT; } } diff --git a/xelis_daemon/src/core/blockdag.rs b/xelis_daemon/src/core/blockdag.rs index 7c0cfe5c..7abb672b 100644 --- a/xelis_daemon/src/core/blockdag.rs +++ b/xelis_daemon/src/core/blockdag.rs @@ -13,7 +13,7 @@ use super::{ error::BlockchainError, }; -// sort the scores by cumulative difficulty and, if equals, by hash value +// Sort the scores by cumulative difficulty and, if equal, by hash value pub fn sort_descending_by_cumulative_difficulty(scores: &mut Vec<(T, CumulativeDifficulty)>) where T: AsRef, @@ -32,7 +32,7 @@ where } } -// sort the scores by cumulative difficulty and, if equals, by hash value +// Sort the scores by cumulative difficulty and, if equal, by hash value pub fn sort_ascending_by_cumulative_difficulty(scores: &mut Vec<(T, CumulativeDifficulty)>) where T: AsRef, @@ -51,9 +51,9 @@ where } } -// Sort the TIPS by cumulative difficulty -// If the cumulative difficulty is the same, the hash value is used to sort -// Hashes are sorted in descending order +// Sort the TIPS by cumulative difficulty. +// If the cumulative difficulty is the same, the hash value is used to sort. +// Hashes are sorted in descending order. 
pub async fn sort_tips(storage: &S, tips: I) -> Result, BlockchainError> where S: Storage, @@ -77,7 +77,7 @@ where } } -// determine he lowest height possible based on tips and do N+1 +// Determine the lowest height possible based on tips and do N+1 pub async fn calculate_height_at_tips<'a, D, I>(provider: &D, tips: I) -> Result where D: DifficultyProvider, @@ -99,7 +99,7 @@ where Ok(height) } -// find the best tip based on cumulative difficulty of the blocks +// Find the best tip based on cumulative difficulty of the blocks pub async fn find_best_tip_by_cumulative_difficulty<'a, D, I>(provider: &D, tips: I) -> Result<&'a Hash, BlockchainError> where D: DifficultyProvider, diff --git a/xelis_daemon/src/core/difficulty/v1.rs b/xelis_daemon/src/core/difficulty/v1.rs index 58daf258..37829a34 100644 --- a/xelis_daemon/src/core/difficulty/v1.rs +++ b/xelis_daemon/src/core/difficulty/v1.rs @@ -15,12 +15,12 @@ const LEFT_SHIFT: VarUint = VarUint::from_u64(1 << SHIFT); // Process noise covariance: 5% of shift const PROCESS_NOISE_COVAR: VarUint = VarUint::from_u64((1 << SHIFT) / 100 * 5); -// Initial estimate covariance -// It is used by first blocks +// Initial estimate covariance. +// It is used by first blocks. pub const P: VarUint = LEFT_SHIFT; -// Calculate the required difficulty for the next block based on the solve time of the previous block -// We are using a Kalman filter to estimate the hashrate and adjust the difficulty +// Calculate the required difficulty for the next block based on the solve time of the previous block. +// We are using a Kalman filter to estimate the hashrate and adjust the difficulty. 
pub fn calculate_difficulty(solve_time: TimestampMillis, previous_difficulty: Difficulty, p: VarUint, minimum_difficulty: Difficulty) -> (Difficulty, VarUint) { let z = previous_difficulty / solve_time; trace!("Calculating difficulty v1, solve time: {}, previous_difficulty: {}, z: {}, p: {}", format_duration(Duration::from_millis(solve_time)), format_difficulty(previous_difficulty), z, p); diff --git a/xelis_daemon/src/core/difficulty/v2.rs b/xelis_daemon/src/core/difficulty/v2.rs index e45222b3..2c8ca35a 100644 --- a/xelis_daemon/src/core/difficulty/v2.rs +++ b/xelis_daemon/src/core/difficulty/v2.rs @@ -22,8 +22,8 @@ const PROCESS_NOISE_COVAR: VarUint = VarUint::from_u64((1 << SHIFT) * SHIFT / MI // It is used by first blocks pub const P: VarUint = LEFT_SHIFT; -// Calculate the required difficulty for the next block based on the solve time of the previous block -// We are using a Kalman filter to estimate the hashrate and adjust the difficulty +// Calculate the required difficulty for the next block based on the solve time of the previous block. +// We are using a Kalman filter to estimate the hashrate and adjust the difficulty. 
pub fn calculate_difficulty(solve_time: TimestampMillis, previous_difficulty: Difficulty, p: VarUint, minimum_difficulty: Difficulty) -> (Difficulty, VarUint) { let z = previous_difficulty * MILLIS_PER_SECOND / solve_time; trace!("Calculating difficulty v2, solve time: {}, previous_difficulty: {}, z: {}, p: {}", format_duration(Duration::from_millis(solve_time)), format_difficulty(previous_difficulty), z, p); diff --git a/xelis_daemon/src/core/error.rs b/xelis_daemon/src/core/error.rs index 171f9ca0..14da0b6c 100644 --- a/xelis_daemon/src/core/error.rs +++ b/xelis_daemon/src/core/error.rs @@ -112,7 +112,7 @@ pub enum BlockchainError { #[error("Timestamp {} is less than parent", _0)] TimestampIsLessThanParent(TimestampMillis), #[error("Timestamp {} is greater than current time {}", _0, _1)] - TimestampIsInFuture(TimestampMillis, TimestampMillis), // left is expected, right is got + TimestampIsInFuture(TimestampMillis, TimestampMillis), // Left is expected, right is got #[error("Block height mismatch, expected {}, got {}.", _0, _1)] InvalidBlockHeight(u64, u64), #[error("Block height is zero which is not allowed")] @@ -152,10 +152,10 @@ pub enum BlockchainError { #[error("Tx {} is already in block", _0)] TxAlreadyInBlock(Hash), #[error("Duplicate registration tx for address '{}' found in same block", _0)] - DuplicateRegistration(Address), // address + DuplicateRegistration(Address), // Address #[error("Invalid Tx fee, expected at least {}, got {}", format_xelis(*_0), format_xelis(*_1))] InvalidTxFee(u64, u64), - #[error("Fees are lower for this TX than the overrided TX, expected at least {}, got {}", format_xelis(*_0), format_xelis(*_1))] + #[error("Fees are lower for this TX than the overridden TX, expected at least {}, got {}", format_xelis(*_0), format_xelis(*_1))] FeesToLowToOverride(u64, u64), #[error("No account found for {}", _0)] AccountNotFound(Address), @@ -237,7 +237,7 @@ pub enum BlockchainError { UnsupportedOperation, #[error("Data not found on disk: 
{}", _0)] NotFoundOnDisk(DiskContext), - #[error("Invalid paramater: max chain response size isn't in range")] + #[error("Invalid parameter: max chain response size isn't in range")] ConfigMaxChainResponseSize, #[error("Invalid config sync mode")] ConfigSyncMode, diff --git a/xelis_daemon/src/core/hard_fork.rs b/xelis_daemon/src/core/hard_fork.rs index 311a8a09..903b849b 100644 --- a/xelis_daemon/src/core/hard_fork.rs +++ b/xelis_daemon/src/core/hard_fork.rs @@ -21,7 +21,7 @@ pub fn get_hard_fork_at_height(network: &Network, height: u64) -> Option<&HardFo } // Get the version of the hard fork at a given height -// and returns true if there is a hard fork (version change) at that height +// and returns true if there is a hard fork (version change) at that height. pub fn has_hard_fork_at_height(network: &Network, height: u64) -> (bool, BlockVersion) { match get_hard_fork_at_height(network, height) { Some(hard_fork) => (hard_fork.height == height, hard_fork.version), @@ -42,9 +42,9 @@ pub fn get_pow_algorithm_for_version(version: BlockVersion) -> Algorithm { } } -// This function checks if a version is matching the requirements -// it split the version if it contains a `-` and only takes the first part -// to support our git commit hash +// This function checks if a version is matching the requirements. +// If the version contains a `-`, split it and use only the first part, +// which supports our git commit hash. 
fn is_version_matching_requirement(version: &str, req: &str) -> Result { let r = semver::VersionReq::parse(req)?; let str_version = match version.split_once('-') { diff --git a/xelis_daemon/src/core/mempool.rs b/xelis_daemon/src/core/mempool.rs index fde7d18a..a02cd21c 100644 --- a/xelis_daemon/src/core/mempool.rs +++ b/xelis_daemon/src/core/mempool.rs @@ -24,27 +24,27 @@ use xelis_common::{ block::BlockVersion }; -// Wrap a TX with its hash and size in bytes for faster access -// size of tx can be heavy to compute, so we store it here +// Wrap a TX with its hash and size in bytes for faster access. +// Size of tx can be heavy to compute, so we store it here. #[derive(serde::Serialize)] pub struct SortedTx { tx: Arc, - first_seen: TimestampSeconds, // timestamp when the tx was added + first_seen: TimestampSeconds, // Timestamp when the tx was added size: usize } -// This struct is used to keep nonce cache for a specific key for faster verification -// But we also include a sorted list of txs for this key, ordered by nonce -// and a "expected balance" for this key -// Min/max bounds are used to compute the index of the tx in the sorted list based on its nonce -// You can get the TX at nonce N by computing the index with (N - min) % (max + 1 - min) +// This struct is used to keep nonce cache for a specific key for faster verification. +// We also include a sorted list of txs for this key, ordered by nonce +// and an "expected balance" for this key. +// Min/max bounds are used to compute the index of the tx in the sorted list based on its nonce. +// You can get the TX at nonce N by computing the index with (N - min) % (max + 1 - min).
#[derive(Serialize, Deserialize)] pub struct AccountCache { - // lowest nonce used + // Lowest nonce used min: u64, - // highest nonce used + // Highest nonce used max: u64, - // all txs for this user ordered by nonce + // All txs for this user ordered by nonce txs: IndexSet>, // Expected balances after all txs in this cache // This is also used to verify the validity of the TX spendings @@ -54,9 +54,9 @@ pub struct Mempool { // Used for log purpose mainnet: bool, - // store all txs waiting to be included in a block + // Store all txs waiting to be included in a block txs: HashMap, SortedTx>, - // store all sender's nonce for faster finding + // Store all sender's nonce for faster finding caches: HashMap } @@ -81,25 +81,25 @@ impl Mempool { let hash = Arc::new(hash); let nonce = tx.get_nonce(); - // update the cache for this owner + // Update the cache for this owner let mut must_update = true; if let Some(cache) = self.caches.get_mut(tx.get_source()) { - // delete the TX if its in the range of already tracked nonces + // Delete the TX if it's in the range of already tracked nonces trace!("Cache found for owner {} with nonce range {}-{}, nonce = {}", tx.get_source().as_address(self.mainnet), cache.get_min(), cache.get_max(), nonce); - // Support the case where the nonce is already used in cache - // If a user want to cancel its TX, he can just resend a TX with same nonce and higher fee - // NOTE: This is not possible anymore, disabled in blockchain function + // Support the case where the nonce is already used in cache. + // If a user wants to cancel their TX, they can just resend a TX with same nonce and higher fee. + // NOTE: This is not possible anymore, disabled in blockchain function.
if nonce >= cache.get_min() && nonce <= cache.get_max() { trace!("nonce {} is in range {}-{}", nonce, cache.get_min(), cache.get_max()); - // because it's based on order and we may have the same order + // Because it's based on order and we may have the same order let index = ((nonce - cache.get_min()) % (cache.get_max() + 1 - cache.get_min())) as usize; cache.txs.insert(hash.clone()); must_update = false; if let Some(tx_hash) = cache.txs.swap_remove_index(index) { trace!("TX {} with same nonce found in cache, removing it from sorted txs", tx_hash); - // remove the tx hash from sorted txs + // Remove the tx hash from sorted txs if self.txs.remove(&tx_hash).is_none() { warn!("TX {} not found in mempool while deleting collision with {}", tx_hash, hash); } @@ -117,7 +117,7 @@ impl Mempool { let mut txs = IndexSet::new(); txs.insert(hash.clone()); - // init the cache + // Init the cache let cache = AccountCache { max: nonce, min: nonce, @@ -133,17 +133,17 @@ impl Mempool { tx }; - // insert in map + // Insert in map self.txs.insert(hash, sorted_tx); Ok(()) } - // Remove a TX using its hash from mempool - // This will recalculate the cache bounds + // Remove a TX using its hash from mempool. + // This will recalculate the cache bounds. 
pub fn remove_tx(&mut self, hash: &Hash) -> Result<(), BlockchainError> { let tx = self.txs.remove(hash).ok_or_else(|| BlockchainError::TxNotFound(hash.clone()))?; - // remove the tx hash from sorted txs + // Remove the tx hash from sorted txs let key = tx.get_tx().get_source(); let mut delete = false; if let Some(cache) = self.caches.get_mut(key) { @@ -276,11 +276,11 @@ impl Mempool { txs } - // delete all old txs not compatible anymore with current state of chain - // this is called when a new block is added to the chain - // Because of DAG reorg, we can't only check updated keys from new block, - // as a block could be orphaned and the nonce order would change - // So we need to check all keys from mempool and compare it from storage + // Remove all outdated TXs that are no longer compatible with the current chain state. + // This is done when a new block is added to the chain. + // Due to DAG reorgs, we can't just check updated keys from the new block, + // because a block could be orphaned, changing the nonce order. + // Thus, we need to compare all keys in the mempool with those in storage. pub async fn clean_up(&mut self, storage: &S, stable_topoheight: u64, topoheight: u64, block_version: BlockVersion) -> Vec<(Arc, SortedTx)> { trace!("Cleaning up mempool..."); @@ -296,9 +296,8 @@ impl Mempool { let nonce = match storage.get_last_nonce(&key).await { Ok((_, version)) => version.get_nonce(), Err(e) => { - // We get an error while retrieving the last nonce for this key, - // that means the key is not in storage anymore, so we can delete safely - // we just have to skip this iteration so it's not getting re-injected + // An error retrieving the last nonce for this key indicates it's no longer in storage. + // Therefore, we can safely delete it and skip this iteration to avoid re-injection. 
warn!("Error while getting nonce for owner {}, he maybe has no nonce anymore, skipping: {}", key.as_address(self.mainnet), e); // Delete all txs from this cache @@ -316,10 +315,10 @@ impl Mempool { debug!("Owner {} has nonce {}, cache min: {}, max: {}", key.as_address(self.mainnet), nonce, cache.get_min(), cache.get_max()); let mut delete_cache = false; - // Check if the minimum nonce is higher than the new nonce, that means - // all TXs will be orphaned as its suite got broken - // or, check and delete txs if the nonce is lower than the new nonce - // otherwise the cache is still up to date + // Check if the minimum nonce is higher than the new nonce. If so, + // all TXs are orphaned because the nonce sequence is broken. + // Alternatively, delete TXs if their nonce is lower than the new nonce. + // Otherwise, the cache is still up to date. if cache.get_min() > nonce { debug!("All TXs for key {} are orphaned, deleting them", key.as_address(self.mainnet)); @@ -335,12 +334,12 @@ impl Mempool { delete_cache = true; } else if cache.get_min() <= nonce { debug!("Verifying TXs for owner {} with nonce <= {}", key.as_address(self.mainnet), nonce); - // txs hashes to delete + // TXs hashes to delete let mut hashes: HashSet> = HashSet::with_capacity(cache.txs.len()); - // filter all txs hashes which are not found - // or where its nonce is smaller than the new nonce - // TODO when drain_filter is stable, use it (allow to get all hashes deleted) + // Filter all TXs hashes which are not found + // or where its nonce is smaller than the new nonce. + // TODO: When drain_filter is stable, use it (allow to get all hashes deleted). 
let mut max: Option = None; let mut min: Option = None; cache.txs.retain(|hash| { @@ -385,11 +384,11 @@ impl Mempool { cache.max = max; } - // delete the nonce cache if no txs are left + // Delete the nonce cache if no txs are left delete_cache = cache.txs.is_empty(); - // Cache is not empty yet, but we deleted some TXs from it, balances may be out-dated, verify TXs left - // TODO: there may be a way to optimize this even more, by checking if deleted TXs are those who got mined - // Which mean, expected balances are still up to date with chain state + // Cache is not empty yet, but we deleted some TXs from it, balances may be out-dated, verify TXs left. + // TODO: there may be a way to optimize this even more, by checking if deleted TXs are already mined. + // This way, we ensure expected balances match the chain state. if !delete_cache { let mut txs = Vec::with_capacity(cache.txs.len()); let mut txs_hashes = Vec::with_capacity(cache.txs.len()); @@ -406,11 +405,10 @@ impl Mempool { } if !delete_cache { - // Instead of verifiying each TX one by one, we verify them all at once - // This is much faster and is basically the same because: - // If one TX is invalid, all next TXs are invalid - // NOTE: this can be revert easily in case we are deleting valid TXs also, - // But will be slower during high traffic + // Verify all TXs at once instead of individually. + // If one TX is invalid, all subsequent TXs are invalid too. 
+ // NOTE: This approach can be reverted if valid TXs are mistakenly deleted, + // but will be slower during high traffic debug!("Verifying TXs ({}) for sender {} at topoheight {}", txs_hashes.iter().map(|hash| hash.to_string()).collect::>().join(", "), key.as_address(self.mainnet), topoheight); let mut state = MempoolState::new(&self, storage, stable_topoheight, topoheight, block_version); if let Err(e) = Transaction::verify_batch(txs.as_slice(), &mut state).await { @@ -439,14 +437,14 @@ impl Mempool { hashes.extend(local_cache); } - // now delete all necessary txs + // Now delete all necessary txs for hash in hashes { debug!("Deleting TX {} for owner {}", hash, key.as_address(self.mainnet)); if let Some(sorted_tx) = self.txs.remove(&hash) { deleted_transactions.push((hash, sorted_tx)); } else { // This should never happen, but better to put a warning here - // in case of a lurking bug + // in case of a lurking bug. warn!("TX {} not found in mempool while deleting", hash); } } @@ -500,8 +498,8 @@ impl AccountCache { self.max } - // Get the next nonce for this cache - // This is necessary when we have several TXs + // Get the next nonce for this cache. + // This is necessary when we have several TXs. pub fn get_next_nonce(&self) -> u64 { self.max + 1 } diff --git a/xelis_daemon/src/core/merkle.rs b/xelis_daemon/src/core/merkle.rs index 3504a011..fcde95c8 100644 --- a/xelis_daemon/src/core/merkle.rs +++ b/xelis_daemon/src/core/merkle.rs @@ -2,11 +2,10 @@ use std::borrow::Cow; use xelis_common::{crypto::{hash, Hash, HASH_SIZE}, serializer::Serializer}; -// This builder is used to build a merkle tree from a list of hashes -// It uses a bottom-up approach to build the tree -// The tree is built by taking pairs of hashes and hashing them together -// The resulting hash is then added to the list of hashes -// This process is repeated until there is only one hash left +// This builder constructs a Merkle tree from a list of hashes. 
+// It uses a bottom-up approach, pairing and hashing hashes together. +// The resulting hash from each pair is added to the list of hashes. +// This process continues until only one hash remains. pub struct MerkleBuilder<'a> { hashes: Vec> } diff --git a/xelis_daemon/src/core/nonce_checker.rs b/xelis_daemon/src/core/nonce_checker.rs index 2741c1a4..73f1e0b1 100644 --- a/xelis_daemon/src/core/nonce_checker.rs +++ b/xelis_daemon/src/core/nonce_checker.rs @@ -38,8 +38,8 @@ impl AccountEntry { } } -// A simple cache that checks if a nonce has already been used -// Stores the topoheight of the block that used the nonce +// A simple cache that checks if a nonce has already been used. +// Stores the topoheight of the block that used the nonce. pub struct NonceChecker { cache: HashMap, } @@ -51,7 +51,7 @@ impl NonceChecker { } } - // Undo the nonce usage + // Undo the nonce usage. // We remove it from the used nonces list and revert the expected nonce to the previous nonce if present. pub fn undo_nonce(&mut self, key: &PublicKey, nonce: u64) { if let Some(entry) = self.cache.get_mut(key) { @@ -65,8 +65,8 @@ impl NonceChecker { } } - // Key may be cloned on first entry - // Returns false if nonce is already used + // Key may be cloned on first entry. + // Returns false if nonce is already used. pub async fn use_nonce(&mut self, storage: &S, key: &PublicKey, nonce: u64, topoheight: u64) -> Result { trace!("use_nonce {} for {} at topoheight {}", nonce, key.as_address(storage.is_mainnet()), topoheight); diff --git a/xelis_daemon/src/core/simulator.rs b/xelis_daemon/src/core/simulator.rs index b42a09a5..a790c912 100644 --- a/xelis_daemon/src/core/simulator.rs +++ b/xelis_daemon/src/core/simulator.rs @@ -50,8 +50,8 @@ impl Display for Simulator { } impl Simulator { - // Start the Simulator mode to generate new blocks automatically - // It generates random miner keys and mine blocks with them + // Start the Simulator mode to generate new blocks automatically. 
+ // It generates random miner keys and mine blocks with them. pub async fn start(&self, blockchain: Arc>) { let millis_interval = match self { Self::Stress => 300, @@ -90,7 +90,7 @@ impl Simulator { } } - // TODO + // TODO: // let max_txs = match self { // Self::Stress => 200, // _ => 15 @@ -118,7 +118,7 @@ impl Simulator { blocks } - // TODO use transaction builder + // TODO: Use transaction builder // async fn generate_txs_in_mempool(&self, max_txs: usize, max_transfers: usize, max_amount: u64, rng: &mut OsRng, keys: &Vec, blockchain: &Arc>) { // info!("Adding simulated TXs in mempool"); // let n = rng.gen_range(0..max_txs); diff --git a/xelis_daemon/src/core/state/chain_state.rs b/xelis_daemon/src/core/state/chain_state.rs index ae338b63..f274104b 100644 --- a/xelis_daemon/src/core/state/chain_state.rs +++ b/xelis_daemon/src/core/state/chain_state.rs @@ -29,12 +29,12 @@ use crate::core::{ // Sender changes // This contains its expected next balance for next outgoing transactions -// But also contains the ciphertext changes happening (so a sum of each spendings for transactions) -// This is necessary to easily build the final user balance +// but also contains the ciphertext changes happening (so a sum of each spendings for transactions). +// This is necessary to easily build the final user balance. 
struct Echange { // If we are allowed to use the output balance for verification allow_output_balance: bool, - // if the versioned balance below is new for the current topoheight + // If the versioned balance below is new for the current topoheight new_version: bool, // Version balance of the account used for the verification version: VersionedBalance, @@ -55,10 +55,10 @@ impl Echange { } } - // Get the right balance to use for TX verification - // TODO we may need to check previous balances and up to the last output balance made - // So if in block A we spent TX A, and block B we got some funds, then we spent TX B in block C - // We are still able to use it even if it was built at same time as TX A + // Get the right balance to use for TX verification. + // TODO: We may need to check previous balances and up to the last output balance made. + // So if in block A we spent TX A, and block B we got some funds, then we spent TX B in block C, + // we are still able to use it even if it was built at same time as TX A. fn get_balance(&mut self) -> &mut CiphertextCache { let output = self.output_balance_used || self.allow_output_balance; let (ct, used) = self.version.select_balance(output); @@ -77,10 +77,10 @@ impl Echange { struct Account<'a> { // Account nonce used to verify valid transaction nonce: VersionedNonce, - // Assets ready as source for any transfer/transaction - // TODO: they must store also the ciphertext change - // It will be added by next change at each TX - // This is necessary to easily build the final user balance + // Assets ready as source for any transfer/transaction. + // TODO: They must store also the ciphertext change. + // It will be added by next change at each TX. + // This is necessary to easily build the final user balance. 
assets: HashMap<&'a Hash, Echange> } @@ -124,16 +124,16 @@ impl <'a, S: Storage> DerefMut for StorageReference<'a, S> { } } -// This struct is used to verify the transactions executed at a snapshot of the blockchain -// It is read-only but write in memory the changes to the balances and nonces -// Once the verification is done, the changes are written to the storage +// This struct is used to verify the transactions executed at a snapshot of the blockchain. +// It is read-only but temporarily holds changes to balances and nonces in memory. +// After the verification process, the changes are saved to the storage. pub struct ChainState<'a, S: Storage> { // Storage to read and write the balances and nonces storage: StorageReference<'a, S>, // Balances of the receiver accounts receiver_balances: HashMap<&'a PublicKey, HashMap<&'a Hash, VersionedBalance>>, - // Sender accounts - // This is used to verify ZK Proofs and store/update nonces + // Sender accounts. + // This is used to verify ZK Proofs and store/update nonces. accounts: HashMap<&'a PublicKey, Account<'a>>, // Current stable topoheight of the snapshot stable_topoheight: u64, @@ -186,9 +186,9 @@ impl<'a, S: Storage> ApplicableChainState<'a, S> { self.inner.storage.as_mut() } - // This function is called after the verification of all needed transactions - // This will consume ChainState and apply all changes to the storage - // In case of incoming and outgoing transactions in same state, the final balance will be computed + // This function is called after the verification of all needed transactions. + // This will consume ChainState and apply all changes to the storage. + // In case of incoming and outgoing transactions in same state, the final balance will be computed. 
pub async fn apply_changes(mut self) -> Result<(), BlockchainError> { // Apply changes for sender accounts for (key, account) in &mut self.inner.accounts { @@ -196,11 +196,11 @@ impl<'a, S: Storage> ApplicableChainState<'a, S> { self.inner.storage.set_last_nonce_to(key, self.inner.topoheight, &account.nonce).await?; let balances = self.inner.receiver_balances.entry(&key).or_insert_with(HashMap::new); - // Because account balances are only used to verify the validity of ZK Proofs, we can't store them - // We have to recompute the final balance for each asset using the existing current balance - // Otherwise, we could have a front running problem - // Example: Alice sends 100 to Bob, Bob sends 100 to Charlie - // But Bob built its ZK Proof with the balance before Alice's transaction + // Because account balances are only used to verify the validity of ZK Proofs, we can't store them. + // We have to recompute the final balance for each asset using the existing current balance. + // Otherwise, we could have a front running problem. + // Example: Alice sends 100 to Bob, Bob sends 100 to Charlie, + // but Bob built its ZK Proof with the balance before Alice's transaction. for (asset, echange) in account.assets.drain() { trace!("{} {} updated for {} at topoheight {}", echange.version, asset, key.as_address(self.inner.storage.is_mainnet()), self.inner.topoheight); let Echange { mut version, output_sum, output_balance_used, new_version, .. 
} = echange; @@ -208,52 +208,52 @@ impl<'a, S: Storage> ApplicableChainState<'a, S> { match balances.entry(asset) { Entry::Occupied(mut o) => { trace!("{} already has a balance for {} at topoheight {}", key.as_address(self.inner.storage.is_mainnet()), asset, self.inner.topoheight); - // We got incoming funds while spending some - // We need to split the version in two - // Output balance is the balance after outputs spent without incoming funds - // Final balance is the balance after incoming funds + outputs spent + // We got incoming funds while spending some, + // we need to split the version in two. + // Output balance is the balance after outputs spent without incoming funds. + // Final balance is the balance after incoming funds + outputs spent. // This is a necessary process for the following case: // Alice sends 100 to Bob in block 1000 - // But Bob build 2 txs before Alice, one to Charlie and one to David - // First Tx of Blob is in block 1000, it will be valid - // But because of Alice incoming, the second Tx of Bob will be invalid + // But Bob builds 2 txs before Alice, one to Charlie and one to David. + // First Tx of Bob is in block 1000, it will be valid + // But because of Alice incoming, the second Tx of Bob will be invalid. let final_version = o.get_mut(); // We got input and output funds, mark it final_version.set_balance_type(BalanceType::Both); - // We must build output balance correctly + // We must build output balance correctly. // For that, we use the same balance before any inputs - // And deduct outputs + // and deduct outputs. // let clean_version = self.storage.get_new_versioned_balance(key, asset, self.topoheight).await?; // let mut output_balance = clean_version.take_balance(); // *output_balance.computable()? -= &output_sum; - // Determine which balance to use as next output balance + // Determine which balance to use as next output balance.
// This is used in case TXs that are built at same reference, but - // executed in differents topoheights have the output balance reported - // to the next topoheight each time to stay valid during ZK Proof verification + // executed in different topoheights have the output balance reported + // to the next topoheight each time to stay valid during ZK Proof verification. let output_balance = version.take_balance_with(output_balance_used); // Set to our final version the new output balance final_version.set_output_balance(Some(output_balance)); - // Build the final balance - // All inputs are already added, we just need to substract the outputs + // Build the final balance. + // All inputs are already added, we just need to subtract the outputs. let final_balance = final_version.get_mut_balance().computable()?; *final_balance -= output_sum; }, Entry::Vacant(e) => { trace!("{} has no balance for {} at topoheight {}", key.as_address(self.inner.storage.is_mainnet()), asset, self.inner.topoheight); - // We have no incoming update for this key - // Select the right final version - // For that, we must check if we used the output balance and/or if we are not on the last version + // We have no incoming update for this key. + // Select the right final version. + // For that, we must check if we used the output balance and/or if we are not on the last version. let version = if output_balance_used || !new_version { - // We must fetch again the version to sum it with the output - // This is necessary to build the final balance + // We must fetch again the version to sum it with the output. + // This is necessary to build the final balance. 
let mut new_version = self.inner.storage.get_new_versioned_balance(key, asset, self.inner.topoheight).await?; - // Substract the output sum - trace!("{} has no balance for {} at topoheight {}, substract output sum", key.as_address(self.inner.storage.is_mainnet()), asset, self.inner.topoheight); + // Subtract the output sum + trace!("{} has no balance for {} at topoheight {}, subtract output sum", key.as_address(self.inner.storage.is_mainnet()), asset, self.inner.topoheight); *new_version.get_mut_balance().computable()? -= output_sum; if self.inner.block_version == BlockVersion::V0 { @@ -265,7 +265,7 @@ impl<'a, S: Storage> ApplicableChainState<'a, S> { // Balance updated at topo 1001 as input // TX A is built with reference 1000 but executed at topo 1002 // TX B reference 1000 but output balance is at topo 1002 and it include the final balance of (TX A + input at 1001) - // So we report the output balance for next TX verification + // So we report the output balance for next TX verification. new_version.set_output_balance(Some(version.take_balance_with(output_balance_used))); new_version.set_balance_type(BalanceType::Both); } @@ -285,8 +285,8 @@ impl<'a, S: Storage> ApplicableChainState<'a, S> { } } - // Apply all balances changes at topoheight - // We injected the sender balances in the receiver balances previously + // Apply all balances changes at topoheight. + // We injected the sender balances in the receiver balances previously. for (account, balances) in self.inner.receiver_balances { for (asset, version) in balances { trace!("Saving versioned balance {} for {} at topoheight {}", version, account.as_address(self.inner.storage.is_mainnet()), self.inner.topoheight); @@ -342,7 +342,7 @@ impl<'a, S: Storage> ChainState<'a, S> { } // Create a sender account by fetching its nonce and create a empty HashMap for balances, - // those will be fetched lazily + // those will be fetched lazily. 
async fn create_sender_account(key: &PublicKey, storage: &S, topoheight: u64) -> Result, BlockchainError> { let (topo, mut version) = storage .get_nonce_at_maximum_topoheight(key, topoheight).await? @@ -355,8 +355,8 @@ impl<'a, S: Storage> ChainState<'a, S> { }) } - // Retrieve the receiver balance of an account - // This is mostly the final balance where everything is added (outputs and inputs) + // Retrieve the receiver balance of an account. + // This is mostly the final balance where everything is added (outputs and inputs). async fn internal_get_receiver_balance<'b>(&'b mut self, key: &'a PublicKey, asset: &'a Hash) -> Result<&'b mut Ciphertext, BlockchainError> { match self.receiver_balances.entry(key).or_insert_with(HashMap::new).entry(asset) { Entry::Occupied(o) => Ok(o.into_mut().get_mut_balance().computable()?), @@ -367,9 +367,9 @@ impl<'a, S: Storage> ChainState<'a, S> { } } - // Retrieve the sender balance of an account - // This is used for TX outputs verification - // This depends on the transaction and can be final balance or output balance + // Retrieve the sender balance of an account. + // This is used for TX outputs verification. + // This depends on the transaction and can be final balance or output balance. async fn internal_get_sender_verification_balance<'b>(&'b mut self, key: &'a PublicKey, asset: &'a Hash, reference: &Reference) -> Result<&'b mut CiphertextCache, BlockchainError> { trace!("getting sender verification balance for {} at topoheight {}, reference: {}", key.as_address(self.storage.is_mainnet()), self.topoheight, reference.topoheight); match self.accounts.entry(key) { @@ -395,8 +395,8 @@ impl<'a, S: Storage> ChainState<'a, S> { } } - // Update the output echanges of an account - // Account must have been fetched before calling this function + // Update the output echanges of an account. + // Account must have been fetched before calling this function. 
async fn internal_update_sender_echange(&mut self, key: &'a PublicKey, asset: &'a Hash, new_ct: Ciphertext) -> Result<(), BlockchainError> { trace!("update sender echange: {:?}", new_ct.compress()); let change = self.accounts.get_mut(key) @@ -409,8 +409,8 @@ impl<'a, S: Storage> ChainState<'a, S> { Ok(()) } - // Retrieve the account nonce - // Only sender accounts should be used here + // Retrieve the account nonce. + // Only sender accounts should be used here. async fn internal_get_account_nonce(&mut self, key: &'a PublicKey) -> Result { match self.accounts.entry(key) { Entry::Occupied(o) => Ok(o.get().nonce.get_nonce()), @@ -421,9 +421,9 @@ impl<'a, S: Storage> ChainState<'a, S> { } } - // Update the account nonce - // Only sender accounts should be used here - // For each TX, we must update the nonce by one + // Update the account nonce. + // Only sender accounts should be used here. + // For each TX, we must update the nonce by one. async fn internal_update_account_nonce(&mut self, account: &'a PublicKey, new_nonce: u64) -> Result<(), BlockchainError> { trace!("Updating nonce for {} to {} at topoheight {}", account.as_address(self.storage.is_mainnet()), new_nonce, self.topoheight); match self.accounts.entry(account) { diff --git a/xelis_daemon/src/core/state/mempool_state.rs b/xelis_daemon/src/core/state/mempool_state.rs index d58a7b4e..34a7609a 100644 --- a/xelis_daemon/src/core/state/mempool_state.rs +++ b/xelis_daemon/src/core/state/mempool_state.rs @@ -22,10 +22,10 @@ use crate::core::{ struct Account<'a> { // Account nonce used to verify valid transaction nonce: u64, - // Assets ready as source for any transfer/transaction + // Assets ready as source for any transfer/transaction. // TODO: they must store also the ciphertext change - // It will be added by next change at each TX - // This is necessary to easily build the final user balance + // It will be added by next change at each TX. + // This is necessary to easily build the final user balance. 
assets: HashMap<&'a Hash, Ciphertext> } @@ -36,8 +36,8 @@ pub struct MempoolState<'a, S: Storage> { storage: &'a S, // Receiver balances receiver_balances: HashMap<&'a PublicKey, HashMap<&'a Hash, Ciphertext>>, - // Sender accounts - // This is used to verify ZK Proofs and store/update nonces + // Sender accounts. + // This is used to verify ZK Proofs and store/update nonces. accounts: HashMap<&'a PublicKey, Account<'a>>, // The current stable topoheight of the chain stable_topoheight: u64, @@ -66,9 +66,9 @@ impl<'a, S: Storage> MempoolState<'a, S> { Some(account.assets) } - // Retrieve the receiver balance - // We never store the receiver balance in mempool, only outgoing balances - // So we just get it from our internal cache or from storage + // Retrieve the receiver balance. + // We never store the receiver balance in mempool, only outgoing balances. + // So we just get it from our internal cache or from storage. async fn internal_get_receiver_balance<'b>(&'b mut self, account: &'a PublicKey, asset: &'a Hash) -> Result<&'b mut Ciphertext, BlockchainError> { match self.receiver_balances.entry(account).or_insert_with(HashMap::new).entry(asset) { Entry::Occupied(entry) => Ok(entry.into_mut()), @@ -86,11 +86,11 @@ impl<'a, S: Storage> MempoolState<'a, S> { Ok(version.take_balance_with(output).take_ciphertext()?) } - // Retrieve the sender balance + // Retrieve the sender balance. // For this, we first look in our internal cache, - // If not found, we check in mempool cache, - // If still not present, we check in storage and determine using reference - // Which version to use + // if not found, we check in mempool cache, + // if still not present, we check in storage and determine using reference + // which version to use. 
async fn internal_get_sender_balance<'b>(&'b mut self, key: &'a PublicKey, asset: &'a Hash, reference: &Reference) -> Result<&'b mut Ciphertext, BlockchainError> { match self.accounts.entry(key) { Entry::Occupied(o) => { @@ -133,8 +133,8 @@ impl<'a, S: Storage> MempoolState<'a, S> { } } - // Retrieve the account nonce - // Only sender accounts should be used here + // Retrieve the account nonce. + // Only sender accounts should be used here. async fn internal_get_account_nonce(&mut self, key: &'a PublicKey) -> Result { match self.accounts.entry(key) { Entry::Occupied(o) => Ok(o.get().nonce), @@ -155,9 +155,9 @@ impl<'a, S: Storage> MempoolState<'a, S> { } } - // Update the account nonce - // Only sender accounts should be used here - // For each TX, we must update the nonce by one + // Update the account nonce. + // Only sender accounts should be used here. + // For each TX, we must update the nonce by one. async fn internal_update_account_nonce(&mut self, account: &'a PublicKey, new_nonce: u64) -> Result<(), BlockchainError> { match self.accounts.entry(account) { Entry::Occupied(mut o) => { @@ -207,8 +207,8 @@ impl<'a, S: Storage> BlockchainVerificationState<'a, BlockchainError> for Mempoo self.internal_get_sender_balance(account, asset, reference).await } - /// Apply new output to a sender account - /// In this state, we don't need to store the output + /// Apply new output to a sender account. + /// In this state, we don't need to store the output. 
async fn add_sender_output( &mut self, _: &'a PublicKey, diff --git a/xelis_daemon/src/core/storage/mod.rs b/xelis_daemon/src/core/storage/mod.rs index eaae490a..d6533ecb 100644 --- a/xelis_daemon/src/core/storage/mod.rs +++ b/xelis_daemon/src/core/storage/mod.rs @@ -24,42 +24,42 @@ pub trait Storage: BlockExecutionOrderProvider + DagOrderProvider + PrunedTopohe // Clear caches if exists async fn clear_caches(&mut self) -> Result<(), BlockchainError>; - // delete block at topoheight, and all pointers (hash_at_topo, topo_by_hash, reward, supply, diff, cumulative diff...) + // Delete block at topoheight, and all pointers (hash_at_topo, topo_by_hash, reward, supply, diff, cumulative diff...) async fn delete_block_at_topoheight(&mut self, topoheight: u64) -> Result<(Hash, Arc, Vec<(Hash, Arc)>), BlockchainError>; - // delete versioned balances at topoheight + // Delete versioned balances at topoheight async fn delete_versioned_balances_at_topoheight(&mut self, topoheight: u64) -> Result<(), BlockchainError>; - // delete versioned nonces at topoheight + // Delete versioned nonces at topoheight async fn delete_versioned_nonces_at_topoheight(&mut self, topoheight: u64) -> Result<(), BlockchainError>; - // delete versioned balances above topoheight + // Delete versioned balances above topoheight async fn delete_versioned_balances_above_topoheight(&mut self, topoheight: u64) -> Result<(), BlockchainError>; - // delete versioned nonces above topoheight + // Delete versioned nonces above topoheight async fn delete_versioned_nonces_above_topoheight(&mut self, topoheight: u64) -> Result<(), BlockchainError>; - // delete account registrations above topoheight + // Delete account registrations above topoheight async fn delete_registrations_above_topoheight(&mut self, topoheight: u64) -> Result<(), BlockchainError>; - // delete account registrations below topoheight + // Delete account registrations below topoheight async fn delete_registrations_below_topoheight(&mut self, 
topoheight: u64) -> Result<(), BlockchainError>; - // delete versioned balances below topoheight + // Delete versioned balances below topoheight async fn delete_versioned_balances_below_topoheight(&mut self, topoheight: u64) -> Result<(), BlockchainError>; - // delete versioned nonces below topoheight + // Delete versioned nonces below topoheight async fn delete_versioned_nonces_below_topoheight(&mut self, topoheight: u64) -> Result<(), BlockchainError>; - // delete all versions of balances under the specified topoheight - // for those who don't have more recents, set it to the topoheight - // for those above it, cut the chain by deleting the previous topoheight when it's going under + // Delete all balance versions below the specified topoheight. + // For entries with no more recent versions, set their height to the specified topoheight. + // For entries with newer versions, truncate the chain by removing older versions that fall below the topoheight. async fn create_snapshot_balances_at_topoheight(&mut self, topoheight: u64) -> Result<(), BlockchainError>; - // same as above but for nonces + // Same as above but for nonces async fn create_snapshot_nonces_at_topoheight(&mut self, topoheight: u64) -> Result<(), BlockchainError>; - // same as above but for registrations + // Same as above but for registrations async fn create_snapshot_registrations_at_topoheight(&mut self, topoheight: u64) -> Result<(), BlockchainError>; // Count is the number of blocks (topoheight) to rewind diff --git a/xelis_daemon/src/core/storage/providers/account.rs b/xelis_daemon/src/core/storage/providers/account.rs index 90b951d8..8340b767 100644 --- a/xelis_daemon/src/core/storage/providers/account.rs +++ b/xelis_daemon/src/core/storage/providers/account.rs @@ -6,24 +6,24 @@ use crate::core::{error::{BlockchainError, DiskContext}, storage::SledStorage}; #[async_trait] pub trait AccountProvider { - // first time we saw this account on chain + // First time we saw this account on chain async 
fn get_account_registration_topoheight(&self, key: &PublicKey) -> Result; - // set the registration topoheight + // Set the registration topoheight async fn set_account_registration_topoheight(&mut self, key: &PublicKey, topoheight: u64) -> Result<(), BlockchainError>; // Check if account is registered async fn is_account_registered(&self, key: &PublicKey) -> Result; - // Check if account is registered at topoheight - // This will check that the registration topoheight is less or equal to the given topoheight + // Check if account is registered at topoheight. + // This will check that the registration topoheight is less or equal to the given topoheight. async fn is_account_registered_at_topoheight(&self, key: &PublicKey, topoheight: u64) -> Result; // Delete all registrations at a certain topoheight async fn delete_registrations_at_topoheight(&mut self, topoheight: u64) -> Result<(), BlockchainError>; - // Get registered accounts supporting pagination and filtering by topoheight - // Returned keys must have a nonce or a balance updated in the range given + // Get registered accounts supporting pagination and filtering by topoheight. + // Returned keys must have a nonce or a balance updated in the range given. 
async fn get_registered_keys(&self, maximum: usize, skip: usize, minimum_topoheight: u64, maximum_topoheight: u64) -> Result, BlockchainError>; } diff --git a/xelis_daemon/src/core/storage/providers/asset.rs b/xelis_daemon/src/core/storage/providers/asset.rs index 77b323b9..dd4390ea 100644 --- a/xelis_daemon/src/core/storage/providers/asset.rs +++ b/xelis_daemon/src/core/storage/providers/asset.rs @@ -21,11 +21,11 @@ pub trait AssetProvider { async fn get_asset(&self, hash: &Hash) -> Result; // Get all available assets - // TODO: replace with impl Iterator> when async trait methods are stable + // TODO: Replace with impl Iterator> when async trait methods are stable async fn get_assets(&self) -> Result, BlockchainError>; // Get a partial list of assets supporting pagination and filtering by topoheight - // TODO: replace with impl Iterator> when async trait methods are stable + // TODO: Replace with impl Iterator> when async trait methods are stable async fn get_partial_assets(&self, maximum: usize, skip: usize, minimum_topoheight: u64, maximum_topoheight: u64) -> Result, BlockchainError>; // Get chunked assets @@ -55,7 +55,7 @@ impl AssetProvider for SledStorage { self.load_from_disk(&self.assets, asset.as_bytes(), DiskContext::Asset) } - // we are forced to read from disk directly because cache may don't have all assets in memory + // We are forced to read from disk directly because cache may not have all assets in memory async fn get_assets(&self) -> Result, BlockchainError> { trace!("get assets"); @@ -71,7 +71,7 @@ impl AssetProvider for SledStorage { for el in self.assets.iter() { let (key, value) = el?; let data = AssetData::from_bytes(&value)?; - // check that we have a registered asset before the maximum topoheight + // Check that we have a registered asset before the maximum topoheight if data.get_topoheight() >= minimum_topoheight && data.get_topoheight() <= maximum_topoheight { if skip_count < skip { skip_count += 1; @@ -108,7 +108,7 @@ impl AssetProvider 
for SledStorage { }).collect() } - // count assets in storage + // Count assets in storage async fn count_assets(&self) -> Result { trace!("count assets"); Ok(self.assets_count.load(Ordering::SeqCst)) diff --git a/xelis_daemon/src/core/storage/providers/balance.rs b/xelis_daemon/src/core/storage/providers/balance.rs index d32000b0..fbaef79c 100644 --- a/xelis_daemon/src/core/storage/providers/balance.rs +++ b/xelis_daemon/src/core/storage/providers/balance.rs @@ -34,12 +34,13 @@ pub trait BalanceProvider: AssetProvider + NetworkProvider { // Get the last topoheight that the account has a balance async fn get_last_topoheight_for_balance(&self, key: &PublicKey, asset: &Hash) -> Result; - // Get a new versioned balance of the account, this is based on the requested topoheight - // And is returning the versioned balance at maximum topoheight - // Versioned balance as the previous topoheight set also based on which height it is set - // So, if we are at topoheight 50 and we have a balance at topoheight 40, the previous topoheight will be 40 - // But also if we have a balance at topoheight 50, the previous topoheight will also be 50 - // This must be called only to create a new versioned balance for the next topoheight as it's keeping changes from the balance at same topo + // Retrieve a versioned balance of the account based on the requested topoheight. + // This returns the versioned balance at the maximum topoheight. + // The versioned balance reflects the previous topoheight based on its set height. + // For example, if at topoheight 50 we have a balance at topoheight 40, + // the previous topoheight will be 40. If a balance exists at topoheight 50, + // the previous topoheight will also be 50. + // This must be called only to create a new versioned balance for the next topoheight as it's keeping changes from the balance at same topo.
async fn get_new_versioned_balance(&self, key: &PublicKey, asset: &Hash, topoheight: u64) -> Result; // Search the highest balance where we have a outgoing TX @@ -54,8 +55,8 @@ pub trait BalanceProvider: AssetProvider + NetworkProvider { // Set the last topoheight for this asset and key to the requested topoheight fn set_last_topoheight_for_balance(&mut self, key: &PublicKey, asset: &Hash, topoheight: u64) -> Result<(), BlockchainError>; - // Set the last balance of the account, update the last topoheight pointer for asset and key - // This is same as `set_last_topoheight_for_balance` but will also update the versioned balance + // Set the last balance of the account, update the last topoheight pointer for asset and key. + // This is same as `set_last_topoheight_for_balance` but will also update the versioned balance. async fn set_last_balance_to(&mut self, key: &PublicKey, asset: &Hash, topoheight: u64, version: &VersionedBalance) -> Result<(), BlockchainError>; // Set the balance at specific topoheight for asset and key @@ -64,18 +65,18 @@ pub trait BalanceProvider: AssetProvider + NetworkProvider { // Delete the balance at specific topoheight for asset and key async fn delete_balance_at_topoheight(&mut self, key: &PublicKey, asset: &Hash, topoheight: u64) -> Result; - // Delete the last topoheight for asset and key - // This will only remove the pointer, not the version itself + // Delete the last topoheight for asset and key. + // This will only remove the pointer, not the version itself. fn delete_last_topoheight_for_balance(&mut self, key: &PublicKey, asset: &Hash) -> Result<(), BlockchainError>; - // Get the account summary for a key and asset on the specified topoheight range - // If None is returned, that means there was no changes that occured in the specified topoheight range + // Get the account summary for a key and asset on the specified topoheight range. 
+ // If None is returned, that means there were no changes that occurred in the specified topoheight range. async fn get_account_summary_for(&self, key: &PublicKey, asset: &Hash, min_topoheight: u64, max_topoheight: u64) -> Result, BlockchainError>; } impl SledStorage { - // Generate a key including the key and its asset - // It is used to store/retrieve the highest topoheight version available + // Generate a key including the key and its asset. + // It is used to store/retrieve the highest topoheight version available. pub fn get_balance_key_for(&self, key: &PublicKey, asset: &Hash) -> [u8; 64] { trace!("get balance {} key for {}", asset, key.as_address(self.is_mainnet())); let mut bytes = [0; 64]; @@ -114,7 +115,7 @@ impl BalanceProvider for SledStorage { self.has_balance_internal(&self.get_balance_key_for(key, asset)).await } - // returns the highest topoheight where a balance changes happened + // Returns the highest topoheight where a balance change happened async fn get_last_topoheight_for_balance(&self, key: &PublicKey, asset: &Hash) -> Result { trace!("get last topoheight for balance {} for {}", asset, key.as_address(self.is_mainnet())); let key = self.get_balance_key_for(key, asset); @@ -125,7 +126,7 @@ impl BalanceProvider for SledStorage { self.get_cacheable_data(&self.balances, &None, &key, DiskContext::LastTopoHeightForBalance).await } - // set in storage the new top topoheight (the most up-to-date versioned balance) + // Set in storage the new top topoheight (the most up-to-date versioned balance) fn set_last_topoheight_for_balance(&mut self, key: &PublicKey, asset: &Hash, topoheight: u64) -> Result<(), BlockchainError> { trace!("set last topoheight to {} for balance {} for {}", topoheight, asset, key.as_address(self.is_mainnet())); let key = self.get_balance_key_for(key, asset); @@ -133,11 +134,11 @@ impl BalanceProvider for SledStorage { Ok(()) } - // get the balance at a specific topoheight - // if there is no balance change at this topoheight
just return an error + // Get the balance at a specific topoheight. + // If there is no balance change at this topoheight just return an error. async fn has_balance_at_exact_topoheight(&self, key: &PublicKey, asset: &Hash, topoheight: u64) -> Result { trace!("has balance {} for {} at exact topoheight {}", asset, key.as_address(self.is_mainnet()), topoheight); - // check first that this address has balance, if no returns + // Check first that this address has balance, if no returns if !self.has_balance_for(key, asset).await? { return Ok(false) } @@ -146,11 +147,11 @@ impl BalanceProvider for SledStorage { self.contains_data::<_, ()>(&self.versioned_balances, &None, &key).await } - // get the balance at a specific topoheight - // if there is no balance change at this topoheight just return an error + // Get the balance at a specific topoheight. + // If there is no balance change at this topoheight just return an error. async fn get_balance_at_exact_topoheight(&self, key: &PublicKey, asset: &Hash, topoheight: u64) -> Result { trace!("get balance {} for {} at exact topoheight {}", asset, key.as_address(self.is_mainnet()), topoheight); - // check first that this address has balance, if no returns + // Check first that this address has balance, if no returns if !self.has_balance_at_exact_topoheight(key, asset, topoheight).await? 
{ trace!("No balance {} found for {} at exact topoheight {}", asset, key.as_address(self.is_mainnet()), topoheight); return Err(BlockchainError::NoBalanceChanges(key.as_address(self.is_mainnet()), topoheight, asset.clone())) @@ -161,9 +162,9 @@ impl BalanceProvider for SledStorage { .map_err(|_| BlockchainError::NoBalanceChanges(key.as_address(self.is_mainnet()), topoheight, asset.clone())) } - // delete the last topoheight registered for this key - // it can happens when rewinding chain and we don't have any changes (no transaction in/out) for this key - // because all versioned balances got deleted + // Delete the last topoheight registered for this key. + // It can happen when rewinding chain and we don't have any changes (no transaction in/out) for this key + // because all versioned balances got deleted. fn delete_last_topoheight_for_balance(&mut self, key: &PublicKey, asset: &Hash) -> Result<(), BlockchainError> { trace!("delete last topoheight balance {} for {}", asset, key.as_address(self.is_mainnet())); let key = self.get_balance_key_for(key, asset); @@ -171,13 +172,13 @@ impl BalanceProvider for SledStorage { Ok(()) } - // get the latest balance at maximum specified topoheight - // when a DAG re-ordering happens, we need to select the right balance and not the last one - // returns None if the key has no balances for this asset - // Maximum topoheight is inclusive + // Get the latest balance at maximum specified topoheight. + // When a DAG re-ordering happens, we need to select the right balance and not the last one. + // Returns None if the key has no balances for this asset. + // Maximum topoheight is inclusive.
async fn get_balance_at_maximum_topoheight(&self, key: &PublicKey, asset: &Hash, topoheight: u64) -> Result, BlockchainError> { trace!("get balance {} for {} at maximum topoheight {}", asset, key.as_address(self.is_mainnet()), topoheight); - // check first that this address has balance for this asset, if no returns None + // Check first that this address has balance for this asset, if no returns None if !self.has_balance_for(key, asset).await? { trace!("No balance {} found for {} at maximum topoheight {}", asset, key.as_address(self.is_mainnet()), topoheight); return Ok(None) @@ -191,13 +192,13 @@ impl BalanceProvider for SledStorage { let (topo, mut version) = self.get_last_balance(key, asset).await?; trace!("Last version balance {} for {} is at topoheight {}", asset, key.as_address(self.is_mainnet()), topo); - // if it's the latest and its under the maximum topoheight + // If it's the latest and it's under the maximum topoheight if topo <= topoheight { trace!("Last version balance (valid) found at {} (maximum topoheight = {})", topo, topoheight); return Ok(Some((topo, version))) } - // otherwise, we have to go through the whole chain + // Otherwise, we have to go through the whole chain while let Some(previous) = version.get_previous_topoheight() { let previous_version = self.get_balance_at_exact_topoheight(key, asset, previous).await?; trace!("previous version {}", previous); @@ -226,9 +227,9 @@ impl BalanceProvider for SledStorage { None => return Ok(None) }; - // if we have an output balance, we can return it - // It is only marked as "usable" if its in the max topoheight range - // Otherwise we return None has we have no usable balance anymore for this range + // If we have an output balance, we can return it. + // It is only marked as "usable" if it's in the max topoheight range. + // Otherwise we return None as we have no usable balance anymore for this range.
if version.contains_output() { if topoheight <= max_topoheight { trace!("Output balance found at topoheight {}", topoheight); @@ -240,7 +241,7 @@ impl BalanceProvider for SledStorage { } } - // if we don't have an output balance, we need to search through the whole history + // If we don't have an output balance, we need to search through the whole history while let Some(previous) = version.get_previous_topoheight() { let previous_version = self.get_balance_at_exact_topoheight(key, asset, previous).await?; let is_in_range = previous <= max_topoheight; @@ -262,16 +263,16 @@ impl BalanceProvider for SledStorage { Ok(None) } - // delete versioned balances for this topoheight + // Delete versioned balances for this topoheight async fn delete_balance_at_topoheight(&mut self, key: &PublicKey, asset: &Hash, topoheight: u64) -> Result { trace!("delete balance {} for {} at topoheight {}", asset, key.as_address(self.is_mainnet()), topoheight); let disk_key = self.get_versioned_balance_key(key, asset, topoheight); self.delete_cacheable_data(&self.versioned_balances, &None, &disk_key).await.map_err(|_| BlockchainError::NoBalanceChanges(key.as_address(self.is_mainnet()), topoheight, asset.clone())) } - // returns a new versioned balance with already-set previous topoheight - // Topoheight is the new topoheight for the versioned balance, - // We create a new versioned balance by taking the previous version and setting it as previous topoheight + // Returns a new versioned balance with already-set previous topoheight. + // Topoheight is the new topoheight for the versioned balance. + // We create a new versioned balance by taking the previous version and setting it as previous topoheight. 
async fn get_new_versioned_balance(&self, key: &PublicKey, asset: &Hash, topoheight: u64) -> Result { trace!("get new versioned balance {} for {} at {}", asset, key.as_address(self.is_mainnet()), topoheight); @@ -282,7 +283,7 @@ impl BalanceProvider for SledStorage { version.prepare_new(Some(topo)); version }, - // if its the first balance, then we return a zero balance + // If its the first balance, then we return a zero balance None => VersionedBalance::zero() }; @@ -296,7 +297,7 @@ impl BalanceProvider for SledStorage { return Ok(Some((topo, version))) } - // TODO: maybe we can optimize this by storing the last output balance topoheight as pointer + // TODO: Maybe we can optimize this by storing the last output balance topoheight as pointer let mut previous = version.get_previous_topoheight(); while let Some(topo) = previous { let previous_version = self.get_balance_at_exact_topoheight(key, asset, topo).await?; @@ -311,7 +312,7 @@ impl BalanceProvider for SledStorage { Ok(None) } - // save a new versioned balance in storage and update the pointer + // Save a new versioned balance in storage and update the pointer async fn set_last_balance_to(&mut self, key: &PublicKey, asset: &Hash, topoheight: u64, version: &VersionedBalance) -> Result<(), BlockchainError> { trace!("set balance {} for {} to topoheight {}", asset, key.as_address(self.is_mainnet()), topoheight); self.set_balance_at_topoheight(asset, topoheight, key, &version).await?; @@ -319,7 +320,7 @@ impl BalanceProvider for SledStorage { Ok(()) } - // get the last version of balance and returns topoheight + // Get the last version of balance and returns topoheight async fn get_last_balance(&self, key: &PublicKey, asset: &Hash) -> Result<(u64, VersionedBalance), BlockchainError> { trace!("get last balance {} for {}", asset, key.as_address(self.is_mainnet())); if !self.has_balance_for(key, asset).await? 
{ @@ -347,7 +348,7 @@ impl BalanceProvider for SledStorage { Ok(balances) } - // save the asset balance at specific topoheight + // Save the asset balance at specific topoheight async fn set_balance_at_topoheight(&mut self, asset: &Hash, topoheight: u64, key: &PublicKey, balance: &VersionedBalance) -> Result<(), BlockchainError> { trace!("set balance {} at topoheight {} for {}", asset, topoheight, key.as_address(self.is_mainnet())); let key = self.get_versioned_balance_key(key, asset, topoheight); @@ -358,7 +359,7 @@ impl BalanceProvider for SledStorage { async fn get_account_summary_for(&self, key: &PublicKey, asset: &Hash, min_topoheight: u64, max_topoheight: u64) -> Result, BlockchainError> { trace!("get account summary {} for {} at maximum topoheight {}", asset, key.as_address(self.is_mainnet()), max_topoheight); - // first search if we have a valid balance at the maximum topoheight + // First search if we have a valid balance at the maximum topoheight if let Some((topo, version)) = self.get_balance_at_maximum_topoheight(key, asset, max_topoheight).await? { if topo < min_topoheight { trace!("No changes found for {} above min topoheight {}", key.as_address(self.is_mainnet()), min_topoheight); diff --git a/xelis_daemon/src/core/storage/providers/block.rs b/xelis_daemon/src/core/storage/providers/block.rs index a321d82e..a69cb4ec 100644 --- a/xelis_daemon/src/core/storage/providers/block.rs +++ b/xelis_daemon/src/core/storage/providers/block.rs @@ -66,7 +66,7 @@ impl BlockProvider for SledStorage { // Store transactions let mut txs_count = 0; - for (hash, tx) in block.get_transactions().iter().zip(txs) { // first save all txs, then save block + for (hash, tx) in block.get_transactions().iter().zip(txs) { // First save all txs, then save block if !self.has_transaction(hash).await? 
{ self.transactions.insert(hash.as_bytes(), tx.to_bytes())?; txs_count += 1; diff --git a/xelis_daemon/src/core/storage/providers/block_execution_order.rs b/xelis_daemon/src/core/storage/providers/block_execution_order.rs index 3ccb2855..8f1f9ee5 100644 --- a/xelis_daemon/src/core/storage/providers/block_execution_order.rs +++ b/xelis_daemon/src/core/storage/providers/block_execution_order.rs @@ -9,7 +9,7 @@ use crate::core::{ }; // This provider tracks the order in which blocks are added in the chain. -// This is independant of the DAG order and is used for debug purposes. +// This is independent of the DAG order and is used for debug purposes. #[async_trait] pub trait BlockExecutionOrderProvider { // Get the blocks execution order diff --git a/xelis_daemon/src/core/storage/providers/blocks_at_height.rs b/xelis_daemon/src/core/storage/providers/blocks_at_height.rs index 6bead7eb..49dea3a5 100644 --- a/xelis_daemon/src/core/storage/providers/blocks_at_height.rs +++ b/xelis_daemon/src/core/storage/providers/blocks_at_height.rs @@ -10,8 +10,8 @@ use crate::core::{ storage::SledStorage, }; -// This struct is used to store the blocks hashes at a specific height -// We use an IndexSet to store the hashes and maintains the order we processed them +// This struct is used to store the blocks hashes at a specific height. +// We use an IndexSet to store the hashes and maintains the order we processed them. 
struct OrderedHashes(IndexSet); #[async_trait] diff --git a/xelis_daemon/src/core/storage/providers/dag_order.rs b/xelis_daemon/src/core/storage/providers/dag_order.rs index 759bc5de..f8398783 100644 --- a/xelis_daemon/src/core/storage/providers/dag_order.rs +++ b/xelis_daemon/src/core/storage/providers/dag_order.rs @@ -25,7 +25,7 @@ impl DagOrderProvider for SledStorage { self.topo_by_hash.insert(hash.as_bytes(), topoheight.to_bytes())?; self.hash_at_topo.insert(topoheight.to_be_bytes(), hash.as_bytes())?; - // save in cache + // Save in cache if let Some(cache) = &self.topo_by_hash_cache { let mut topo = cache.lock().await; topo.put(hash.clone(), topoheight); diff --git a/xelis_daemon/src/core/storage/providers/difficulty.rs b/xelis_daemon/src/core/storage/providers/difficulty.rs index 98df3545..76874def 100644 --- a/xelis_daemon/src/core/storage/providers/difficulty.rs +++ b/xelis_daemon/src/core/storage/providers/difficulty.rs @@ -19,7 +19,7 @@ use crate::core::{ storage::SledStorage, }; -// this trait is useful for P2p to check itself the validty of a chain +// This trait is useful for P2p to check itself the validity of a chain #[async_trait] pub trait DifficultyProvider { // Get the block height using its hash @@ -52,7 +52,7 @@ pub trait DifficultyProvider { #[async_trait] impl DifficultyProvider for SledStorage { - // TODO optimize all these functions to read only what is necessary + // TODO: Optimize all these functions to read only what is necessary async fn get_height_for_block_hash(&self, hash: &Hash) -> Result { trace!("get height for block hash {}", hash); let block = self.get_block_header_by_hash(hash).await?; diff --git a/xelis_daemon/src/core/storage/providers/merkle.rs b/xelis_daemon/src/core/storage/providers/merkle.rs index f55138f0..32beb90d 100644 --- a/xelis_daemon/src/core/storage/providers/merkle.rs +++ b/xelis_daemon/src/core/storage/providers/merkle.rs @@ -3,11 +3,11 @@ use log::trace; use xelis_common::{crypto::Hash, 
serializer::Serializer}; use crate::core::{error::{BlockchainError, DiskContext}, storage::SledStorage}; -// Merkle Hash provider allow to give a Hash at a specific topoheight -// The merkle hash only contains account balances -// Because TXs and block rewards are applied on account balances -// Balances are the only thing that needs to be proven -// NOTE: We are based on the topoheight because of DAG reorgs as it's the main consensus +// Merkle Hash provider gives a hash at a specific topoheight. +// The Merkle hash includes only account balances. +// Transactions and block rewards affect account balances. +// Thus, balances are the primary data that needs to be proven. +// NOTE: We use topoheight due to DAG reorgs, as it is the main consensus metric. #[async_trait] pub trait MerkleHashProvider { // Get the merkle hash at a specific topoheight diff --git a/xelis_daemon/src/core/storage/providers/nonce.rs b/xelis_daemon/src/core/storage/providers/nonce.rs index 985ad430..80354d08 100644 --- a/xelis_daemon/src/core/storage/providers/nonce.rs +++ b/xelis_daemon/src/core/storage/providers/nonce.rs @@ -25,7 +25,7 @@ pub trait NonceProvider: BalanceProvider { // Check if the account has a nonce at a specific topoheight async fn has_nonce_at_exact_topoheight(&self, key: &PublicKey, topoheight: u64) -> Result; - // Get the last topoheigh that the account has a nonce + // Get the last topoheight that the account has a nonce async fn get_last_topoheight_for_nonce(&self, key: &PublicKey) -> Result; // Get the last nonce of the account, this is based on the last topoheight available @@ -37,22 +37,22 @@ pub trait NonceProvider: BalanceProvider { // Get the nonce under or equal topoheight requested for an account async fn get_nonce_at_maximum_topoheight(&self, key: &PublicKey, topoheight: u64) -> Result, BlockchainError>; - // Check if the account has a nonce updated in the range given - // It will also check balances if no nonce found + // Check if the account has a nonce 
updated in the range given. + // It will also check balances if no nonce found. async fn has_key_updated_in_range(&self, key: &PublicKey, minimum_topoheight: u64, maximum_topoheight: u64) -> Result; // Set the last topoheight that the account has a nonce changed async fn set_last_topoheight_for_nonce(&mut self, key: &PublicKey, topoheight: u64) -> Result<(), BlockchainError>; - // Delete the last topoheight that the account has a nonce - // This is only removing the pointer, not the version itself + // Delete the last topoheight that the account has a nonce. + // This is only removing the pointer, not the version itself. async fn delete_last_topoheight_for_nonce(&mut self, key: &PublicKey) -> Result<(), BlockchainError>; - // set the new nonce at exact topoheight for account - // This will do like `set_nonce_at_topoheight` but will also update the pointer + // Set the new nonce at the specified topoheight for the account. + // This updates the nonce similarly to `set_nonce_at_topoheight` but also adjusts the pointer. async fn set_last_nonce_to(&mut self, key: &PublicKey, topoheight: u64, nonce: &VersionedNonce) -> Result<(), BlockchainError>; - // set a new nonce at specific topoheight for account + // Set a new nonce at specific topoheight for account async fn set_nonce_at_topoheight(&mut self, key: &PublicKey, topoheight: u64, version: &VersionedNonce) -> Result<(), BlockchainError>; } @@ -131,23 +131,23 @@ impl NonceProvider for SledStorage { self.load_from_disk(&self.versioned_nonces, &key, DiskContext::NonceAtTopoHeight) } - // topoheight is inclusive bounds + // Topoheight is inclusive bounds async fn get_nonce_at_maximum_topoheight(&self, key: &PublicKey, topoheight: u64) -> Result, BlockchainError> { trace!("get nonce at maximum topoheight {} for {}", topoheight, key.as_address(self.is_mainnet())); - // check first that this address has nonce, if no returns None + // Check first that this address has nonce, if no returns None if !self.has_nonce(key).await? 
{ return Ok(None) } let (topo, mut version) = self.get_last_nonce(key).await?; trace!("Last version of nonce for {} is at topoheight {}", key.as_address(self.is_mainnet()), topo); - // if it's the latest and its under the maximum topoheight + // If it's the latest and its under the maximum topoheight if topo <= topoheight { trace!("Last version nonce (valid) found at {} (maximum topoheight = {})", topo, topoheight); return Ok(Some((topo, version))) } - // otherwise, we have to go through the whole chain + // Otherwise, we have to go through the whole chain while let Some(previous) = version.get_previous_topoheight() { let previous_version = self.get_nonce_at_exact_topoheight(key, previous).await?; trace!("previous nonce version is at {}", previous); @@ -170,24 +170,24 @@ impl NonceProvider for SledStorage { async fn has_key_updated_in_range(&self, key: &PublicKey, minimum_topoheight: u64, maximum_topoheight: u64) -> Result { trace!("has key {} updated in range min topoheight {} and max topoheight {}", key.as_address(self.is_mainnet()), minimum_topoheight, maximum_topoheight); - // check first that this address has nonce, if no returns None + // Check first that this address has nonce, if no returns None if !self.has_nonce(key).await? 
{ return Ok(false) } - // fast path check the latest nonce + // Fast path check the latest nonce let (topo, mut version) = self.get_last_nonce(key).await?; trace!("Last version of nonce for {} is at topoheight {}", key.as_address(self.is_mainnet()), topo); - // if it's the latest and its under the maximum topoheight and above minimum topoheight + // If it's the latest and its under the maximum topoheight and above minimum topoheight if topo >= minimum_topoheight && topo <= maximum_topoheight { trace!("Last version nonce (valid) found at {} (maximum topoheight = {})", topo, maximum_topoheight); return Ok(true) } - // otherwise, we have to go through the whole chain + // Otherwise, we have to go through the whole chain while let Some(previous) = version.get_previous_topoheight() { - // we are under the minimum topoheight, we can stop + // We are under the minimum topoheight, we can stop if previous < minimum_topoheight { break; } @@ -199,7 +199,7 @@ impl NonceProvider for SledStorage { return Ok(true) } - // security in case of DB corruption + // Security in case of DB corruption if let Some(value) = previous_version.get_previous_topoheight() { if value > previous { error!("FATAL ERROR: Previous topoheight ({}) should not be higher than current version ({})!", value, previous); @@ -209,11 +209,11 @@ impl NonceProvider for SledStorage { version = previous_version; } - // if we are here, we didn't find any nonce in the range - // it start to be more and more heavy... - // lets check on balances now... + // If we are here, we didn't find any nonce in the range + // it starts to be more and more heavy... + // Lets check on balances now... - // check that we have a VersionedBalance between range given + // Check that we have a VersionedBalance between range given for asset in self.get_assets_for(key).await? 
{ let (topo, mut version) = self.get_last_balance(key, &asset).await?; if topo >= minimum_topoheight && topo <= maximum_topoheight { @@ -221,7 +221,7 @@ impl NonceProvider for SledStorage { } while let Some(previous) = version.get_previous_topoheight() { - // we are under the minimum topoheight, we can stop + // We are under the minimum topoheight, we can stop if previous < minimum_topoheight { break; } @@ -231,7 +231,7 @@ impl NonceProvider for SledStorage { return Ok(true) } - // security in case of DB corruption + // Security in case of DB corruption if let Some(value) = previous_version.get_previous_topoheight() { if value > previous { error!("FATAL ERROR: Previous topoheight for balance ({}) should not be higher than current version of balance ({})!", value, previous); diff --git a/xelis_daemon/src/core/storage/providers/pruned_topoheight.rs b/xelis_daemon/src/core/storage/providers/pruned_topoheight.rs index 21c5585a..867c663d 100644 --- a/xelis_daemon/src/core/storage/providers/pruned_topoheight.rs +++ b/xelis_daemon/src/core/storage/providers/pruned_topoheight.rs @@ -7,10 +7,10 @@ use crate::core::{ // This trait is used for pruning #[async_trait] pub trait PrunedTopoheightProvider { - // get the pruned topoheight + // Get the pruned topoheight async fn get_pruned_topoheight(&self) -> Result, BlockchainError>; - // set the pruned topoheight on disk + // Set the pruned topoheight on disk async fn set_pruned_topoheight(&mut self, pruned_topoheight: u64) -> Result<(), BlockchainError>; } diff --git a/xelis_daemon/src/core/storage/sled.rs b/xelis_daemon/src/core/storage/sled.rs index 24e8eb3f..92006ba6 100644 --- a/xelis_daemon/src/core/storage/sled.rs +++ b/xelis_daemon/src/core/storage/sled.rs @@ -58,37 +58,37 @@ pub struct SledStorage { // Network used by the storage pub(super) network: Network, // All trees used to store data - // all txs stored on disk + // All TXs stored on disk pub(super) transactions: Tree, - // all txs executed in block + // All TXs 
executed in block pub(super) txs_executed: Tree, - // all blocks execution order + // All blocks execution order pub(super) blocks_execution_order: Tree, - // all blocks on disk + // All blocks on disk pub(super) blocks: Tree, - // all blocks height at specific height + // All blocks height at specific height pub(super) blocks_at_height: Tree, - // all extra data saved on disk + // All extra data saved on disk pub(super) extra: Tree, - // topo at hash on disk + // Topo at hash on disk pub(super) topo_by_hash: Tree, - // hash at topo height on disk + // Hash at topo height on disk pub(super) hash_at_topo: Tree, - // cumulative difficulty for each block hash on disk + // Cumulative difficulty for each block hash on disk pub(super) cumulative_difficulty: Tree, // Difficulty estimated covariance (P) pub(super) difficulty_covariance: Tree, - // keep tracks of all available assets on network + // Keep tracks of all available assets on network pub(super) assets: Tree, - // account nonces to prevent TX replay attack + // Account nonces to prevent TX replay attack pub(super) nonces: Tree, - // block reward for each block topoheight + // Block reward for each block topoheight pub(super) rewards: Tree, - // supply for each block topoheight + // Supply for each block topoheight pub(super) supply: Tree, - // difficulty for each block hash + // Difficulty for each block hash pub(super) difficulty: Tree, - // tree to store all blocks hashes where a tx was included in + // Tree to store all blocks hashes where a tx was included in pub(super) tx_blocks: Tree, // Tree that store all versioned nonces using hashed keys pub(super) versioned_nonces: Tree, @@ -102,10 +102,10 @@ pub struct SledStorage { pub(super) registrations: Tree, // Account registrations prefixed by their topoheight for easier deletion pub(super) registrations_prefixed: Tree, - // opened DB used for assets to create dynamic assets + // Opened DB used for assets to create dynamic assets db: sled::Db, - // all 
available caches + // All available caches // Transaction cache pub(super) transactions_cache: Option>>>, // Block header cache @@ -198,8 +198,8 @@ impl SledStorage { blocks_execution_count: AtomicU64::new(0) }; - // Verify that we are opening a DB on same network - // This prevent any corruption made by user + // Verify that we are opening a DB on same network. + // This prevent any corruption made by user. if storage.has_network()? { let storage_network = storage.load_from_disk::(&storage.extra, NETWORK, DiskContext::Network)?; if storage_network != network { @@ -464,7 +464,7 @@ impl Storage for SledStorage { async fn delete_block_at_topoheight(&mut self, topoheight: u64) -> Result<(Hash, Arc, Vec<(Hash, Arc)>), BlockchainError> { trace!("Delete block at topoheight {topoheight}"); - // delete topoheight<->hash pointers + // Delete topoheight<->hash pointers let hash = self.delete_cacheable_data(&self.hash_at_topo, &self.hash_at_topo_cache, &topoheight).await?; trace!("Deleting block execution order"); @@ -507,8 +507,9 @@ impl Storage for SledStorage { self.remove_tx_executed(&tx_hash)?; } - // We have to check first as we may have already deleted it because of client protocol - // which allow multiple time the same txs in differents blocks + // Check if the TX still exists. It may have been deleted due to the client protocol, + // which allows the same TX to be included in different blocks. If found, delete it + // and add it to the list for further handling. if self.contains_data(&self.transactions, &self.transactions_cache, tx_hash).await? { trace!("Deleting TX {} in block {}", tx_hash, hash); let tx: Arc = self.delete_data(&self.transactions, &self.transactions_cache, tx_hash).await?; @@ -516,7 +517,7 @@ impl Storage for SledStorage { } } - // remove the block hash from the set, and delete the set if empty + // Remove the block hash from the set, and delete the set if empty if self.has_blocks_at_height(block.get_height()).await? 
{ self.remove_block_hash_at_height(&hash, block.get_height()).await?; } @@ -551,8 +552,8 @@ impl Storage for SledStorage { if let Some(previous_topoheight) = versioned_balance.get_previous_topoheight() { self.balances.insert(&db_key, &previous_topoheight.to_be_bytes())?; } else { - // if there is no previous topoheight, it means that this is the first version - // so we can delete the balance + // If there is no previous topoheight, it means that this is the first version + // so we can delete the balance. self.balances.remove(&db_key)?; } } @@ -580,8 +581,8 @@ impl Storage for SledStorage { if let Some(previous_topoheight) = version.get_previous_topoheight() { self.set_last_topoheight_for_nonce(&key, previous_topoheight).await?; } else { - // if there is no previous topoheight, it means that this is the first version - // so we can delete the balance + // If there is no previous topoheight, it means that this is the first version + // so we can delete the nonce pointer. self.delete_last_topoheight_for_nonce(&key).await?; } } @@ -647,34 +648,34 @@ impl Storage for SledStorage { // The first versioned balance that is under the topoheight is bumped to topoheight async fn create_snapshot_balances_at_topoheight(&mut self, topoheight: u64) -> Result<(), BlockchainError> { - // asset tree where PublicKey are stored with the highest balance topoheight in it + // Asset tree where PublicKey are stored with the highest balance topoheight in it for el in self.balances.iter() { let (key_bytes, value) = el?; let key = PublicKey::from_bytes(&key_bytes[0..32])?; let asset = Hash::from_bytes(&key_bytes[32..64])?; let highest_balance_topoheight = u64::from_bytes(&value)?; - // retrieve the highest versioned balance + // Retrieve the highest versioned balance let mut versioned_balance = self.get_balance_at_exact_topoheight(&key, &asset, highest_balance_topoheight).await?; - // if the highest topoheight for this account is less than the snapshot topoheight - // update it to the topoheight
- // otherwise, delete the previous topoheight in VersionedBalance which is under topoheight + // If the highest topoheight for this account is less than the snapshot topoheight, + // update it to the topoheight. + // Otherwise, delete the previous topoheight in VersionedBalance which is under topoheight. if highest_balance_topoheight <= topoheight { - // save the new highest topoheight + // Save the new highest topoheight self.balances.insert(&key_bytes, &topoheight.to_be_bytes())?; - // remove the previous topoheight + // Remove the previous topoheight versioned_balance.set_previous_topoheight(None); - // save it + // Save it let key = self.get_versioned_balance_key(&key, &asset, topoheight); self.versioned_balances.insert(key, versioned_balance.to_bytes())?; } else { - // find the first VersionedBalance which is under topoheight + // Find the first VersionedBalance which is under topoheight let mut current_version_topoheight = highest_balance_topoheight; while let Some(previous_topoheight) = versioned_balance.get_previous_topoheight() { if previous_topoheight <= topoheight { - // update the current versioned balance that refer to the pruned versioned balance + // Update the current versioned balance that refer to the pruned versioned balance { versioned_balance.set_previous_topoheight(Some(topoheight)); let key = self.get_versioned_balance_key(&key, &asset, current_version_topoheight); @@ -691,7 +692,7 @@ impl Storage for SledStorage { break; } - // keep searching + // Keep searching versioned_balance = self.get_balance_at_exact_topoheight(&key, &asset, previous_topoheight).await?; current_version_topoheight = previous_topoheight; } @@ -703,34 +704,34 @@ impl Storage for SledStorage { // The first versioned balance that is under the topoheight is bumped to topoheight async fn create_snapshot_nonces_at_topoheight(&mut self, topoheight: u64) -> Result<(), BlockchainError> { - // tree where VersionedNonce are stored - // tree where PublicKey are stored with the 
highest noce topoheight in it + // Tree where VersionedNonce are stored. + // Tree where PublicKey are stored with the highest nonce topoheight in it. for el in self.nonces.iter() { let (key_bytes, value) = el?; let key = PublicKey::from_bytes(&key_bytes)?; let highest_topoheight = u64::from_bytes(&value)?; - // retrieve the highest versioned nonce + // Retrieve the highest versioned nonce let mut versioned_nonce = self.get_nonce_at_exact_topoheight(&key, highest_topoheight).await?; - // if the highest topoheight for this account is less than the snapshot topoheight - // update it to the topoheight - // otherwise, delete the previous topoheight in VersionedNonce which is under topoheight + // If the highest topoheight for this account is less than the snapshot topoheight, + // update it to the topoheight. + // Otherwise, delete the previous topoheight in VersionedNonce which is under topoheight. if highest_topoheight <= topoheight { - // save the new highest topoheight + // Save the new highest topoheight self.nonces.insert(&key_bytes, &topoheight.to_be_bytes())?; - // remove the previous topoheight + // Remove the previous topoheight versioned_nonce.set_previous_topoheight(None); - // save it + // Save it let key = self.get_versioned_nonce_key(&key, topoheight); self.versioned_nonces.insert(key, versioned_nonce.to_bytes())?; } else { - // find the first VersionedBalance which is under topoheight + // Find the first VersionedBalance which is under topoheight let mut current_version_topoheight = highest_topoheight; while let Some(previous_topoheight) = versioned_nonce.get_previous_topoheight() { if previous_topoheight <= topoheight { - // update the current versioned balance that refer to the pruned versioned balance + // Update the current versioned balance that refer to the pruned versioned balance { versioned_nonce.set_previous_topoheight(Some(topoheight)); let key = self.get_versioned_nonce_key(&key, current_version_topoheight); @@ -747,7 +748,7 @@ impl Storage 
for SledStorage { break; } - // keep searching + // Keep searching versioned_nonce = self.get_nonce_at_exact_topoheight(&key, previous_topoheight).await?; current_version_topoheight = previous_topoheight; } @@ -759,13 +760,13 @@ impl Storage for SledStorage { async fn create_snapshot_registrations_at_topoheight(&mut self, topoheight: u64) -> Result<(), BlockchainError> { trace!("create snapshot registrations at topoheight {}", topoheight); - // tree where PublicKey are stored with the registration topoheight in it + // Tree where PublicKey are stored with the registration topoheight in it let mut buf = [0u8; 40]; for el in self.registrations.iter() { let (key, value) = el?; let registration_topo = u64::from_bytes(&value)?; - // if the registration topoheight for this account is less than the snapshot topoheight + // If the registration topoheight for this account is less than the snapshot topoheight, // update it to the topoheight if registration_topo <= topoheight { // Delete the prefixed registration @@ -773,7 +774,7 @@ impl Storage for SledStorage { buf[8..40].copy_from_slice(&key); self.registrations_prefixed.remove(&buf)?; - // save the new registration topoheight + // Save the new registration topoheight self.registrations.insert(&key, &topoheight.to_be_bytes())?; // Overwrite with the new topoheight @@ -787,11 +788,11 @@ impl Storage for SledStorage { async fn pop_blocks(&mut self, mut height: u64, mut topoheight: u64, count: u64, stable_topo_height: u64) -> Result<(u64, u64, Vec<(Hash, Arc)>), BlockchainError> { trace!("pop blocks from height: {}, topoheight: {}, count: {}", height, topoheight, count); - if topoheight < count as u64 { // also prevent removing genesis block + if topoheight < count as u64 { // Also prevent removing genesis block return Err(BlockchainError::NotEnoughBlocks); } - // search the lowest topo height available based on count + 1 + // Search the lowest topo height available based on count + 1 // (last lowest topo height accepted) let 
mut lowest_topo = topoheight - count; trace!("Lowest topoheight for rewind: {}", lowest_topo); @@ -805,7 +806,7 @@ impl Storage for SledStorage { } } - // new TIPS for chain + // New TIPS for chain let mut tips = self.get_tips().await?; // Delete all orphaned blocks tips @@ -816,11 +817,11 @@ impl Storage for SledStorage { } } - // all txs to be rewinded + // All txs to be rewound let mut txs = Vec::new(); let mut done = 0; 'main: loop { - // stop rewinding if its genesis block or if we reached the lowest topo + // Stop rewinding if its genesis block or if we reached the lowest topo if topoheight <= lowest_topo || topoheight <= stable_topo_height || height == 0 { // prevent removing genesis block trace!("Done: {done}, count: {count}, height: {height}, topoheight: {topoheight}, lowest topo: {lowest_topo}, stable topo: {stable_topo_height}"); break 'main; @@ -831,7 +832,7 @@ impl Storage for SledStorage { trace!("Block {} at topoheight {} deleted", hash, topoheight); txs.extend(block_txs); - // generate new tips + // Generate new tips trace!("Removing {} from {} tips", hash, tips.len()); tips.remove(&hash); @@ -855,7 +856,7 @@ impl Storage for SledStorage { } topoheight -= 1; - // height of old block become new height + // Height of old block become new height if block.get_height() < height { height = block.get_height(); } @@ -869,7 +870,7 @@ impl Storage for SledStorage { // All deleted assets let mut deleted_assets = HashSet::new(); - // clean all assets + // Clean all assets for el in self.assets.iter() { let (key, value) = el.context("error on asset iterator")?; let asset = Hash::from_bytes(&key)?; @@ -881,7 +882,7 @@ impl Storage for SledStorage { // Delete it from registered assets self.assets.remove(&key).context(format!("Error while deleting asset {asset} from registered assets"))?; - // drop the tree for this asset + // Drop the tree for this asset self.db.drop_tree(key).context(format!("error on dropping asset {asset} tree"))?; deleted_assets.insert(asset); 
@@ -889,8 +890,8 @@ impl Storage for SledStorage { } trace!("Cleaning nonces"); - // now let's process nonces versions - // we set the new highest topoheight to the highest found under the new topoheight + // Now let's process nonces versions. + // We set the new highest topoheight to the highest found under the new topoheight. for el in self.nonces.iter() { let (key, value) = el?; let highest_topoheight = u64::from_bytes(&value)?; @@ -905,14 +906,14 @@ impl Storage for SledStorage { self.store_accounts_count(self.count_accounts().await? - 1)?; } - // find the first version which is under topoheight + // Find the first version which is under topoheight let pkey = PublicKey::from_bytes(&key)?; let mut version = self.get_nonce_at_exact_topoheight(&pkey, highest_topoheight).await .context(format!("Error while retrieving nonce at exact topoheight {highest_topoheight}"))?; while let Some(previous_topoheight) = version.get_previous_topoheight() { if previous_topoheight <= topoheight { - // we find the new highest version which is under new topoheight + // We find the new highest version which is under new topoheight trace!("New highest version nonce for {} is at topoheight {}", pkey.as_address(self.is_mainnet()), previous_topoheight); if self.nonces.insert(&key, &previous_topoheight.to_be_bytes())?.is_none() { self.store_accounts_count(self.count_accounts().await? 
+ 1)?; @@ -920,27 +921,27 @@ impl Storage for SledStorage { break; } - // keep searching + // Keep searching version = self.get_nonce_at_exact_topoheight(&pkey, previous_topoheight).await .context(format!("Error while searching nonce at exact topoheight"))?; } } else { - // nothing to do as its under the rewinded topoheight + // Nothing to do as its under the rewound topoheight } } trace!("Cleaning balances"); - // do balances too + // Do balances too for el in self.balances.iter() { let (key, value) = el?; let asset = Hash::from_bytes(&key[32..64])?; let mut delete = false; - // if the asset is not deleted, we can process it + // If the asset is not deleted, we can process it if !deleted_assets.contains(&asset) { let highest_topoheight = u64::from_bytes(&value)?; if highest_topoheight > topoheight && highest_topoheight >= pruned_topoheight { - // find the first version which is under topoheight + // Find the first version which is under topoheight let pkey = PublicKey::from_bytes(&key[0..32])?; trace!("Highest topoheight for balance {} is {}, above {}", pkey.as_address(self.is_mainnet()), highest_topoheight, topoheight); @@ -952,14 +953,14 @@ impl Storage for SledStorage { while let Some(previous_topoheight) = version.get_previous_topoheight() { if previous_topoheight <= topoheight { - // we find the new highest version which is under new topoheight + // We find the new highest version which is under new topoheight trace!("New highest version balance for {} is at topoheight {} with asset {}", pkey.as_address(self.is_mainnet()), previous_topoheight, asset); self.balances.insert(&key, &previous_topoheight.to_be_bytes())?; delete = false; break; } - // keep searching + // Keep searching version = self.get_balance_at_exact_topoheight(&pkey, &asset, previous_topoheight).await?; } } @@ -972,11 +973,11 @@ impl Storage for SledStorage { } } - warn!("Blocks rewinded: {}, new topoheight: {}, new height: {}", done, topoheight, height); + warn!("Blocks rewound: {}, new 
topoheight: {}, new height: {}", done, topoheight, height); trace!("Cleaning versioned balances and nonces"); - // now delete all versioned balances and nonces above the new topoheight + // Now delete all versioned balances and nonces above the new topoheight self.delete_versioned_balances_above_topoheight(topoheight).await?; self.delete_versioned_nonces_above_topoheight(topoheight).await?; // Delete also registrations @@ -987,7 +988,7 @@ impl Storage for SledStorage { self.clear_caches().await?; trace!("Storing new pointers"); - // store the new tips and topo topoheight + // Store the new tips and topo topoheight self.store_tips(&tips)?; self.set_top_topoheight(topoheight)?; self.set_top_height(height)?; diff --git a/xelis_daemon/src/core/tx_selector.rs b/xelis_daemon/src/core/tx_selector.rs index 83850634..5b5ccac4 100644 --- a/xelis_daemon/src/core/tx_selector.rs +++ b/xelis_daemon/src/core/tx_selector.rs @@ -16,7 +16,7 @@ use xelis_common::{ } }; -// this struct is used to store transaction with its hash and its size in bytes +// This struct is used to store transaction with its hash and its size in bytes pub struct TxSelectorEntry<'a> { // Hash of the transaction pub hash: &'a Arc, @@ -34,9 +34,9 @@ impl PartialEq for TxSelectorEntry<'_> { impl Eq for TxSelectorEntry<'_> {} -// this struct is used to store transactions in a queue -// and to order them by fees -// Each Transactions is for a specific sender +// This struct is used to store transactions in a queue +// and to order them by fees. +// Each Transactions is for a specific sender. #[derive(PartialEq, Eq)] struct Transactions<'a>(VecDeque>); @@ -52,9 +52,9 @@ impl Ord for Transactions<'_> { } } -// TX selector is used to select transactions from the mempool -// It create sub groups of transactions by sender and order them by nonces -// It joins all sub groups in a queue that is ordered by fees +// TX selector is used to select transactions from the mempool. 
+// It creates sub groups of transactions by sender and order them by nonces. +// It joins all sub groups in a queue that is ordered by fees. pub struct TxSelector<'a> { queue: BinaryHeap> } @@ -67,7 +67,7 @@ impl<'a> TxSelector<'a> { { let mut queue = BinaryHeap::new(); - // push every group to the queue + // Push every group to the queue for group in groups { queue.push(Transactions(VecDeque::from(group))); } @@ -112,12 +112,12 @@ // Get the next transaction with the highest fee pub fn next(&mut self) -> Option> { - // get the group with the highest fee + // Get the group with the highest fee let mut group = self.queue.pop()?; - // get the entry with the highest fee from this group + // Get the entry with the highest fee from this group let entry = group.0.pop_front()?; - // if its not empty, push it back to the queue + // If it's not empty, push it back to the queue if !group.0.is_empty() { self.queue.push(group); } diff --git a/xelis_daemon/src/main.rs b/xelis_daemon/src/main.rs index 0b609f72..1037ab92 100644 --- a/xelis_daemon/src/main.rs +++ b/xelis_daemon/src/main.rs @@ -100,15 +100,15 @@ pub struct NodeConfig { /// Disable the log file #[clap(long)] disable_file_logging: bool, - /// Disable the log filename date based + /// Disable the log filename date based. /// If disabled, the log file will be named xelis-daemon.log instead of YYYY-MM-DD.xelis-daemon.log #[clap(long)] disable_file_log_date_based: bool, /// Disable the usage of colors in log #[clap(long)] disable_log_color: bool, - /// Disable terminal interactive mode - /// You will not be able to write CLI commands in it or to have an updated prompt + /// Disable terminal interactive mode. + /// You will not be able to write CLI commands in it or to have an updated prompt.
#[clap(long)] disable_interactive_mode: bool, /// Log filename @@ -208,7 +208,7 @@ async fn run_prompt(prompt: ShareablePrompt, blockchain: Arc))))?; command_manager.add_command(Command::new("swap_blocks_executions_positions", "Swap the position of two blocks executions", CommandHandler::Async(async_handler!(swap_blocks_executions_positions::))))?; - // Don't keep the lock for ever + // Don't keep the lock forever let (p2p, getwork) = { let p2p: Option>> = match blockchain.get_p2p().read().await.as_ref() { Some(p2p) => Some(p2p.clone()), @@ -364,8 +364,8 @@ async fn verify_chain(manager: &CommandManager, mut args: ArgumentMa } block_reward } else { - // We are too near from the pruned topoheight, as we don't know previous blocks we can't verify if block was side block or not for rewards - // Let's trust its stored reward + // We are too near from the pruned topoheight, as we don't know previous blocks we can't verify if block was side block or not for rewards. + // Let's trust its stored reward. storage.get_block_reward_at_topo_height(topo).context("Error while retrieving block reward for pruned topo")? 
}; @@ -520,7 +520,7 @@ async fn list_assets(manager: &CommandManager, _: ArgumentManager) - async fn show_balance(manager: &CommandManager, mut arguments: ArgumentManager) -> Result<(), CommandError> { let prompt = manager.get_prompt(); - // read address + // Read address let str_address = prompt.read_input( prompt.colorize_str(Color::Green, "Address: "), false @@ -605,7 +605,7 @@ async fn pop_blocks(manager: &CommandManager, mut arguments: Argumen info!("Trying to pop {} blocks from chain...", amount); let topoheight = blockchain.rewind_chain(amount, false).await.context("Error while rewinding chain")?; - info!("Chain as been rewinded until topoheight {}", topoheight); + info!("Chain has been rewound until topoheight {}", topoheight); Ok(()) } @@ -621,7 +621,7 @@ async fn clear_mempool(manager: &CommandManager, _: ArgumentManager) Ok(()) } -// add manually a TX in mempool +// Add manually a TX in mempool async fn add_tx(manager: &CommandManager, mut arguments: ArgumentManager) -> Result<(), CommandError> { let hex = arguments.get_value("hex")?.to_string_value()?; let broadcast = if arguments.has_argument("broadcast") { @@ -898,7 +898,7 @@ async fn mine_block(manager: &CommandManager, mut arguments: Argumen let context = manager.get_context().lock()?; let blockchain: &Arc> = context.get()?; - // Prevent trying to mine a block on mainnet through this as it will keep busy the node for nothing + // Prevent attempting to mine a block on mainnet, as it will unnecessarily occupy the node if *blockchain.get_network() == Network::Mainnet { manager.error("This command is not allowed on mainnet"); return Ok(()) } diff --git a/xelis_daemon/src/p2p/chain_validator.rs b/xelis_daemon/src/p2p/chain_validator.rs index d4f02016..124fe694 100644 --- a/xelis_daemon/src/p2p/chain_validator.rs +++ b/xelis_daemon/src/p2p/chain_validator.rs @@ -37,18 +37,18 @@ struct BlockData { p: VarUint } -// Chain validator is used to validate the blocks received from the network -// We store the blocks
in topological order and we verify the proof of work validity -// This is doing only minimal checks and valid chain order based on topoheight and difficulty +// Chain validator is used to validate the blocks received from the network. +// We store the blocks in topological order and we verify the proof of work validity. +// This involves minimal checks, ensuring valid chain order based on topoheight and difficulty. pub struct ChainValidator<'a, S: Storage> { - // store all blocks data in topological order + // Store all blocks data in topological order blocks: IndexMap, - // store all blocks hashes at a specific height + // Store all blocks hashes at a specific height blocks_at_height: IndexMap>, // Blockchain reference used to verify current chain state blockchain: &'a Blockchain, - // This is used to compute the expected topoheight of each new block - // It must be 1 topoheight above the common point + // This is used to compute the expected topoheight of each new block. + // It must be 1 topoheight above the common point. starting_topoheight: u64, } @@ -63,8 +63,8 @@ impl<'a, S: Storage> ChainValidator<'a, S> { } } - // Check if the chain validator has a higher cumulative difficulty than our blockchain - // This is used to determine if we should switch to the new chain by popping blocks or not + // Check if the chain validator has a higher cumulative difficulty than our blockchain. + // This is used to determine if we should switch to the new chain by popping blocks or not. pub async fn has_higher_cumulative_difficulty(&self) -> Result { let new_cumulative_difficulty = self.get_chain_cumulative_difficulty().ok_or(BlockchainError::NotEnoughBlocks)?; @@ -78,15 +78,15 @@ impl<'a, S: Storage> ChainValidator<'a, S> { Ok(*new_cumulative_difficulty > current_cumulative_difficulty) } - // Retrieve the cumulative difficulty of the chain validator - // It is the cumulative difficulty of the last block added + // Retrieve the cumulative difficulty of the chain validator. 
+ // It is the cumulative difficulty of the last block added. pub fn get_chain_cumulative_difficulty(&self) -> Option<&CumulativeDifficulty> { let (_, data) = self.blocks.last()?; data.cumulative_difficulty.as_ref() } - // validate the basic chain structure - // We expect that the block added is the next block ordered by topoheight + // Validate the basic chain structure. + // We expect that the block added is the next block ordered by topoheight. pub async fn insert_block(&mut self, hash: Hash, header: BlockHeader) -> Result<(), BlockchainError> { trace!("Inserting block {} into chain validator", hash); @@ -117,13 +117,13 @@ impl<'a, S: Storage> ChainValidator<'a, S> { let tips = header.get_tips(); let tips_count = tips.len(); - // verify tips count + // Verify tips count if tips_count == 0 || tips_count > TIPS_LIMIT { debug!("Block {} contains {} tips while only {} is accepted", hash, tips_count, TIPS_LIMIT); return Err(BlockchainError::InvalidTipsCount(hash, tips_count)) } - // verify that we have already all its tips + // Verify that we have already all its tips { for tip in tips { trace!("Checking tip {} for block {}", tip, hash); @@ -154,8 +154,8 @@ impl<'a, S: Storage> ChainValidator<'a, S> { trace!("Common base: {} at height {} and hash {}", base, base_height, hash); - // Store the block in both maps - // One is for blocks at height and the other is for the block data + // Store the block in both maps. + // One is for blocks at height and the other is for the block data. 
self.blocks_at_height.entry(header.get_height()).or_insert_with(IndexSet::new).insert(hash.clone()); self.blocks.insert(hash.clone(), BlockData { header: Arc::new(header), difficulty, cumulative_difficulty: None, p }); diff --git a/xelis_daemon/src/p2p/connection.rs b/xelis_daemon/src/p2p/connection.rs index 987824df..b4f02eec 100644 --- a/xelis_daemon/src/p2p/connection.rs +++ b/xelis_daemon/src/p2p/connection.rs @@ -39,10 +39,10 @@ type P2pResult = Result; #[derive(Debug, PartialEq, Eq, Clone, Copy)] pub enum State { - Pending, // connection is new, no handshake received - KeyExchange, // start exchanging keys - Handshake, // handshake received, not checked - Success // connection is ready + Pending, // Connection is new, no handshake received + KeyExchange, // Start exchanging keys + Handshake, // Handshake received, not checked + Success // Connection is ready } pub struct Connection { @@ -50,21 +50,21 @@ pub struct Connection { out: bool, // State of the connection state: State, - // write to stream + // Write to stream write: Mutex, - // read from stream + // Read from stream read: Mutex, // TCP Address addr: SocketAddr, - // total bytes read + // Total bytes read bytes_in: AtomicUsize, - // total bytes sent + // Total bytes sent bytes_out: AtomicUsize, - // total bytes sent using current key + // Total bytes sent using current key bytes_out_key: AtomicUsize, - // when the connection was established + // When the connection was established connected_on: TimestampSeconds, - // if Connection#close() is called, close is set to true + // If Connection#close() is called, close is set to true closed: AtomicBool, // How many key rotation we got rotate_key_in: AtomicUsize, @@ -97,17 +97,15 @@ impl Connection { } } - // Do a key exchange with the peer - // If we are the client, we send our key first in plaintext - // We wait for the peer to send its key - // If we are the server, we send back our key - // NOTE: This doesn't prevent any MITM at this point - // Because 
a MITM could intercept the key and send its own key to the peer - // and play the role as a proxy. - // Afaik, there is no way to have a decentralized way to prevent MITM without trusting a third party - // (That's what TLS/SSL does with the CA, but it's not decentralized and it's not trustless) - // A potential idea would be to hardcode seed nodes keys, - // and each nodes share the key of other along the socket address + // Perform a key exchange with the peer. + // If we're the client, we send our key in plaintext first. + // We then wait for the peer to send its key. + // If we're the server, we respond with our key. + // NOTE: This does not prevent MITM attacks. + // A MITM could intercept the key and send its own key to the peer, acting as a proxy. + // Currently, there is no decentralized method to prevent MITM without a third party. + // TLS/SSL uses CA certificates for this purpose, but it's not decentralized or trustless. + // One idea is to hardcode seed node keys and have nodes share keys with each other along with their socket addresses. pub async fn exchange_keys(&mut self, buffer: &mut [u8]) -> P2pResult<()> { trace!("Exchanging keys with {}", self.addr); @@ -182,11 +180,11 @@ impl Connection { Ok(packet) } - // Rotate the peer symetric key - // We update our state - // Because we use TCP and packets are read/executed in sequential order, - // We don't need to send a ACK to the peer to confirm the key rotation - // as all next packets will be encrypted with the new key and we have updated it before + // Rotate the peer's symmetric key. + // Update our state accordingly. + // Since we're using TCP and packets are processed in sequential order, + // there's no need to send an ACK to the peer for the key rotation. + // All subsequent packets will be encrypted with the new key, which we've updated beforehand. 
pub async fn rotate_peer_key(&self, key: EncryptionKey) -> P2pResult<()> { trace!("Rotating encryption key of peer {}", self.get_address()); self.encryption.rotate_key(key, false).await?; @@ -195,9 +193,9 @@ impl Connection { Ok(()) } - // This function will send the packet to the peer without flushing the stream - // Packet length is ALWAYS sent in raw (not encrypted) - // Otherwise, we can't know how much bytes to read for each ciphertext/packet + // This function will send the packet to the peer without flushing the stream. + // Packet length is ALWAYS sent in raw (not encrypted). + // Otherwise, we can't know how much bytes to read for each ciphertext/packet. async fn send_packet_bytes_internal(&self, stream: &mut OwnedWriteHalf, packet: &[u8]) -> P2pResult<()> { let packet_len = packet.len() as u32; stream.write_all(&packet_len.to_be_bytes()).await?; @@ -224,8 +222,8 @@ impl Connection { } } - // Send bytes to the peer - // Encrypt must be used all time starting handshake + // Send bytes to the peer. + // Encryption must be used at all times starting from the handshake. async fn send_bytes_internal(&self, packet: &[u8]) -> P2pResult<()> { trace!("Sending {} bytes to {}", packet.len(), self.get_address()); let mut stream = self.write.lock().await; @@ -286,15 +284,15 @@ impl Connection { Ok(packet) } - // Read a packet and deserialize it - // This will read the packet size and then read the packet bytes + // Read a packet and deserialize it. + // This will read the packet size and then read the packet bytes. pub async fn read_packet(&self, buf: &mut [u8], max_size: u32) -> P2pResult> { let bytes = self.read_packet_bytes(buf, max_size).await?; self.read_packet_from_bytes(&bytes).await } - // Read the packet size, this is always sent in raw (not encrypted) - // And packet size must be a u32 in big endian + // Read the packet size, this is always sent in raw (not encrypted). + // Packet size must be a u32 in big endian. 
async fn read_packet_size(&self, stream: &mut OwnedReadHalf, buf: &mut [u8], max_usize: u32) -> P2pResult { let read = self.read_bytes_from_stream(stream, &mut buf[0..4]).await?; if read != 4 { @@ -317,8 +315,8 @@ impl Connection { Ok(size) } - // Read all bytes until the the buffer is full with the requested size - // This support fragmented packets and encryption + // Read all bytes until the the buffer is full with the requested size. + // This support fragmented packets and encryption. async fn read_all_bytes(&self, stream: &mut OwnedReadHalf, buf: &mut [u8], mut left: u32) -> P2pResult> { let buf_size = buf.len() as u32; let mut bytes = Vec::new(); @@ -342,9 +340,9 @@ impl Connection { } } - // this function will wait until something is sent to the socket if it's in blocking mode - // this return the size of data read & set in the buffer. - // used to only lock one time the stream and read on it + // This function waits until data is sent to the socket if it's in blocking mode. + // It returns the size of the data read and set in the buffer. + // This ensures the stream is locked only once and data is read efficiently. async fn read_bytes_from_stream_internal(&self, stream: &mut OwnedReadHalf, buf: &mut [u8]) -> P2pResult { let mut read = 0; let buf_len = buf.len(); @@ -363,10 +361,10 @@ impl Connection { Ok(read) } - // this function will wait until something is sent to the socket if it's in blocking mode - // this return the size of data read & set in the buffer. - // used to only lock one time the stream and read on it - // on any error, it will considered as disconnected + // This function waits until something is sent to the socket if it's in blocking mode. + // It returns the size of the data read and set in the buffer. + // This ensures the stream is locked only once and data is read efficiently. + // Any error encountered will be treated as a disconnection. 
async fn read_bytes_from_stream(&self, stream: &mut OwnedReadHalf, buf: &mut [u8]) -> P2pResult { match self.read_bytes_from_stream_internal(stream, buf).await { Ok(read) => Ok(read), @@ -378,8 +376,8 @@ impl Connection { } } - // Close internal close directly the stream - // This must be called only from the write connection task + // Close internal close directly the stream. + // This must be called only from the write connection task. pub async fn close(&self) -> P2pResult<()> { trace!("Closing internal connection with {}", self.addr); if self.closed.swap(true, Ordering::SeqCst) { @@ -387,7 +385,7 @@ impl Connection { return Ok(()); } - // sometimes the peer is not removed on other peer side + // Occasionally, the peer may not be removed on the other peer's side. let mut stream = self.write.lock().await; timeout(Duration::from_secs(PEER_TIMEOUT_DISCONNECT), stream.shutdown()).await??; diff --git a/xelis_daemon/src/p2p/encryption.rs b/xelis_daemon/src/p2p/encryption.rs index 7f048e3a..506dd7b3 100644 --- a/xelis_daemon/src/p2p/encryption.rs +++ b/xelis_daemon/src/p2p/encryption.rs @@ -5,17 +5,17 @@ use rand::rngs::OsRng; use thiserror::Error; use tokio::sync::Mutex; -// This symetric key is used to encrypt/decrypt the data +// This symmetric key is used to encrypt/decrypt the data pub type EncryptionKey = [u8; 32]; -// Each peer has its own key and can rotate as he want -// The nonce is incremented by one on each encrypt/decrypt -// This allows us to not send the generated nonce and reduce bandwidth usage -// Using a 64 bits nonce is enough for our use case -// We use the first 8 bytes to store the nonce and the last 4 bytes are set to 0 -// Also, we rotate the keys every 1 GB of data to avoid any potential attack -// We would reach 1 GB much before the nonce overflow -// This is a simple implementation and we can improve it later +// Each peer has its own key and can rotate it as needed. +// The nonce increments by one with each encryption/decryption. 
+// This reduces bandwidth usage by eliminating the need to send the nonce. +// A 64-bit nonce is sufficient for our use case. +// The first 8 bytes store the nonce, and the last 4 bytes are set to 0. +// Keys are rotated every 1 GB of data to mitigate potential attacks. +// We will reach 1 GB well before the nonce overflows. +// This is a basic implementation and can be improved in the future. struct CipherState { cipher: ChaCha20Poly1305, @@ -80,12 +80,12 @@ impl Encryption { ChaCha20Poly1305::generate_key(&mut OsRng).into() } - // Encrypt a packet using the shared symetric key + // Encrypt a packet using the shared symmetric key pub async fn encrypt_packet(&self, input: &[u8]) -> Result, EncryptionError> { let mut lock = self.our_cipher.lock().await; let cipher_state = lock.as_mut().ok_or(EncryptionError::WriteNotReady)?; - // fill our buffer + // Fill our buffer cipher_state.nonce_buffer[0..8].copy_from_slice(&cipher_state.nonce.to_be_bytes()); // Encrypt the packet @@ -98,12 +98,12 @@ impl Encryption { Ok(res) } - // Decrypt a packet using the shared symetric key + // Decrypt a packet using the shared symmetric key pub async fn decrypt_packet(&self, buf: &[u8]) -> Result, EncryptionError> { let mut lock = self.peer_cipher.lock().await; let cipher_state = lock.as_mut().ok_or(EncryptionError::WriteNotReady)?; - // fill our buffer + // Fill our buffer cipher_state.nonce_buffer[0..8].copy_from_slice(&cipher_state.nonce.to_be_bytes()); // Decrypt packet diff --git a/xelis_daemon/src/p2p/error.rs b/xelis_daemon/src/p2p/error.rs index abfebfb9..efb86870 100644 --- a/xelis_daemon/src/p2p/error.rs +++ b/xelis_daemon/src/p2p/error.rs @@ -164,7 +164,7 @@ pub enum P2pError { #[error("Peer sent us a peerlist faster than protocol rules, expected to wait {} seconds more", _0)] PeerInvalidPeerListCountdown(u64), #[error("Peer sent us a ping packet faster than protocol rules")] - PeerInvalidPingCoutdown, + PeerInvalidPingCountdown, #[error(transparent)] BlockchainError(#[from] 
Box), #[error("Invalid content in peerlist shared")] diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 9823a87c..58d06efe 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -118,53 +118,54 @@ use std::{ use bytes::Bytes; use rand::{seq::IteratorRandom, Rng}; -// P2pServer is a fully async TCP server +// P2pServer is a fully async TCP server. // Each connection will block on a data to send or to receive -// useful for low end hardware +// making it suitable for low-end hardware. pub struct P2pServer { - // unique peer id + // Unique peer id peer_id: u64, - // node tag sent on handshake + // Node tag sent on handshake tag: Option, - // max peers accepted by this server + // Max peers accepted by this server max_peers: usize, // ip:port address to receive connections bind_address: SocketAddr, - // all peers accepted + // All peers accepted peer_list: SharedPeerList, - // reference to the chain to add blocks/txs + // Reference to the chain to add blocks/txs blockchain: Arc>, - // this sender allows to create a queue system in one task only + // This sender allows to create a queue system in one task only connections_sender: Sender<(SocketAddr, bool)>, - // used to requests objects to peers and avoid requesting the same object to multiple peers + // Used to requests objects to peers and avoid requesting the same object to multiple peers object_tracker: SharedObjectTracker, - // used to check if the server is running or not in tasks + // Used to check if the server is running or not in tasks is_running: AtomicBool, // Synced cache to prevent concurrent tasks adding the block blocks_propagation_queue: Mutex>, // Sender for the blocks processing task to have a ordered queue blocks_processor: Sender<(Arc, BlockHeader, Hash)>, - // allow fast syncing (only balances / assets / Smart Contracts changes) - // without syncing the history + // Allow fast syncing (only balances / assets / Smart Contracts changes) + // without 
syncing the history. allow_fast_sync_mode: bool, // This can be used safely from a trusted node - // to boost the sync speed by allowing to request several blocks at same time + // to boost the sync speed by allowing the request of several blocks at same time. allow_boost_sync_mode: bool, - // max size of the chain response - // this is a configurable paramater for nodes to manage their resources - // Can be reduced for low devices, and increased for high end devices - // You may sync faster or slower depending on this value + // Max size of the chain response. + // This is a configurable parameter for nodes to manage their resources. + // Can be reduced for low devices, and increased for high end devices. + // You may sync faster or slower depending on this value. max_chain_response_size: usize, - // Configured exclusive nodes - // If not empty, no other peer than those listed can connect to this node + // Configured exclusive nodes. + // If not empty, only the listed peers can connect + // to this node; no other peer will be allowed. exclusive_nodes: IndexSet, - // Are we allowing others nodes to share us as a potential peer ? - // Also if we allows to be listed in get_peers RPC API + // Are we allowing other nodes to share us as a potential peer? + // This also controls if we can be listed in the get_peers RPC API. sharable: bool, - // Do we try to connect to others nodes - // If this is enabled, only way to have peers is to let them connect to us + // Do we try to connect to others nodes? + // If this is enabled, only way to have peers is to let them connect to us. outgoing_connections_disabled: AtomicBool, - // Are we syncing the chain with another peer + // Are we syncing the chain with another peer? 
is_syncing: AtomicBool, // Exit channel to notify all tasks to stop exit_sender: broadcast::Sender<()> @@ -184,11 +185,11 @@ impl P2pServer { return Err(P2pError::InvalidMaxPeers); } - // set channel to communicate with listener thread + // Set channel to communicate with listener thread let mut rng = rand::thread_rng(); let peer_id: u64 = rng.gen(); // generate a random peer id for network let addr: SocketAddr = bind_address.parse()?; // parse the bind address - // create mspc channel for connections to peers + // Create mspc channel for connections to peers let (connections_sender, connections_receiver) = mpsc::channel(max_peers); let (blocks_processor, blocks_processor_receiver) = mpsc::channel(TIPS_LIMIT * STABLE_LIMIT as usize); @@ -262,7 +263,7 @@ impl P2pServer { self.outgoing_connections_disabled.store(disable, Ordering::Release); } - // every 10 seconds, verify and connect if necessary to a random node + // Every 10 seconds, verify and connect if necessary to a random node async fn maintains_connection_to_nodes(self: &Arc, nodes: IndexSet, sender: Sender) -> Result<(), P2pError> { debug!("Starting maintains seed nodes task..."); let mut interval = interval(Duration::from_secs(P2P_AUTO_CONNECT_PRIORITY_NODES_DELAY)); @@ -281,7 +282,7 @@ impl P2pServer { } let connect = if self.peer_list.size().await >= self.max_peers { - // if we have already reached the limit, we ignore this new connection + // If we have already reached the limit, we ignore this new connection None } else { let mut potential_nodes = Vec::new(); @@ -306,8 +307,8 @@ impl P2pServer { Ok(()) } - // connect to seed nodes, start p2p server - // and wait on all new connections + // Connect to seed nodes, start p2p server, + // and wait on all new connections. 
async fn start(self: &Arc, receiver: Receiver<(SocketAddr, bool)>, blocks_processor_receiver: Receiver<(Arc, BlockHeader, Hash)>, event_receiver: Receiver>, use_peerlist: bool, concurrency: usize) -> Result<(), P2pError> { let listener = TcpListener::bind(self.get_bind_address()).await?; info!("P2p Server will listen on: {}", self.get_bind_address()); @@ -321,29 +322,29 @@ impl P2pServer { } let (priority_sender, priority_connections) = mpsc::channel(1); - // create tokio task to maintains connection to exclusive nodes or seed nodes + // Create tokio task to maintain connections to exclusive nodes or seed nodes let zelf = Arc::clone(self); spawn_task("p2p-maintain-nodes", async move { info!("Connecting to seed nodes..."); if let Err(e) = zelf.maintains_connection_to_nodes(exclusive_nodes, priority_sender).await { - error!("Error while maintening connection with seed nodes: {}", e); + error!("Error while maintaining connection with seed nodes: {}", e); }; }); - // start a new task for chain sync + // Start a new task for chain sync spawn_task("p2p-chain-sync", Arc::clone(&self).chain_sync_loop()); - // start another task for ping loop + // Start another task for ping loop spawn_task("p2p-ping", Arc::clone(&self).ping_loop()); - // start the blocks processing task to have a queued handler + // Start the blocks processing task to have a queued handler spawn_task("p2p-blocks", Arc::clone(&self).blocks_processing_task(blocks_processor_receiver)); - // start the event loop task to handle peer disconnect events + // Start the event loop task to handle peer disconnect events spawn_task("p2p-events", Arc::clone(&self).event_loop(event_receiver)); - // start another task for peerlist loop + // Start another task for peerlist loop if use_peerlist { spawn_task("p2p-peerlist", Arc::clone(&self).peerlist_loop()); }
tx: Sender<(Peer, Rx)>) { - // only allocate one time the buffer for this packet + // Only allocate one time the buffer for this packet let mut handshake_buffer = [0; 512]; let mut exit_receiver = self.exit_sender.subscribe(); loop { @@ -435,7 +436,7 @@ impl P2pServer { trace!("Trying to connect to {}", addr); if !priority { trace!("checking if connection can be accepted"); - // check that this incoming peer isn't blacklisted + // Check that this incoming peer isn't blacklisted if !self.accept_new_connections().await || !self.peer_list.is_allowed(&addr.ip()).await { debug!("{} is not allowed, we can't connect to it", addr); continue; @@ -480,15 +481,15 @@ impl P2pServer { debug!("handle outgoing connections task has exited"); } - // This task will handle an incoming connection request - // It will verify if we can accept this connection - // If we can, we will create a new peer and send it to the listener + // This task will handle an incoming connection request. + // It will verify if we can accept this connection. + // If we can, we will create a new peer and send it to the listener. async fn handle_incoming_connection(self: &Arc, res: io::Result<(TcpStream, SocketAddr)>, thread_pool: &ThreadPool, tx: &Sender<(Peer, Rx)>) -> Result<(), P2pError> { let (mut stream, addr) = res?; // Verify if we can accept new connections let reject = !self.is_compatible_with_exclusive_nodes(&addr) - // check that this incoming peer isn't blacklisted + // Check that this incoming peer isn't blacklisted || !self.accept_new_connections().await || !self.peer_list.is_allowed(&addr.ip()).await || self.is_connected_to_addr(&addr).await; @@ -521,9 +522,9 @@ impl P2pServer { Ok(()) } - // This task will handle all incoming connections requests - // Based on the concurrency set, it will create a thread pool to handle requests and wait when - // a worker is free to accept a new connection + // This task will handle all incoming connections requests. 
+ // Based on the concurrency set, it will create a thread pool to handle requests and wait until + // a worker is free to accept a new connection. async fn handle_incoming_connections(self: Arc, listener: TcpListener, tx: Sender<(Peer, Rx)>, concurrency: usize) { let mut thread_pool = ThreadPool::new(concurrency); let mut exit_receiver = self.exit_sender.subscribe(); @@ -552,9 +553,9 @@ impl P2pServer { debug!("incoming connections task has exited"); } - // Verify handshake send by a new connection - // based on data size, network ID, peers address validity - // block height and block top hash of this peer (to know if we are on the same chain) + // Verify the handshake sent by a new connection. + // This verification checks the data size, network ID, validity of the peer's address, + // block height, and the top block hash of the peer (to ensure we are on the same chain). async fn verify_handshake(&self, connection: &mut Connection, handshake: &mut Handshake<'_>) -> Result<(), P2pError> { if handshake.get_network() != self.blockchain.get_network() { trace!("{} has an invalid network: {}", connection, handshake.get_network()); @@ -582,7 +583,7 @@ impl P2pServer { } } - // check if the version of this peer is allowed + // Check if the version of this peer is allowed if !is_version_allowed_at_height(self.blockchain.get_network(), self.blockchain.get_height(), handshake.get_version()).map_err(|e| P2pError::InvalidP2pVersion(e.to_string()))? { return Err(P2pError::InvalidP2pVersion(handshake.get_version().clone())); } @@ -590,8 +591,8 @@ impl P2pServer { Ok(()) } - // Build a handshake packet - // We feed the packet with all chain data + // Build a handshake packet. + // We feed the packet with all chain data. 
async fn build_handshake(&self) -> Result, P2pError> { let storage = self.blockchain.get_storage().read().await; let (block, top_hash) = storage.get_top_block_header().await?; @@ -618,9 +619,9 @@ impl P2pServer { Ok((peer, rx)) } - // this function handle all new connections - // A new connection have to send an Handshake - // if the handshake is valid, we accept it & register it on server + // This function handles all new connections. + // A new connection must send a handshake. + // If the handshake is valid, we accept it and register it on the server. async fn verify_connection(&self, buf: &mut [u8], connection: &mut Connection) -> Result { trace!("New connection: {}", connection); @@ -633,9 +634,9 @@ impl P2pServer { self.send_handshake(&connection).await?; } - // wait on the handshake packet + // Wait on the handshake packet let mut handshake: Handshake<'_> = match timeout(Duration::from_millis(PEER_TIMEOUT_INIT_CONNECTION), connection.read_packet(buf, buf.len() as u32)).await?? { - // only allow handshake packet + // Only allow handshake packet Packet::Handshake(h) => h.into_owned(), _ => return Err(P2pError::ExpectedHandshake) }; @@ -644,22 +645,23 @@ impl P2pServer { self.verify_handshake(connection, &mut handshake).await?; trace!("Handshake has been verified"); - // if it's a outgoing connection, don't send the handshake back - // because we have already sent it + // If it's an outgoing connection, don't send the handshake back + // because we have already sent it.
if !connection.is_out() { trace!("Sending handshake back to {}", connection); self.send_handshake(&connection).await?; } - // if we reach here, handshake is all good, we can start listening this new peer + // If we reach here, handshake is valid, we can start listening to this new peer connection.set_state(State::Success); Ok(handshake) } async fn handle_new_peer(self: &Arc, peer: &Arc, rx: Rx) -> Result<(), P2pError> { - // we can save the peer in our peerlist - let peer_id = peer.get_id(); // keep in memory the peer_id outside connection (because of moved value) + // We can save the peer in our peerlist + let peer_id = peer.get_id(); + // Keep in memory the peer_id outside connection (because of moved value) if self.is_internal_id(peer_id) { return Err(P2pError::PeerIdAlreadyUsed(peer_id)); } @@ -685,9 +687,9 @@ impl P2pServer { self.exclusive_nodes.is_empty() || self.exclusive_nodes.contains(addr) } - // Connect to a specific peer address - // Buffer is passed in parameter to prevent the re-allocation each time - // No check is done, this is done at the moment of the connection + // Connect to a specific peer address. + // Buffer is passed in parameter to prevent the re-allocation each time. + // No check is done, this is done at the moment of the connection. pub async fn try_to_connect_to_peer(&self, addr: SocketAddr, priority: bool) { debug!("try to connect to peer addr {}, priority: {}", addr, priority); if self.connections_sender.is_closed() { @@ -700,8 +702,8 @@ impl P2pServer { } } - // Connect to a new peer using its socket address - // Then we send him a handshake + // Connect to a new peer using its socket address. + // Then we send the peer a handshake. 
async fn connect_to_peer(&self, addr: SocketAddr) -> Result { trace!("Trying to connect to {}", addr); @@ -728,16 +730,16 @@ impl P2pServer { Ok(connection) } - // Send a handshake to a connection (this is used to determine if its a potential peer) - // Handsake is sent only once, when we connect to a new peer, and we get it back from connection to make it a peer + // Send a handshake to a connection (this is used to determine if it's a potential peer). + // Handshake is sent only once, when we connect to a new peer, and we get it back from connection to make it a peer. async fn send_handshake(&self, connection: &Connection) -> Result<(), P2pError> { trace!("Sending handshake to {}", connection); let handshake = self.build_handshake().await?; connection.send_bytes(&handshake).await } - // build a ping packet with the current state of the blockchain - // if a peer is given, we will check and update the peers list + // Build a ping packet with the current state of the blockchain. + // If a peer is given, we will check and update the peers list. async fn build_generic_ping_packet_with_storage(&self, storage: &S) -> Ping<'_> { let (cumulative_difficulty, block_top_hash, pruned_topoheight) = { let pruned_topoheight = match storage.get_pruned_topoheight().await { @@ -762,21 +764,20 @@ impl P2pServer { Ping::new(Cow::Owned(block_top_hash), highest_topo_height, highest_height, pruned_topoheight, cumulative_difficulty, new_peers) } - // Build a generic ping packet - // This will lock the storage for us + // Build a generic ping packet. + // This will lock the storage for us.
async fn build_generic_ping_packet(&self) -> Ping<'_> { let storage = self.blockchain.get_storage().read().await; debug!("locking storage to build generic ping packet"); self.build_generic_ping_packet_with_storage(&*storage).await } - // select a random peer which is greater than us to sync chain - // candidate peer should have a greater topoheight or a higher block height than us - // It must also have a greater cumulative difficulty than us - // Cumulative difficulty is used in case two chains are running at same speed - // We must determine which one has the most work done - // if we are not in fast sync mode, we must verify its pruned topoheight to be sure - // he have the blocks we need + // Select a random peer with a higher chain state to sync the chain from. + // The candidate peer should have a higher topoheight or block height than us. + // It must also have a greater cumulative difficulty than us. + // Cumulative difficulty helps determine which chain has more work done if two chains are running at the same speed. + // If we are not in fast sync mode, we must verify its pruned topoheight to be sure + // that it has the blocks we need. async fn select_random_best_peer(&self, fast_sync: bool, previous_peer: Option<&(Arc, bool)>) -> Result>, BlockchainError> { trace!("select random best peer"); @@ -793,8 +794,8 @@ impl P2pServer { trace!("peer list locked for select random best peer"); - // search for peers which are greater than us - // and that are pruned but before our height so we can sync correctly + // Search for peers which are higher than us + // and that are pruned but before our height so we can sync correctly.
let available_peers = self.peer_list.get_cloned_peers().await; // IndexSet is used to select by random index let mut peers: IndexSet> = IndexSet::with_capacity(available_peers.len()); @@ -810,21 +811,21 @@ impl P2pServer { let peer_topoheight = p.get_topoheight(); if fast_sync { - // if we want to fast sync, but this peer is not compatible, we skip it - // for this we check that the peer topoheight is not less than the prune safety limit + // If we want to fast sync but this peer is not compatible, we skip it. + // For this we check that the peer topoheight is not less than the prune safety limit. if peer_topoheight < PRUNE_SAFETY_LIMIT || our_topoheight + PRUNE_SAFETY_LIMIT > peer_topoheight { continue; } if let Some(pruned_topoheight) = p.get_pruned_topoheight() { // This shouldn't be possible if following the protocol, - // But we may never know if a peer is not following the protocol strictly + // but we may never know if a peer is not following the protocol strictly. if peer_topoheight - pruned_topoheight < PRUNE_SAFETY_LIMIT { continue; } } } else { - // check that the pruned topoheight is less than our topoheight to sync - // so we can sync chain from pruned chains + // Check that the pruned topoheight is less than our topoheight to sync + // so we can sync chain from pruned chains. if let Some(pruned_topoheight) = p.get_pruned_topoheight() { if pruned_topoheight > our_topoheight { continue; @@ -855,18 +856,18 @@ impl P2pServer { } let selected = rand::thread_rng().gen_range(0..count); - // clone the Arc to prevent the lock until the end of the sync request + // Clone the Arc to prevent the lock until the end of the sync request Ok(peers.swap_remove_index(selected)) } - // Check if user has allowed fast sync mode - // This is useful for light node by syncing only the top chain while staying fully compatible + // Check if user has allowed fast sync mode. + // This is useful for lite node by syncing only the top chain while staying fully compatible. 
pub fn allow_fast_sync(&self) -> bool { self.allow_fast_sync_mode } - // Check if user has allowed the boost sync mode - // This is requesting blocks in parallel during chain sync + // Check if user has allowed the boost sync mode. + // This is requesting blocks in parallel during chain sync. pub fn allow_boost_sync(&self) -> bool { self.allow_boost_sync_mode } @@ -881,14 +882,14 @@ impl P2pServer { self.is_syncing.load(Ordering::Acquire) } - // This a infinite task that is running every CHAIN_SYNC_DELAY seconds - // Based on the user configuration, it will try to sync the chain with another node with longest chain if any + // This is an infinite task that is running every CHAIN_SYNC_DELAY seconds. + // Based on the user configuration, it will attempt to sync the chain with another node that has the longest chain, if available. async fn chain_sync_loop(self: Arc) { - // used to detect how much time we have to wait before next request + // Used to detect how much time we have to wait before next request let mut last_chain_sync = get_current_time_in_millis(); let interval = Duration::from_secs(CHAIN_SYNC_DELAY); - // Try to not reuse the same peer between each sync - // Don't use it at all if its errored + // Try to not reuse the same peer between each sync. + // Don't use it at all if its errored. let mut previous_peer: Option<(Arc, bool)> = None; loop { // Detect exact time needed before next chain sync @@ -906,9 +907,9 @@ impl P2pServer { break; } - // first we have to check if we allow fast sync mode - // and then we check if we have a potential peer above us to fast sync - // otherwise we sync normally + // First, we check if fast sync mode is allowed. + // Then, we check if there is a potential peer with a higher chain state for fast syncing. + // If not, we proceed with normal synchronization. 
let fast_sync = if self.allow_fast_sync() { trace!("locking peer list for fast sync check"); trace!("peer list locked for fast sync check"); @@ -934,11 +935,11 @@ impl P2pServer { // We are syncing the chain self.set_chain_syncing(true); - // check if we can maybe fast sync first - // otherwise, fallback on the normal chain sync + // First, check if fast sync is possible. + // If not, fall back to the normal chain synchronization. let err = if fast_sync { if let Err(e) = self.bootstrap_chain(&peer).await { - warn!("Error occured while fast syncing with {}: {}", peer, e); + warn!("Error occurred while fast syncing with {}: {}", peer, e); true } else { false @@ -946,7 +947,7 @@ impl P2pServer { } else { let previous_err = previous_peer.map(|(_, err)| err).unwrap_or(false); if let Err(e) = self.request_sync_chain_for(&peer, &mut last_chain_sync, previous_err).await { - warn!("Error occured on chain sync with {}: {}", peer, e); + warn!("Error occurred on chain sync with {}: {}", peer, e); true } else { false @@ -962,10 +963,11 @@ impl P2pServer { } } - // broadcast generic ping packet every 10s - // if we have to send our peerlist to all peers, we calculate the ping for each peer - // instead of being done in each write task of peer, we do it one time so we don't have - // several lock on the chain and on peerlist + + // Broadcast a generic ping packet every 10 seconds. + // If we need to send our peer list to all peers, we calculate the ping for each peer. + // Instead of performing this calculation in each peer's write task, we do it once to avoid + // multiple locks on the chain and the peer list. 
async fn ping_loop(self: Arc) { debug!("Starting ping loop..."); @@ -987,7 +989,7 @@ impl P2pServer { let all_peers = self.peer_list.get_cloned_peers().await; let current_time = get_current_time_in_seconds(); - // check if its time to send our peerlist + // Check if it's time to send our peerlist if current_time > last_peerlist_update + P2P_PING_PEER_LIST_DELAY { trace!("Sending ping packet with peerlist..."); for peer in all_peers.iter() { @@ -1002,21 +1004,21 @@ impl P2pServer { // Is it a peer from our local network let is_local_peer = is_local_address(peer.get_connection().get_address()); - // all the peers we already shared with this peer + // All the peers we already shared with this peer let mut shared_peers = peer.get_peers().lock().await; - // iterate through our peerlist to determinate which peers we have to send + // Iterate through our peerlist to determine which peers we have to send for p in all_peers.iter() { - // don't send him itself - // and don't share a peer that don't want to be shared + // Don't send to itself + // and don't share a peer that doesn't want to be shared if p.get_id() == peer.get_id() || !p.sharable() { continue; } - // if we haven't send him this peer addr and that he don't have him already, insert it + // If we haven't sent him this peer address and he doesn't already have it, insert it let addr = p.get_outgoing_address(); - // Don't share local network addresses if it's external peer + // Don't share local network addresses with an external peer if (is_local_address(addr) && !is_local_peer) || !is_valid_address(addr) { debug!("{} is a local address but peer is external, skipping", addr); continue; @@ -1031,10 +1033,10 @@ impl P2pServer { }; if send { - // add it in our side to not re send it again + // Add it in our side to not send it again trace!("{} didn't received {} yet, adding it to peerlist in ping packet", peer.get_outgoing_address(), addr); - // add it to new list to send it + // Add it to new list to send it
new_peers.insert(*addr); if new_peers.len() >= P2P_PING_PEER_LIST_LIMIT { break; @@ -1042,9 +1044,9 @@ impl P2pServer { } } - // update the ping packet with the new peers + // Update the ping packet with the new peers debug!("Set peers: {:?}, going to {}", new_peers, peer.get_outgoing_address()); - // send the ping packet to the peer + // Send the ping packet to the peer if let Err(e) = peer.send_packet(Packet::Ping(Cow::Borrowed(&ping))).await { debug!("Error sending specific ping packet to {}: {}", peer, e); } else { @@ -1052,15 +1054,15 @@ impl P2pServer { } } - // update the last time we sent our peerlist - // We don't use previous current_time variable because it may have been - // delayed due to the packet sending + // Update the last time we sent our peerlist. + // We don't use the previous current_time variable because it may have been + // delayed due to the packet sending. last_peerlist_update = get_current_time_in_seconds(); } else { trace!("Sending generic ping packet..."); let packet = Packet::Ping(Cow::Owned(ping)); let bytes = Bytes::from(packet.to_bytes()); - // broadcast directly the ping packet asap to all peers + // Broadcast directly the ping packet asap to all peers for peer in all_peers { if current_time - peer.get_last_ping_sent() > P2P_PING_DELAY && !peer.get_connection().is_closed() { trace!("broadcast generic ping packet to {}", peer); @@ -1077,7 +1079,7 @@ impl P2pServer { } } - // try to extend our peerlist each time its possible by searching in known peerlist from disk + // Try to extend our peerlist whenever possible by searching in the known peerlist from disk async fn peerlist_loop(self: Arc) { debug!("Starting peerlist task..."); loop { @@ -1104,8 +1106,8 @@ impl P2pServer { } } - // This function is used to broadcast PeerDisconnected event to listeners - // We use a channel to avoid having to pass the Blockchain to the Peerlist & Peers + // This function broadcasts PeerDisconnected events to listeners.
+ // A channel is used to avoid passing Blockchain to Peerlist & Peers. async fn event_loop(self: Arc, mut receiver: Receiver>) { debug!("Starting event loop task..."); let mut server_exit = self.exit_sender.subscribe(); @@ -1157,7 +1159,8 @@ impl P2pServer { let mut response_blockers: Vec = Vec::new(); for hash in header.get_txs_hashes() { - let contains = { // we don't lock one time because we may wait on p2p response + let contains = { + // Don't lock for extended periods to avoid waiting on p2p response // Check in ObjectTracker if let Some(response_blocker) = self.object_tracker.get_response_blocker_for_requested_object(hash).await { trace!("{} is already requested, waiting on response blocker for block {}", hash, block_hash); @@ -1168,7 +1171,8 @@ impl P2pServer { } }; - if !contains { // retrieve one by one to prevent acquiring the lock for nothing + if !contains { + // Retrieve one by one to prevent acquiring the lock for nothing debug!("Requesting TX {} to {} for block {}", hash, peer, block_hash); if let Err(e) = self.object_tracker.request_object_from_peer(Arc::clone(&peer), ObjectRequest::Transaction(hash.clone()), false).await { error!("Error while requesting TX {} to {} for block {}: {}", hash, peer, block_hash, e); @@ -1182,7 +1186,7 @@ impl P2pServer { } } - // Wait on all already requested txs + // Wait for all previously requested txs for mut blocker in response_blockers { if let Err(e) = blocker.recv().await { // It's mostly a closed channel error, so we can ignore it @@ -1192,7 +1196,7 @@ impl P2pServer { } } - // add immediately the block to chain as we are synced with + // Add the block to the chain immediately, as we are synced let block = match self.blockchain.build_block_from_header(Immutable::Owned(header)).await { Ok(block) => block, Err(e) => { @@ -1214,7 +1218,7 @@ impl P2pServer { debug!("Blocks processing task ended"); } - // this function handle the logic to send all packets to the peer + // This function handles the logic for 
sending all packets to the peer async fn handle_connection_write_side(&self, peer: &Arc, rx: &mut Rx, mut task_rx: oneshot::Receiver<()>) -> Result<(), P2pError> { let mut server_exit = self.exit_sender.subscribe(); let mut peer_exit = peer.get_exit_receiver(); @@ -1222,7 +1226,7 @@ impl P2pServer { loop { select! { biased; - // exit message from the read task + // Exit message from the read task _ = &mut task_rx => { trace!("Exit message received from read task for peer {}", peer); break; @@ -1237,28 +1241,28 @@ impl P2pServer { }, _ = interval.tick() => { trace!("Checking heartbeat of {}", peer); - // Last time we got a ping packet from him + // Last time we got a ping packet from peer let last_ping = peer.get_last_ping(); if last_ping != 0 && get_current_time_in_seconds() - last_ping > P2P_PING_TIMEOUT { debug!("{} has not sent a ping packet for {} seconds, closing connection...", peer, P2P_PING_TIMEOUT); break; } }, - // all packets to be sent to the peer are received here + // All packets to be sent to the peer are received here Some(bytes) = rx.recv() => { - // there is a overhead of 4 for each packet (packet size u32 4 bytes, packet id u8 is counted in the packet size) + // There is an overhead of 4 bytes per packet (packet size u32 4 bytes, packet id u8 is counted in the packet size) trace!("Sending packet with ID {}, size sent: {}, real size: {}", bytes[4], u32::from_be_bytes(bytes[0..4].try_into()?), bytes.len()); peer.get_connection().send_bytes(&bytes).await?; - trace!("data sucessfully sent!"); + trace!("data successfully sent!"); } } } Ok(()) } - // This function is a separated task with its own buffer (1kB) to read and handle every packets from the peer sequentially + // This function runs as a separate task with its own 1kB buffer to read and handle packets from the peer sequentially async fn handle_connection_read_side(self: &Arc, peer: &Arc, mut write_task: JoinHandle<()>) -> Result<(), P2pError> { - // allocate the unique buffer for this 
connection + // Allocate the unique buffer for this connection let mut buf = [0u8; 1024]; let mut server_exit = self.exit_sender.subscribe(); let mut peer_exit = peer.get_exit_receiver(); @@ -1280,9 +1284,9 @@ impl P2pServer { res = self.listen_connection(&mut buf, &peer) => { res?; - // check that we don't have too many fails - // otherwise disconnect peer - // Priority nodes are not disconnected + // Check if there are too many failures. + // If so, disconnect the peer. + // Priority nodes are not disconnected. if peer.get_fail_count() >= PEER_FAIL_LIMIT && !peer.is_priority() { warn!("High fail count detected for {}! Closing connection...", peer); if let Err(e) = peer.close_and_temp_ban().await { @@ -1296,11 +1300,12 @@ impl P2pServer { Ok(()) } - // this function handle the whole connection with a peer - // create a task for each part (reading and writing) - // so we can do both at the same time without blocking / waiting on other part when important traffic + // This function manages the entire connection with a peer. + // It creates separate tasks for reading and writing, + // allowing both operations to proceed simultaneously + // without blocking or waiting on the other part during important traffic. 
async fn handle_connection(self: &Arc, peer: Arc, mut rx: Rx) -> Result<(), P2pError> { - // task for writing to peer + // Task for writing to peer let (write_tx, write_rx) = oneshot::channel(); let write_task = { @@ -1317,7 +1322,7 @@ impl P2pServer { peer.set_write_task_state(TaskState::Exiting).await; - // clean shutdown + // Clean shutdown rx.close(); if let Err(e) = peer.close().await { @@ -1329,7 +1334,7 @@ impl P2pServer { }) }; - // task for reading from peer + // Task for reading from peer { let zelf = Arc::clone(&self); let peer = Arc::clone(&peer); @@ -1342,8 +1347,8 @@ impl P2pServer { peer.set_read_task_state(TaskState::Exiting).await; - // Verify that the connection is closed - // Write task should be responsible for closing the connection + // Verify that the connection is closed. + // Write task should be responsible for closing the connection. if write_tx.send(()).is_err() { debug!("Write task has already exited, closing connection for {}", peer); } @@ -1355,7 +1360,7 @@ impl P2pServer { }); } - // verify that we are synced with him to receive all TXs correctly + // Verify that we are synced with the peer to receive all TXs correctly let our_topoheight = self.blockchain.get_topo_height(); let peer_topoheight = peer.get_topoheight(); if peer_topoheight == our_topoheight { @@ -1367,13 +1372,12 @@ impl P2pServer { Ok(()) } - // Returns the list of all common peers we have between Peer and us + // Returns the list of all common peers we have between a peer and us. // TODO fix common peers detection - // Problem is: - // We are connected to node A and node B, we know that they are connected each other - // But they may not already shared their peerlist about us so they don't know we are - // a common peer between them two, which result in false positive in our case and they send - // us both the same object + // Problem: We are connected to nodes A and B, which are connected to each + // other. 
However, they may not have shared their peer lists with each other, + // so they don't know we are a common peer. This can cause false positives + // where they send us the same object. async fn get_common_peers_for(&self, peer: &Arc) -> Vec> { debug!("get common peers for {}", peer); trace!("locked peer_list, locking peers received (common peers)"); @@ -1382,7 +1386,7 @@ impl P2pServer { let mut common_peers = Vec::new(); for (common_peer_addr, _) in peer_peers.iter().filter(|(_, direction)| **direction == Direction::Both) { - // if we have a common peer with him + // If we have a common peer with them if let Some(common_peer) = self.peer_list.get_peer_by_addr(common_peer_addr).await { if peer.get_id() != common_peer.get_id() { common_peers.push(common_peer); @@ -1393,9 +1397,8 @@ impl P2pServer { common_peers } - // Main function used by every nodes connections - // This is handling each packet available in our p2p protocol - // Each packet is a enum variant + // Main function for handling connections with nodes. + // Processes each packet in our p2p protocol; each packet is an enum variant. async fn handle_incoming_packet(self: &Arc, peer: &Arc, packet: Packet<'_>) -> Result<(), P2pError> { match packet { Packet::Handshake(_) => { @@ -1415,7 +1418,7 @@ impl P2pServer { ping.into_owned().update_peer(peer, &self.blockchain).await?; - // peer should not send us twice the same transaction + // Peer should not send us the same TX twice debug!("Received tx hash {} from {}", hash, peer.get_outgoing_address()); { let mut txs_cache = peer.get_txs_cache().lock().await; @@ -1430,7 +1433,7 @@ impl P2pServer { } } - // Check that the tx is not in mempool or on disk already + // Check that the TX is not in mempool or on disk already if !self.blockchain.has_tx(&hash).await? { trace!("Requesting tx {} propagated because we don't have it", hash); if !self.object_tracker.request_object_from_peer(Arc::clone(peer), ObjectRequest::Transaction(hash.clone()), true).await? 
{ @@ -1438,13 +1441,14 @@ impl P2pServer { } } - // Avoid sending the TX propagated to a common peer - // because we track peerlist of each peers, we can try to determinate it - // iterate over all common peers of this peer broadcaster + // Avoid sending the TX propagated to a common peer. + // We track the peer list of each peer and determine common peers. + // Iterate over all common peers of this peer broadcaster. for common_peer in self.get_common_peers_for(&peer).await { debug!("{} is a common peer with {}, adding TX {} to its cache", common_peer, peer, hash); let mut txs_cache = common_peer.get_txs_cache().lock().await; - // Set it as Out so we don't send it anymore but we can get it one time in case of bad common peer prediction + // Mark the TX as Out so we don't send it again. This allows us to retrieve + // it once in case of incorrect common peer prediction. txs_cache.put(hash.clone(), Direction::Out); } }, @@ -1453,11 +1457,11 @@ impl P2pServer { let (header, ping) = packet_wrapper.consume(); ping.into_owned().update_peer(peer, &self.blockchain).await?; - // check that the block height is valid + // Check that the block height is valid let header = header.into_owned(); let block_hash = header.hash(); - // verify that this block wasn't already sent by him + // Verify that this block wasn't already sent by him { let mut blocks_propagation = peer.get_blocks_propagation().lock().await; if let Some(direction) = blocks_propagation.get_mut(&block_hash) { @@ -1471,18 +1475,19 @@ impl P2pServer { } } - // Avoid sending the same block to a common peer that may have already got it - // because we track peerlist of each peers, we can try to determinate it + // Avoid sending the same block to a common peer that may have already received it. + // We track the peer list of each peer and use this information to determine this. 
for common_peer in self.get_common_peers_for(&peer).await { debug!("{} is a common peer with {}, adding block {} to its propagation cache", common_peer, peer, block_hash); let mut blocks_propagation = common_peer.get_blocks_propagation().lock().await; - // Out allow to get "In" again, because it's a prediction, don't block it completely + // Mark the block as Out to avoid sending it again. + // This allows us to get it back in case our prediction was incorrect. if !blocks_propagation.contains(&block_hash) { blocks_propagation.put(block_hash.clone(), Direction::Out); } } - // check that we don't have this block in our chain + // Check that this block is not already in our chain. { let storage = self.blockchain.get_storage().read().await; if storage.has_block_with_hash(&block_hash).await? { @@ -1516,7 +1521,7 @@ impl P2pServer { let request = request.into_owned(); let last_request = peer.get_last_chain_sync(); let time = get_current_time_in_seconds(); - // Node is trying to ask too fast our chain + // Node is requesting the chain too quickly // Don't allow faster than 1/3 of the delay if last_request + (CHAIN_SYNC_DELAY * 2 / 3) > time { debug!("{} requested sync chain too fast!", peer); @@ -1524,9 +1529,10 @@ impl P2pServer { } peer.set_last_chain_sync(time); - // at least one block necessary (genesis block) + // At least one block necessary (genesis block) let request_size = request.size(); - if request_size == 0 || request_size > CHAIN_SYNC_REQUEST_MAX_BLOCKS { // allows maximum 64 blocks id (2560 bytes max) + if request_size == 0 || request_size > CHAIN_SYNC_REQUEST_MAX_BLOCKS { + // Allows maximum 64 blocks id (2560 bytes max) warn!("{} sent us a malformed chain request ({} blocks)!", peer, request_size); return Err(P2pError::MalformedChainRequest(request_size)) } @@ -1558,10 +1564,10 @@ impl P2pServer { let current_time = get_current_time_in_seconds(); let empty_peer_list = ping.get_peers().is_empty(); - // update the last ping only if he respect the protocol 
rules + // Update the last ping time only if the peer respects the protocol rules. peer.set_last_ping(current_time); - // we verify the respect of the countdown of peer list updates to prevent any spam + // Verify compliance with the countdown for peer list updates to prevent spam. if !empty_peer_list { trace!("received peer list from {}: {}", peer, ping.get_peers().len()); let last_peer_list = peer.get_last_peer_list(); @@ -1651,15 +1657,15 @@ impl P2pServer { let response = response.to_owned(); trace!("Object response received is {}", response.get_hash()); - // check if we requested it from this peer + // Check if we requested it from this peer already let request = response.get_request(); if peer.has_requested_object(&request).await { let sender = peer.remove_object_request(request).await?; - // handle the response + // Handle the response if sender.send(response).is_err() { error!("Error while sending object response to sender!"); } - // check if the Object Tracker has requested this object + // Check if the Object Tracker has requested this object } else if self.object_tracker.has_requested_object(request.get_hash()).await { trace!("Object Tracker requested it, handling it"); self.object_tracker.handle_object_response(response).await?; @@ -1707,7 +1713,7 @@ impl P2pServer { return Err(P2pError::InvalidPacket) } - // we received the inventory + // We received the inventory peer.set_requested_inventory(false); peer.set_last_inventory(get_current_time_in_seconds()); @@ -1716,7 +1722,7 @@ impl P2pServer { let txs = inventory.get_txs(); let total_count = txs.len(); - // check that the response was really full if he send us another "page" + // Check that the response was really full if he sends us another "page" if next_page.is_some() { if total_count != NOTIFY_MAX_LEN { error!("Received only {} while maximum is {} elements, and tell us that there is another page", total_count, NOTIFY_MAX_LEN); @@ -1735,7 +1741,7 @@ impl P2pServer { } } - // request the next page +
// Request the next page if next_page.is_some() { trace!("Requesting next page of inventory from {}", peer); let packet = Cow::Owned(NotifyInventoryRequest::new(next_page)); @@ -1793,8 +1799,8 @@ impl P2pServer { Ok(()) } - // Listen to incoming packets from a connection - // Packet is read from the same task always, while its handling is delegated to a unique task + // Listen for incoming packets from a connection. + // Packets are read by a single task, but handling is delegated to a separate task. async fn listen_connection(self: &Arc, buf: &mut [u8], peer: &Arc) -> Result<(), P2pError> { // Read & parse the packet // 16 additional bytes are for AEAD @@ -1812,7 +1818,7 @@ impl P2pServer { return Err(e) }, e => { - error!("Error occured while handling incoming packet #{} from {}: {}", packet_id, peer, e); + error!("Error occurred while handling incoming packet #{} from {}: {}", packet_id, peer, e); peer.increment_fail_count(); } } @@ -1821,9 +1827,9 @@ impl P2pServer { Ok(()) } - // Search a common point between us and the peer chain - // For this we have a list of block id which is basically block hash + its topoheight - // BlockId list should be in descending order (higher topoheight first) + // Find a common point between our chain and the peer's chain. + // For this we have a list of block ids which is basically block hash + its topoheight. + // The BlockId list should be in descending order (higher topoheight first). 
async fn find_common_point(&self, storage: &S, blocks: IndexSet) -> Result, P2pError> { let start_topoheight = if let Some(first) = blocks.first() { first.get_topoheight() + 1 @@ -1842,12 +1848,12 @@ impl P2pServer { } let mut expected_topoheight = start_topoheight; - // search a common point + // Search a common point for (i, block_id) in blocks.into_iter().enumerate() { - // Verify good order of blocks - // If we already processed genesis block (topo 0) and still have some blocks, it's invalid list - // If we are in the first CHAIN_SYNC_REQUEST_EXPONENTIAL_INDEX_START blocks, verify the exact good order - // If we are above it, i = i * 2, start topo - i = expected topoheight + // Verify good order of blocks. + // If we have already processed the genesis block (topo 0) but still have blocks, the list is invalid. + // For the first CHAIN_SYNC_REQUEST_EXPONENTIAL_INDEX_START blocks, verify the exact order. + // If we are above it, i = i * 2, start topo - i = expected topoheight. if expected_topoheight == 0 || (i < CHAIN_SYNC_REQUEST_EXPONENTIAL_INDEX_START && expected_topoheight - 1 != block_id.get_topoheight()) { warn!("Block id list has not a good order at index {}, current topo {}, next: {}", i, expected_topoheight, block_id.get_topoheight()); return Err(P2pError::InvalidBlockIdList) @@ -1858,8 +1864,9 @@ impl P2pServer { if storage.has_block_with_hash(block_id.get_hash()).await? { let (hash, topoheight) = block_id.consume(); debug!("Block {} is common, expected topoheight: {}", hash, topoheight); - // check that the block is ordered like us - if storage.is_block_topological_ordered(&hash).await && storage.get_topo_height_for_hash(&hash).await? == topoheight { // common point + // Check that the block order matches our chain. + if storage.is_block_topological_ordered(&hash).await && storage.get_topo_height_for_hash(&hash).await? 
== topoheight { + // Common point debug!("common point found at block {} with same topoheight at {}", hash, topoheight); return Ok(Some(CommonPoint::new(hash, topoheight))) } @@ -1868,27 +1875,27 @@ impl P2pServer { Ok(None) } - // search a common point between our blockchain and the peer's one - // when the common point is found, start sending blocks from this point + // Search for a common point between our blockchain and the peer's. + // Once the common point is found, start sending blocks from this point. async fn handle_chain_request(self: &Arc, peer: &Arc, blocks: IndexSet, accepted_response_size: usize) -> Result<(), BlockchainError> { debug!("handle chain request for {} with {} blocks", peer, blocks.len()); let storage = self.blockchain.get_storage().read().await; - // blocks hashes sent for syncing (topoheight ordered) + // Blocks hashes sent for syncing (topoheight ordered) let mut response_blocks = IndexSet::new(); let mut top_blocks = IndexSet::new(); - // common point used to notify peer if he should rewind or not + // Common point used to notify peer if he should rewind or not let common_point = self.find_common_point(&*storage, blocks).await?; // Lowest height of the blocks sent let mut lowest_common_height = None; if let Some(common_point) = &common_point { let mut topoheight = common_point.get_topoheight(); - // lets add all blocks ordered hash + // Lets add all blocks ordered hash let top_topoheight = self.blockchain.get_topo_height(); - // used to detect if we find unstable height for alt tips + // Used to detect if we find unstable height for alt tips let mut unstable_height = None; let top_height = self.blockchain.get_height(); - // check to see if we should search for alt tips (and above unstable height) + // Check to see if we should search for alt tips (and above unstable height) let should_search_alt_tips = top_topoheight - topoheight < accepted_response_size as u64; if should_search_alt_tips { debug!("Peer is near to be synced, will send 
him alt tips blocks"); @@ -1898,7 +1905,7 @@ impl P2pServer { // Search the lowest height let mut lowest_height = top_height; - // complete ChainResponse blocks until we are full or that we reach the top topheight + // Complete ChainResponse blocks until we are full or that we reach the top topoheight while response_blocks.len() < accepted_response_size && topoheight <= top_topoheight { trace!("looking for hash at topoheight {}", topoheight); let hash = storage.get_hash_at_topo_height(topoheight).await?; @@ -1912,13 +1919,13 @@ impl P2pServer { let mut swap = false; if let Some(previous_hash) = response_blocks.last() { let version = get_version_at_height(self.blockchain.get_network(), height); - // Due to the TX being orphaned, some TXs may be in the wrong order in V1 - // It has been sorted in V2 and should not happen anymore + // Due to the TX being orphaned, some TXs may be in the wrong order in V1. + // It has been sorted in V2 and should not happen anymore. if version == BlockVersion::V0 && storage.has_block_position_in_order(&hash).await? && storage.has_block_position_in_order(&previous_hash).await? { if self.blockchain.is_side_block_internal(&*storage, &hash, top_topoheight).await? 
{ let position = storage.get_block_position_in_order(&hash).await?; let previous_position = storage.get_block_position_in_order(&previous_hash).await?; - // if the block is a side block, we need to check if it's in the right order + // If the block is a side block, we need to check if it's in the right order if position < previous_position { swap = true; } @@ -1965,11 +1972,11 @@ impl P2pServer { Ok(()) } - // Handle a chain response from another peer - // We receive a list of blocks hashes ordered by their topoheight - // It also contains a CommonPoint which is a block hash point where we have the same topoheight as our peer - // Based on the lowest height of the chain sent, we may need to rewind some blocks - // NOTE: Only a priority node can rewind below the stable height + // Handle a chain response from another peer. + // We receive a list of block hashes ordered by their topoheight. + // It includes a CommonPoint, which is a block hash where our topoheight matches the peer's. + // Based on the lowest height in the chain sent, we may need to rewind some blocks. + // NOTE: Only a priority node can rewind below the stable height. 
async fn handle_chain_response(&self, peer: &Arc, mut response: ChainResponse, requested_max_size: usize, skip_stable_height_check: bool) -> Result<(), BlockchainError> { trace!("handle chain response from {}", peer); let response_size = response.blocks_size(); @@ -2028,43 +2035,44 @@ impl P2pServer { let top_len = top_blocks.len(); let blocks_len = blocks.len(); - // merge both list together + // Merge both list together blocks.extend(top_blocks); if pop_count > 0 { warn!("{} sent us a pop count request of {} with {} blocks", peer, pop_count, blocks_len); } - // if node asks us to pop blocks, check that the peer's height/topoheight is in advance on us + // If node asks us to pop blocks, check that the peer's height/topoheight is in advance on us let peer_topoheight = peer.get_topoheight(); if pop_count > 0 && peer_topoheight > our_previous_topoheight && peer.get_height() >= our_previous_height - // then, verify if it's a priority node, otherwise, check if we are connected to a priority node so only him can rewind us + // Then, verify if it's a priority node, otherwise, check if we are connected to a priority node so only this peer can rewind us && (peer.is_priority() || !self.is_connected_to_a_synced_priority_node().await) { - // check that if we can trust him + // Check if we can trust this peer if peer.is_priority() { warn!("Rewinding chain without checking because {} is a priority node (pop count: {})", peer, pop_count); - // User trust him as a priority node, rewind chain without checking, allow to go below stable height also + // User trusts this peer as a priority node. Rewind the chain without checking. + // Allow going below the stable height as well. self.blockchain.rewind_chain(pop_count, false).await?; } else { // Verify that someone isn't trying to trick us if pop_count > blocks_len as u64 { // TODO: maybe we could request its whole chain for comparison until chain validator has_higher_cumulative_difficulty ? 
- // If after going through all its chain and we still have a higher cumulative difficulty, we should not rewind + // If after going through all its chain and we still have a higher cumulative difficulty, we should not rewind. warn!("{} sent us a pop count of {} but only sent us {} blocks, ignoring", peer, pop_count, blocks_len); return Err(P2pError::InvalidPopCount(pop_count, blocks_len as u64).into()) } - // request all blocks header and verify basic chain structure - // Starting topoheight must be the next topoheight after common block - // Blocks in chain response must be ordered by topoheight otherwise it will give incorrect results + // Request all block headers and verify the basic chain structure. + // The starting topoheight must be the next one after the common block. + // Blocks in the chain response must be ordered by topoheight; otherwise, it will give incorrect results. let mut chain_validator = ChainValidator::new(&self.blockchain, common_topoheight + 1); for hash in blocks { trace!("Request block header for chain validator: {}", hash); - // check if we already have the block to not request it + // Check if we already have the block to not request it if self.blockchain.has_block(&hash).await? 
{ trace!("We already have block {}, skipping", hash); continue; @@ -2087,24 +2095,25 @@ impl P2pServer { return Err(BlockchainError::LowerCumulativeDifficulty) } - // peer chain looks correct, lets rewind our chain + // Peer chain looks correct, lets rewind our chain warn!("Rewinding chain because of {} (pop count: {})", peer, pop_count); self.blockchain.rewind_chain(pop_count, false).await?; - // now retrieve all txs from all blocks header and add block in chain + // Now retrieve all TXs from all blocks header and add block in chain for (hash, header) in chain_validator.get_blocks() { trace!("Processing block {} from chain validator", hash); - // we don't already have this block, lets retrieve its txs and add in our chain + // We don't already have this block, lets retrieve its TXs and add in our chain if !self.blockchain.has_block(&hash).await? { let mut transactions = Vec::new(); // don't pre allocate for tx_hash in header.get_txs_hashes() { - // check first on disk in case it was already fetch by a previous block - // it can happens as TXs can be integrated in multiple blocks and executed only one time - // check if we find it + // First, check on disk in case it was already fetched by a previous block. + // This can happen as TXs can be integrated into multiple blocks but executed only once. + // Check if we find it. 
if let Some(tx) = self.blockchain.get_tx(tx_hash).await.ok() { trace!("Found the transaction {} on disk", tx_hash); transactions.push(Immutable::Arc(tx)); - } else { // otherwise, ask it from peer + } else { + // Otherwise, ask it from peer let response = peer.request_blocking_object(ObjectRequest::Transaction(tx_hash.clone())).await?; if let OwnedObjectResponse::Transaction(tx, _) = response { trace!("Received transaction {} at block {} from {}", tx_hash, hash, peer); @@ -2118,17 +2127,17 @@ impl P2pServer { // Assemble back the block and add it to the chain let block = Block::new(Immutable::Arc(header), transactions); - self.blockchain.add_new_block(block, false, false).await?; // don't broadcast block because it's syncing + self.blockchain.add_new_block(block, false, false).await?; // Don't broadcast block because it's syncing } } } } else { - // no rewind are needed, process normally - // it will first add blocks to sync, and then all alt-tips blocks if any (top blocks) + // Rewind is not needed, process normally. + // It will first add blocks to sync, and then all alt-tips blocks if any (top blocks). let mut total_requested: usize = 0; let mut final_blocker = None; // If boost sync is allowed, we can request all blocks in parallel, - // Create a new group in Object Tracker to be notified of a failure + // create a new group in Object Tracker to be notified of a failure. let (group_id, mut notifier) = if self.allow_boost_sync() { let (group_id, notifier) = self.object_tracker.get_group_manager().next_group_id().await; (Some(group_id), Some(notifier)) @@ -2136,18 +2145,18 @@ impl P2pServer { (None, None) }; - // Peekable is here to help to know if we are at the last element - // so we create only one channel for the last blocker + // Peekable is here to help know if we are at the last element, + // so we create only one channel for the last blocker. 
let mut blocks_iter = blocks.into_iter().peekable(); while let Some(hash) = blocks_iter.next() { if !self.blockchain.has_block(&hash).await? { trace!("Block {} is not found, asking it to {} (index = {})", hash, peer.get_outgoing_address(), total_requested); - // if it's allowed by the user, request all blocks in parallel + // If it's allowed by the user, request all blocks in parallel if self.allow_boost_sync() { if let Some(notifier) = &mut notifier { // Check if we don't have any message pending in the channel if let Ok(err) = notifier.try_recv() { - debug!("An error has occured in batch while requesting chain in boost mode"); + debug!("An error has occurred in batch while requesting chain in boost mode"); return Err(P2pError::BoostSyncModeFailed(Box::new(err)).into()); } } @@ -2201,7 +2210,7 @@ impl P2pServer { select! { res = &mut notifier => { let err = res.map_err(|e| P2pError::BoostSyncModeBlockerResponseError(e))?; - debug!("An error has occured while requesting chain in boost mode: {}", err); + debug!("An error has occurred while requesting chain in boost mode: {}", err); return Err(err.into()); }, res = blocker.recv() => match res { @@ -2224,11 +2233,11 @@ impl P2pServer { } let peer_topoheight = peer.get_topoheight(); - // ask inventory of this peer if we sync from too far - // if we are not further than one sync, request the inventory + // Ask for the inventory from this peer if we are synced from too far behind. + // If we are not further than one sync, request the inventory. 
if peer_topoheight > our_previous_topoheight && blocks_len < requested_max_size { let our_topoheight = self.blockchain.get_topo_height(); - // verify that we synced it partially well + // Verify that we synced it partially well if peer_topoheight >= our_topoheight && peer_topoheight - our_topoheight < STABLE_LIMIT { if let Err(e) = self.request_inventory_of(&peer).await { error!("Error while asking inventory to {}: {}", peer, e); @@ -2239,7 +2248,7 @@ impl P2pServer { Ok(()) } - // determine if we are connected to a priority node and that this node is equal / greater to our chain + // Determine if we are connected to a priority node and that this node is equal / greater to our chain async fn is_connected_to_a_synced_priority_node(&self) -> bool { let topoheight = self.blockchain.get_topo_height(); trace!("locking peer list for checking if connected to a synced priority node"); @@ -2302,7 +2311,8 @@ impl P2pServer { // Check if we are already connected to a socket address (IPv4 or IPv6) including its port pub async fn is_connected_to_addr(&self, peer_addr: &SocketAddr) -> bool { - if *peer_addr == *self.get_bind_address() { // don't try to connect to ourself + if *peer_addr == *self.get_bind_address() { + // Don't try to connect to ourself debug!("Trying to connect to ourself, ignoring."); return true } @@ -2310,7 +2320,7 @@ impl P2pServer { self.peer_list.is_connected_to_addr(peer_addr).await } - // get the socket address on which we are listening + // Get the socket address on which we are listening pub fn get_bind_address(&self) -> &SocketAddr { &self.bind_address } @@ -2320,30 +2330,30 @@ impl P2pServer { &self.peer_list } - // Broadcast a new transaction hash using propagation packet - // This is used so we don't overload the network during spam or high transactions count - // We simply share its hash to nodes and others nodes can check if they have it already or not + // Broadcast a new transaction hash using propagation packet. 
+ // This is used so we don't overload the network during spam or high transactions count. + // We simply share its hash to nodes and other nodes can check if they have it already or not. pub async fn broadcast_tx_hash(&self, tx: Hash) { debug!("Broadcasting tx hash {}", tx); let ping = self.build_generic_ping_packet().await; debug!("Ping packet has been generated for tx broadcast"); let current_topoheight = ping.get_topoheight(); let packet = Packet::TransactionPropagation(PacketWrapper::new(Cow::Borrowed(&tx), Cow::Owned(ping))); - // transform packet to bytes (so we don't need to transform it for each peer) + // Transform packet to bytes (so we don't need to transform it for each peer) let bytes = Bytes::from(packet.to_bytes()); trace!("Locking peer list for tx broadcast"); let peers = self.peer_list.get_cloned_peers().await; trace!("Lock acquired for tx broadcast"); for peer in peers { - // check that the peer is not too far from us - // otherwise we may spam him for nothing + // Check that the peer is not too far from us, + // otherwise we may spam him for nothing.
let peer_topoheight = peer.get_topoheight(); if (peer_topoheight >= current_topoheight && peer_topoheight - current_topoheight < STABLE_LIMIT) || (current_topoheight >= peer_topoheight && current_topoheight - peer_topoheight < STABLE_LIMIT) { trace!("Peer {} is not too far from us, checking cache for tx hash {}", peer, tx); let mut txs_cache = peer.get_txs_cache().lock().await; trace!("Cache locked for tx hash {}", tx); - // check that we didn't already send this tx to this peer or that he don't already have it + // Check that we haven't already sent this TX to the peer and that he doesn't already have it if !txs_cache.contains(&tx) { trace!("Broadcasting tx hash {} to {}", tx, peer); if let Err(e) = peer.send_bytes(bytes.clone()).await { @@ -2359,11 +2369,11 @@ impl P2pServer { } } - // broadcast block to all peers that can accept directly this new block + // Broadcast the block to all peers that can directly accept this new block pub async fn broadcast_block(&self, block: &BlockHeader, cumulative_difficulty: CumulativeDifficulty, our_topoheight: u64, our_height: u64, pruned_topoheight: Option, hash: &Hash, lock: bool) { debug!("Broadcasting block {} at height {}", hash, block.get_height()); - // we build the ping packet ourself this time (we have enough data for it) - // because this function can be call from Blockchain, which would lead to a deadlock + // We build the ping packet ourselves this time (we have enough data for it) + // because this function can be called from Blockchain, which would lead to a deadlock.
let ping = Ping::new(Cow::Borrowed(hash), our_topoheight, our_height, pruned_topoheight, cumulative_difficulty, IndexSet::new()); let block_packet = Packet::BlockPropagation(PacketWrapper::new(Cow::Borrowed(block), Cow::Borrowed(&ping))); let packet_block_bytes = Bytes::from(block_packet.to_bytes()); @@ -2372,22 +2382,23 @@ impl P2pServer { trace!("Locking peer list for broadcasting block {}", hash); trace!("start broadcasting block {} to all peers", hash); for peer in self.peer_list.get_cloned_peers().await { - // if the peer can directly accept this new block, send it + // If the peer can directly accept this new block, send it let peer_height = peer.get_height(); - // if the peer is not too far from us, send the block - // check that peer height is greater or equal to block height but still under or equal to STABLE_LIMIT - // or, check that peer height as difference of maximum 1 block - // (block height is always + 1 above the highest tip height, so we can just check that peer height is not above block height + 1, it's enough in 90% of time) - // chain can accept old blocks (up to STABLE_LIMIT) but new blocks only N+1 + // If the peer is not too far from us, send the block. + // Ensure the peer height is greater than or equal to the block height + // but still within STABLE_LIMIT, or check that the peer height is at most + // one block behind (block height is always +1 above the highest tip height, + // so checking if peer height is not above block height + 1 suffices in most cases). + // The chain can accept old blocks (up to STABLE_LIMIT), but new blocks only N+1. 
if (peer_height >= block.get_height() && peer_height - block.get_height() <= STABLE_LIMIT) || (peer_height <= block.get_height() && block.get_height() - peer_height <= 1) { trace!("locking blocks propagation for peer {}", peer); let mut blocks_propagation = peer.get_blocks_propagation().lock().await; trace!("end locking blocks propagation for peer {}", peer); - // check that this block was never shared with this peer + // Check that this block was never shared with this peer if !blocks_propagation.contains(hash) { - // we broadcasted to him, add it to the cache - // he should not send it back to us if it's a block found by us + // We broadcasted to him, add it to the cache. + // He should not send it back to us if it's a block found by us. blocks_propagation.put(hash.clone(), if lock { Direction::Both } else { Direction::Out }); debug!("Broadcast {} to {} (lock: {})", hash, peer, lock); @@ -2413,8 +2424,8 @@ impl P2pServer { } // Handle a bootstrap chain request - // We have differents steps available for a bootstrap sync - // We verify that they are send in good order + // We have different steps available for a bootstrap sync + // We verify that they are sent in the correct order. 
async fn handle_bootstrap_chain_request(self: &Arc, peer: &Arc, request: StepRequest<'_>) -> Result<(), BlockchainError> { let request_kind = request.kind(); debug!("Handle bootstrap chain request {:?} from {}", request_kind, peer); @@ -2504,7 +2515,7 @@ impl P2pServer { }, StepRequest::BlocksMetadata(topoheight) => { let mut blocks = IndexSet::with_capacity(PRUNE_SAFETY_LIMIT as usize); - // go from the lowest available point until the requested stable topoheight + // Go from the lowest available point until the requested stable topoheight let lower = if topoheight - PRUNE_SAFETY_LIMIT <= pruned_topoheight { pruned_topoheight + 1 } else { @@ -2528,16 +2539,16 @@ impl P2pServer { Ok(()) } - // Build a block id list to share our DAG order and chain state - // Block id list must be in descending order and unique hash / topoheight - // This is used to search the common point between two peers + // Build a block id list to share our DAG order and chain state. + // Block id list must be in descending order and unique hash / topoheight. + // This is used to search the common point between two peers. 
async fn build_list_of_blocks_id(&self, storage: &S) -> Result, BlockchainError> { let mut blocks = IndexSet::new(); let topoheight = self.blockchain.get_topo_height(); let pruned_topoheight = storage.get_pruned_topoheight().await?.unwrap_or(0); let mut i = 0; - // we add 1 for the genesis block added below + // We add 1 for the genesis block added below trace!("Building list of blocks id for {} blocks, pruned topo: {}", topoheight, pruned_topoheight); while i < topoheight && topoheight - i > pruned_topoheight && blocks.len() + 1 < CHAIN_SYNC_REQUEST_MAX_BLOCKS { let current_topo = topoheight - i; @@ -2552,15 +2563,15 @@ impl P2pServer { } } - // add genesis block + // Add genesis block let genesis_block = storage.get_hash_at_topo_height(0).await?; blocks.insert(BlockId::new(genesis_block, 0)); Ok(blocks) } - // Update all keys using bootstrap request - // This will fetch the nonce and associated balance for each asset + // Update all keys using bootstrap request. + // This will fetch the nonce and associated balance for each asset. async fn update_bootstrap_keys(&self, peer: &Arc, keys: &IndexSet, our_topoheight: u64, stable_topoheight: u64) -> Result<(), P2pError> { if keys.is_empty() { warn!("No keys to update"); @@ -2568,14 +2579,14 @@ impl P2pServer { } let StepResponse::Nonces(nonces) = peer.request_boostrap_chain(StepRequest::Nonces(stable_topoheight, Cow::Borrowed(&keys))).await? else { - // shouldn't happen + // Shouldn't happen error!("Received an invalid StepResponse (how ?) 
while fetching nonces"); return Err(P2pError::InvalidPacket.into()) }; { let mut storage = self.blockchain.get_storage().write().await; - // save all nonces + // Save all nonces for (key, nonce) in keys.iter().zip(nonces) { debug!("Saving nonce {} for {}", nonce, key.as_address(self.blockchain.get_network().is_mainnet())); storage.set_last_nonce_to(key, stable_topoheight, &VersionedNonce::new(nonce, None)).await?; @@ -2596,25 +2607,25 @@ impl P2pServer { assets }; - // Request every asset balances + // Request every asset balance for asset in assets { debug!("Requesting balances for asset {} at topo {}", asset, stable_topoheight); let StepResponse::Balances(balances) = peer.request_boostrap_chain(StepRequest::Balances(Cow::Borrowed(&keys), Cow::Borrowed(&asset), our_topoheight, stable_topoheight)).await? else { - // shouldn't happen + // Shouldn't happen error!("Received an invalid StepResponse (how ?) while fetching balances"); return Err(P2pError::InvalidPacket.into()) }; - // save all balances for this asset + // Save all balances for this asset for (key, balance) in keys.iter().zip(balances) { - // check that the account have balance for this asset + // Check that the account has a balance for this asset if let Some(account) = balance { debug!("Saving balance {} summary for {}", asset, key.as_address(self.blockchain.get_network().is_mainnet())); let ((stable_topo, stable), output) = account.as_versions(); let mut storage = self.blockchain.get_storage().write().await; storage.set_last_balance_to(key, &asset, stable_topo, &stable).await?; - // save the output balance if it's different from the stable one + // Save the output balance if it's different from the stable one if let Some((topo, output)) = output{ storage.set_balance_at_topoheight(&asset, topo, key, &output).await?; } @@ -2629,12 +2640,12 @@ impl P2pServer { Ok(()) } - // first, retrieve chain info of selected peer - // We retrieve all assets through pagination, - // then we fetch all keys with its 
nonces and its balances (also through pagination) - // and for the last step, retrieve last STABLE TOPOHEIGHT - PRUNE_SAFETY_LIMIT blocks - // reload blockchain cache from disk, and we're ready to sync the rest of the chain - // NOTE: it could be even faster without retrieving each TXs, but we do it in case user don't enable pruning + // First, retrieve chain info from the selected peer. + // Retrieve all assets through pagination. + // Fetch all keys with their nonces and balances, also through pagination. + // Finally, retrieve the last STABLE TOPOHEIGHT - PRUNE_SAFETY_LIMIT blocks. + // Reload blockchain cache from disk, and we're ready to sync the rest of the chain. + // NOTE: It could be faster without retrieving each TX, but we do it if pruning is not enabled. async fn bootstrap_chain(&self, peer: &Arc) -> Result<(), BlockchainError> { info!("Starting fast sync with {}", peer); @@ -2646,8 +2657,7 @@ impl P2pServer { Some(StepRequest::ChainInfo(self.build_list_of_blocks_id(&*storage).await?)) }; - // keep them in memory, we add them when we're syncing - // it's done to prevent any sync failure + // Keep them in memory and add them during syncing to prevent sync failure. 
let mut top_topoheight: u64 = 0; let mut top_height: u64 = 0; let mut top_block_hash: Option = None; @@ -2663,7 +2673,7 @@ impl P2pServer { step = match response { StepResponse::ChainInfo(common_point, topoheight, height, hash) => { - // first, check the common point in case we deviated from the chain + // First, check the common point in case we deviated from the chain if let Some(common_point) = common_point { let mut storage = self.blockchain.get_storage().write().await; debug!("Unverified common point found at {} with hash {}", common_point.get_topoheight(), common_point.get_hash()); @@ -2700,7 +2710,7 @@ impl P2pServer { Some(StepRequest::Assets(our_topoheight, topoheight, None)) }, - // fetch all assets from peer + // Fetch all assets from peer StepResponse::Assets(assets, next_page) => { { let mut storage = self.blockchain.get_storage().write().await; @@ -2722,13 +2732,12 @@ impl P2pServer { let storage = self.blockchain.get_storage().read().await; let keys = storage.get_registered_keys(MAX_ITEMS_PER_PAGE, 0, minimum_topoheight, our_topoheight).await?; - // Because the keys are sorted by topoheight, we can get the minimum topoheight - // of the last key to avoid fetching the same keys again - // We could use skip, but because update_bootstrap_keys can reorganize the keys, - // we may miss some - // This solution may also duplicate some keys - // We could do it in one request and store in memory all keys, - // but think about future and dozen of millions of accounts, in memory :) + // Keys are sorted by topoheight, so we get the minimum topoheight + // of the last key to avoid fetching the same keys again. + // Using skip might miss some keys because update_bootstrap_keys can reorganize the keys. + // This solution might duplicate some keys. 
We could fetch all in one + // request and store them in memory, but consider future scalability + // with potentially millions of accounts in memory :) if let Some(key) = keys.last() { minimum_topoheight = storage.get_account_registration_topoheight(key).await?; } else { @@ -2748,7 +2757,7 @@ impl P2pServer { Some(StepRequest::Keys(our_topoheight, stable_topoheight, None)) } }, - // fetch all new accounts + // Fetch all new accounts StepResponse::Keys(keys, next_page) => { debug!("Requesting nonces for keys"); self.update_bootstrap_keys(peer, &keys, our_topoheight, stable_topoheight).await?; @@ -2771,7 +2780,7 @@ impl P2pServer { for (i, metadata) in blocks.into_iter().enumerate() { let topoheight = stable_topoheight - i as u64; trace!("Processing block metadata {} at topoheight {}", metadata.hash, topoheight); - // check that we don't already have this block in storage + // Check that we don't already have this block in storage if self.blockchain.has_block(&metadata.hash).await? { warn!("Block {} at topo {} already in storage, skipping", metadata.hash, topoheight); continue; @@ -2801,20 +2810,20 @@ impl P2pServer { txs.push(tx); } - // link its TX to the block + // Link its TX to the block let mut storage = self.blockchain.get_storage().write().await; for tx_hash in header.get_txs_hashes() { storage.add_block_for_tx(tx_hash, &hash)?; } - // save metadata of this block + // Save metadata of this block storage.set_supply_at_topo_height(lowest_topoheight, metadata.supply)?; storage.set_block_reward_at_topo_height(lowest_topoheight, metadata.reward)?; storage.set_topo_height_for_block(&hash, lowest_topoheight).await?; storage.set_cumulative_difficulty_for_block_hash(&hash, metadata.cumulative_difficulty).await?; - // save the block with its transactions, difficulty + // Save the block with its transactions, difficulty storage.save_block(Arc::new(header), &txs, metadata.difficulty, metadata.p, hash).await?; } @@ -2837,7 +2846,8 @@ impl P2pServer { None }, - response 
=> { // shouldn't happens + response => { + // Shouldn't happen error!("Received bootstrap chain response {:?} but didn't asked for it", response); return Err(P2pError::InvalidPacket.into()); } @@ -2849,8 +2859,8 @@ impl P2pServer { Ok(()) } - // Request the inventory of a peer - // This will sends him a request packet so we get notified of all its TXs hashes in its mempool + // Request the inventory of a peer. + // This will send him a request packet so we get notified of all its TXs hashes in its mempool. async fn request_inventory_of(&self, peer: &Arc) -> Result<(), BlockchainError> { debug!("Requesting inventory of {}", peer); let packet = Cow::Owned(NotifyInventoryRequest::new(None)); @@ -2860,16 +2870,16 @@ impl P2pServer { Ok(()) } - // this function basically send all our blocks based on topological order (topoheight) - // we send up to CHAIN_SYNC_REQUEST_MAX_BLOCKS blocks id (combinaison of block hash and topoheight) - // we add at the end the genesis block to be sure to be on the same chain as others peers - // its used to find a common point with the peer to which we ask the chain + // This function sends all our blocks in topological order (topoheight). + // We send up to CHAIN_SYNC_REQUEST_MAX_BLOCKS block IDs (combination of block hash and topoheight). + // The genesis block is included to ensure we align with other peers. + // It is used to find a common point with the peer when requesting the chain. 
pub async fn request_sync_chain_for(&self, peer: &Arc, last_chain_sync: &mut TimestampMillis, skip_stable_height_check: bool) -> Result<(), BlockchainError> { trace!("Requesting chain from {}", peer); - // This can be configured by the node operator, it will be adjusted between protocol bounds - // and based on peer configuration - // This will allow to boost-up syncing for those who want and can be used to use low resources for low devices + // This can be configured by the node operator and adjusted within protocol bounds + // and based on peer configuration. It allows for faster syncing for those who want it + // and can be used to conserve resources for lower-end devices. let requested_max_size = self.max_chain_response_size; let packet = { @@ -2914,8 +2924,8 @@ pub fn is_local_address(socket_addr: &SocketAddr) -> bool { } } -// Check if a socket address is a valid address -// Only public and private addresses that can be used in a network are considered valid +// Check if a socket address is a valid address. +// Only public and private addresses that can be used in a network are considered valid. 
pub fn is_valid_address(socket_addr: &SocketAddr) -> bool { match socket_addr.ip() { IpAddr::V4(ipv4) => { diff --git a/xelis_daemon/src/p2p/packet/mod.rs b/xelis_daemon/src/p2p/packet/mod.rs index df390b0b..fda58c7c 100644 --- a/xelis_daemon/src/p2p/packet/mod.rs +++ b/xelis_daemon/src/p2p/packet/mod.rs @@ -84,7 +84,7 @@ pub enum Packet<'a> { // packet contains tx hash, view this packet as a "notification" // instead of sending the TX directly, we notify our peers // so the peer that already have this TX in mempool don't have to read it again - // imo: can be useful when the network is spammed by alot of txs + // imo: can be useful when the network is spammed by a lot of txs TransactionPropagation(PacketWrapper<'a, Hash>), BlockPropagation(PacketWrapper<'a, BlockHeader>), ChainRequest(PacketWrapper<'a, ChainRequest>), diff --git a/xelis_daemon/src/p2p/peer.rs b/xelis_daemon/src/p2p/peer.rs index 4094285b..115b585d 100644 --- a/xelis_daemon/src/p2p/peer.rs +++ b/xelis_daemon/src/p2p/peer.rs @@ -62,8 +62,8 @@ use log::{ warn, }; -// A RequestedObjects is a map of all objects requested from a peer -// This is done to be awaitable with a timeout +// A RequestedObjects is a map of all objects requested from a peer. +// This is done to be awaitable with a timeout. pub type RequestedObjects = HashMap>; pub type Tx = mpsc::Sender; @@ -72,75 +72,75 @@ pub type Rx = mpsc::Receiver; // Enum used to track the state of a task #[derive(Clone, Copy, Debug, PartialEq)] pub enum TaskState { - // not started yet + // Not started yet Inactive, - // running + // Running Active, - // task has been cancelled + // Task has been cancelled Exiting, // Task has exited Finished, Unknown, } -// A Peer represents a connection to another node in the network -// It is used to propagate and receive blocks / transactions and do chain sync -// It contains all the necessary information to manage the connection and the communication +// A Peer represents a connection to another node in the network. 
+// It is used to propagate and receive blocks / transactions and sync the chain. +// It contains all the necessary information to manage the connection and communication. pub struct Peer { // Connection of the peer to manage read/write to TCP Stream connection: Connection, - // unique ID of the peer to recognize him + // Unique ID of the peer to recognize him id: u64, // Node tag if provided node_tag: Option, - // port on which the node is listening on its side + // Port on which the node is listening on its side local_port: u16, - // daemon version + // Daemon version version: String, - // if this node can be trusted (seed node or added manually by user) + // If this node can be trusted (seed node or added manually by user) priority: bool, - // current block top hash for this peer + // Current block top hash for this peer top_hash: Mutex, - // current highest topo height for this peer + // Current highest topo height for this peer topoheight: AtomicU64, - // current highest block height for this peer + // Current highest block height for this peer height: AtomicU64, - // last time we got a chain request + // Last time we got a chain request last_chain_sync: AtomicU64, - // last time we got a fail + // Last time we got a fail last_fail_count: AtomicU64, - // fail count: if greater than 20, we should close this connection + // Fail count: if greater than 20, we should close this connection fail_count: AtomicU8, - // shared pointer to the peer list in case of disconnection + // Shared pointer to the peer list in case of disconnection peer_list: SharedPeerList, - // map of requested objects from this peer + // Map of requested objects from this peer objects_requested: Mutex, - // all peers sent/received + // All peers sent/received peers: Mutex>, - // last time we received a peerlist from this peer + // Last time we received a peerlist from this peer last_peer_list: AtomicU64, - // last time we got a ping packet from this peer + // Last time we got a ping packet from 
this peer last_ping: AtomicU64, - // last time we sent a ping packet to this peer + // Last time we sent a ping packet to this peer last_ping_sent: AtomicU64, - // cumulative difficulty of peer chain + // Cumulative difficulty of peer chain cumulative_difficulty: Mutex, // All transactions propagated from/to this peer txs_cache: Mutex>, - // last blocks propagated to/from this peer + // Last blocks propagated to/from this peer blocks_propagation: Mutex>, - // last time we got an inventory packet from this peer + // Last time we got an inventory packet from this peer last_inventory: AtomicU64, - // if we requested this peer to send us an inventory notification + // If we requested this peer to send us an inventory notification requested_inventory: AtomicBool, - // pruned topoheight if its a pruned node + // Pruned topoheight if its a pruned node pruned_topoheight: AtomicU64, - // Store the pruned state of the peer - // cannot be set to false if its already to true (protocol rules) + // Store the pruned state of the peer. + // Cannot be set to false if its already to true (protocol rules). is_pruned: AtomicBool, - // used for await on bootstrap chain packets + // Used for await on bootstrap chain packets bootstrap_chain: Mutex>>, - // used to wait on chain response when syncing chain + // Used to wait on chain response when syncing chain sync_chain: Mutex>>, // IP address with local port outgoing_address: SocketAddr, @@ -309,8 +309,8 @@ impl Peer { &self.cumulative_difficulty } - // Store the cumulative difficulty - // This is updated by ping packet + // Store the cumulative difficulty. + // This is updated by ping packet. 
pub async fn set_cumulative_difficulty(&self, cumulative_difficulty: CumulativeDifficulty) { *self.cumulative_difficulty.lock().await = cumulative_difficulty; } @@ -320,8 +320,8 @@ impl Peer { self.connection.is_out() } - // Get the priority flag of the peer - // If the peer is a seed node or added manually by the user, it should be trusted + // Get the priority flag of the peer. + // If the peer is a seed node or added manually by the user, it should be trusted. pub fn is_priority(&self) -> bool { self.priority } @@ -346,9 +346,9 @@ impl Peer { self.fail_count.load(Ordering::Acquire) } - // Update the fail count of the peer - // This is used by display to have up-to-date data - // We don't add anything, just reset the counter if its long time we didn't get a fail + // Update the fail count of the peer. + // This is used by the display to have up-to-date data. + // We don't increment the counter; instead, we reset it if a long time has passed since the last failure. fn update_fail_count_default(&self) -> bool { self.update_fail_count(get_current_time_in_seconds(), 0) } @@ -364,21 +364,21 @@ impl Peer { reset } - // Increment the fail count of the peer - // This is used to track the number of times we failed to communicate with the peer - // If the fail count is greater than 20, we should close the connection + // Increment the fail count of the peer. + // This is used to track the number of times we failed to communicate with the peer. + // If the fail count is greater than 20, we should close the connection. pub fn increment_fail_count(&self) { let current_time = get_current_time_in_seconds(); - // if its long time we didn't get a fail, reset the fail count to 1 (because of current fail) - // otherwise, add 1 + // If its long time we didn't get a fail, reset the fail count to 1 (because of current fail). + // Otherwise, add 1. 
if !self.update_fail_count(current_time, 1) { self.fail_count.fetch_add(1, Ordering::Release); } self.set_last_fail_count(current_time); } - // Get the last time we got a chain sync request - // This is used to prevent spamming the chain sync packet + // Get the last time we got a chain sync request. + // This is used to prevent spamming the chain sync packet. pub fn get_last_chain_sync(&self) -> TimestampSeconds { self.last_chain_sync.load(Ordering::Acquire) } @@ -415,7 +415,7 @@ impl Peer { } self.send_packet(Packet::ObjectRequest(Cow::Borrowed(&request))).await?; let (sender, receiver) = tokio::sync::oneshot::channel(); - objects.insert(request.clone(), sender); // clone is necessary in case timeout has occured + objects.insert(request.clone(), sender); // clone is necessary in case timeout has occurred receiver }; let object = match timeout(Duration::from_millis(PEER_TIMEOUT_REQUEST_OBJECT), receiver).await { @@ -452,10 +452,10 @@ impl Peer { *sender_lock = Some(sender); } - // send the packet + // Send the packet self.send_packet(Packet::BootstrapChainRequest(BootstrapChainRequest::new(step))).await?; - // wait on the response + // Wait on the response let response: StepResponse = match timeout(Duration::from_millis(PEER_TIMEOUT_BOOTSTRAP_STEP), receiver).await { Ok(res) => res?, Err(e) => { @@ -464,7 +464,7 @@ impl Peer { } }; - // check that the response is what we asked for + // Check that the response is what we asked for let response_kind = response.kind(); if response_kind != step_kind { return Err(P2pError::InvalidBootstrapStep(step_kind, response_kind)) @@ -497,14 +497,14 @@ impl Peer { Ok(response) } - // Get the bootstrap chain channel - // Like the sync chain channel, but for bootstrap (fast sync) syncing + // Get the bootstrap chain channel. + // Like the sync chain channel, but for bootstrap (fast sync) syncing. 
pub fn get_bootstrap_chain_channel(&self) -> &Mutex>> { &self.bootstrap_chain } - // Get the sync chain channel - // This is used for chain sync requests to be fully awaited + // Get the sync chain channel. + // This is used for chain sync requests to be fully awaited. pub fn get_sync_chain_channel(&self) -> &Mutex>> { &self.sync_chain } @@ -519,8 +519,8 @@ impl Peer { self.last_peer_list.load(Ordering::Acquire) } - // Track the last time we got a peer list - // This is used to prevent spamming the peer list + // Track the last time we got a peer list. + // This is used to prevent spamming the peer list. pub fn set_last_peer_list(&self, value: TimestampSeconds) { self.last_peer_list.store(value, Ordering::Release) } @@ -565,8 +565,8 @@ impl Peer { self.requested_inventory.store(value, Ordering::Release) } - // Get the outgoing address of the peer - // This represents the IP address of the peer and the port on which it is listening + // Get the outgoing address of the peer. + // This represents the IP address of the peer and the port on which it is listening. pub fn get_outgoing_address(&self) -> &SocketAddr { &self.outgoing_address } @@ -591,8 +591,8 @@ impl Peer { Ok(()) } - // Signal the exit of the peer to the tasks - // This is listened by write task to close the connection + // Signal the exit of the peer to the tasks. + // This is listened by write task to close the connection. pub async fn signal_exit(&self) -> Result<(), P2pError> { self.exit_channel.send(()) .map_err(|e| P2pError::SendError(e.to_string()))?; @@ -612,14 +612,14 @@ impl Peer { Ok(()) } - // Send a packet to the peer - // This will transform the packet into bytes and send it to the peer + // Send a packet to the peer. + // This will transform the packet into bytes and send it to the peer. 
pub async fn send_packet(&self, packet: Packet<'_>) -> Result<(), P2pError> { self.send_bytes(Bytes::from(packet.to_bytes())).await } - // Send packet bytes to the peer - // This will send the bytes to the writer task through its channel + // Send packet bytes to the peer. + // This will send the bytes to the writer task through its channel. pub async fn send_bytes(&self, bytes: Bytes) -> Result<(), P2pError> { self.tx.send(bytes).await .map_err(|e| P2pError::SendError(e.to_string())) @@ -644,7 +644,7 @@ impl Peer { impl Display for Peer { fn fmt(&self, f: &mut Formatter<'_>) -> std::result::Result<(), Error> { - // update fail counter to have up-to-date data to display + // Update fail counter to have up-to-date data to display self.update_fail_count_default(); let peers = if let Ok(peers) = self.get_peers().try_lock() { if log_enabled!(Level::Debug) { diff --git a/xelis_daemon/src/p2p/peer_list.rs b/xelis_daemon/src/p2p/peer_list.rs index cbe5cded..eeb64041 100644 --- a/xelis_daemon/src/p2p/peer_list.rs +++ b/xelis_daemon/src/p2p/peer_list.rs @@ -28,19 +28,19 @@ use log::{info, debug, trace, error, warn}; pub type SharedPeerList = Arc; -// this object will be shared in Server, and each Peer -// so when we call Peer#close it will remove it from the list too -// using a RwLock so we can have multiple readers at the same time +// This object is shared between the Server and each Peer. +// When Peer#close is called, it removes the peer from the list. +// Using a RwLock allows multiple readers simultaneously. pub struct PeerList { // Keep track of all connected peers peers: RwLock>>, // We only keep one "peer" per address in case the peer changes multiple - // times its local port + // times its local port. stored_peers: RwLock>, filename: String, - // used to notify the server that a peer disconnected - // this is done through a channel to not have to handle generic types - // and to be flexible in the future + // Used to notify the server that a peer disconnected. 
+ // This is done through a channel to not have to handle generic types + // and to be flexible in the future. peer_disconnect_channel: Option>> } @@ -64,18 +64,18 @@ pub struct StoredPeer { } impl PeerList { - // load all the stored peers from the file + // Load all the stored peers from the file fn load_stored_peers(filename: &String) -> Result, P2pError> { - // check that the file exists + // Check that the file exists if fs::metadata(filename).is_err() { info!("Peerlist file not found, creating a new one"); let peers = HashMap::new(); - // write empty set in file + // Write empty set in file fs::write(filename, serde_json::to_string_pretty(&peers)?)?; return Ok(peers); } - // read the whole file + // Read the whole file let content = match fs::read_to_string(filename) { Ok(content) => content, Err(e) => { @@ -90,7 +90,7 @@ impl PeerList { } }; - // deserialize the content + // Deserialize the content let mut peers: HashMap = match serde_json::from_str(&content) { Ok(peers) => peers, Err(e) => { @@ -98,14 +98,14 @@ impl PeerList { warn!("Removing peerlist file and creating a new empty one"); fs::remove_file(filename)?; let peers = HashMap::new(); - // write empty set in file + // Write empty set in file fs::write(filename, serde_json::to_string_pretty(&peers)?)?; peers } }; - // reset the fail count of all whitelisted peers + // Reset the fail count of all whitelisted peers for stored_peer in peers.values_mut() { if *stored_peer.get_state() == StoredPeerState::Whitelist { stored_peer.fail_count = 0; @@ -146,8 +146,8 @@ impl PeerList { } } - // Remove a peer from the list - // We will notify all peers that have this peer in common + // Remove a peer from the list. + // We will notify all peers that have this peer in common. 
pub async fn remove_peer(&self, peer_id: u64, notify: bool) -> Result<(), P2pError> { let (peer, peers) = { let mut peers = self.peers.write().await; @@ -158,7 +158,7 @@ impl PeerList { // If peer allows us to share it, we have to notify all peers that have this peer in common if notify && peer.sharable() { - // now remove this peer from all peers that tracked it + // Now remove this peer from all peers that tracked it let addr = peer.get_outgoing_address(); let packet = Bytes::from(Packet::PeerDisconnected(PacketPeerDisconnected::new(*addr)).to_bytes()); for peer in peers { @@ -166,13 +166,13 @@ impl PeerList { let mut shared_peers = peer.get_peers().lock().await; trace!("locked shared peers for {}", peer.get_connection().get_address()); - // check if it was a common peer (we sent it and we received it) - // Because its a common peer, we can expect that he will send us the same packet + // Check if the peer was common (we both sent and received it). + // Since it's a common peer, we expect it to send us the same packet. if let Some(direction) = shared_peers.get(addr) { // If its a outgoing direction, send a packet to notify that the peer disconnected if *direction != Direction::In { trace!("Sending PeerDisconnected packet to peer {} for {}", peer.get_outgoing_address(), addr); - // we send the packet to notify the peer that we don't have it in common anymore + // We send the packet to notify the peer that we don't have it in common anymore if let Err(e) = peer.send_bytes(packet.clone()).await { error!("Error while trying to send PeerDisconnected packet to peer {}: {}", peer.get_connection().get_address(), e); } @@ -195,8 +195,8 @@ impl PeerList { Ok(()) } - // Add a new peer to the list - // This will returns an error if peerlist is full + // Add a new peer to the list. + // This will return an error if the peerlist is full. 
pub async fn add_peer(&self, peer: &Arc, max_peers: usize) -> Result<(), P2pError> { { let mut peers = self.peers.write().await; @@ -223,7 +223,7 @@ impl PeerList { let mut stored_peers = self.stored_peers.write().await; if let Some(stored_peer) = stored_peers.get_mut(&ip) { debug!("Updating {} in stored peerlist", peer); - // reset the fail count and update the last seen time + // Reset the fail count and update the last seen time stored_peer.set_fail_count(0); stored_peer.set_last_seen(get_current_time_in_seconds()); stored_peer.set_local_port(peer.get_local_port()); @@ -324,10 +324,10 @@ impl PeerList { } } - // get a peer by its address + // Get a peer by its address fn internal_get_peer_by_addr<'a>(peers: &'a HashMap>, addr: &SocketAddr) -> Option<&'a Arc> { peers.values().find(|peer| { - // check both SocketAddr (the outgoing and the incoming) + // Check both SocketAddr (the outgoing and the incoming) peer.get_connection().get_address() == addr || peer.get_outgoing_address() == addr }) } @@ -386,7 +386,7 @@ impl PeerList { } // Set a peer to graylist, if its local port is 0, delete it from the stored peerlist - // Because it was added manually and never connected to before + // because it was added manually and never connected to before. pub async fn set_graylist_for_peer(&self, ip: &IpAddr) { let mut stored_peers = self.stored_peers.write().await; let delete = if let Some(peer) = stored_peers.get_mut(ip) { @@ -416,10 +416,10 @@ impl PeerList { self.get_list_with_state(stored_peers, &StoredPeerState::Whitelist) } - // blacklist a peer address - // if this peer is already known, change its state to blacklist - // otherwise create a new StoredPeer with state blacklist - // disconnect the peer if present in peerlist + // Blacklist a peer address. + // If this peer is already known, change its state to blacklist. + // Otherwise create a new StoredPeer with state blacklist. + // Disconnect the peer if present in peerlist. 
pub async fn blacklist_address(&self, ip: &IpAddr) { self.set_state_to_address(ip, StoredPeerState::Blacklist).await; @@ -435,8 +435,8 @@ impl PeerList { } } - // temp ban a peer for a duration in seconds - // this will also close the peer + // Temp ban a peer for a duration in seconds. + // This will also close the peer. pub async fn temp_ban_peer(&self, peer: &Peer, seconds: u64) { self.temp_ban_address(&peer.get_connection().get_address().ip(), seconds).await; if let Err(e) = peer.get_connection().close().await { @@ -448,7 +448,7 @@ impl PeerList { } } - // temp ban a peer address for a duration in seconds + // Temp ban a peer address for a duration in seconds pub async fn temp_ban_address(&self, ip: &IpAddr, seconds: u64) { let mut stored_peers = self.stored_peers.write().await; if let Some(stored_peer) = stored_peers.get_mut(ip) { @@ -458,26 +458,26 @@ impl PeerList { } } - // whitelist a peer address - // if this peer is already known, change its state to whitelist - // otherwise create a new StoredPeer with state whitelist + // Whitelist a peer address. + // If this peer is already known, change its state to whitelist. + // Otherwise create a new StoredPeer with state whitelist. 
pub async fn whitelist_address(&self, ip: &IpAddr) { self.set_state_to_address(ip, StoredPeerState::Whitelist).await; } pub async fn find_peer_to_connect(&self) -> Option { - // remove all peers that have a high fail count + // Remove all peers that have a high fail count let peers = self.peers.read().await; let mut stored_peers = self.stored_peers.write().await; stored_peers.retain(|_, stored_peer| *stored_peer.get_state() == StoredPeerState::Whitelist || stored_peer.get_fail_count() < PEER_FAIL_LIMIT); let current_time = get_current_time_in_seconds(); - // first lets check in whitelist + // First lets check in whitelist if let Some(addr) = self.find_peer_to_connect_to_with_state(&peers, &mut stored_peers, current_time, StoredPeerState::Whitelist) { return Some(addr); } - // then in graylist + // Then in graylist if let Some(addr) = self.find_peer_to_connect_to_with_state(&peers, &mut stored_peers, current_time, StoredPeerState::Graylist) { return Some(addr); } @@ -485,8 +485,8 @@ impl PeerList { None } - // find among stored peers a peer to connect to with the requested StoredPeerState - // we check that we're not already connected to this peer and that we didn't tried to connect to it recently + // Find a peer among the stored peers to connect to with the requested StoredPeerState. + // We check that we're not already connected to this peer and that we haven't tried to connect to it recently. 
fn find_peer_to_connect_to_with_state(&self, peers: &HashMap>, stored_peers: &mut HashMap, current_time: TimestampSeconds, state: StoredPeerState) -> Option { for (ip, stored_peer) in stored_peers { let addr = SocketAddr::new(*ip, stored_peer.get_local_port()); @@ -499,7 +499,7 @@ impl PeerList { None } - // increase the fail count of a peer + // Increase the fail count of a peer pub async fn increase_fail_count_for_stored_peer(&self, ip: &IpAddr, temp_ban: bool) { trace!("increasing fail count for {}, allow temp ban: {}", ip, temp_ban); let mut stored_peers = self.stored_peers.write().await; @@ -534,7 +534,7 @@ impl PeerList { true } - // serialize the stored peers to a file + // Serialize the stored peers to a file fn save_peers_to_file(&self, stored_peers: &HashMap) -> Result<(), P2pError> { trace!("saving peerlist to file"); let content = serde_json::to_string_pretty(&stored_peers)?; diff --git a/xelis_daemon/src/p2p/tracker.rs b/xelis_daemon/src/p2p/tracker.rs index 37b3ec08..8147f8a8 100644 --- a/xelis_daemon/src/p2p/tracker.rs +++ b/xelis_daemon/src/p2p/tracker.rs @@ -236,21 +236,20 @@ impl ExpirableCache { } } -// this ObjectTracker is a unique sender allows to create a queue system in one task only -// currently used to fetch in order all txs propagated by the network +// This ObjectTracker is a unique sender that creates a queue system within a single task. +// Currently used to fetch all txs propagated by the network in order. pub struct ObjectTracker { - // This is used to send the request to the requester task loop - // it is a bounded channel, so if the queue is full, it will block the sender + // This is used to send the request to the requester task loop. + // It is a bounded channel, so if the queue is full, it will block the sender. 
request_sender: Sender, // This is used to send the response to the handler task loop handler_sender: Sender, - // queue of requests with preserved order + // Queue of requests with preserved order queue: RwLock>, - // Group Manager for batched requests - // If one fail, all the group is removed + // Group Manager for batched requests. + // If one fails, the whole group is removed. group: GroupManager, - // Requests that should be ignored - // They got canceled but already requested + // Requests that should be ignored as they were canceled but already requested cache: ExpirableCache } @@ -276,7 +275,7 @@ impl ObjectTracker { cache: ExpirableCache::new() }); - // start the requester task loop which send requests to peers + // Start the requester task loop which sends requests to peers { let server_exit = server_exit.resubscribe(); let zelf = zelf.clone(); @@ -285,7 +284,7 @@ impl ObjectTracker { }); } - // start the handler task loop which handle the responses based on request queue order + // Start the handler task loop which handles the responses based on request queue order { let server_exit = server_exit.resubscribe(); let zelf = zelf.clone(); @@ -366,7 +365,7 @@ impl ObjectTracker { request.set_response(response); } } else { - // channel closed + // Channel closed break; } }, @@ -376,9 +375,9 @@ impl ObjectTracker { } } - // Loop through the queue in a ordered way to handle correctly the responses - // For this, we need to check if the first element has a response and so on - // If we don't have a response during too much time, we remove the request from the queue as it is probably timed out + // Loop through the queue in an ordered way to correctly handle the responses. + // For this, we need to check if the first element has a response and so on. + // If a response is not received within a certain time, remove the request from the queue as it has probably timed out. 
let mut queue = self.queue.write().await; while let Some((_, request)) = queue.peek_mut() { match request.take_response() { @@ -393,7 +392,7 @@ impl ObjectTracker { }, None => { if let Some(requested_at) = request.get_requested() { - // check if the request is timed out + // Check if the request is timed out if requested_at.elapsed() > TIME_OUT { warn!("Request timed out for object {}", request.get_hash()); let (_, request) = queue.pop().unwrap(); @@ -402,7 +401,7 @@ impl ObjectTracker { break; } } else { - // It wasn't yet requested + // The request hasn't been sent yet break; } } @@ -445,8 +444,8 @@ impl ObjectTracker { Some(request.get_response_blocker()) } - // This function is called from P2p Server when a peer sends an object response that we requested - // It will pass the response to the handler task loop + // This function is called from P2p Server when a peer sends an object response that we requested. + // It will pass the response to the handler task loop. pub async fn handle_object_response(&self, response: OwnedObjectResponse) -> Result<(), P2pError> { { let queue = self.queue.read().await; @@ -496,7 +495,7 @@ impl ObjectTracker { (listener, hash) }; - trace!("Transfering object request {} to task", hash); + trace!("Transferring object request {} to task", hash); self.request_sender.send(hash).await?; Ok(listener) } @@ -542,8 +541,8 @@ impl ObjectTracker { } } - // Request the object from the peer - // This is called from the requester task loop + // Request the object from the peer. + // This is called from the requester task loop. 
async fn request_object_from_peer_internal(&self, request_hash: Hash) { debug!("Requesting object with hash {}", request_hash); let mut queue = self.queue.write().await; @@ -551,7 +550,7 @@ impl ObjectTracker { let fail = if let Some(request) = queue.get_mut(&request_hash) { request.set_requested(); let packet = Bytes::from(Packet::ObjectRequest(Cow::Borrowed(request.get_object())).to_bytes()); - // send the packet to the Peer + // Send the packet to the Peer let peer = request.get_peer(); if peer.get_connection().is_closed() { warn!("Peer {} is disconnected but still has a pending request object {}", peer, request_hash); diff --git a/xelis_daemon/src/rpc/getwork_server.rs b/xelis_daemon/src/rpc/getwork_server.rs index 513566de..472d3661 100644 --- a/xelis_daemon/src/rpc/getwork_server.rs +++ b/xelis_daemon/src/rpc/getwork_server.rs @@ -91,17 +91,17 @@ impl TMessage for Response { pub struct Miner { // Used to display correctly its address mainnet: bool, - // timestamp of first connection + // Timestamp of first connection first_seen: TimestampMillis, - // public key of account (address) + // Public key of account (address) key: PublicKey, - // worker name + // Worker name name: String, - // blocks accepted by us since he is connected + // Blocks accepted by us since the miner connected blocks_accepted: IndexSet, - // blocks rejected since he is connected + // Blocks rejected since the miner connected blocks_rejected: usize, - // timestamp of the last invalid block received + // Timestamp of the last invalid block received last_invalid_block: TimestampMillis } @@ -210,12 +210,12 @@ impl Handler for GetWorkWebSocketHandler { pub struct GetWorkServer { miners: Mutex>, Miner>>, blockchain: Arc>, - // all potential jobs sent to miners - // we can keep them in cache up to STABLE_LIMIT blocks - // so even a late miner have a chance to not be orphaned and be included in chain + // All potential jobs sent to miners. 
+ // We can keep them in cache up to STABLE_LIMIT blocks, + // so even a late miner has a chance to not be orphaned and be included in chain. mining_jobs: Mutex>, last_header_hash: Mutex>, - // used only when a new TX is received in mempool + // Used only when a new TX is received in mempool last_notify: AtomicU64, notify_rate_limit_ms: u64 } @@ -228,7 +228,7 @@ impl GetWorkServer { mining_jobs: Mutex::new(LruCache::new(NonZeroUsize::new(STABLE_LIMIT as usize).unwrap())), last_header_hash: Mutex::new(None), last_notify: AtomicU64::new(0), - notify_rate_limit_ms: 500 // maximum one time every 500ms + notify_rate_limit_ms: 500 // Maximum one time every 500ms } } @@ -244,16 +244,16 @@ impl GetWorkServer { &self.miners } - // retrieve last mining job and set random extra nonce and miner public key - // then, send it + // Retrieve last mining job, set random extra nonce and miner public key, + // then send it. async fn send_new_job(self: Arc, addr: Addr>, key: PublicKey) -> Result<(), InternalRpcError> { debug!("Sending new job to miner"); let (mut job, version, height, difficulty) = { let mut hash = self.last_header_hash.lock().await; let mut mining_jobs = self.mining_jobs.lock().await; let (version, job, height, difficulty); - // if we have a job in cache, and we are rate limited, we can send it - // otherwise, we generate a new job + // If we have a job in cache, and we are rate limited, we can send it. + // Otherwise, we generate a new job. if let Some(hash) = hash.as_ref().filter(|_| self.is_rate_limited().0) { let (header, diff) = mining_jobs.peek(hash).ok_or_else(|| { error!("No mining job found! 
How is it possible ?"); @@ -264,7 +264,7 @@ impl GetWorkServer { version = header.get_version(); difficulty = *diff; } else { - // generate a mining job + // Generate a mining job let storage = self.blockchain.get_storage().read().await; let header = self.blockchain.get_block_template_for_storage(&storage, DEV_PUBLIC_KEY.clone()).await.context("Error while retrieving block template")?; (difficulty, _) = self.blockchain.get_difficulty_at_tips(&*storage, header.get_tips().iter()).await.context("Error while retrieving difficulty at tips")?; @@ -273,7 +273,7 @@ impl GetWorkServer { height = header.get_height(); version = header.get_version(); - // save the mining job, and set it as last job + // Save the mining job, and set it as last job let header_work_hash = job.get_header_work_hash(); *hash = Some(header_work_hash.clone()); mining_jobs.put(header_work_hash.clone(), (header, difficulty)); @@ -282,11 +282,11 @@ impl GetWorkServer { (job, version, height, difficulty) }; - // set miner key and random extra nonce + // Set miner key and random extra nonce job.set_miner(Cow::Owned(key)); OsRng.fill_bytes(job.get_extra_nonce()); - // get the algorithm for the current version + // Get the algorithm for the current version let algorithm = get_pow_algorithm_for_version(version); let topoheight = self.blockchain.get_topo_height(); debug!("Sending job to new miner"); @@ -303,7 +303,7 @@ impl GetWorkServer { miners.insert(addr.clone(), miner); } - // notify the new miner so he can work ASAP + // Notify the new miner so he can work ASAP let zelf = Arc::clone(&self); spawn_task("getwork-new-job", async move { if let Err(e) = zelf.send_new_job(addr, key).await { @@ -320,10 +320,10 @@ impl GetWorkServer { } } - // this function is called when a miner send a new block - // we retrieve the block header saved in cache using the mining job "header_work_hash" - // its used to check that the job come from our server - // when it's found, we merge the miner job inside the block header + // 
This function is called when a miner submits a new block. + // We retrieve the block header saved in cache using the mining job's "header_work_hash". + // It's used to check that the job came from our server. + // When it's found, we merge the miner's job inside the block header. async fn accept_miner_job(&self, job: MinerWork<'_>) -> Result<(Response, Hash), InternalRpcError> { trace!("accept miner job"); if job.get_miner().is_none() { @@ -334,11 +334,11 @@ impl GetWorkServer { { let mining_jobs = self.mining_jobs.lock().await; if let Some((header, _)) = mining_jobs.peek(job.get_header_work_hash()) { - // job is found in cache, clone it and put miner data inside + // Job is found in cache, clone it and put miner data inside miner_header = header.clone(); miner_header.apply_miner_work(job); } else { - // really old job, or miner send invalid job + // Really old job, or the miner sent an invalid job debug!("Job {} was not found in cache", job.get_header_work_hash()); return Err(InternalRpcError::InvalidParams("Job was not found in cache")) }; @@ -355,9 +355,9 @@ impl GetWorkServer { }) } - // handle the incoming mining job from the miner - // decode the block miner, and using its header work hash, retrieve the block header - // if its block is rejected, resend him the job + // Handle the incoming mining job from the miner. + // Decode the block miner, and using its header work hash, retrieve the block header. + // If its block is rejected, resend the job to the miner. 
pub async fn handle_block_for(self: Arc, addr: Addr>, submitted_work: SubmitMinerWorkParams) { trace!("handle block for"); let (response, hash) = match MinerWork::from_hex(submitted_work.miner_work) { @@ -374,7 +374,7 @@ impl GetWorkServer { } }; - // update miner stats + // Update miner stats { let mut miners = self.miners.lock().await; if let Some(miner) = miners.get_mut(&addr) { @@ -425,17 +425,16 @@ impl GetWorkServer { }); } - // check if the last notify is older than the rate limit - // if it's the case, we can notify miners - // Returns a tuple with a boolean indicating if the rate limit is reached, and the current timestamp + // Check if the last notify is older than the rate limit. + // If that's the case, we can notify miners. + // Returns a tuple with a boolean indicating if the rate limit is reached, and the current timestamp. fn is_rate_limited(&self) -> (bool, TimestampMillis) { let now = get_current_time_in_millis(); let last_notify = self.last_notify.load(Ordering::SeqCst); (now - last_notify < self.notify_rate_limit_ms, now) } - // notify every miners connected to the getwork server - // each miner have his own task so nobody wait on other + // If the rate limit has not yet been reached, notify the miners pub async fn notify_new_job_rate_limited(&self) -> Result<(), InternalRpcError> { let (rate_limit_reached, now) = self.is_rate_limited(); if rate_limit_reached { @@ -447,12 +446,12 @@ impl GetWorkServer { self.notify_new_job().await } - // notify every miners connected to the getwork server - // each miner have his own task so nobody wait on other + // Notify every miner connected to the getwork server. + // Each miner has its own task, so no miner waits for others. pub async fn notify_new_job(&self) -> Result<(), InternalRpcError> { trace!("notify new job"); - // Check that there is at least one miner connected - // otherwise, no need to build a new job + // Check that there is at least one miner connected. 
+ // Otherwise, no need to build a new job. { let miners = self.miners.lock().await; if miners.is_empty() { @@ -473,7 +472,7 @@ impl GetWorkServer { let height = header.get_height(); let version = header.get_version(); - // save the header used for job in cache + // Save the header used for job in cache { let header_work_hash = job.get_header_work_hash(); let mut last_header_hash = self.last_header_hash.lock().await; @@ -482,14 +481,14 @@ impl GetWorkServer { mining_jobs.put(header_work_hash.clone(), (header, difficulty)); } - // now let's send the job to every miner + // Now let's send the job to every miner let mut miners = self.miners.lock().await; miners.retain(|addr, _| addr.connected()); - // get the algorithm for the current version + // Get the algorithm for the current version let algorithm = get_pow_algorithm_for_version(version); - // Also send the node topoheight to miners - // This is for visual purposes only + // Send the node topoheight to miners. + // This is for visual purposes only. let topoheight = self.blockchain.get_topo_height(); for (addr, miner) in miners.iter() { @@ -500,8 +499,8 @@ impl GetWorkServer { OsRng.fill_bytes(job.get_extra_nonce()); let template = job.to_hex(); - // New task for each miner in case a miner is slow - // we don't want to wait for him + // Create a new task for each miner to avoid delays. + // This ensures that we don't have to wait for a slow miner. 
spawn_task("getwork-notify-new-job", async move { match addr.send(Response::NewJob(GetMinerWorkResult { algorithm, miner_work: template, height, topoheight, difficulty })).await { Ok(request) => { diff --git a/xelis_daemon/src/rpc/mod.rs b/xelis_daemon/src/rpc/mod.rs index c739c40b..8479527c 100644 --- a/xelis_daemon/src/rpc/mod.rs +++ b/xelis_daemon/src/rpc/mod.rs @@ -92,11 +92,11 @@ impl DaemonRpcServer { None }; - // create the RPC Handler which will register and contains all available methods + // Create the RPC Handler which will register and contains all available methods let mut rpc_handler = RPCHandler::new(blockchain); rpc::register_methods(&mut rpc_handler, !disable_getwork_server); - // create the default websocket server (support event & rpc methods) + // Create the default websocket server (support event & rpc methods) let ws = WebSocketServer::new(EventWebSocketHandler::new(rpc_handler)); let server = Arc::new(Self { @@ -121,7 +121,7 @@ impl DaemonRpcServer { .bind(&bind_address)? 
.run(); - { // save the server handle to be able to stop it later + { // Save the server handle to be able to stop it later let handle = http_server.handle(); let mut lock = server.handle.lock().await; *lock = Some(handle); @@ -215,6 +215,6 @@ async fn getwork_endpoint(server: Data>, request: getwork.add_miner(addr, key, worker).await; Ok(response) }, - None => Ok(HttpResponse::NotFound().reason("GetWork server is not enabled").finish()) // getwork server is not started + None => Ok(HttpResponse::NotFound().reason("GetWork server is not enabled").finish()) // Getwork server is not started } } \ No newline at end of file diff --git a/xelis_daemon/src/rpc/rpc.rs b/xelis_daemon/src/rpc/rpc.rs index 6f38f487..045ef064 100644 --- a/xelis_daemon/src/rpc/rpc.rs +++ b/xelis_daemon/src/rpc/rpc.rs @@ -105,8 +105,8 @@ pub async fn get_block_response(blockchain: &Blockchain, storage: let mut total_fees = 0; if block_type != BlockType::Orphaned { for (tx, tx_hash) in block.get_transactions().iter().zip(block.get_txs_hashes()) { - // check that the TX was correctly executed in this block - // retrieve all fees for valid txs + // Check that the TX was correctly executed in this block. + // Retrieve all fees for valid txs. if storage.is_tx_executed_in_block(tx_hash, &hash).context("Error while checking if tx was executed")? 
{ total_fees += tx.get_fee(); } @@ -181,7 +181,7 @@ pub async fn get_block_response_for_hash(blockchain: &Blockchain, let (topoheight, supply, reward, block_type, cumulative_difficulty, difficulty) = get_block_data(blockchain, storage, hash).await?; let header = storage.get_block_header_by_hash(&hash).await.context("Error while retrieving full block")?; - // calculate total size in bytes + // Calculate total size in bytes let mut total_size_in_bytes = header.size(); for tx_hash in header.get_txs_hashes() { total_size_in_bytes += storage.get_transaction_size(tx_hash).await.context(format!("Error while retrieving transaction {hash} size"))?; @@ -232,7 +232,7 @@ pub async fn get_transaction_response(storage: &S, tx: &Arc(storage: &S, mempool: &Mempool, hash: &Hash) -> Result { match storage.get_transaction(hash).await { Ok(tx) => get_transaction_response(storage, &tx, hash, false, None).await, @@ -786,10 +786,10 @@ async fn get_peers(context: &Context, body: Value) -> Result { let peer_list = p2p.get_peer_list(); let mut peers = Vec::new(); - let peers_availables = peer_list.get_cloned_peers().await; - let total_peers = peers_availables.len(); + let peers_available = peer_list.get_cloned_peers().await; + let total_peers = peers_available.len(); let mut sharable_peers = 0; - for p in peers_availables.iter().filter(|p| p.sharable()) { + for p in peers_available.iter().filter(|p| p.sharable()) { peers.push(get_peer_entry(p).await); sharable_peers += 1; } @@ -841,8 +841,8 @@ async fn get_tips(context: &Context, body: Value) -> Result(context: &Context, body: Value) -> Result { let params: GetTopoHeightRangeParams = parse_params(body)?; @@ -879,7 +879,7 @@ fn get_range(start: Option, end: Option, maximum: u64, current: u64) - } let count = range_end - range_start; - if count > maximum { // only retrieve max 20 blocks hash per request + if count > maximum { // Only retrieve max 20 blocks hash per request debug!("get range requested count: {}", count); return 
Err(InternalRpcError::InvalidJSONRequest).context(format!("Invalid range count requested, received {} but maximum is {}", count, maximum))? } @@ -887,8 +887,8 @@ fn get_range(start: Option, end: Option, maximum: u64, current: u64) - Ok((range_start, range_end)) } -// get blocks between range of topoheight -// if no params found, get last 20 blocks header +// Get blocks between range of topoheight. +// If no params found, retrieve the headers of the last 20 blocks. async fn get_blocks_range_by_topoheight(context: &Context, body: Value) -> Result { let params: GetTopoHeightRangeParams = parse_params(body)?; @@ -907,9 +907,9 @@ async fn get_blocks_range_by_topoheight(context: &Context, body: Val Ok(json!(blocks)) } -// get blocks between range of height -// if no params found, get last 20 blocks header -// you can only request +// Get blocks between range of height. +// If no params found, retrieve the headers of the last 20 blocks. +// You can only request. async fn get_blocks_range_by_height(context: &Context, body: Value) -> Result { let params: GetHeightRangeParams = parse_params(body)?; let blockchain: &Arc> = context.get()?; @@ -930,8 +930,8 @@ async fn get_blocks_range_by_height(context: &Context, body: Value) } const MAX_TXS: usize = 20; -// get up to 20 transactions at once -// if a tx hash is not present, we keep the order and put json "null" value +// Get up to 20 transactions at once. +// If a TX hash is not present, we keep the order and put json "null" value. 
async fn get_transactions(context: &Context, body: Value) -> Result { let params: GetTransactionsParams = parse_params(body)?; @@ -959,7 +959,7 @@ async fn get_transactions(context: &Context, body: Value) -> Result< } const MAX_HISTORY: usize = 20; -// retrieve all history changes for an account on an asset +// Retrieve all history changes for an account on an asset async fn get_account_history(context: &Context, body: Value) -> Result { let params: GetAccountHistoryParams = parse_params(body)?; let blockchain: &Arc> = context.get()?; @@ -981,8 +981,8 @@ async fn get_account_history(context: &Context, body: Value) -> Resu } - // if incoming flows aren't accepted - // use nonce versions to determine topoheight + // If incoming flows aren't accepted + // use nonce versions to determine topoheight. if !params.incoming_flow { if let Some((topo, nonce)) = storage.get_nonce_at_maximum_topoheight(key, topo).await.context("Error while retrieving last nonce")? { let version = storage.get_balance_at_exact_topoheight(key, ¶ms.asset, topo).await.context(format!("Error while retrieving balance at nonce topo height {topo}"))?; @@ -997,7 +997,7 @@ async fn get_account_history(context: &Context, body: Value) -> Resu } } else { if !params.incoming_flow { - // don't return any error, maybe this account never spend anything + // Don't return any error, maybe this account never spent anything. // (even if we force 0 nonce at first activity) let (topo, nonce) = storage.get_last_nonce(key).await.context("Error while retrieving last topoheight for nonce")?; let version = storage.get_balance_at_exact_topoheight(key, ¶ms.asset, topo).await.context(format!("Error while retrieving balance at topo height {topo}"))?; @@ -1021,8 +1021,8 @@ async fn get_account_history(context: &Context, body: Value) -> Resu break; } - // Get the block header at topoheight - // we will scan it below for transactions and rewards + // Get the block header at topoheight. 
+ // We will scan it below for transactions and rewards. let (hash, block_header) = storage.get_block_header_at_topoheight(topo).await.context(format!("Error while retrieving block header at topo height {topo}"))?; // Block reward is only paid in XELIS @@ -1030,7 +1030,7 @@ async fn get_account_history(context: &Context, body: Value) -> Resu let is_miner = *block_header.get_miner() == *key; if (is_miner || is_dev_address) && params.incoming_flow { let mut reward = storage.get_block_reward_at_topo_height(topo).context(format!("Error while retrieving reward at topo height {topo}"))?; - // subtract dev fee if any + // Subtract dev fee if any let dev_fee_percentage = get_block_dev_fee(block_header.get_height()); if dev_fee_percentage != 0 { let dev_fee = reward * dev_fee_percentage / 100; @@ -1115,8 +1115,8 @@ async fn get_account_history(context: &Context, body: Value) -> Resu break; } - // if incoming flows aren't accepted - // use nonce versions to determine topoheight + // If incoming flows aren't accepted + // use nonce versions to determine topoheight. 
if let Some(previous) = prev_nonce.filter(|_| !params.incoming_flow) { let nonce_version = storage.get_nonce_at_exact_topoheight(key, previous).await.context(format!("Error while retrieving nonce at topo height {previous}"))?; version = Some((previous, nonce_version.get_previous_topoheight(), storage.get_balance_at_exact_topoheight(key, ¶ms.asset, previous).await.context(format!("Error while retrieving previous balance at topo height {previous}"))?)); @@ -1146,7 +1146,7 @@ async fn get_account_assets(context: &Context, body: Value) -> Resul } const MAX_ACCOUNTS: usize = 100; -// retrieve all available accounts (each account got at least one interaction on chain) +// Retrieve all available accounts (each account got at least one interaction on chain) async fn get_accounts(context: &Context, body: Value) -> Result { let params: GetAccountsParams = parse_params(body)?; let blockchain: &Arc> = context.get()?; diff --git a/xelis_miner/src/config.rs b/xelis_miner/src/config.rs index d32a5c09..87beee21 100644 --- a/xelis_miner/src/config.rs +++ b/xelis_miner/src/config.rs @@ -1,2 +1,2 @@ -// daemon address by default when no specified +// Daemon address by default when none is specified pub const DEFAULT_DAEMON_ADDRESS: &str = "127.0.0.1:8080"; \ No newline at end of file diff --git a/xelis_miner/src/main.rs b/xelis_miner/src/main.rs index 649fa41b..c6967316 100644 --- a/xelis_miner/src/main.rs +++ b/xelis_miner/src/main.rs @@ -116,15 +116,15 @@ pub struct MinerConfig { /// Disable the log file #[clap(long)] disable_file_logging: bool, - /// Disable the log filename date based + /// Disable the log filename date based. 
/// If disabled, the log file will be named xelis-miner.log instead of YYYY-MM-DD.xelis-miner.log #[clap(long)] disable_file_log_date_based: bool, /// Disable the usage of colors in log #[clap(long)] disable_log_color: bool, - /// Disable terminal interactive mode - /// You will not be able to write CLI commands in it or to have an updated prompt + /// Disable terminal interactive mode. + /// You will not be able to write CLI commands in it or to have an updated prompt. #[clap(long)] disable_interactive_mode: bool, /// Log filename @@ -156,7 +156,7 @@ pub struct MinerConfig { enum ThreadNotification<'a> { NewJob(Algorithm, MinerWork<'a>, Difficulty, u64), // POW algorithm, block work, difficulty, height WebSocketClosed, // WebSocket connection has been closed - Exit // all threads must stop + Exit // All threads must stop } #[derive(Serialize, Deserialize)] @@ -218,7 +218,7 @@ async fn main() -> Result<()> { warn!("Attention, the number of threads used may not be optimal, recommended is: {}", detected_threads); } - // broadcast channel to send new jobs / exit command to all threads + // Broadcast channel to send new jobs / exit command to all threads let (sender, _) = broadcast::channel::(threads as usize); // mpsc channel to send from threads to the "communication" task. 
let (block_sender, block_receiver) = mpsc::channel::(threads as usize); @@ -229,13 +229,13 @@ async fn main() -> Result<()> { } } - // start communication task + // Start communication task let task = spawn_task("communication", communication_task(config.daemon_address, sender.clone(), block_receiver, address, config.worker)); let stats_task: Option>>; #[cfg(feature = "api_stats")] { - // start stats task + // Start stats task stats_task = match config.api_bind_address { Some(addr) => Some(spawn_task("broadcast", broadcast_stats_task(addr))), None => None, @@ -250,15 +250,15 @@ async fn main() -> Result<()> { error!("Error on running prompt: {}", e); } - // send exit command to all threads to stop + // Send exit command to all threads to stop if let Err(_) = sender.send(ThreadNotification::Exit) { debug!("Error while sending exit message to threads"); } - // stop the communication task + // Stop the communication task task.abort(); - // stop the stats broadcast task + // Stop the stats broadcast task if let Some(stats_handle) = stats_task { stats_handle.abort() } @@ -296,7 +296,7 @@ async fn broadcast_stats_task(broadcast_address: String) -> Result<()> { let length = contents.len(); let response = format!("{status_line}{content_type}Content-Length: {length}\r\n\r\n{contents}"); - // Send HTTP repsonse and close socket + // Send HTTP response and close socket AsyncWriteExt::write_all(&mut socket, response.as_bytes()) .await?; socket.shutdown().await?; @@ -305,8 +305,8 @@ async fn broadcast_stats_task(broadcast_address: String) -> Result<()> { } -// Benchmark the miner with the specified number of threads and iterations -// It will output the total time, total iterations, time per PoW and hashrate for each number of threads +// Benchmark the miner with the specified number of threads and iterations. +// It will output the total time, total iterations, time per PoW and hashrate for each number of threads. 
fn benchmark(threads: usize, iterations: usize, algorithm: Algorithm) { info!("{0: <10} | {1: <10} | {2: <16} | {3: <13} | {4: <13}", "Threads", "Total Time", "Total Iterations", "Time/PoW (ms)", "Hashrate"); @@ -336,9 +336,9 @@ fn benchmark(threads: usize, iterations: usize, algorithm: Algorithm) { } } -// this Tokio task will runs indefinitely until the user stop himself the miner. +// This Tokio task will run indefinitely until the user stops the miner himself. // It maintains a WebSocket connection with the daemon and notify all threads when it receive a new job. -// Its also the task who have the job to send directly the new block found by one of the threads. +// It is also responsible for sending the new block found by one of the threads directly to the daemon. // This allow mining threads to only focus on mining and receiving jobs through memory channels. async fn communication_task(daemon_address: String, job_sender: broadcast::Sender>, mut block_receiver: mpsc::Receiver>, address: Address, worker: String) { info!("Starting communication task"); @@ -378,7 +378,8 @@ async fn communication_task(daemon_address: String, job_sender: broadcast::Sende let (mut write, mut read) = client.split(); loop { select! 
{ - Some(message) = read.next() => { // read all messages from daemon + Some(message) = read.next() => { + // Read all messages from daemon debug!("Received message from daemon: {:?}", message); match handle_websocket_message(message, &job_sender).await { Ok(exit) => { @@ -393,7 +394,8 @@ async fn communication_task(daemon_address: String, job_sender: broadcast::Sende } } }, - Some(work) = block_receiver.recv() => { // send all valid blocks found to the daemon + Some(work) = block_receiver.recv() => { + // Send all valid blocks found to the daemon info!("submitting new block found..."); let submit = serde_json::json!(SubmitMinerWorkParams { miner_work: work.to_hex() }).to_string(); if let Err(e) = write.send(Message::Text(submit)).await { @@ -481,7 +483,7 @@ fn start_thread(id: u16, mut job_receiver: broadcast::Receiver { - // wait until we receive a new job, check every 100ms + // Wait until we receive a new job, check every 100ms while job_receiver.is_empty() { thread::sleep(Duration::from_millis(100)); } @@ -492,7 +494,7 @@ fn start_thread(id: u16, mut job_receiver: broadcast::Receiver { debug!("Mining Thread #{} received a new job", id); - // set thread id in extra nonce for more work spread between threads + // Set thread id in extra nonce for more work spread between threads // u16 support up to 65535 threads new_job.set_thread_id_u16(id); let initial_timestamp = new_job.get_timestamp(); @@ -511,7 +513,7 @@ fn start_thread(id: u16, mut job_receiver: broadcast::Receiver>) { handler.register_method("estimate_fees", async_handler!(estimate_fees)); handler.register_method("estimate_extra_data_size", async_handler!(estimate_extra_data_size)); - // These functions allow to have an encrypted DB directly in the wallet storage - // You can retrieve keys, values, have differents trees, and store values - // It is restricted in XSWD context (each app access to their own trees), and open to everything in RPC - // Keys and values can be anything + // These functions allow 
you to have an encrypted DB directly in the wallet storage. + // You can retrieve keys, values, have different trees, and store values. + // It is restricted in XSWD context (each app has access to its own trees), and open to everything in RPC. + // Keys and values can be anything. handler.register_method("get_matching_keys", async_handler!(get_matching_keys)); handler.register_method("count_matching_entries", async_handler!(count_matching_entries)); handler.register_method("get_value_from_key", async_handler!(get_value_from_key)); @@ -123,7 +123,7 @@ async fn get_nonce(context: &Context, body: Value) -> Result Result { if body != Value::Null { return Err(InternalRpcError::UnexpectedParams) @@ -189,16 +189,16 @@ async fn rescan(context: &Context, body: Value) -> Result Result { let params: GetBalanceParams = parse_params(body)?; let asset = params.asset.unwrap_or(XELIS_ASSET); let wallet: &Arc = context.get()?; let storage = wallet.get_storage().read().await; - // If the asset is not found, it will returns 0 - // Use has_balance below to check if the wallet has a balance for a specific asset + // If the asset is not found, it will return 0. + // Use has_balance below to check if the wallet has a balance for a specific asset. let balance = storage.get_plaintext_balance_for(&asset).await.unwrap_or(0); Ok(json!(balance)) } @@ -252,7 +252,7 @@ async fn get_transaction(context: &Context, body: Value) -> Result Result { let params: BuildTransactionParams = parse_params(body)?; let wallet: &Arc = context.get()?; - // request ask to broadcast the TX but wallet is not connected to any daemon + // Request to broadcast the TX, but the wallet is not connected to any daemon if !wallet.is_online().await && params.broadcast { return Err(WalletError::NotOnlineMode)? 
} @@ -261,13 +261,13 @@ async fn build_transaction(context: &Context, body: Value) -> Result Result Result Result { if body != Value::Null { return Err(InternalRpcError::UnexpectedParams) @@ -427,7 +427,7 @@ async fn get_tree_name(context: &Context, tree: String) -> Result>> = context.get()?; let xswd = session.get_server().get_handler(); let applications = xswd.get_applications().read().await; diff --git a/xelis_wallet/src/api/xswd.rs b/xelis_wallet/src/api/xswd.rs index 87397e4a..0dbf271f 100644 --- a/xelis_wallet/src/api/xswd.rs +++ b/xelis_wallet/src/api/xswd.rs @@ -86,13 +86,13 @@ use log::{ // is a way to communicate with the XELIS Wallet // from a web browser through a secure websocket. // The idea is that a token is generated on websocket side -// and send through the WS connection to the wallet. -// The wallet then signs the token and send it back to the WS. +// and sent through the WS connection to the wallet. +// The wallet then signs the token and sends it back to the WS. // On browser side we can save it in local storage and use it // to communicate and request data from wallet. // Each action will require the validation of the user // based on the permission configured. -// The token is saved also in wallet side for a reminder of +// The token is also saved in the wallet side as a reminder of // all applications allowed. 
// For security reasons, in case the signed token leaks, at each connection, // the wallet will request the authorization of the user @@ -165,7 +165,7 @@ pub struct AppState { name: String, // Small description of the app description: String, - // URL of the app if exists + // URL of the app if it exists url: Option, // All permissions for each method permissions: Mutex>, @@ -223,11 +223,11 @@ pub struct ApplicationData { name: String, // Small description of the app description: String, - // URL of the app if exists + // URL of the app if it exists url: Option, // All permissions for each method permissions: IndexMap, - // signature of all data + // Signature of all data signature: Option, } @@ -372,7 +372,7 @@ impl Permission { } pub enum PermissionRequest<'a> { - // bool tell if it was already signed or not + // The boolean indicates whether it was already signed or not Application(bool), Request(&'a RpcRequest) } @@ -420,19 +420,19 @@ where } } - // This method is used to get the applications HashMap - // be careful by using it, and if you delete a session, please disconnect it + // This method is used to get the applications HashMap. + // Be careful when using it, and if you delete a session, please disconnect it. 
pub fn get_applications(&self) -> &RwLock, AppStateShared>> { &self.applications } - // get a HashSet of all events tracked + // Get a HashSet of all events tracked pub async fn get_tracked_events(&self) -> HashSet { let sessions = self.listeners.lock().await; HashSet::from_iter(sessions.values().map(|e| e.keys().cloned()).flatten()) } - // verify if a event is tracked by XSWD + // Verify if an event is tracked by XSWD pub async fn is_event_tracked(&self, event: &NotifyEvent) -> bool { let sessions = self.listeners.lock().await; sessions @@ -441,7 +441,7 @@ where .is_some() } - // notify a new event to all connected WebSocket + // Notify a new event to all connected WebSocket pub async fn notify(&self, event: &NotifyEvent, value: Value) { let value = json!(EventResult { event: Cow::Borrowed(event), value }); let sessions = self.listeners.lock().await; @@ -451,15 +451,15 @@ where let session = session.clone(); spawn_task("xswd-notify", async move { if let Err(e) = session.send_text(response.to_string()).await { - debug!("Error occured while notifying a new event: {}", e); + debug!("Error occurred while notifying a new event: {}", e); }; }); } } } - // verify the permission for a request - // if the permission is not set, it will request it to the user + // Verify the permission for a request. + // If the permission is not set, it will request it to the user. async fn verify_permission_for_request(&self, app: &AppStateShared, request: &RpcRequest) -> Result<(), RpcResponseError> { let _permit = self.permission_handler_semaphore.acquire().await .map_err(|_| RpcResponseError::new(request.id.clone(), InternalRpcError::InternalError("Permission handler semaphore error")))?; @@ -498,8 +498,8 @@ where } } - // register a new application - // if the application is already registered, it will return an error + // Register a new application. + // If the application is already registered, it will return an error. 
async fn add_application(&self, session: &WebSocketSessionShared, app_data: ApplicationData) -> Result { // Sanity check { @@ -541,7 +541,7 @@ where } if app_data.signature.is_some() { - // TODO: verify the signature + // TODO: Verify the signature return Err(RpcResponseError::new(None, XSWDError::InvalidSignatureForApplicationData)) } @@ -554,7 +554,7 @@ where // Verify the signature of the app data to validate permissions previously set if let Some(signature) = &app_data.signature { let bytes = app_data.to_bytes(); - // remove signature bytes for verification + // Remove signature bytes for verification let bytes = &bytes[0..bytes.len() - SIGNATURE_SIZE]; let key = wallet.get_public_key().await .map_err(|e| { @@ -610,7 +610,7 @@ where })) } - // register a new event listener for the specified connection/application + // Register a new event listener for the specified connection/application async fn subscribe_session_to_event(&self, session: &WebSocketSessionShared, event: NotifyEvent, id: Option) -> Result<(), RpcResponseError> { let mut listeners = self.listeners.lock().await; let events = listeners.entry(session.clone()).or_insert_with(HashMap::new); @@ -624,7 +624,7 @@ where Ok(()) } - // unregister an event listener for the specified connection/application + // Unregister an event listener for the specified connection/application async fn unsubscribe_session_from_event(&self, session: &WebSocketSessionShared, event: NotifyEvent, id: Option) -> Result<(), RpcResponseError> { let mut listeners = self.listeners.lock().await; let events = listeners.get_mut(session).ok_or_else(|| RpcResponseError::new(id.clone(), InternalRpcError::EventNotSubscribed))?; @@ -636,16 +636,16 @@ where Ok(()) } - // Verify if an application is already registered - // ID must be unique and not used by another application + // Verify if an application is already registered. + // ID must be unique and not used by another application. 
async fn has_app_with_id(&self, id: &String) -> bool { let applications = self.applications.read().await; applications.values().find(|e| e.get_id() == id).is_some() } - // Internal method to handle the message received from the WebSocket connection - // This method will parse the message and call the appropriate method if app is registered - // Otherwise, it expects a JSON object with the application data to register it + // Internal method to handle the message received from the WebSocket connection. + // This method will parse the message and call the appropriate method if app is registered. + // Otherwise, it expects a JSON object with the application data to register it. async fn on_message_internal(&self, session: &WebSocketSessionShared, message: &[u8]) -> Result, RpcResponseError> { let (request, is_subscribe, is_unsubscribe) = { let app_state = { @@ -676,7 +676,7 @@ where return Err(RpcResponseError::new(request.id, InternalRpcError::MethodNotFound(request.method))) } - // let's check the permission set by user for this method + // Let's check the permission set by user for this method app.set_requesting(true); self.verify_permission_for_request(&app, &request).await?; app.set_requesting(false); @@ -708,7 +708,7 @@ where }; if is_subscribe || is_unsubscribe { - // retrieve the event variant + // Retrieve the event variant let event = serde_json::from_value( request.params.ok_or_else(|| RpcResponseError::new(request.id.clone(), InternalRpcError::ExpectedParams))?) 
.map_err(|e| RpcResponseError::new(request.id.clone(), InternalRpcError::InvalidJSONParams(e)) diff --git a/xelis_wallet/src/cipher.rs b/xelis_wallet/src/cipher.rs index 926d5605..c8a78822 100644 --- a/xelis_wallet/src/cipher.rs +++ b/xelis_wallet/src/cipher.rs @@ -16,7 +16,7 @@ use crate::{error::WalletError, config::SALT_SIZE}; pub struct Cipher { cipher: XChaCha20Poly1305, - // this salt is used for keys and values + // This salt is used for keys and values salt: Option<[u8; SALT_SIZE]> } @@ -30,27 +30,27 @@ impl Cipher { }) } - // encrypt value passed in param and add plaintext nonce before encrypted value - // a Nonce is generated randomly at each call + // Encrypt value passed in param and add plaintext nonce before encrypted value. + // A Nonce is generated randomly at each call. pub fn encrypt_value(&self, value: &[u8]) -> Result, WalletError> { // generate unique random nonce let nonce = XChaCha20Poly1305::generate_nonce(&mut OsRng); self.encrypt_value_with_nonce(value, &nonce.into()) } - // encrypt value passed in param and add plaintext nonce before encrypted value + // Encrypt value passed in param and add plaintext nonce before encrypted value pub fn encrypt_value_with_nonce(&self, value: &[u8], nonce: &[u8; Self::NONCE_SIZE]) -> Result, WalletError> { let mut plaintext: Vec = Vec::with_capacity(SALT_SIZE + value.len()); - // add salt to the plaintext value + // Add salt to the plaintext value if let Some(salt) = &self.salt { plaintext.extend_from_slice(salt); } plaintext.extend_from_slice(value); - // encrypt data using plaintext and nonce + // Encrypt data using plaintext and nonce let data = &self.cipher.encrypt(nonce.into(), plaintext.as_slice()).map_err(|e| WalletError::CryptoError(e))?; - // append unique nonce to the encrypted data + // Append unique nonce to the encrypted data let mut encrypted = Vec::with_capacity(Self::NONCE_SIZE + data.len()); encrypted.extend_from_slice(nonce); encrypted.extend_from_slice(data); @@ -58,18 +58,18 @@ impl 
Cipher { Ok(encrypted) } - // decrypt any value loaded from disk, with the format of above function + // Decrypt any value loaded from disk, with the format of above function pub fn decrypt_value(&self, encrypted: &[u8]) -> Result> { - // nonce is 24 bytes and is mandatory in encrypted slice + // Nonce is 24 bytes and is mandatory in encrypted slice if encrypted.len() < 25 { return Err(WalletError::InvalidEncryptedValue.into()) } - // read the nonce for this data + // Read the nonce for this data let nonce = XNonce::from_slice(&encrypted[0..24]); - // decrypt the value using the nonce previously decoded + // Decrypt the value using the nonce previously decoded let mut decrypted = self.cipher.decrypt(nonce, &encrypted[nonce.len()..]).map_err(|e| WalletError::CryptoError(e))?; - // delete the salt from the decrypted slice + // Delete the salt from the decrypted slice if let Some(salt) = &self.salt { decrypted.drain(0..salt.len()); } @@ -77,7 +77,7 @@ impl Cipher { Ok(decrypted) } - // hash the key with salt + // Hash the key with salt pub fn hash_key>(&self, key: S) -> [u8; HASH_SIZE] { let mut data = Vec::new(); if let Some(salt) = &self.salt { diff --git a/xelis_wallet/src/config.rs b/xelis_wallet/src/config.rs index 2fadf0f6..d4919d6a 100644 --- a/xelis_wallet/src/config.rs +++ b/xelis_wallet/src/config.rs @@ -7,7 +7,7 @@ pub const PASSWORD_HASH_SIZE: usize = 32; pub const SALT_SIZE: usize = 32; pub const KEY_SIZE: usize = 32; -// daemon address by default when no specified +// Daemon address by default when none is specified pub const DEFAULT_DAEMON_ADDRESS: &str = "http://127.0.0.1:8080"; // Auto reconnect interval in seconds for Network Handler pub const AUTO_RECONNECT_INTERVAL: u64 = 5; diff --git a/xelis_wallet/src/daemon_api.rs b/xelis_wallet/src/daemon_api.rs index 177d20cf..ffc04669 100644 --- a/xelis_wallet/src/daemon_api.rs +++ b/xelis_wallet/src/daemon_api.rs @@ -76,14 +76,14 @@ impl DaemonAPI { &self.client } - // is the websocket connection alive + // 
Is the websocket connection alive pub fn is_online(&self) -> bool { trace!("is_online"); self.client.is_online() } - // Disconnect by closing the connection with node RPC - // This will only disconnect if there are no more references to the daemon API + // Disconnect by closing the connection with node RPC. + // This will only disconnect if there are no more references to the daemon API. pub async fn disconnect(self: &Arc) -> Result { trace!("disconnect"); let count = Arc::strong_count(self); diff --git a/xelis_wallet/src/entry.rs b/xelis_wallet/src/entry.rs index d1debf60..432e93a4 100644 --- a/xelis_wallet/src/entry.rs +++ b/xelis_wallet/src/entry.rs @@ -299,8 +299,8 @@ impl TransactionEntry { &mut self.entry } - // Convert to RPC Transaction Entry - // This is a necessary step to serialize correctly the public key into an address + // Convert to RPC Transaction Entry. + // This is a necessary step to serialize correctly the public key into an address. pub fn serializable(self, mainnet: bool) -> RPCTransactionEntry { RPCTransactionEntry { hash: self.hash, diff --git a/xelis_wallet/src/main.rs b/xelis_wallet/src/main.rs index 62c89839..2d607add 100644 --- a/xelis_wallet/src/main.rs +++ b/xelis_wallet/src/main.rs @@ -78,19 +78,19 @@ use { anyhow::Error, }; -// This struct is used to configure the RPC Server +// This struct is used to configure the RPC Server. // In case we want to enable it instead of starting -// the XSWD Server +// the XSWD Server. 
#[cfg(feature = "api_server")] #[derive(Debug, clap::Args)] pub struct RPCConfig { /// RPC Server bind address #[clap(long)] rpc_bind_address: Option, - /// username for RPC authentication + /// Username for RPC authentication #[clap(long)] rpc_username: Option, - /// password for RPC authentication + /// Password for RPC authentication #[clap(long)] rpc_password: Option } @@ -116,15 +116,15 @@ pub struct Config { /// Disable the log file #[clap(long)] disable_file_logging: bool, - /// Disable the log filename date based + /// Disable the log filename date based. /// If disabled, the log file will be named xelis-wallet.log instead of YYYY-MM-DD.xelis-wallet.log #[clap(long)] disable_file_log_date_based: bool, - /// Disable the usage of colors in log + /// Disable the usage of colors in log. #[clap(long)] disable_log_color: bool, - /// Disable terminal interactive mode - /// You will not be able to write CLI commands in it or to have an updated prompt + /// Disable terminal interactive mode. + /// You will not be able to write CLI commands in it or to have an updated prompt. #[clap(long)] disable_interactive_mode: bool, /// Log filename @@ -146,8 +146,7 @@ pub struct Config { /// Set the path for wallet storage to open/create a wallet at this location #[clap(long)] wallet_path: Option, - /// Set the path to use for precomputed tables - /// + /// Set the path to use for precomputed tables. /// By default, it will be from current directory. #[clap(long)] precomputed_tables_path: Option, @@ -168,9 +167,9 @@ pub struct Config { #[cfg(feature = "api_server")] #[clap(long)] enable_xswd: bool, - /// Disable the history scan - /// This will prevent syncing old TXs/blocks - /// Only blocks / transactions caught by the network handler will be stored, not the old ones + /// Disable the history scan. + /// This will prevent syncing old TXs/blocks. + /// Only blocks / transactions caught by the network handler will be stored, not the old ones. 
#[clap(long)] disable_history_scan: bool, /// Force the wallet to use a stable balance only during transactions creation. @@ -197,22 +196,21 @@ async fn main() -> Result<()> { #[cfg(feature = "api_server")] { - // Sanity check - // check that we don't have both server enabled + // Sanity check - Check that we don't have both server enabled. if config.enable_xswd && config.rpc.rpc_bind_address.is_some() { error!("Invalid parameters configuration: RPC Server and XSWD cannot be enabled at the same time"); return Ok(()); // exit } - // check that username/password is not in param if bind address is not set + // Check that username/password is not in param if bind address is not set if config.rpc.rpc_bind_address.is_none() && (config.rpc.rpc_password.is_some() || config.rpc.rpc_username.is_some()) { error!("Invalid parameters configuration for rpc password and username: RPC Server is not enabled"); return Ok(()) } - // check that username/password is set together if bind address is set + // Check that username/password is set together if bind address is set if config.rpc.rpc_bind_address.is_some() && config.rpc.rpc_password.is_some() != config.rpc.rpc_username.is_some() { - error!("Invalid parameters configuration: usernamd AND password must be provided"); + error!("Invalid parameters configuration: username AND password must be provided"); return Ok(()) } } @@ -221,7 +219,7 @@ async fn main() -> Result<()> { command_manager.store_in_context(config.network)?; if let Some(path) = config.wallet_path { - // read password from option or ask him + // Read password from option or ask him let password = if let Some(password) = config.password { password } else { @@ -305,7 +303,7 @@ async fn xswd_handler(mut receiver: UnboundedReceiver, prompt: Sharea async fn xswd_handle_request_application(prompt: &ShareablePrompt, app_state: AppStateShared, signed: bool) -> Result { let mut message = format!("XSWD: Allow application {} ({}) to access your wallet\r\n(Y/N): ", 
app_state.get_name(), app_state.get_id()); if signed { - message = prompt.colorize_str(Color::BrightYellow, "NOTE: Application authorizaion was already approved previously.\r\n") + &message; + message = prompt.colorize_str(Color::BrightYellow, "NOTE: Application authorization was already approved previously.\r\n") + &message; } let accepted = prompt.read_valid_str_value(prompt.colorize_string(Color::Blue, &message), vec!["y", "n"]).await? == "y"; if accepted { @@ -525,7 +523,7 @@ async fn open_wallet(manager: &CommandManager, _: ArgumentManager) -> Result<(), Wallet::open(dir, password, *network, precomputed_tables)? }; - manager.message("Wallet sucessfully opened"); + manager.message("Wallet successfully opened"); apply_config(&wallet, #[cfg(feature = "api_server")] prompt).await; setup_wallet_command_manager(wallet, manager).await?; @@ -546,13 +544,13 @@ async fn create_wallet(manager: &CommandManager, _: ArgumentManager) -> Result<( } let dir = format!("{}{}", DIR_PATH, name); - // check if it doesn't exists yet + // Check if it doesn't exists yet if Path::new(&dir).is_dir() { manager.message("Wallet already exist with this name!"); return Ok(()) } - // ask and verify password + // Ask and verify password let password = prompt.read_input("Password: ", true) .await.context("Error while reading password")?; let confirm_password = prompt.read_input("Confirm Password: ", true) @@ -570,7 +568,7 @@ async fn create_wallet(manager: &CommandManager, _: ArgumentManager) -> Result<( Wallet::create(dir, password, None, *network, precomputed_tables)? 
}; - manager.message("Wallet sucessfully created"); + manager.message("Wallet successfully created"); apply_config(&wallet, #[cfg(feature = "api_server")] prompt).await; // Display the seed in prompt @@ -607,13 +605,13 @@ async fn recover_wallet(manager: &CommandManager, _: ArgumentManager) -> Result< } let dir = format!("{}{}", DIR_PATH, name); - // check if it doesn't exists yet + // Check if it doesn't exists yet if Path::new(&dir).is_dir() { manager.message("Wallet already exist with this name!"); return Ok(()) } - // ask and verify password + // Ask and verify password let password = prompt.read_input("Password: ", true) .await.context("Error while reading password")?; let confirm_password = prompt.read_input("Confirm Password: ", true) @@ -632,7 +630,7 @@ async fn recover_wallet(manager: &CommandManager, _: ArgumentManager) -> Result< Wallet::create(dir, password, Some(seed), *network, precomputed_tables)? }; - manager.message("Wallet sucessfully recovered"); + manager.message("Wallet successfully recovered"); apply_config(&wallet, #[cfg(feature = "api_server")] prompt).await; setup_wallet_command_manager(wallet, manager).await?; @@ -667,7 +665,7 @@ async fn transfer(manager: &CommandManager, _: ArgumentManager) -> Result<(), Co let context = manager.get_context().lock()?; let wallet: &Arc = context.get()?; - // read address + // Read address let str_address = prompt.read_input( prompt.colorize_str(Color::Green, "Address: "), false @@ -687,7 +685,7 @@ async fn transfer(manager: &CommandManager, _: ArgumentManager) -> Result<(), Co (balance, decimals) }; - // read amount + // Read amount let float_amount: f64 = prompt.read( prompt.colorize_string(Color::Green, &format!("Amount (max: {}): ", format_coin(max_balance, decimals))) ).await.context("Error while reading amount")?; @@ -727,7 +725,7 @@ async fn transfer_all(manager: &CommandManager, mut args: ArgumentManager) -> Re let context = manager.get_context().lock()?; let wallet: &Arc = context.get()?; - // 
read address + // Read address let str_address = prompt.read_input( prompt.colorize_str(Color::Green, "Address: "), false @@ -804,7 +802,7 @@ async fn burn(manager: &CommandManager, _: ArgumentManager) -> Result<(), Comman (balance, decimals) }; - // read amount + // Read amount let float_amount: f64 = prompt.read( prompt.colorize_string(Color::Green, &format!("Amount (max: {}): ", format_coin(max_balance, decimals))) ).await.context("Error while reading amount")?; @@ -873,13 +871,13 @@ async fn history(manager: &CommandManager, mut arguments: ArgumentManager) -> Re let storage = wallet.get_storage().read().await; let mut transactions = storage.get_transactions()?; - // if we don't have any txs, no need proceed further + // If we don't have any txs, no need proceed further if transactions.is_empty() { manager.message("No transactions available"); return Ok(()) } - // desc ordered + // Desc ordered transactions.sort_by(|a, b| b.get_topoheight().cmp(&a.get_topoheight())); let mut max_pages = transactions.len() / TXS_PER_PAGE; if transactions.len() % TXS_PER_PAGE != 0 { @@ -955,7 +953,7 @@ async fn seed(manager: &CommandManager, mut arguments: ArgumentManager) -> Resul let password = prompt.read_input("Password: ", true) .await.context("Error while reading password")?; - // check if password is valid + // Check if password is valid wallet.is_valid_password(password).await?; let language = if arguments.has_argument("language") { @@ -1059,7 +1057,7 @@ async fn start_xswd(manager: &CommandManager, _: ArgumentManager) -> Result<(), Ok(()) } -// broadcast tx if possible +// Broadcast TX if possible // submit_transaction increase the local nonce in storage in case of success async fn broadcast_tx(wallet: &Wallet, manager: &CommandManager, tx: Transaction) { let tx_hash = tx.hash(); diff --git a/xelis_wallet/src/mnemonics/mod.rs b/xelis_wallet/src/mnemonics/mod.rs index 035200d9..13456b40 100644 --- a/xelis_wallet/src/mnemonics/mod.rs +++ b/xelis_wallet/src/mnemonics/mod.rs 
@@ -57,9 +57,9 @@ pub enum MnemonicsError { pub struct Language<'a> { // Language name, like "English" or "French" name: &'a str, - // number of utf-8 chars to use for checksum + // Number of utf-8 chars to use for checksum prefix_length: usize, - // list of words in the language + // List of words in the language words: [&'a str; WORDS_LIST] } @@ -93,25 +93,25 @@ fn verify_checksum(words: &Vec, prefix_len: usize) -> Result) -> Result, usize)>, MnemonicsError> { 'main: for (i, language) in LANGUAGES.iter().enumerate() { - // this map is used to store the indices of the words in the language + // This map is used to store the indices of the words in the language let mut language_words: HashMap<&str, usize> = HashMap::with_capacity(WORDS_LIST); - // build the map + // Build the map for (j, word) in language.words.iter().enumerate() { language_words.insert(word, j); } - // find the indices of the words + // Find the indices of the words let mut indices = Vec::new(); for word in words.iter() { if let Some(index) = language_words.get(word.as_str()) { indices.push(*index); } else { - // incorrect language for this word, try the next one + // Incorrect language for this word, try the next one continue 'main; } } - // we were able to build the indices, now verify checksum + // We were able to build the indices, now verify checksum if !verify_checksum(&words, language.prefix_length)?.unwrap_or(true) { return Err(MnemonicsError::InvalidChecksum); } @@ -121,7 +121,7 @@ fn find_indices(words: &Vec) -> Result, usize)>, Mnem Ok(None) } -// convert a words list to a Private Key (32 bytes) +// Convert a words list to a Private Key (32 bytes) pub fn words_to_key(words: &Vec) -> Result { if !(words.len() == SEED_LENGTH + 1 || words.len() == SEED_LENGTH) { return Err(MnemonicsError::InvalidWordsCount); diff --git a/xelis_wallet/src/network_handler.rs b/xelis_wallet/src/network_handler.rs index a8410f00..b483209a 100644 --- a/xelis_wallet/src/network_handler.rs +++ 
b/xelis_wallet/src/network_handler.rs @@ -68,19 +68,19 @@ pub enum NetworkError { } pub struct NetworkHandler { - // tokio task + // Tokio task task: Mutex>>>, - // wallet where we can save every data from chain + // Wallet where we can save every data from chain wallet: Arc, - // api to communicate with daemon - // It is behind a Arc to be shared across several wallets - // in case someone make a custom service and don't want to create a new connection + // Api to communicate with daemon. + // It is behind an Arc to be shared across several wallets + // in case someone makes a custom service and doesn't want to create a new connection. api: Arc } impl NetworkHandler { - // Create a new network handler with a wallet and a daemon address - // This will create itself a DaemonAPI and verify if connection is possible + // Create a new network handler with a wallet and a daemon address. + // This will create a DaemonAPI and verify if connection is possible. pub async fn new(wallet: Arc, daemon_address: S) -> Result { let s = daemon_address.to_string(); let api = DaemonAPI::new(format!("{}/json_rpc", sanitize_daemon_address(s.as_str()))).await?; @@ -89,7 +89,7 @@ impl NetworkHandler { // Create a new network handler with an already created daemon API pub async fn with_api(wallet: Arc, api: Arc) -> Result { - // check that we can correctly get version from daemon + // Check that we can correctly get version from daemon let version = api.get_version().await?; debug!("Connected to daemon running version {}", version); @@ -185,7 +185,7 @@ impl NetworkHandler { &self.api } - // check if the network handler is running (that we have a task and its not finished) + // Check if the network handler is running (that we have a task and its not finished) pub async fn is_running(&self) -> bool { let task = self.task.lock().await; if let Some(handle) = task.as_ref() { @@ -196,8 +196,8 @@ impl NetworkHandler { } // Process a block by checking if it contains any transaction for us - // Or 
that we mined it - // Returns assets that changed and returns the highest nonce if we send a transaction + // or that we mined it. + // Returns assets that changed and returns the highest nonce if we send a transaction. async fn process_block(&self, address: &Address, block: BlockResponse, topoheight: u64) -> Result, Option)>, Error> { let block_hash = block.hash.into_owned(); debug!("Processing block {} at topoheight {}", block_hash, topoheight); @@ -214,7 +214,7 @@ impl NetworkHandler { // Prevent storing changes multiple times let mut changes_stored = false; - // create Coinbase entry if its our address and we're looking for XELIS asset + // Create Coinbase entry if its our address and we're looking for XELIS asset if miner == *address.get_public_key() { debug!("Block {} at topoheight {} is mined by us", block_hash, topoheight); if let Some(reward) = block.miner_reward { @@ -225,8 +225,8 @@ impl NetworkHandler { let broadcast = { let mut storage = self.wallet.get_storage().write().await; - // Mark it as last coinbase reward topoheight - // it is internally checked if its higher or not + // Mark it as last coinbase reward topoheight. + // It is internally checked if its higher or not. 
debug!("Storing last coinbase reward topoheight {}", topoheight); storage.set_last_coinbase_reward_topoheight(Some(topoheight))?; @@ -342,11 +342,11 @@ impl NetworkHandler { } } - if is_owner { // check that we are owner of this TX + if is_owner { // Check that we are owner of this TX Some(EntryData::Outgoing { transfers: transfers_out, fee: tx.fee, nonce: tx.nonce }) - } else if !transfers_in.is_empty() { // otherwise, check that we received one or few transfers from it + } else if !transfers_in.is_empty() { // Otherwise, check that we received one or few transfers from it Some(EntryData::Incoming { from: tx.source.to_public_key(), transfers: transfers_in }) - } else { // this TX has nothing to do with us, nothing to save + } else { // This TX has nothing to do with us, nothing to save None } } @@ -366,7 +366,7 @@ impl NetworkHandler { debug!("Transaction {} was executed in block {} at topoheight {}", tx.hash, executor.block_hash, executor.block_topoheight); }, Err(e) => { - // Tx is maybe not executed, this is really rare event + // TX is maybe not executed, this is really rare event warn!("Error while fetching topoheight execution of transaction {}: {}", tx.hash, e); continue; } @@ -411,32 +411,33 @@ impl NetworkHandler { storage.has_transaction(hash) } - // Scan the chain using a specific balance asset, this helps us to get a list of version to only requests blocks where changes happened - // When the block is requested, we don't limit the syncing to asset in parameter + // Scan the chain using a specific balance asset, this helps us to get a list of versions to only request blocks where changes happened. + // When the block is requested, we don't limit syncing to the specified asset. 
async fn get_balance_and_transactions(&self, topoheight_processed: &mut HashSet, address: &Address, asset: &Hash, min_topoheight: u64, balances: bool, highest_nonce: &mut Option) -> Result<(), Error> { // Retrieve the highest version let (mut topoheight, mut version) = self.api.get_balance(address, asset).await.map(|res| (res.topoheight, res.version))?; + debug!("Starting sync from topoheight {} for asset {}", topoheight, asset); - // don't sync already synced blocks + // Don't sync already synced blocks if min_topoheight >= topoheight { debug!("Reached minimum topoheight {}, topo: {}", min_topoheight, topoheight); return Ok(()) } - // Determine if its the highest version of balance or not - // This is used to save the latest balance + // Determine if its the highest version of balance or not. + // This is used to save the latest balance. let mut highest_version = true; loop { let (mut balance, _, _, previous_topoheight) = version.consume(); - // add this topoheight in cache to not re-process it (blocks are independant of asset to have faster sync) - // if its not already processed, do it + // Add this topoheight in cache to not re-process it (blocks are independent of asset to have faster sync). + // If its not already processed, do it. 
if topoheight_processed.insert(topoheight) { debug!("Processing topoheight {}", topoheight); let response = self.api.get_block_with_txs_at_topoheight(topoheight).await?; let changes = self.process_block(address, response, topoheight).await?; - // Check if a change occured, we are the highest version and update balances is requested + // Check if a change occurred, we are the highest version and update balances is requested if let Some((_, nonce)) = changes.filter(|_| balances && highest_version) { let mut storage = self.wallet.get_storage().write().await; @@ -447,9 +448,9 @@ impl NetworkHandler { *highest_nonce = Some(storage.get_nonce()?); } - // Store only the highest nonce - // Because if we are building queued transactions, it may break our queue - // Our we couldn't submit new txs before they get removed from mempool + // Store only the highest nonce. + // If we are building queued transactions, storing a lower nonce may break our queue. + // We also couldn't submit new TXs before they get removed from mempool. if let Some(nonce) = nonce.filter(|n| highest_nonce.as_ref().map(|h| *h < *n).unwrap_or(true)) { debug!("Storing new highest nonce {}", nonce); storage.set_nonce(nonce)?; @@ -502,10 +503,10 @@ impl NetworkHandler { Ok(()) } - // Locate the last topoheight valid for syncing, this support soft forks, DAG reorgs, etc... - // Balances and nonce may be outdated, but we will sync them later - // All transactions / changes above the last valid topoheight will be deleted - // Returns daemon topoheight along wallet stable topoheight and if back sync is needed + // Locate the last topoheight valid for syncing, this supports soft forks, DAG reorgs, etc... + // Balances and nonce may be outdated, but we will sync them later. + // All transactions / changes above the last valid topoheight will be deleted. + // Returns daemon topoheight along wallet stable topoheight and if back sync is needed. 
async fn locate_sync_topoheight_and_clean(&self) -> Result<(u64, Hash, u64), NetworkError> { trace!("locating sync topoheight and cleaning"); let info = self.api.get_info().await?; @@ -526,8 +527,8 @@ impl NetworkHandler { let synced_topoheight = { let storage = self.wallet.get_storage().read().await; if storage.has_top_block_hash()? { - // Check that the daemon topoheight is the same as our - // Verify also that the top block hash is same as our + // Check that the daemon topoheight is the same as ours + // Verify also that the top block hash is same as ours let top_block_hash = storage.get_top_block_hash()?; let synced_topoheight = storage.get_synced_topoheight()?; @@ -548,7 +549,7 @@ impl NetworkHandler { let header = self.api.get_block_at_topoheight(synced_topoheight).await?; let block_hash = header.hash.into_owned(); if block_hash == top_block_hash { - // topoheight and block hash are equal, we are still on right chain + // Topoheight and block hash are equal, we are still on right chain return Ok((daemon_topoheight, daemon_block_hash, synced_topoheight)) } } @@ -573,7 +574,7 @@ impl NetworkHandler { } // We are under the pruned topoheight, - // lets assume we are on the right chain under it + // lets assume we are on the right chain under it. if maximum < pruned_topoheight { maximum = pruned_topoheight; break None; @@ -629,10 +630,10 @@ impl NetworkHandler { Ok((daemon_topoheight, daemon_block_hash, maximum)) } - // Sync the latest version of our balances and nonces and determine if we should parse all blocks - // If assets are provided, we'll only sync these assets - // TODO: this may bug with Smart Contract integration as we could receive a new asset and not detect it - // If nonce is not provided, we will fetch it from the daemon + // Sync the latest version of our balances and nonces and determine if we should parse all blocks. + // If assets are provided, we'll only sync these assets. 
+ // TODO: This may cause issues with Smart Contract integration as we could receive a new asset and not detect it. + // If nonce is not provided, we will fetch it from the daemon. async fn sync_head_state(&self, address: &Address, assets: Option>, nonce: Option, sync_nonce: bool) -> Result { trace!("syncing head state"); let new_nonce = if nonce.is_some() { @@ -670,8 +671,8 @@ impl NetworkHandler { trace!("assets: {}", assets.len()); let mut balances: HashMap<&Hash, CiphertextCache> = HashMap::new(); - // Store newly detected assets - // Get the final balance of each asset + // Store newly detected assets. + // Get the final balance of each asset. for asset in &assets { trace!("asset: {}", asset); // check if we have this asset locally @@ -759,8 +760,8 @@ impl NetworkHandler { Ok(should_sync_blocks) } - // Locate the highest valid topoheight we synced to, clean wallet storage - // then sync again the head state + // Locate the highest valid topoheight we synced to, + // clean wallet storage, then sync the head state again. async fn sync(&self, address: &Address, event: Option) -> Result<(), Error> { trace!("sync"); @@ -781,9 +782,9 @@ impl NetworkHandler { debug!("We must sync head state, assets: {}, nonce: {:?}", assets.iter().map(|a| a.to_string()).collect::>().join(", "), nonce); { let storage = self.wallet.get_storage().read().await; - // Verify that its a higher nonce than our locally stored - // Because if we are building queued transactions, it may break our queue - // Our we couldn't submit new txs before they get removed from mempool + // Verify that its a higher nonce than our locally stored. + // If we are building queued TXs, it may break our queue. + // Or we couldn't submit new txs before they get removed from mempool. 
let stored_nonce = storage.get_nonce().unwrap_or(0); if nonce.is_some_and(|n| n <= stored_nonce) { debug!("Nonce {:?} is lower or equal to stored nonce {}, skipping it", nonce, stored_nonce); @@ -813,8 +814,8 @@ impl NetworkHandler { sync_new_blocks |= self.sync_head_state(&address, None, None, true).await?; } - // we have something that changed, sync transactions - // prevent a double sync head state if history scan is disabled + // We have something that changed, sync transactions. + // Prevent a double sync head state if history scan is disabled. if sync_new_blocks && self.wallet.get_history_scan() { debug!("Syncing new blocks"); self.sync_new_blocks(address, wallet_topoheight, true).await?; @@ -833,9 +834,9 @@ impl NetworkHandler { Ok(()) } - // Runs an infinite loop to sync on each new block added in chain - // Because of potential forks and DAG reorg during attacks, - // we verify the last valid topoheight where changes happened + // Runs an infinite loop to sync on each new block added in chain. + // Due to potential forks and DAG reorg during attacks, + // we verify the last valid topoheight where changes happened. async fn start_syncing(self: &Arc) -> Result<(), Error> { debug!("Starting syncing"); // Generate only one time the address @@ -843,15 +844,15 @@ impl NetworkHandler { // Do a first sync to be up-to-date with the daemon self.sync(&address, None).await?; - // Thanks to websocket, we can be notified when a new block is added in chain - // this allows us to have a instant sync of each new block instead of polling periodically + // Thanks to websocket, we can be notified when a new block is added in chain. + // This allows us to have a instant sync of each new block instead of polling periodically. - // Because DAG can reorder any blocks in stable height, its possible we missed some txs because they were not executed - // when the block was added. 
We must check on DAG reorg for each block just to be sure + // Because DAG can reorder any blocks in stable height, its possible we missed some TXs because they were not executed + // when the block was added. We must check on DAG reorg for each block just to be sure. let mut on_block_ordered = self.api.on_block_ordered_event().await?; - // For better security, verify that an orphaned TX isn't in our ledger - // This is rare event but may happen if someone try to do something shady + // For better security, verify that an orphaned TX isn't in our ledger. + // This is rare event but may happen if someone tries to do something shady. let mut on_transaction_orphaned = self.api.on_transaction_orphaned_event().await?; // Network events to detect if we are online or offline @@ -862,8 +863,8 @@ impl NetworkHandler { select! { biased; // Wait on a new block, we don't parse the block directly as it may - // have reorg the chain - // Wait on a new block ordered in DAG + // have reorged on the chain. + // Wait on a new block ordered in DAG. res = on_block_ordered.next() => { let event = res?; debug!("Block ordered event {} at {}", event.block_hash, event.topoheight); @@ -878,8 +879,8 @@ impl NetworkHandler { debug!("Deleting all transactions due to reorg until 0"); storage.delete_transactions()?; } else { - // TODO we should make a faster way to delete all TXs above this topoheight - // Otherwise in future with millions of TXs, this may take few seconds. + // TODO: We should make a faster way to delete all TXs above this topoheight + // otherwise in the future with millions of TXs, this may take few seconds. debug!("Deleting transactions above {} due to DAG reorg", topoheight); storage.delete_transactions_above_topoheight(topoheight)?; } @@ -889,9 +890,9 @@ impl NetworkHandler { } } - // TODO delete all TXs & changes at this topoheight and above + // TODO: Delete all TXs & changes at this topoheight and above. 
// We need to clean up the DB as we may have some TXs that are not executed anymore - // and some others that got executed + // and some others that got executed. // Sync this block again as it may have some TXs executed let block = self.api.get_block_with_txs_at_topoheight(topoheight).await?; @@ -939,8 +940,8 @@ impl NetworkHandler { }; debug!("Scanning history for each asset"); - // cache for all topoheight we already processed - // this will prevent us to request more than one time the same topoheight + // Cache for all topoheight we already processed. + // This will prevent us to request more than one time the same topoheight. let mut topoheight_processed = HashSet::new(); let mut highest_nonce = None; for asset in assets { diff --git a/xelis_wallet/src/precomputed_tables/mod.rs b/xelis_wallet/src/precomputed_tables/mod.rs index f77d32c2..f16f0ac1 100644 --- a/xelis_wallet/src/precomputed_tables/mod.rs +++ b/xelis_wallet/src/precomputed_tables/mod.rs @@ -31,8 +31,8 @@ use std::sync::Arc; use log::debug; use xelis_common::crypto::ecdlp; -// This is a 32 bytes aligned struct -// It is necessary for the precomputed tables points +// This is a 32 bytes aligned struct. +// It is necessary for the precomputed tables points. #[derive(bytemuck::Pod, bytemuck::Zeroable, Copy, Clone)] #[repr(C, align(32))] struct Bytes32Alignment([u8; 32]); diff --git a/xelis_wallet/src/precomputed_tables/native.rs b/xelis_wallet/src/precomputed_tables/native.rs index 18de2c6b..ad72e375 100644 --- a/xelis_wallet/src/precomputed_tables/native.rs +++ b/xelis_wallet/src/precomputed_tables/native.rs @@ -6,8 +6,8 @@ use xelis_common::crypto::ecdlp; use super::{PrecomputedTables, PrecomputedTablesShared}; -// This will read from file if exists, or generate and store it in file -// This must be call only one time, and can be cloned to be shared through differents wallets +// This will read from file if it exists, or generate and store it in file. 
+// This must be called only one time, and can be cloned to be shared through different wallets. pub fn read_or_generate_precomputed_tables(path: Option, progress_report: P, l1: usize) -> Result { let mut precomputed_tables = PrecomputedTables::new(l1); diff --git a/xelis_wallet/src/precomputed_tables/web.rs b/xelis_wallet/src/precomputed_tables/web.rs index 16f9ec2b..6a44656d 100644 --- a/xelis_wallet/src/precomputed_tables/web.rs +++ b/xelis_wallet/src/precomputed_tables/web.rs @@ -5,13 +5,13 @@ use xelis_common::crypto::ecdlp; use super::{PrecomputedTables, PrecomputedTablesShared}; -// Precomputed tables is too heavy to be stored in local Storage, and generating it on the fly would be too slow -// So we will generate it on the server and store it in a file, and then we will read it from the file +// The precomputed tables is too heavy to be stored in local Storage, and generating it on the fly would be too slow. +// So we will generate it on the server and store it in a file, and then we will read it from the file. pub fn read_or_generate_precomputed_tables(_: Option, _: P, l1: usize) -> Result { let mut precomputed_tables = PrecomputedTables::new(l1); let bytes = include_bytes!("precomputed_tables.bin"); // We are forced to re-allocate the precomputed tables instead of using the reference - // to have the correct alignment in memory + // to have the correct alignment in memory. 
precomputed_tables.get_mut().copy_from_slice(bytes); // let precomputed_tables = PrecomputedTables::with_bytes(bytes, l1); Ok(Arc::new(precomputed_tables)) diff --git a/xelis_wallet/src/storage/backend/web.rs b/xelis_wallet/src/storage/backend/web.rs index e1cb025c..b05bf573 100644 --- a/xelis_wallet/src/storage/backend/web.rs +++ b/xelis_wallet/src/storage/backend/web.rs @@ -118,7 +118,7 @@ pub struct Db { )))] #[derive(Debug, Error)] pub enum DbError { - #[error("An error occured on the database")] + #[error("An error occurred on the database")] Poisoned } @@ -129,7 +129,7 @@ pub enum DbError { ))] #[derive(Debug, Error)] pub enum DbError { - #[error("An error occured on the database")] + #[error("An error occurred on the database")] Poisoned, #[error("Cannot access to the window object")] Window, @@ -435,7 +435,7 @@ impl InnerTree { } } -// TODO: rework this +// TODO: rework this. // A reference to all the entries in a `Tree`. // So, even if it get changed while we iter, we still have a reference to the old entries. pub struct Iter { diff --git a/xelis_wallet/src/storage/mod.rs b/xelis_wallet/src/storage/mod.rs index 2abf6696..8a290dd9 100644 --- a/xelis_wallet/src/storage/mod.rs +++ b/xelis_wallet/src/storage/mod.rs @@ -50,7 +50,7 @@ use crate::{ use self::backend::{Db, Tree}; use log::{trace, debug, error}; -// keys used to retrieve from storage +// Keys used to retrieve from storage const NONCE_KEY: &[u8] = b"NONCE"; const SALT_KEY: &[u8] = b"SALT"; // Password + salt is necessary to decrypt master key @@ -59,10 +59,10 @@ const PASSWORD_SALT_KEY: &[u8] = b"PSALT"; const MASTER_KEY: &[u8] = b"MKEY"; const PRIVATE_KEY: &[u8] = b"PKEY"; -// const used for online mode -// represent the daemon topoheight +// Const used for online mode. +// Represent the daemon topoheight. 
const TOPOHEIGHT_KEY: &[u8] = b"TOPH"; -// represent the daemon top block hash +// Represent the daemon top block hash const TOP_BLOCK_HASH_KEY: &[u8] = b"TOPBH"; const NETWORK: &[u8] = b"NET"; // Last coinbase reward topoheight @@ -120,35 +120,35 @@ pub struct TxCache { // Implement an encrypted storage system pub struct EncryptedStorage { - // cipher used to encrypt/decrypt/hash data + // Cipher used to encrypt/decrypt/hash data cipher: Cipher, // All transactions where this wallet is part of transactions: Tree, - // balances for each asset + // Balances for each asset balances: Tree, - // extra data (network, topoheight, etc) + // Extra data (network, topoheight, etc) extra: Tree, - // all assets tracked by the wallet + // All assets tracked by the wallet assets: Tree, - // This tree is used to store all topoheight where a change in the wallet occured + // This tree is used to store all topoheight where a change in the wallet occurred changes_topoheight: Tree, // The inner storage inner: Storage, // Caches balances_cache: Mutex>, - // this cache is used to store unconfirmed balances - // it is used to store the balance before the transaction is confirmed - // so we can build several txs without having to wait for the confirmation - // We store it in a VecDeque so for each TX we have an entry and can just retrieve it + // This cache is used to store unconfirmed balances. + // It is used to store the balance before the transaction is confirmed + // so we can build several txs without having to wait for the confirmation. + // We store it in a VecDeque so for each TX we have an entry and can just retrieve it. unconfirmed_balances_cache: Mutex>>, tx_cache: Option, // Cache for the assets with their decimals assets_cache: Mutex>, // Cache for the synced topoheight synced_topoheight: Option, - // Topoheight of the last coinbase reward + // Topoheight of the last coinbase reward. 
// This is used to determine if we should - // use a stable balance or not + // use a stable balance or not. last_coinbase_reward_topoheight: Option, } @@ -219,7 +219,7 @@ impl EncryptedStorage { self.internal_load(tree, &hashed_key) } - // Because we can't predict the nonce used for encryption, we make it determistic + // Because we can't predict the nonce used for encryption, we make it deterministic fn create_encrypted_key(&self, key: &[u8]) -> Result> { trace!("create encrypted key"); // the hashed key is salted so its unique and can't be recover/bruteforced @@ -233,15 +233,15 @@ impl EncryptedStorage { Ok(key) } - // load from disk using an encrypted key, decrypt the value and deserialize it + // Load from disk using an encrypted key, decrypt the value and deserialize it fn load_from_disk_with_encrypted_key(&self, tree: &Tree, key: &[u8]) -> Result { trace!("load from disk with encrypted key"); let encrypted_key = self.create_encrypted_key(key)?; self.internal_load(tree, &encrypted_key) } - // Encrypt key, encrypt data and then save to disk - // We encrypt instead of hashing to be able to retrieve the key + // Encrypt key, encrypt data and then save to disk. + // We encrypt instead of hashing to be able to retrieve the key. 
fn save_to_disk_with_encrypted_key(&self, tree: &Tree, key: &[u8], value: &[u8]) -> Result<()> { trace!("save to disk with encrypted key"); let encrypted_key = self.create_encrypted_key(key)?; @@ -250,7 +250,7 @@ impl EncryptedStorage { Ok(()) } - // hash key, encrypt data and then save to disk + // Hash key, encrypt data and then save to disk fn save_to_disk(&self, tree: &Tree, key: &[u8], value: &[u8]) -> Result<()> { trace!("save to disk"); let hashed_key = self.cipher.hash_key(key); @@ -258,7 +258,7 @@ impl EncryptedStorage { Ok(()) } - // hash key, encrypt data and then save to disk + // Hash key, encrypt data and then save to disk fn delete_from_disk(&self, tree: &Tree, key: &[u8]) -> Result<()> { trace!("delete from disk"); let hashed_key = self.cipher.hash_key(key); @@ -266,7 +266,7 @@ impl EncryptedStorage { Ok(()) } - // hash key, encrypt data and then save to disk + // Hash key, encrypt data and then save to disk fn delete_from_disk_with_encrypted_key(&self, tree: &Tree, key: &[u8]) -> Result<()> { trace!("delete from disk with encrypted key"); let encrypted_key = self.create_encrypted_key(key)?; @@ -334,8 +334,8 @@ impl EncryptedStorage { self.contains_encrypted_data(&tree, &key.to_bytes()) } - // Search all entries with requested query_key/query_value - // It has to go through the whole tree elements, decrypt each key/value and verify them against the query filter set + // Search all entries with requested query_key/query_value. + // It has to go through the whole tree elements, decrypt each key/value and verify them against the query filter set. pub fn query_db(&self, tree: impl Into, query_key: Option, query_value: Option, return_on_first: bool) -> Result { trace!("query db"); let tree = self.get_custom_tree(tree)?; @@ -409,8 +409,8 @@ impl EncryptedStorage { Ok(keys) } - // Count entries from a tree - // A query is possible to filter on keys + // Count entries from a tree. + // A query is possible to filter on keys. 
pub fn count_custom_tree_entries(&self, tree: &String, query_key: &Option, query_value: &Option) -> Result { trace!("count custom tree entries"); let tree = self.get_custom_tree(tree)?; @@ -446,8 +446,8 @@ impl EncryptedStorage { Ok(count) } - // this function is specific because we save the key in encrypted form (and not hashed as others) - // returns all saved assets + // This function is specific because we save the key in encrypted form (and not hashed as others). + // Returns all saved assets. pub async fn get_assets(&self) -> Result> { trace!("get assets"); let mut cache = self.assets_cache.lock().await; @@ -518,7 +518,7 @@ impl EncryptedStorage { self.contains_encrypted_data(&self.assets, asset.as_bytes()) } - // save asset with its corresponding decimals + // Save asset with its corresponding decimals pub async fn add_asset(&mut self, asset: &Hash, decimals: u8) -> Result<()> { trace!("add asset"); if self.contains_asset(asset).await? { @@ -633,11 +633,11 @@ impl EncryptedStorage { // Set the balance for this asset pub async fn set_balance_for(&mut self, asset: &Hash, mut balance: Balance) -> Result<()> { trace!("set balance for {}", asset); - // Clear the cache of all outdated balances - // for this, we simply go through all versions available and delete them all until we find the one we are looking for - // The unconfirmed balances cache may not work during front running + // Clear the cache of all outdated balances. + // For this, we simply go through all versions available and delete them all until we find the one we are looking for. + // The unconfirmed balances cache may not work during front running. // As we only scan the final balances for each asset, if we get any incoming TX, compressed balance - // will be different and we will not be able to find the unconfirmed balance + // will be different and we will not be able to find the unconfirmed balance. 
{ let mut cache = self.unconfirmed_balances_cache.lock().await; let mut delete_entry = false; @@ -682,14 +682,14 @@ impl EncryptedStorage { self.load_from_disk(&self.transactions, hash.as_bytes()) } - // read whole disk and returns all transactions + // Read whole disk and returns all transactions pub fn get_transactions(&self) -> Result> { trace!("get transactions"); self.get_filtered_transactions(None, None, None, true, true, true, true, None) } - // delete all transactions above the specified topoheight - // This will go through each transaction, deserialize it, check topoheight, and delete it if required + // Delete all transactions above the specified topoheight. + // This will go through each transaction, deserialize it, check topoheight, and delete it if required. pub fn delete_transactions_above_topoheight(&mut self, topoheight: u64) -> Result<()> { trace!("delete transactions above topoheight {}", topoheight); for el in self.transactions.iter().values() { @@ -703,9 +703,9 @@ impl EncryptedStorage { Ok(()) } - // delete all transactions at the specified topoheight - // This will go through each transaction, deserialize it, check topoheight, and delete it if required - // Maybe we can optimize it by keeping a lookuptable of topoheight -> txs ? + // Delete all transactions at the specified topoheight. + // This will go through each transaction, deserialize it, check topoheight, and delete it if required. + // Maybe we can optimize it by keeping a lookup table of topoheight -> TXs? 
pub fn delete_transactions_at_topoheight(&mut self, topoheight: u64) -> Result<()> { trace!("delete transactions at topoheight {}", topoheight); for el in self.transactions.iter().values() { @@ -820,7 +820,7 @@ impl EncryptedStorage { self.clear_tx_cache(); } - // Delete tx cache + // Delete TX cache pub fn clear_tx_cache(&mut self) { trace!("clear tx cache"); self.tx_cache = None; @@ -834,9 +834,9 @@ impl EncryptedStorage { Ok(()) } - // Save the transaction with its TX hash as key - // We hash the hash of the TX to use it as a key to not let anyone being able to see txs saved on disk - // with no access to the decrypted master key + // Save the transaction with its TX hash as key. + // We hash the hash of the TX to use it as a key to prevent anyone from seeing TXs saved on disk + // without access to the decrypted master key. pub fn save_transaction(&mut self, hash: &Hash, transaction: &TransactionEntry) -> Result<()> { trace!("save transaction {}", hash); @@ -860,8 +860,8 @@ impl EncryptedStorage { self.load_from_disk(&self.extra, NONCE_KEY) } - // Get the unconfirmed nonce to use to build ordered TXs - // It will fallback to the real nonce if not set + // Get the unconfirmed nonce to use to build ordered TXs. + // It will fall back to the real nonce if not set. pub fn get_unconfirmed_nonce(&self) -> u64 { trace!("get unconfirmed nonce"); self.tx_cache.as_ref().map(|c| c.nonce).unwrap_or_else(|| self.get_nonce().unwrap_or(0)) @@ -879,15 +879,15 @@ impl EncryptedStorage { self.tx_cache.as_ref() } - // Set the new nonce used to create new transactions + // Set the new nonce used to create new transactions.
// If the unconfirmed nonce is lower than the new nonce, we reset it pub fn set_nonce(&mut self, nonce: u64) -> Result<()> { trace!("set nonce to {}", nonce); self.save_to_disk(&self.extra, NONCE_KEY, &nonce.to_be_bytes()) } - // Store the last coinbase reward topoheight - // This is used to determine if we should use a stable balance or not + // Store the last coinbase reward topoheight. + // This is used to determine if we should use a stable balance or not. pub fn set_last_coinbase_reward_topoheight(&mut self, topoheight: Option) -> Result<()> { trace!("set last coinbase reward topoheight to {:?}", topoheight); if let Some(topoheight) = topoheight { @@ -995,7 +995,7 @@ impl EncryptedStorage { self.contains_data(&self.extra, NETWORK) } - // Add a topoheight where a change occured + // Add a topoheight where a change occurred pub fn add_topoheight_to_changes(&mut self, topoheight: u64, block_hash: &Hash) -> Result<()> { trace!("add topoheight to changes: {} at {}", topoheight, block_hash); self.save_to_disk_with_encrypted_key(&self.changes_topoheight, &topoheight.to_be_bytes(), block_hash.as_bytes()) @@ -1013,8 +1013,8 @@ impl EncryptedStorage { self.contains_encrypted_data(&self.changes_topoheight, &topoheight.to_be_bytes()) } - // Delete all changes above topoheight - // This will returns true if a changes was deleted + // Delete all changes above topoheight. + // This will return true if a change was deleted. pub fn delete_changes_above_topoheight(&mut self, topoheight: u64) -> Result { trace!("delete changes above topoheight {}", topoheight); let mut deleted = false; @@ -1032,8 +1032,8 @@ impl EncryptedStorage { Ok(deleted) } - // Delete changes at topoheight - // This will returns true if a changes was deleted + // Delete changes at topoheight. + // This will return true if a change was deleted.
pub fn delete_changes_at_topoheight(&mut self, topoheight: u64) -> Result<()> { trace!("delete changes at topoheight {}", topoheight); self.delete_from_disk_with_encrypted_key(&self.changes_topoheight, &topoheight.to_be_bytes())?; @@ -1078,15 +1078,15 @@ impl Storage { }) } - // save the encrypted form of the master key - // it can only be decrypted using the password-based key + // Save the encrypted form of the master key. + // It can only be decrypted using the password-based key. pub fn set_encrypted_master_key(&mut self, encrypted_key: &[u8]) -> Result<()> { trace!("set encrypted master key"); self.db.insert(MASTER_KEY, encrypted_key)?; Ok(()) } - // retrieve the encrypted form of the master key + // Retrieve the encrypted form of the master key pub fn get_encrypted_master_key(&self) -> Result> { trace!("get encrypted master key"); match self.db.get(MASTER_KEY)? { @@ -1099,14 +1099,14 @@ impl Storage { } } - // set password salt used to derive the password-based key + // Set password salt used to derive the password-based key pub fn set_password_salt(&mut self, salt: &[u8]) -> Result<()> { trace!("set password salt"); self.db.insert(PASSWORD_SALT_KEY, salt)?; Ok(()) } - // retrieve password salt used to derive the password-based key + // Retrieve password salt used to derive the password-based key pub fn get_password_salt(&self) -> Result<[u8; SALT_SIZE]> { trace!("get password salt"); let mut salt: [u8; SALT_SIZE] = [0; SALT_SIZE]; @@ -1126,7 +1126,7 @@ impl Storage { Ok(salt) } - // get the salt used for encrypted storage + // Get the salt used for encrypted storage pub fn get_encrypted_storage_salt(&self) -> Result> { trace!("get encrypted storage salt"); let values = self.db.get(SALT_KEY)?.context("encrypted salt for storage was not found")?; @@ -1136,7 +1136,7 @@ impl Storage { Ok(encrypted_salt) } - // set the salt used for encrypted storage + // Set the salt used for encrypted storage pub fn set_encrypted_storage_salt(&mut self, salt: &[u8]) -> 
Result<()> { trace!("set encrypted storage salt"); self.db.insert(SALT_KEY, salt)?; diff --git a/xelis_wallet/src/transaction_builder.rs b/xelis_wallet/src/transaction_builder.rs index 24e413df..a4dd77f1 100644 --- a/xelis_wallet/src/transaction_builder.rs +++ b/xelis_wallet/src/transaction_builder.rs @@ -7,11 +7,11 @@ use xelis_common::{ }; use crate::{error::WalletError, storage::{Balance, EncryptedStorage, TxCache}}; -// State used to estimate fees for a transaction +// State used to estimate fees for a transaction. // Because fees can be higher if a destination account is not registered -// We need to give this information during the estimation of fees +// we need to give this information during the estimation of fees. pub struct EstimateFeesState { - // this is containing the registered keys that we are aware of + // This contains the registered keys that we are aware of registered_keys: HashSet } @@ -39,8 +39,8 @@ impl FeeHelper for EstimateFeesState { } } -// State used to build a transaction -// It contains the balances of the wallet and the registered keys +// State used to build a transaction. +// It contains the balances of the wallet and the registered keys. pub struct TransactionBuilderState { inner: EstimateFeesState, mainnet: bool, diff --git a/xelis_wallet/src/wallet.rs b/xelis_wallet/src/wallet.rs index 7969c982..4170d68b 100644 --- a/xelis_wallet/src/wallet.rs +++ b/xelis_wallet/src/wallet.rs @@ -126,13 +126,13 @@ use xelis_common::tokio::task::spawn_blocking; #[derive(Serialize, Clone, Debug)] #[serde(untagged)] pub enum Event { - // When a TX is detected from daemon and is added in wallet storage + // When a TX is detected from daemon and is added in wallet storage. NewTransaction(TransactionEntry), - // When a new block is detected from daemon + // When a new block is detected from daemon.
// NOTE: Same topoheight can be broadcasted several times if DAG reorg it - // And some topoheight can be skipped because of DAG reorg + // and some topoheight can be skipped because of DAG reorg. // Example: two blocks at same height, both got same topoheight 69, next block reorg them together - // and one of the block get topoheight 69, the other 70, next is 71, but 70 is skipped + // and one of the block get topoheight 69, the other 70, next is 71, but 70 is skipped. NewTopoHeight { topoheight: u64 }, @@ -140,9 +140,9 @@ pub enum Event { BalanceChanged(BalanceChanged), // When a new asset is added to wallet NewAsset(AssetWithData), - // When a rescan happened (because of user request or DAG reorg/fork) - // Value is topoheight until it deleted transactions - // Next sync will restart at this topoheight + // When a rescan happened (because of user request or DAG reorg/fork). + // Value is topoheight until it deleted transactions. + // Next sync will restart at this topoheight. Rescan { start_topoheight: u64 }, @@ -171,12 +171,12 @@ pub struct Wallet { // Encrypted Wallet Storage storage: RwLock, // Inner account with keys and precomputed tables - // so it can be shared to another thread for decrypting ciphertexts + // so it can be shared to another thread for decrypting ciphertexts. inner: Arc, - // network handler for online mode to keep wallet synced + // Network handler for online mode to keep wallet synced #[cfg(feature = "network_handler")] network_handler: Mutex>, - // network on which we are connected + // Network on which we are connected network: Network, // RPC Server #[cfg(feature = "api_server")] @@ -186,10 +186,10 @@ pub struct Wallet { xswd_channel: RwLock>>, // Event broadcaster event_broadcaster: Mutex>>, - // If the wallet should scan also blocks and transactions history - // Set to true by default + // If the wallet should scan also blocks and transactions history. + // Set to true by default. 
history_scan: AtomicBool, - // flag to prioritize the usage of stable balance version when its online + // Flag to prioritize the usage of stable balance version when its online force_stable_balance: AtomicBool, } @@ -234,7 +234,7 @@ impl Wallet { precomputed_tables::read_or_generate_precomputed_tables(path, progress_report, PRECOMPUTED_TABLES_L1) } - // Create a new wallet with the specificed storage, keypair and its network + // Create a new wallet with the specified storage, keypair and its network fn new(storage: EncryptedStorage, keypair: KeyPair, network: Network, precomputed_tables: PrecomputedTablesShared) -> Arc { let zelf = Self { storage: RwLock::new(storage), @@ -260,7 +260,7 @@ impl Wallet { return Err(WalletError::EmptyName.into()) } - // generate random keypair or recover it from seed + // Generate random keypair or recover it from seed let keypair = if let Some(seed) = seed { debug!("Retrieving keypair from seed..."); let words: Vec = seed.split_whitespace().map(str::to_string).collect(); @@ -271,32 +271,32 @@ impl Wallet { KeyPair::new() }; - // generate random salt for hashed password + // Generate random salt for hashed password let mut salt: [u8; SALT_SIZE] = [0; SALT_SIZE]; OsRng.fill_bytes(&mut salt); - // generate hashed password which will be used as key to encrypt master_key + // Generate hashed password which will be used as key to encrypt master_key debug!("hashing provided password"); let hashed_password = hash_password(password, &salt)?; debug!("Creating storage for {}", name); let mut inner = Storage::new(name)?; - // generate the Cipher + // Generate the Cipher let cipher = Cipher::new(&hashed_password, None)?; - // save the salt used for password + // Save the salt used for password debug!("Save password salt in public storage"); inner.set_password_salt(&salt)?; - // generate the master key which is used for storage and then save it in encrypted form + // Generate the master key which is used for storage and then save it in encrypted 
form let mut master_key: [u8; 32] = [0; 32]; OsRng.fill_bytes(&mut master_key); let encrypted_master_key = cipher.encrypt_value(&master_key)?; debug!("Save encrypted master key in public storage"); inner.set_encrypted_master_key(&encrypted_master_key)?; - // generate the storage salt and save it in encrypted form + // Generate the storage salt and save it in encrypted form let mut storage_salt = [0; SALT_SIZE]; OsRng.fill_bytes(&mut storage_salt); let encrypted_storage_salt = cipher.encrypt_value(&storage_salt)?; @@ -323,17 +323,17 @@ impl Wallet { debug!("Creating storage for {}", name); let storage = Storage::new(name)?; - // get password salt for KDF + // Get password salt for KDF debug!("Retrieving password salt from public storage"); let salt = storage.get_password_salt()?; - // retrieve encrypted master key from storage + // Retrieve encrypted master key from storage debug!("Retrieving encrypted master key from public storage"); let encrypted_master_key = storage.get_encrypted_master_key()?; let hashed_password = hash_password(password, &salt)?; - // decrypt the encrypted master key using the hashed password (used as key) + // Decrypt the encrypted master key using the hashed password (used as key) let cipher = Cipher::new(&hashed_password, None)?; let master_key = cipher.decrypt_value(&encrypted_master_key).context("Invalid password provided for this wallet")?; @@ -357,9 +357,9 @@ impl Wallet { Ok(Self::new(storage, keypair, network, precomputed_tables)) } - // Close the wallet - // this will stop the network handler and the API Server if it's running - // Because wallet is behind Arc, we need to close differents modules that has a copy of it + // Close the wallet. + // This will stop the network handler and the API Server if it's running. + // Because the wallet is behind Arc, we need to close different modules that have a copy of it. 
pub async fn close(&self) { trace!("Closing wallet"); @@ -382,7 +382,7 @@ impl Wallet { } } - // Stop gracefully the network handler + // Stop the network handler gracefully #[cfg(feature = "network_handler")] { let mut lock = self.network_handler.lock().await; @@ -393,19 +393,19 @@ impl Wallet { } } - // Stop gracefully the storage + // Stop the storage gracefully { let mut storage = self.storage.write().await; storage.stop().await; } - // Close the event broadcaster - // So all subscribers will be notified + // Close the event broadcaster. + // So all subscribers will be notified. self.close_events_channel().await; } - // Disable/enable the history scan - // This is used by the network handler to avoid scanning history if requested + // Disable/enable the history scan. + // This is used by the network handler to avoid scanning history if requested. pub fn set_history_scan(&self, value: bool) { self.history_scan.store(value, Ordering::SeqCst); } @@ -532,40 +532,40 @@ impl Wallet { Ok(()) } - // change the current password wallet to a new one + // Change the current password wallet to a new one pub async fn set_password(&self, old_password: String, password: String) -> Result<(), Error> { let mut encrypted_storage = self.storage.write().await; let storage = encrypted_storage.get_mutable_public_storage(); let (master_key, storage_salt) = { - // retrieve old salt to build key from current password + // Retrieve old salt to build key from current password let salt = storage.get_password_salt()?; let hashed_password = hash_password(old_password, &salt)?; let encrypted_master_key = storage.get_encrypted_master_key()?; let encrypted_storage_salt = storage.get_encrypted_storage_salt()?; - // decrypt the encrypted master key using the provided password + // Decrypt the encrypted master key using the provided password let cipher = Cipher::new(&hashed_password, None)?; let master_key = cipher.decrypt_value(&encrypted_master_key).context("Invalid password provided")?; let 
storage_salt = cipher.decrypt_value(&encrypted_storage_salt)?; (master_key, storage_salt) }; - // generate a new salt for password + // Generate a new salt for password let mut salt: [u8; SALT_SIZE] = [0; SALT_SIZE]; OsRng.fill_bytes(&mut salt); - // generate the password-based derivated key to encrypt the master key + // Generate the password-based derived key to encrypt the master key let hashed_password = hash_password(password, &salt)?; let cipher = Cipher::new(&hashed_password, None)?; - // encrypt the master key using the new password + // Encrypt the master key using the new password let encrypted_key = cipher.encrypt_value(&master_key)?; - // encrypt the salt with the new password + // Encrypt the salt with the new password let encrypted_storage_salt = cipher.encrypt_value(&storage_salt)?; - // save on disk + // Save on disk storage.set_password_salt(&salt)?; storage.set_encrypted_master_key(&encrypted_key)?; storage.set_encrypted_storage_salt(&encrypted_storage_salt)?; @@ -603,8 +603,8 @@ impl Wallet { cipher.decrypt(&self.inner.keypair.get_private_key(), handle, role).map_err(|_| WalletError::CiphertextDecode) } - // Create a transaction with the given transaction type and fee - // this will apply the changes to the storage if the transaction + // Create a transaction with the given transaction type and fee. + // This will apply the changes to the storage if the transaction is built successfully.
pub async fn create_transaction(&self, transaction_type: TransactionTypeBuilder, fee: FeeBuilder) -> Result { trace!("create transaction"); let mut storage = self.storage.write().await; @@ -615,11 +615,11 @@ impl Wallet { Ok(transaction) } - // create the final transaction with calculated fees and signature - // also check that we have enough funds for the transaction - // This will returns the transaction builder state along the transaction - // You must handle "apply changes" to the storage - // Warning: this is locking the network handler to access to the daemon api + // Create the final transaction with calculated fees and signature. + // Check that we have enough funds for the transaction. + // Returns the transaction builder state along with the transaction. + // You must handle "apply changes" to the storage. + // Warning: this locks the network handler to access the daemon API. pub async fn create_transaction_with_storage(&self, storage: &EncryptedStorage, transaction_type: TransactionTypeBuilder, fee: FeeBuilder, nonce: Option) -> Result<(TransactionBuilderState, Transaction), WalletError> { trace!("create transaction with storage"); let nonce = nonce.unwrap_or_else(|| storage.get_unconfirmed_nonce()); @@ -632,8 +632,8 @@ impl Wallet { reference = Some(cache.reference.clone()); } - // Used to inject it in the state - // So once the state is applied, we verify if the last coinbase reward topoheight is still valid + // Used to inject it in the state. + // So once the state is applied, we verify if the last coinbase reward topoheight is still valid.
#[cfg(feature = "network_handler")] let mut daemon_stable_topoheight = None; #[cfg(not(feature = "network_handler"))] @@ -646,8 +646,8 @@ impl Wallet { if (reference.is_none() && used_assets.contains(&XELIS_ASSET)) || force_stable_balance { // debug!("Wallet got a coinbase reward at topoheight: {}, verify that its not unstable", topoheight); if let Some(network_handler) = self.network_handler.lock().await.as_ref() { - // Last mining reward is above stable topoheight, this may increase orphans rate - // To avoid this, we will use the last balance version in stable topoheight as reference + // Last mining reward is above stable topoheight, this may increase orphans rate. + // To avoid this, we will use the last balance version in stable topoheight as reference. let use_stable_balance = if let Some(topoheight) = storage.get_last_coinbase_reward_topoheight() { let stable_topoheight = network_handler.get_api().get_stable_topoheight().await?; daemon_stable_topoheight = Some(stable_topoheight); @@ -665,7 +665,7 @@ impl Wallet { let stable_point = network_handler.get_api().get_stable_balance(&address, &asset).await?; // Store the stable balance version into unconfirmed balance - // So it will be fetch later by state + // so it will be fetched later by state. let mut ciphertext = stable_point.version.take_balance(); debug!("decrypting stable balance for asset {}", asset); let amount = self.inner.decrypt_ciphertext(ciphertext.decompressed().map_err(|_| WalletError::CiphertextDecode)?)?; @@ -675,8 +675,8 @@ impl Wallet { }; storage.set_unconfirmed_balance_for((*asset).clone(), balance).await?; - // Build the stable reference - // We need to find the highest stable point + // Build the stable reference. + // We need to find the highest stable point.
if reference.is_none() || reference.as_ref().is_some_and(|r| r.topoheight < stable_point.stable_topoheight) { reference = Some(Reference { topoheight: stable_point.stable_topoheight, @@ -699,7 +699,7 @@ impl Wallet { } }; - // state used to build the transaction + // State used to build the transaction let mut state = TransactionBuilderState::new( self.network.is_mainnet(), reference, @@ -745,9 +745,9 @@ impl Wallet { Ok(transaction) } - // submit a transaction to the network through the connection to daemon - // It will increase the local nonce by 1 if the TX is accepted by the daemon - // returns error if the wallet is in offline mode or if the TX is rejected + // Submit a transaction to the network through the connection to daemon. + // It will increase the local nonce by 1 if the TX is accepted by the daemon. + // Returns error if the wallet is in offline mode or if the TX is rejected. pub async fn submit_transaction(&self, transaction: &Transaction) -> Result<(), WalletError> { trace!("submit transaction {}", transaction.hash()); #[cfg(feature = "network_handler")] @@ -790,8 +790,8 @@ impl Wallet { Ok(()) } - // Estimate fees for a given transaction type - // Estimated fees returned are the minimum required to be valid on chain + // Estimate fees for a given transaction type. + // Estimated fees returned are the minimum required to be valid on chain. 
pub async fn estimate_fees(&self, tx_type: TransactionTypeBuilder) -> Result { trace!("estimate fees"); let mut state = EstimateFeesState::new(); @@ -806,42 +806,42 @@ impl Wallet { Ok(estimated_fees) } - // set wallet in online mode: start a communication task which will keep the wallet synced + // Set wallet in online mode: start a communication task which will keep the wallet synced #[cfg(feature = "network_handler")] pub async fn set_online_mode(self: &Arc, daemon_address: &String, auto_reconnect: bool) -> Result<(), WalletError> { trace!("Set online mode to daemon {} with auto reconnect set to {}", daemon_address, auto_reconnect); if self.is_online().await { - // user have to set in offline mode himself first + // User have to set in offline mode himself first return Err(WalletError::AlreadyOnlineMode) } - // create the network handler + // Create the network handler let network_handler = NetworkHandler::new(Arc::clone(&self), daemon_address).await?; - // start the task + // Start the task network_handler.start(auto_reconnect).await?; *self.network_handler.lock().await = Some(network_handler); Ok(()) } - // set the wallet in online mode using a shared daemon API - // this allows to share the same connection/Daemon API across several wallets to save resources + // Set the wallet in online mode using a shared daemon API. + // This allows to share the same connection/Daemon API across several wallets to save resources. 
#[cfg(feature = "network_handler")] pub async fn set_online_mode_with_api(self: &Arc, daemon_api: Arc, auto_reconnect: bool) -> Result<(), WalletError> { trace!("Set online mode with API with auto reconnect set to {}", auto_reconnect); if self.is_online().await { - // user have to set in offline mode himself first + // The user has to set offline mode himself first return Err(WalletError::AlreadyOnlineMode) } - // create the network handler + // Create the network handler let network_handler = NetworkHandler::with_api(Arc::clone(&self), daemon_api).await?; - // start the task + // Start the task network_handler.start(auto_reconnect).await?; *self.network_handler.lock().await = Some(network_handler); Ok(()) } - // set wallet in offline mode: stop communication task if exists + // Set wallet in offline mode: stop the communication task if it exists #[cfg(feature = "network_handler")] pub async fn set_offline_mode(&self) -> Result<(), WalletError> { trace!("Set offline mode"); @@ -856,14 +856,14 @@ impl Wallet { Ok(()) } - // rescan the wallet from the given topoheight - // that will delete all transactions above the given topoheight and all balances - // then it will re-fetch all transactions and balances from daemon + // Rescan the wallet from the given topoheight. + // This will delete all transactions above the given topoheight and all balances, + // then it will re-fetch all transactions and balances from daemon.
#[cfg(feature = "network_handler")] pub async fn rescan(&self, topoheight: u64, auto_reconnect: bool) -> Result<(), WalletError> { trace!("Rescan wallet from topoheight {}", topoheight); if !self.is_online().await { - // user have to set it online + // User have to set it online return Err(WalletError::NotOnlineMode) } @@ -880,10 +880,10 @@ impl Wallet { debug!("set synced topoheight to {}", topoheight); storage.set_synced_topoheight(topoheight)?; storage.delete_top_block_hash()?; - // balances will be re-fetched from daemon + // Balances will be re-fetched from daemon storage.delete_balances().await?; storage.delete_assets().await?; - // unconfirmed balances are going to be outdated, we delete them + // Unconfirmed balances are going to be outdated, we delete them storage.delete_unconfirmed_balances().await; storage.clear_tx_cache(); @@ -917,8 +917,9 @@ impl Wallet { false } - // this function allow to user to get the network handler in case in want to stay in online mode - // but want to pause / resume the syncing task through start/stop functions from it + // This function allows the user to access the network handler. + // It is useful for users who want to stay in online mode but need to pause or resume the syncing task + // using the start/stop functions provided by the network handler. 
#[cfg(feature = "network_handler")] pub async fn get_network_handler(&self) -> &Mutex>> { &self.network_handler @@ -971,7 +972,7 @@ impl Wallet { #[cfg(feature = "api_server")] pub enum XSWDEvent { RequestPermission(AppStateShared, RpcRequest, oneshot::Sender>), - // bool represents if it was signed or not + // Bool represents if it was signed or not RequestApplication(AppStateShared, bool, oneshot::Sender>), CancelRequest(AppStateShared, oneshot::Sender>) } @@ -981,9 +982,9 @@ pub enum XSWDEvent { impl XSWDPermissionHandler for Arc { async fn request_permission(&self, app_state: &AppStateShared, request: PermissionRequest<'_>) -> Result { if let Some(sender) = self.xswd_channel.read().await.as_ref() { - // no other way ? + // No other way ? let app_state = app_state.clone(); - // create a callback channel to receive the answer + // Create a callback channel to receive the answer let (callback, receiver) = oneshot::channel(); let event = match request { PermissionRequest::Application(signed) => XSWDEvent::RequestApplication(app_state, signed, callback), @@ -1000,8 +1001,9 @@ impl XSWDPermissionHandler for Arc { Err(WalletError::NoHandlerAvailable.into()) } - // there is a lock to acquire so it make it "single threaded" - // the one who has the lock is the one who is requesting so we don't need to check and can cancel directly + // The function acquires a lock to ensure single-threaded access. + // The holder of the lock is the requester, so no additional checks are needed, + // and the request can be directly canceled. async fn cancel_request_permission(&self, app: &AppStateShared) -> Result<(), Error> { if let Some(sender) = self.xswd_channel.read().await.as_ref() { let (callback, receiver) = oneshot::channel();