chore: clippy::uninlined_format_args #6848

Draft · wants to merge 1 commit into base: develop
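
This PR applies the clippy::uninlined_format_args lint across the consensus crates: wherever format!, panic!, assert!, write!, and similar macros received plain variables as positional arguments, the variables are now captured directly inside the format string (format-args capture, stabilized in Rust 1.58). A minimal sketch of the pattern, reusing the variable names from the first hunk below (the snippet itself is illustrative and not part of the diff):

```rust
fn main() {
    let host = "127.0.0.1";
    let port = 8080;

    // Before: positional `{}` placeholders with the values passed as
    // separate arguments.
    let old = format!("/ip4/{}/udp/{}", host, port);

    // After: identifiers captured directly in the format string, which is
    // what clippy::uninlined_format_args suggests.
    let new = format!("/ip4/{host}/udp/{port}");

    assert_eq!(old, new);
    println!("{new}");
}
```

Note that the lint only covers bare identifiers; expressions such as the `commit.index()` argument visible in the dag_state.rs context below still have to be passed as separate arguments, so those call sites are unchanged.
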
2 changes: 1 addition & 1 deletion consensus/config/src/test_committee.rs
@@ -43,7 +43,7 @@ pub fn local_committee_and_keys(
fn get_available_local_address() -> Multiaddr {
let host = "127.0.0.1";
let port = get_available_port(host);
format!("/ip4/{}/udp/{}", host, port).parse().unwrap()
format!("/ip4/{host}/udp/{port}").parse().unwrap()
}

/// Returns an ephemeral, available port. On unix systems, the port returned
9 changes: 3 additions & 6 deletions consensus/core/src/authority_node.rs
@@ -162,8 +162,7 @@ where
) -> Self {
assert!(
committee.is_valid_index(own_index),
"Invalid own index {}",
own_index
"Invalid own index {own_index}"
);
let own_hostname = &committee.authority(own_index).hostname;
info!(
@@ -545,8 +544,7 @@ mod tests {
for txn in b.transactions().iter().map(|t| t.data().to_vec()) {
assert!(
expected_transactions.remove(&txn),
"Transaction not submitted or already seen: {:?}",
txn
"Transaction not submitted or already seen: {txn:?}"
);
}
}
@@ -644,8 +642,7 @@ mod tests {
for txn in b.transactions().iter().map(|t| t.data().to_vec()) {
assert!(
expected_transactions.remove(&txn),
"Transaction not submitted or already seen: {:?}",
txn
"Transaction not submitted or already seen: {txn:?}"
);
}
}
3 changes: 1 addition & 2 deletions consensus/core/src/authority_service.rs
@@ -205,8 +205,7 @@ impl<C: CoreThreadDispatcher> NetworkService for AuthorityService<C> {
return Err(ConsensusError::BlockRejected {
block_ref,
reason: format!(
"Last commit index is lagging quorum commit index too much ({} < {})",
last_commit_index, quorum_commit_index,
"Last commit index is lagging quorum commit index too much ({last_commit_index} < {quorum_commit_index})",
),
});
}
7 changes: 3 additions & 4 deletions consensus/core/src/base_committer.rs
@@ -208,7 +208,7 @@ impl BaseCommitter {
.dag_state
.read()
.get_block(ancestor)
.unwrap_or_else(|| panic!("Block not found in storage: {:?}", ancestor));
.unwrap_or_else(|| panic!("Block not found in storage: {ancestor:?}"));
if let Some(support) = self.find_supported_block(leader_slot, &ancestor) {
return Some(support);
}
@@ -255,14 +255,13 @@ impl BaseCommitter {
} else {
assert!(
reference.round <= gc_round,
"Block not found in storage: {:?} , and is not below gc_round: {gc_round}",
reference
"Block not found in storage: {reference:?} , and is not below gc_round: {gc_round}"
);
false
}
} else {
let potential_vote = potential_vote
.unwrap_or_else(|| panic!("Block not found in storage: {:?}", reference));
.unwrap_or_else(|| panic!("Block not found in storage: {reference:?}"));
self.is_vote(&potential_vote, leader_block)
};

2 changes: 1 addition & 1 deletion consensus/core/src/block.rs
@@ -308,7 +308,7 @@ impl fmt::Display for Slot {

impl fmt::Debug for Slot {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self)
write!(f, "{self}")
}
}

9 changes: 3 additions & 6 deletions consensus/core/src/block_manager.rs
@@ -160,8 +160,7 @@ impl BlockManager {
}
TryAcceptResult::Processed => continue,
TryAcceptResult::Suspended(_) | TryAcceptResult::Skipped => panic!(
"Did not expect to suspend or skip a committed block: {:?}",
block_ref
"Did not expect to suspend or skip a committed block: {block_ref:?}"
),
};
} else {
@@ -351,8 +350,7 @@ impl BlockManager {
ancestor_blocks.push(None);
} else {
panic!(
"Unsuspended block {:?} has a missing ancestor! Ancestor not found in DagState: {:?}",
b, ancestor_ref
"Unsuspended block {b:?} has a missing ancestor! Ancestor not found in DagState: {ancestor_ref:?}"
);
}
}
@@ -1104,8 +1102,7 @@ mod tests {

assert_eq!(
all_accepted_blocks, all_blocks,
"Failed acceptance sequence for seed {}",
seed
"Failed acceptance sequence for seed {seed}"
);
assert!(block_manager.is_empty());
}
4 changes: 1 addition & 3 deletions consensus/core/src/commit_consumer.rs
@@ -75,9 +75,7 @@ impl CommitConsumerMonitor {
let highest_handled_commit = self.highest_handled_commit();
assert!(
highest_observed_commit_at_startup >= highest_handled_commit,
"we cannot have handled a commit that we do not know about: {} < {}",
highest_observed_commit_at_startup,
highest_handled_commit,
"we cannot have handled a commit that we do not know about: {highest_observed_commit_at_startup} < {highest_handled_commit}",
);

let mut commit = self.highest_observed_commit_at_startup.write().unwrap();
3 changes: 1 addition & 2 deletions consensus/core/src/commit_observer.rs
@@ -166,8 +166,7 @@ impl CommitObserver {
load_committed_subdag_from_store(self.store.as_ref(), commit, reputation_scores);
self.sender.send(committed_sub_dag).unwrap_or_else(|e| {
panic!(
"Failed to send commit during recovery, probably due to shutdown: {:?}",
e
"Failed to send commit during recovery, probably due to shutdown: {e:?}"
)
});

8 changes: 4 additions & 4 deletions consensus/core/src/core.rs
@@ -2439,7 +2439,7 @@ mod test {
.try_propose(true)
.unwrap()
.unwrap_or_else(|| {
panic!("Block should have been proposed for round {}", round)
panic!("Block should have been proposed for round {round}")
});
}
}
@@ -2544,7 +2544,7 @@ mod test {
.try_propose(true)
.unwrap()
.unwrap_or_else(|| {
panic!("Block should have been proposed for round {}", round)
panic!("Block should have been proposed for round {round}")
});
}
}
@@ -2579,7 +2579,7 @@ mod test {
.try_propose(true)
.unwrap()
.unwrap_or_else(|| {
panic!("Block should have been proposed for round {}", round)
panic!("Block should have been proposed for round {round}")
});
}
}
@@ -3425,7 +3425,7 @@ mod test {
expected_commit_index: 5,
commit_index: 6,
} => (),
_ => panic!("Unexpected error: {:?}", err),
_ => panic!("Unexpected error: {err:?}"),
}
}

38 changes: 17 additions & 21 deletions consensus/core/src/dag_state.rs
@@ -127,11 +127,11 @@ impl DagState {

let last_commit = store
.read_last_commit()
.unwrap_or_else(|e| panic!("Failed to read from storage: {:?}", e));
.unwrap_or_else(|e| panic!("Failed to read from storage: {e:?}"));

let commit_info = store
.read_last_commit_info()
.unwrap_or_else(|e| panic!("Failed to read from storage: {:?}", e));
.unwrap_or_else(|e| panic!("Failed to read from storage: {e:?}"));
let (mut last_committed_rounds, commit_recovery_start_index) =
if let Some((commit_ref, commit_info)) = commit_info {
tracing::info!("Recovering committed state from {commit_ref} {commit_info:?}");
@@ -147,7 +147,7 @@ impl DagState {
if let Some(last_commit) = last_commit.as_ref() {
store
.scan_commits((commit_recovery_start_index..=last_commit.index()).into())
.unwrap_or_else(|e| panic!("Failed to read from storage: {:?}", e))
.unwrap_or_else(|e| panic!("Failed to read from storage: {e:?}"))
.iter()
.for_each(|commit| {
for block_ref in commit.blocks() {
@@ -258,7 +258,7 @@ impl DagState {
loop {
let commits = store
.scan_commits((index..=index).into())
.unwrap_or_else(|e| panic!("Failed to read from storage: {:?}", e));
.unwrap_or_else(|e| panic!("Failed to read from storage: {e:?}"));
let Some(commit) = commits.first() else {
info!(
"Recovering finished up to index {index}, no more commits to recover"
@@ -283,7 +283,7 @@ impl DagState {
block_ref,
commit.index()
);
- assert!(state.set_committed(block_ref), "Attempted to set again a block {:?} as committed when recovering commit {:?}", block_ref, commit);
+ assert!(state.set_committed(block_ref), "Attempted to set again a block {block_ref:?} as committed when recovering commit {commit:?}");
});

// All commits are indexed starting from 1, so one reach zero exit.
@@ -425,7 +425,7 @@ impl DagState {
let store_results = self
.store
.read_blocks(&missing_refs)
.unwrap_or_else(|e| panic!("Failed to read from storage: {:?}", e));
.unwrap_or_else(|e| panic!("Failed to read from storage: {e:?}"));
self.context
.metrics
.node_metrics
@@ -452,8 +452,7 @@ impl DagState {
false
} else {
panic!(
"Block {:?} not found in cache to set as committed.",
block_ref
"Block {block_ref:?} not found in cache to set as committed."
);
}
}
@@ -488,7 +487,7 @@ impl DagState {
/// checked.
pub(crate) fn get_uncommitted_blocks_at_round(&self, round: Round) -> Vec<VerifiedBlock> {
if round <= self.last_commit_round() {
panic!("Round {} have committed blocks!", round);
panic!("Round {round} have committed blocks!");
}

let mut blocks = vec![];
@@ -521,7 +520,7 @@ impl DagState {
}
let block_ref = linked.pop_last().unwrap();
let Some(block) = self.get_block(&block_ref) else {
panic!("Block {:?} should exist in DAG!", block_ref);
panic!("Block {block_ref:?} should exist in DAG!");
};
linked.extend(block.ancestors().iter().cloned());
}
@@ -536,7 +535,7 @@ impl DagState {
))
.map(|r| {
self.get_block(r)
.unwrap_or_else(|| panic!("Block {:?} should exist in DAG!", r))
.unwrap_or_else(|| panic!("Block {r:?} should exist in DAG!"))
.clone()
})
.collect()
@@ -757,7 +756,7 @@ impl DagState {
let store_results = self
.store
.contains_blocks(&missing_refs)
.unwrap_or_else(|e| panic!("Failed to read from storage: {:?}", e));
.unwrap_or_else(|e| panic!("Failed to read from storage: {e:?}"));
self.context
.metrics
.node_metrics
@@ -805,8 +804,7 @@ impl DagState {

if commit.timestamp_ms() < last_commit.timestamp_ms() {
panic!(
"Commit timestamps do not monotonically increment, prev commit {:?}, new commit {:?}",
last_commit, commit
"Commit timestamps do not monotonically increment, prev commit {last_commit:?}, new commit {commit:?}"
);
}
} else {
@@ -994,7 +992,7 @@ impl DagState {
);
self.store
.write(WriteBatch::new(blocks, commits, commit_info_to_write))
.unwrap_or_else(|e| panic!("Failed to write to storage: {:?}", e));
.unwrap_or_else(|e| panic!("Failed to write to storage: {e:?}"));
self.context
.metrics
.node_metrics
@@ -1031,7 +1029,7 @@ impl DagState {
pub(crate) fn recover_last_commit_info(&self) -> Option<(CommitRef, CommitInfo)> {
self.store
.read_last_commit_info()
.unwrap_or_else(|e| panic!("Failed to read from storage: {:?}", e))
.unwrap_or_else(|e| panic!("Failed to read from storage: {e:?}"))
}

// TODO: Remove four methods below this when DistributedVoteScoring is enabled.
@@ -1448,8 +1446,7 @@ mod test {
// & 2) might not be in right lexicographical order.
assert_eq!(
ancestors_refs, expected_refs,
"Expected round 11 ancestors: {:?}. Got: {:?}",
expected_refs, ancestors_refs
"Expected round 11 ancestors: {expected_refs:?}. Got: {ancestors_refs:?}"
);
}

@@ -1558,7 +1555,7 @@ mod test {
for block_ref in block_refs.clone() {
let slot = block_ref.into();
let found = dag_state.contains_cached_block_at_slot(slot);
assert!(found, "A block should be found at slot {}", slot);
assert!(found, "A block should be found at slot {slot}");
}

// Now try to ask also for one block ref that is not in cache
@@ -2144,8 +2141,7 @@ mod test {
if block_ref.round > gc_round && all_committed_blocks.contains(block_ref) {
assert!(
block_info.committed,
"Block {:?} should be committed",
block_ref
"Block {block_ref:?} should be committed"
);
};
});
10 changes: 4 additions & 6 deletions consensus/core/src/linearizer.rs
@@ -124,7 +124,7 @@ impl Linearizer {
);
let serialized = commit
.serialize()
.unwrap_or_else(|e| panic!("Failed to serialize commit: {}", e));
.unwrap_or_else(|e| panic!("Failed to serialize commit: {e}"));
let commit = TrustedCommit::new_trusted(commit, serialized);

// Create the corresponding committed sub dag
@@ -165,8 +165,7 @@ impl Linearizer {
if context.protocol_config.consensus_linearize_subdag_v2() {
assert!(
dag_state.set_committed(&leader_block_ref),
"Leader block with reference {:?} attempted to be committed twice",
leader_block_ref
"Leader block with reference {leader_block_ref:?} attempted to be committed twice"
);

while let Some(x) = buffer.pop() {
@@ -247,8 +246,7 @@ impl Linearizer {
if gc_enabled {
assert!(
to_commit.iter().all(|block| block.round() > gc_round),
"No blocks <= {gc_round} should be committed. Leader round {}, blocks {to_commit:?}.",
leader_block_ref
"No blocks <= {gc_round} should be committed. Leader round {leader_block_ref}, blocks {to_commit:?}."
);
}

@@ -899,7 +897,7 @@ mod tests {
"Block D1 should have been committed."
);
} else {
panic!("Unexpected subdag with index {:?}", idx);
panic!("Unexpected subdag with index {idx:?}");
}

for block in subdag.blocks.iter() {
6 changes: 3 additions & 3 deletions consensus/core/src/network/tonic_network.rs
@@ -886,13 +886,13 @@ fn to_host_port_str(addr: &Multiaddr) -> Result<String, String> {

match (iter.next(), iter.next()) {
(Some(Protocol::Ip4(ipaddr)), Some(Protocol::Udp(port))) => {
Ok(format!("{}:{}", ipaddr, port))
Ok(format!("{ipaddr}:{port}"))
}
(Some(Protocol::Ip6(ipaddr)), Some(Protocol::Udp(port))) => {
Ok(format!("{}:{}", ipaddr, port))
Ok(format!("{ipaddr}:{port}"))
}
(Some(Protocol::Dns(hostname)), Some(Protocol::Udp(port))) => {
Ok(format!("{}:{}", hostname, port))
Ok(format!("{hostname}:{port}"))
}

_ => Err(format!("unsupported multiaddr: {addr}")),
4 changes: 2 additions & 2 deletions consensus/core/src/network/tonic_tls.rs
@@ -29,7 +29,7 @@ pub(crate) fn create_rustls_server_config(
let tls_private_key = self_signed_cert.rustls_private_key();
let mut tls_config = verifier
.rustls_server_config(vec![tls_cert], tls_private_key)
.unwrap_or_else(|e| panic!("Failed to create TLS server config: {:?}", e));
.unwrap_or_else(|e| panic!("Failed to create TLS server config: {e:?}"));
tls_config.alpn_protocols = vec![b"h2".to_vec()];
tls_config
}
@@ -54,7 +54,7 @@ pub(crate) fn create_rustls_client_config(
let mut tls_config =
iota_tls::ServerCertVerifier::new(target_public_key, certificate_server_name(context))
.rustls_client_config(vec![tls_cert], tls_private_key)
.unwrap_or_else(|e| panic!("Failed to create TLS client config: {:?}", e));
.unwrap_or_else(|e| panic!("Failed to create TLS client config: {e:?}"));
// ServerCertVerifier sets alpn for completeness, but alpn cannot be predefined
// when using HttpsConnector from hyper-rustls, as in TonicManager.
tls_config.alpn_protocols = vec![];