Skip to content

Commit 9e3d452

Browse files
committed
Fix rustfmt/clippy issues
1 parent 1a88d91 commit 9e3d452

File tree

7 files changed

+45
-46
lines changed

7 files changed

+45
-46
lines changed

lib/authorization.rs

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -12,8 +12,6 @@ use crate::types::{
1212
get_address,
1313
};
1414

15-
use hex;
16-
1715
fn borsh_serialize_verifying_key<W>(
1816
vk: &VerifyingKey,
1917
writer: &mut W,
@@ -148,7 +146,7 @@ impl utoipa::ToSchema for Authorization {
148146

149147
impl utoipa::PartialSchema for Authorization {
150148
fn schema() -> utoipa::openapi::RefOr<utoipa::openapi::schema::Schema> {
151-
use utoipa::openapi::*;
149+
use utoipa::openapi::{Object, RefOr, Schema, schema};
152150
let obj = Object::builder()
153151
.property(
154152
"verifying_key",

lib/bench.rs

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,8 @@
33

44
use criterion::{Criterion, criterion_group, criterion_main};
55

6+
#[allow(dead_code, unused_imports)]
7+
mod authorization;
68
#[allow(dead_code)]
79
mod state;
810
#[allow(dead_code)]

lib/state/bench.rs

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -12,10 +12,11 @@ use sneed::{Env, EnvError};
1212
#[cfg(feature = "utreexo")]
1313
use crate::types::{Accumulator, AccumulatorDiff};
1414
use crate::{
15+
authorization::Authorization,
1516
state::State,
1617
types::{
17-
Address, Authorization, Block, Body, FilledTransaction, GetValue as _,
18-
Header, MerkleRoot, OutPoint, Output, OutputContent, PointedOutputRef,
18+
Address, Block, Body, FilledTransaction, GetValue as _, Header,
19+
MerkleRoot, OutPoint, Output, OutputContent, PointedOutputRef,
1920
Transaction, hash,
2021
proto::mainchain::{self, TwoWayPegData},
2122
},

lib/state/block.rs

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -434,6 +434,7 @@ pub fn connect_prevalidated(
434434
}
435435

436436
// Parallel collection of transaction operations
437+
#[allow(clippy::type_complexity)]
437438
let tx_results: Vec<(
438439
Vec<OutPoint>,
439440
Vec<(OutPoint, SpentOutput)>,
@@ -484,6 +485,7 @@ pub fn connect_prevalidated(
484485
.collect();
485486

486487
// Separate the three vectors
488+
#[allow(clippy::type_complexity)]
487489
let (tx_deletes, tx_stxo_puts, tx_utxo_puts): (
488490
Vec<Vec<OutPoint>>,
489491
Vec<Vec<(OutPoint, SpentOutput)>>,

lib/state/mod.rs

Lines changed: 13 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -71,12 +71,12 @@ impl MemoryPools {
7171

7272
/// Get a pre-allocated vector or create a new one
7373
fn get_outpoint_key_vec(&self, capacity: usize) -> Vec<OutPointKey> {
74-
if let Ok(mut pool) = self.outpoint_key_pool.lock() {
75-
if let Some(mut vec) = pool.pop() {
76-
vec.clear();
77-
vec.reserve(capacity);
78-
return vec;
79-
}
74+
if let Ok(mut pool) = self.outpoint_key_pool.lock()
75+
&& let Some(mut vec) = pool.pop()
76+
{
77+
vec.clear();
78+
vec.reserve(capacity);
79+
return vec;
8080
}
8181
Vec::with_capacity(capacity)
8282
}
@@ -87,11 +87,11 @@ impl MemoryPools {
8787
// Prevent excessive memory usage
8888
vec.shrink_to(1024);
8989
}
90-
if let Ok(mut pool) = self.outpoint_key_pool.lock() {
91-
if pool.len() < 8 {
92-
// Limit pool size
93-
pool.push(vec);
94-
}
90+
if let Ok(mut pool) = self.outpoint_key_pool.lock()
91+
&& pool.len() < 8
92+
{
93+
// Limit pool size
94+
pool.push(vec);
9595
}
9696
}
9797
}
@@ -697,7 +697,7 @@ impl State {
697697
let results = Arc::new(Mutex::new(vec![None; blocks.len()]));
698698
let error_occurred = Arc::new(Mutex::new(None));
699699

700-
let chunk_size = (blocks.len() + num_workers - 1) / num_workers;
700+
let chunk_size = blocks.len().div_ceil(num_workers);
701701
let mut handles = Vec::new();
702702

703703
for worker_id in 0..num_workers {
@@ -768,8 +768,7 @@ impl State {
768768
let results = Arc::try_unwrap(results).unwrap().into_inner().unwrap();
769769
let results: Result<Vec<_>, _> = results
770770
.into_iter()
771-
.enumerate()
772-
.map(|(_i, opt)| opt.ok_or(Error::Authorization))
771+
.map(|opt| opt.ok_or(Error::Authorization))
773772
.collect();
774773

775774
results

lib/state/parallel.rs

Lines changed: 22 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -170,7 +170,7 @@ impl ValidationWorker {
170170
};
171171
let key = OutPointKey::from(&outpoint);
172172
let serialized = borsh::to_vec(output)
173-
.map_err(|e| crate::state::Error::BorshSerialize(e))?;
173+
.map_err(crate::state::Error::BorshSerialize)?;
174174
Ok::<(OutPointKey, Vec<u8>), crate::state::Error>((
175175
key, serialized,
176176
))
@@ -283,7 +283,7 @@ impl ValidationWorker {
283283
};
284284

285285
// Send result to Stage B coordinator
286-
if let Err(_) = self.result_sender.send(validation_result) {
286+
if self.result_sender.send(validation_result).is_err() {
287287
tracing::warn!(
288288
"Worker {} failed to send result - coordinator may have shut down",
289289
self.worker_id
@@ -512,7 +512,7 @@ impl WriterCoordinator {
512512
// Deserialize the SpentOutput to use with the typed database
513513
let spent_output: SpentOutput =
514514
borsh::from_slice(serialized_spent_output)
515-
.map_err(|e| Error::BorshDeserialize(e))?;
515+
.map_err(Error::BorshDeserialize)?;
516516
self.state.stxos.put(rwtxn, key, &spent_output)?;
517517
}
518518

@@ -521,7 +521,7 @@ impl WriterCoordinator {
521521
// Deserialize the Output to use with the typed database
522522
let output: crate::types::Output =
523523
borsh::from_slice(serialized_output)
524-
.map_err(|e| Error::BorshDeserialize(e))?;
524+
.map_err(Error::BorshDeserialize)?;
525525
self.state.utxos.put(rwtxn, key, &output)?;
526526
}
527527

@@ -554,7 +554,7 @@ impl ParallelBlockProcessor {
554554
env: Arc<Env>,
555555
num_workers: usize,
556556
) -> Result<Self, Error> {
557-
let num_workers = num_workers.min(MAX_PARALLEL_BLOCKS).max(1);
557+
let num_workers = num_workers.clamp(1, MAX_PARALLEL_BLOCKS);
558558

559559
// Create channels for Stage A (parallel workers)
560560
let mut work_senders = Vec::with_capacity(num_workers);
@@ -713,13 +713,10 @@ impl Drop for ParallelBlockProcessor {
713713
self.work_senders.clear();
714714

715715
// Wait for coordinator if still running
716-
if let Some(coordinator_handle) = self.coordinator_handle.take() {
717-
if let Err(e) = coordinator_handle.join() {
718-
tracing::error!(
719-
"Coordinator panicked during drop: {:?}",
720-
e
721-
);
722-
}
716+
if let Some(coordinator_handle) = self.coordinator_handle.take()
717+
&& let Err(e) = coordinator_handle.join()
718+
{
719+
tracing::error!("Coordinator panicked during drop: {:?}", e);
723720
}
724721
}
725722
}
@@ -832,17 +829,17 @@ mod tests {
832829
};
833830

834831
// Test coordination logic
835-
if let Some(work) = pending_work.remove(&result.block_id) {
836-
if result.result.is_ok() {
837-
let pending = PendingBlock {
838-
block_id: work.block_id,
839-
header: work.header,
840-
body: work.body,
841-
prevalidated: result.result.unwrap(),
842-
serialized_data: result.serialized_data,
843-
};
844-
pending_blocks.insert(work.block_id, pending);
845-
}
832+
if let Some(work) = pending_work.remove(&result.block_id)
833+
&& result.result.is_ok()
834+
{
835+
let pending = PendingBlock {
836+
block_id: work.block_id,
837+
header: work.header,
838+
body: work.body,
839+
prevalidated: result.result.unwrap(),
840+
serialized_data: result.serialized_data,
841+
};
842+
pending_blocks.insert(work.block_id, pending);
846843
}
847844

848845
assert_eq!(pending_blocks.len(), 1);
@@ -900,6 +897,7 @@ mod tests {
900897

901898
/// Integration test demonstrating Phase 1, 2, and 3 optimization compatibility
902899
#[test]
900+
#[allow(clippy::assertions_on_constants)]
903901
fn test_phase_integration() {
904902
// This test validates that Phase 3 parallel processing integrates
905903
// correctly with Phase 1 (sorted operations) and Phase 2 (memory pools)
@@ -916,7 +914,7 @@ mod tests {
916914
assert_eq!(memory_pool.capacity(), 100);
917915

918916
// Phase 3: Parallel processing maintains data integrity
919-
let block_ids = vec![0, 1, 2, 3, 4];
917+
let block_ids = [0, 1, 2, 3, 4];
920918
let processed_in_parallel =
921919
block_ids.iter().map(|&id| id * 2).collect::<Vec<_>>();
922920
assert_eq!(processed_in_parallel, vec![0, 2, 4, 6, 8]);

lib/wallet.rs

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,6 @@ use ed25519_dalek_bip32::{ChildIndex, DerivationPath, ExtendedSigningKey};
99
use fallible_iterator::FallibleIterator as _;
1010
use futures::{Stream, StreamExt};
1111

12-
use crate::types::get_address;
1312
use heed::types::{Bytes, SerdeBincode, U8};
1413
#[cfg(feature = "utreexo")]
1514
use rustreexo::accumulator::node_hash::BitcoinNodeHash;
@@ -21,10 +20,10 @@ use sneed::{
2120
use tokio_stream::{StreamMap, wrappers::WatchStream};
2221

2322
#[cfg(feature = "utreexo")]
24-
use crate::types::{Accumulator, UtreexoError, get_address};
23+
use crate::types::{Accumulator, UtreexoError};
2524
pub use crate::types::{
2625
Address, Authorization, AuthorizedTransaction, GetValue, InPoint, OutPoint,
27-
OutPointKey, Output, OutputContent, SpentOutput, Transaction,
26+
OutPointKey, Output, OutputContent, SpentOutput, Transaction, get_address,
2827
};
2928
use crate::{
3029
types::{

0 commit comments

Comments (0)