Skip to content

Commit 6b7ec85

Browse files
committed
refactor(storage): use associated type for BatchWriter and add SQLite CI
- Change StorageBackend::batch() to return Self::Batch associated type instead of Box<dyn BatchWriter> to avoid unnecessary heap allocation
- Update BatchWriter::commit() signature from Box<Self> to self
- Update all three backends (RocksDB, SQLite, IndexedDB) to use the new pattern
- Add BatchWriter import to storage_trait.rs for default implementations
- Add SQLite CI test job for ubuntu, macos, and windows
- Add make clippy-sqlite and test-sqlite targets
- Forward sqlite feature from light-client-bin to light-client-lib
- Fix TempDir lifetime issue in test helpers to keep temp directory alive
1 parent 52b3c11 commit 6b7ec85

File tree

10 files changed

+98
-56
lines changed

10 files changed

+98
-56
lines changed

.github/workflows/ci.yaml

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -91,6 +91,22 @@ jobs:
9191
- name: UnitTest
9292
if: matrix.os == 'macos-latest'
9393
run: make test-portable
94+
test-sqlite:
95+
name: Tests / Build & Test (SQLite)
96+
needs: [ rustfmt, clippy ]
97+
runs-on: ${{ matrix.os }}
98+
strategy:
99+
matrix:
100+
os: [ ubuntu-latest, macos-latest, windows-2022 ]
101+
steps:
102+
- name: Checkout the Repository
103+
uses: actions/checkout@v4
104+
- name: Install cargo-nextest
105+
uses: taiki-e/install-action@nextest
106+
- name: Clippy (SQLite)
107+
run: make clippy-sqlite
108+
- name: UnitTest (SQLite)
109+
run: make test-sqlite
94110
code_coverage:
95111
name: Code Coverage
96112
needs: [ test ]

Makefile

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,11 @@ clippy:
1414
cargo clippy --target wasm32-unknown-unknown -p light-client-wasm -p ckb-light-client-lib -p light-client-db-common -p light-client-db-worker --locked -- --deny warnings
1515
# Run clippy for native targets
1616
cargo clippy -p ckb-light-client --locked -- --deny warnings
17+
18+
clippy-sqlite:
19+
# Run clippy for native targets with sqlite feature
20+
cargo clippy --features sqlite -p ckb-light-client-lib -p ckb-light-client --locked -- --deny warnings
21+
1722
build:
1823
cargo build
1924

@@ -27,6 +32,9 @@ test:
2732
test-portable:
2833
cargo nextest run --features portable --hide-progress-bar --success-output immediate --failure-output immediate -p ckb-light-client-lib -p ckb-light-client
2934

35+
test-sqlite:
36+
cargo nextest run --features sqlite --hide-progress-bar --success-output immediate --failure-output immediate -p ckb-light-client-lib -p ckb-light-client
37+
3038
test-wasm:
3139
wasm-pack test --node ./wasm/light-client-db-common/
3240

light-client-bin/Cargo.toml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -49,6 +49,7 @@ tempfile = "3.0"
4949
default = []
5050
portable = ["rocksdb/portable"]
5151
march-native = ["rocksdb/march-native"]
52+
sqlite = ["ckb-light-client-lib/sqlite"]
5253

5354
# [profile.release]
5455
# overflow-checks = true

light-client-bin/src/tests/mod.rs

Lines changed: 11 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -7,13 +7,15 @@ use ckb_light_client_lib::{
77
storage::{LightClientStorage, Storage, StorageWithChainData},
88
};
99
use ckb_resource::Resource;
10+
use tempfile::TempDir;
1011

1112
use crate::rpc::{BlockFilterRpcImpl, ChainRpcImpl, TransactionRpcImpl};
1213
use std::sync::Arc;
1314

14-
pub(crate) fn new_storage(prefix: &str) -> Storage {
15+
pub(crate) fn new_storage(prefix: &str) -> (Storage, TempDir) {
1516
let tmp_dir = tempfile::Builder::new().prefix(prefix).tempdir().unwrap();
16-
Storage::new(tmp_dir.path().to_str().unwrap())
17+
let storage = Storage::new(tmp_dir.path().to_str().unwrap());
18+
(storage, tmp_dir)
1719
}
1820

1921
pub(crate) fn create_peers() -> Arc<Peers> {
@@ -32,6 +34,8 @@ pub(crate) fn create_peers() -> Arc<Peers> {
3234
pub(crate) struct MockChain {
3335
storage: Storage,
3436
consensus: Consensus,
37+
#[allow(dead_code)]
38+
tmp_dir: TempDir,
3539
}
3640

3741
impl MockChain {
@@ -43,7 +47,11 @@ impl MockChain {
4347
.build_consensus()
4448
.expect("build consensus should be OK");
4549
storage.init_genesis_block(consensus.genesis_block().data());
46-
MockChain { storage, consensus }
50+
MockChain {
51+
storage,
52+
consensus,
53+
tmp_dir,
54+
}
4755
}
4856

4957
pub(crate) fn new_with_default_pow(prefix: &str) -> Self {

light-client-bin/src/tests/service.rs

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,7 @@ use crate::{
3434

3535
#[test]
3636
fn rpc() {
37-
let storage = new_storage("rpc");
37+
let (storage, _tmp_dir) = new_storage("rpc");
3838
let rpc = create_block_filter_rpc(storage.clone(), create_peers());
3939

4040
// setup test data
@@ -1062,7 +1062,7 @@ fn rpc() {
10621062

10631063
#[test]
10641064
fn get_cells_capacity_bug() {
1065-
let storage = new_storage("get_cells_capacity_bug");
1065+
let (storage, _tmp_dir) = new_storage("get_cells_capacity_bug");
10661066
let rpc = create_block_filter_rpc(storage.clone(), create_peers());
10671067

10681068
// setup test data
@@ -1186,7 +1186,7 @@ fn get_cells_capacity_bug() {
11861186

11871187
#[test]
11881188
fn get_cells_after_rollback_bug() {
1189-
let storage = new_storage("get_cells_after_rollback_bug");
1189+
let (storage, _tmp_dir) = new_storage("get_cells_after_rollback_bug");
11901190
let rpc = create_block_filter_rpc(storage.clone(), create_peers());
11911191

11921192
// setup test data
@@ -1378,7 +1378,7 @@ fn get_cells_after_rollback_bug() {
13781378

13791379
#[test]
13801380
fn test_set_scripts_clear_matched_blocks() {
1381-
let storage = new_storage("set-scripts-clear-matched-blocks");
1381+
let (storage, _tmp_dir) = new_storage("set-scripts-clear-matched-blocks");
13821382
let peers = create_peers();
13831383
let rpc = create_block_filter_rpc(storage.clone(), Arc::clone(&peers));
13841384

@@ -1427,7 +1427,7 @@ fn test_set_scripts_clear_matched_blocks() {
14271427

14281428
#[test]
14291429
fn test_set_scripts_command() {
1430-
let storage = new_storage("set-scripts-command");
1430+
let (storage, _tmp_dir) = new_storage("set-scripts-command");
14311431
let peers = create_peers();
14321432
let rpc = create_block_filter_rpc(storage.clone(), Arc::clone(&peers));
14331433

@@ -1513,7 +1513,7 @@ fn test_set_scripts_command() {
15131513

15141514
#[test]
15151515
fn test_set_scripts_partial_min_filtered_block_number_bug() {
1516-
let storage = new_storage("set_scripts_partial_min_filtered_block_number_bug");
1516+
let (storage, _tmp_dir) = new_storage("set_scripts_partial_min_filtered_block_number_bug");
15171517
let peers = create_peers();
15181518
let rpc = create_block_filter_rpc(storage.clone(), Arc::clone(&peers));
15191519

@@ -1562,7 +1562,7 @@ fn test_set_scripts_partial_min_filtered_block_number_bug() {
15621562

15631563
#[test]
15641564
fn test_set_scripts_delete_min_filtered_block_number_bug() {
1565-
let storage = new_storage("set_scripts_delete_min_filtered_block_number_bug");
1565+
let (storage, _tmp_dir) = new_storage("set_scripts_delete_min_filtered_block_number_bug");
15661566
let peers = create_peers();
15671567
let rpc = create_block_filter_rpc(storage.clone(), Arc::clone(&peers));
15681568

@@ -1608,7 +1608,7 @@ fn test_set_scripts_delete_min_filtered_block_number_bug() {
16081608

16091609
#[test]
16101610
fn test_chain_txs_in_same_block_bug() {
1611-
let storage = new_storage("chain_txs_in_same_block_bug");
1611+
let (storage, _tmp_dir) = new_storage("chain_txs_in_same_block_bug");
16121612
let rpc = create_block_filter_rpc(storage.clone(), create_peers());
16131613

16141614
// setup test data

light-client-lib/src/storage/backend.rs

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@ pub trait BatchWriter {
2626
fn delete(&mut self, key: &[u8]);
2727

2828
/// Commit all operations in the batch atomically
29-
fn commit(self: Box<Self>) -> Result<()>;
29+
fn commit(self) -> Result<()>;
3030
}
3131

3232
/// Low-level storage backend trait
@@ -35,6 +35,9 @@ pub trait BatchWriter {
3535
/// (RocksDB, SQLite, IndexedDB) must implement. Business logic is implemented
3636
/// as default methods in `LightClientStorage` which builds on top of this trait.
3737
pub trait StorageBackend: Send + Sync {
38+
/// The batch type for this storage backend
39+
type Batch: BatchWriter;
40+
3841
// ========== Basic KV operations ==========
3942

4043
/// Get value by key
@@ -49,7 +52,7 @@ pub trait StorageBackend: Send + Sync {
4952
// ========== Batch operations ==========
5053

5154
/// Create a new batch for atomic writes
52-
fn batch(&self) -> Box<dyn BatchWriter>;
55+
fn batch(&self) -> Self::Batch;
5356

5457
// ========== Iterator operations ==========
5558

light-client-lib/src/storage/db/browser.rs

Lines changed: 21 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -299,14 +299,6 @@ impl Storage {
299299
Self { channel: chan }
300300
}
301301

302-
fn batch(&self) -> Batch {
303-
Batch {
304-
add: vec![],
305-
delete: vec![],
306-
comm_arrays: self.channel.clone(),
307-
}
308-
}
309-
310302
pub fn get<K: AsRef<[u8]>>(&self, key: K) -> Result<Option<Vec<u8>>> {
311303
let values = self
312304
.channel
@@ -404,7 +396,19 @@ pub struct Batch {
404396
comm_arrays: CommunicationChannel,
405397
}
406398

407-
impl Batch {
399+
// Implement BatchWriter trait for Batch
400+
impl BatchWriter for Batch {
401+
fn put(&mut self, key: &[u8], value: &[u8]) {
402+
self.add.push(KV {
403+
key: key.to_vec(),
404+
value: value.to_vec(),
405+
});
406+
}
407+
408+
fn delete(&mut self, key: &[u8]) {
409+
self.delete.push(key.to_vec());
410+
}
411+
408412
fn commit(self) -> Result<()> {
409413
if !self.add.is_empty() {
410414
self.comm_arrays
@@ -428,26 +432,10 @@ impl Batch {
428432
}
429433
}
430434

431-
// Implement BatchWriter trait for Batch
432-
impl BatchWriter for Batch {
433-
fn put(&mut self, key: &[u8], value: &[u8]) {
434-
self.add.push(KV {
435-
key: key.to_vec(),
436-
value: value.to_vec(),
437-
});
438-
}
439-
440-
fn delete(&mut self, key: &[u8]) {
441-
self.delete.push(key.to_vec());
442-
}
443-
444-
fn commit(self: Box<Self>) -> Result<()> {
445-
Batch::commit(*self)
446-
}
447-
}
448-
449435
// Implementation of StorageBackend trait for IndexedDB
450436
impl StorageBackend for Storage {
437+
type Batch = Batch;
438+
451439
fn get(&self, key: Vec<u8>) -> Result<Option<Vec<u8>>> {
452440
let values = self
453441
.channel
@@ -479,8 +467,12 @@ impl StorageBackend for Storage {
479467
.map_err(|e| Error::Indexdb(format!("{:?}", e)))
480468
}
481469

482-
fn batch(&self) -> Box<dyn BatchWriter> {
483-
Box::new(Storage::batch(self))
470+
fn batch(&self) -> Self::Batch {
471+
Batch {
472+
add: vec![],
473+
delete: vec![],
474+
comm_arrays: self.channel.clone(),
475+
}
484476
}
485477

486478
fn collect_iterator(

light-client-lib/src/storage/db/native.rs

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -70,14 +70,16 @@ impl BatchWriter for Batch {
7070
self.wb.delete(key).expect("batch delete should be ok");
7171
}
7272

73-
fn commit(self: Box<Self>) -> Result<()> {
73+
fn commit(self) -> Result<()> {
7474
self.db.write(&self.wb)?;
7575
Ok(())
7676
}
7777
}
7878

7979
// Implementation of StorageBackend trait for RocksDB
8080
impl StorageBackend for Storage {
81+
type Batch = Batch;
82+
8183
fn get(&self, key: Vec<u8>) -> Result<Option<Vec<u8>>> {
8284
self.db
8385
.get(key.as_slice())
@@ -93,8 +95,8 @@ impl StorageBackend for Storage {
9395
self.db.delete(key).map_err(Into::into)
9496
}
9597

96-
fn batch(&self) -> Box<dyn BatchWriter> {
97-
Box::new(Storage::batch(self))
98+
fn batch(&self) -> Self::Batch {
99+
Storage::batch(self)
98100
}
99101

100102
fn collect_iterator(

light-client-lib/src/storage/db/sqlite.rs

Lines changed: 16 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@ use ckb_types::{
1818
};
1919
use rusqlite::{Connection, OpenFlags};
2020
use std::{
21-
path::Path,
21+
path::{Path, PathBuf},
2222
sync::{Arc, Mutex},
2323
};
2424

@@ -29,13 +29,21 @@ pub struct Storage {
2929

3030
impl Storage {
3131
pub fn new<P: AsRef<Path>>(path: P) -> Self {
32+
// If path is a directory, append db.sqlite filename
33+
// This allows consistent API with RocksDB which expects a directory
34+
let db_path: PathBuf = if path.as_ref().is_dir() {
35+
path.as_ref().join("db.sqlite")
36+
} else {
37+
path.as_ref().to_path_buf()
38+
};
39+
3240
let conn = Connection::open_with_flags(
33-
path,
41+
&db_path,
3442
OpenFlags::SQLITE_OPEN_READ_WRITE
3543
| OpenFlags::SQLITE_OPEN_CREATE
3644
| OpenFlags::SQLITE_OPEN_NO_MUTEX,
3745
)
38-
.expect("Failed to open sqlite database");
46+
.unwrap_or_else(|e| panic!("Failed to open sqlite database: {:?}", e));
3947

4048
// Initialize the schema
4149
Self::init_schema(&conn).expect("Failed to initialize sqlite schema");
@@ -115,7 +123,7 @@ impl BatchWriter for Batch {
115123
self.operations.push(BatchOp::Delete(key.to_vec()));
116124
}
117125

118-
fn commit(self: Box<Self>) -> Result<()> {
126+
fn commit(self) -> Result<()> {
119127
let conn = self.conn.lock().unwrap();
120128
let tx = conn.unchecked_transaction().map_err(|e| {
121129
crate::error::Error::runtime(format!("Failed to begin transaction: {}", e))
@@ -153,6 +161,8 @@ impl BatchWriter for Batch {
153161

154162
// Implementation of StorageBackend trait for SQLite
155163
impl StorageBackend for Storage {
164+
type Batch = Batch;
165+
156166
fn get(&self, key: Vec<u8>) -> Result<Option<Vec<u8>>> {
157167
Storage::get(self, key)
158168
}
@@ -174,8 +184,8 @@ impl StorageBackend for Storage {
174184
Ok(())
175185
}
176186

177-
fn batch(&self) -> Box<dyn BatchWriter> {
178-
Box::new(Storage::batch(self))
187+
fn batch(&self) -> Self::Batch {
188+
Storage::batch(self)
179189
}
180190

181191
fn collect_iterator(

light-client-lib/src/storage/storage_trait.rs

Lines changed: 7 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -6,11 +6,13 @@
66
use std::collections::{HashMap, HashSet};
77

88
use super::{
9-
backend::StorageBackend, db::IteratorDirection, parse_matched_blocks, BlockNumber, Byte32,
10-
CellIndex, CellType, CpIndex, HeaderWithExtension, Key, KeyPrefix, MatchedBlock, MatchedBlocks,
11-
OutputIndex, Script, ScriptStatus, ScriptType, SetScriptsCommand, TxIndex, Value,
12-
WrappedBlockView, FILTER_SCRIPTS_KEY, GENESIS_BLOCK_KEY, LAST_N_HEADERS_KEY, LAST_STATE_KEY,
13-
MATCHED_FILTER_BLOCKS_KEY, MAX_CHECK_POINT_INDEX, MIN_FILTERED_BLOCK_NUMBER,
9+
backend::{BatchWriter, StorageBackend},
10+
db::IteratorDirection,
11+
parse_matched_blocks, BlockNumber, Byte32, CellIndex, CellType, CpIndex, HeaderWithExtension,
12+
Key, KeyPrefix, MatchedBlock, MatchedBlocks, OutputIndex, Script, ScriptStatus, ScriptType,
13+
SetScriptsCommand, TxIndex, Value, WrappedBlockView, FILTER_SCRIPTS_KEY, GENESIS_BLOCK_KEY,
14+
LAST_N_HEADERS_KEY, LAST_STATE_KEY, MATCHED_FILTER_BLOCKS_KEY, MAX_CHECK_POINT_INDEX,
15+
MIN_FILTERED_BLOCK_NUMBER,
1416
};
1517
use ckb_types::{
1618
core::HeaderView,

0 commit comments

Comments (0)