diff --git a/Cargo.lock b/Cargo.lock index 5c9af3b7..7c22136a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -138,9 +138,9 @@ checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" [[package]] name = "async-trait" -version = "0.1.85" +version = "0.1.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f934833b4b7233644e5848f235df3f57ed8c80f1528a26c3dfa13d2147fa056" +checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" dependencies = [ "proc-macro2", "quote", @@ -358,12 +358,19 @@ dependencies = [ "walkdir", ] +[[package]] +name = "cast" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" + [[package]] name = "cc" -version = "1.2.9" +version = "1.2.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8293772165d9345bdaaa39b45b2109591e63fe5e6fbc23c6ff930a048aa310b" +checksum = "7a0aeaff4ff1a90589618835a598e545176939b97874f7abc7851caa0618f203" dependencies = [ + "find-msvc-tools", "jobserver", "libc", "shlex", @@ -778,6 +785,7 @@ dependencies = [ "ckb-chain-spec", "ckb-jsonrpc-types", "ckb-light-client-lib", + "ckb-light-client-rpc", "ckb-network", "ckb-resource", "ckb-rocksdb", @@ -794,6 +802,7 @@ dependencies = [ "jsonrpc-server-utils", "log", "rand 0.8.5", + "rusqlite", "serde_json", "tempfile", "tikv-jemallocator", @@ -839,8 +848,10 @@ dependencies = [ "linked-hash-map", "log", "numext-fixed-uint", + "parking_lot 0.12.5", "path-clean", "rand 0.8.5", + "rusqlite", "serde", "serde-wasm-bindgen", "serde_json", @@ -854,6 +865,22 @@ dependencies = [ "web-time", ] +[[package]] +name = "ckb-light-client-rpc" +version = "0.5.4" +dependencies = [ + "ckb-chain-spec", + "ckb-jsonrpc-types", + "ckb-light-client-lib", + "ckb-network", + "ckb-rocksdb", + "ckb-systemtime", + "ckb-traits", + "ckb-types", + "log", + "rusqlite", +] + [[package]] name = "ckb-logger" 
version = "1.0.0" @@ -1296,7 +1323,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8bef9c44650fc332dc72d419a60d25ccde5d6f9cc3bbbad33483ff55b305ae5" dependencies = [ "linked-hash-map", - "parking_lot 0.12.3", + "parking_lot 0.12.5", ] [[package]] @@ -1702,7 +1729,7 @@ dependencies = [ "hashbrown 0.14.5", "lock_api", "once_cell", - "parking_lot_core 0.9.10", + "parking_lot_core 0.9.12", ] [[package]] @@ -1716,7 +1743,7 @@ dependencies = [ "hashbrown 0.14.5", "lock_api", "once_cell", - "parking_lot_core 0.9.10", + "parking_lot_core 0.9.12", ] [[package]] @@ -1904,6 +1931,18 @@ dependencies = [ "rand 0.7.3", ] +[[package]] +name = "fallible-iterator" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2acce4a10f12dc2fb14a218589d4f1f62ef011b2d0cc4b3cb1bba8e94da14649" + +[[package]] +name = "fallible-streaming-iterator" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" + [[package]] name = "fast-socks5" version = "0.10.0" @@ -1936,6 +1975,12 @@ version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" +[[package]] +name = "find-msvc-tools" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "645cbb3a84e60b7531617d5ae4e57f7e27308f6445f5abf653209ea76dec8dff" + [[package]] name = "flate2" version = "1.0.35" @@ -1958,6 +2003,12 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" +[[package]] +name = "foldhash" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77ce24cb58228fbb8aa041425bb1050850ac19177686ea6e0f41a70416f56fdb" + [[package]] name = "foreign-types" version = "0.3.2" @@ 
-2228,7 +2279,7 @@ dependencies = [ "futures-timer", "no-std-compat", "nonzero_ext", - "parking_lot 0.12.3", + "parking_lot 0.12.5", "portable-atomic", "quanta", "rand 0.8.5", @@ -2249,7 +2300,7 @@ dependencies = [ "getrandom 0.3.3", "hashbrown 0.15.2", "nonzero_ext", - "parking_lot 0.12.3", + "parking_lot 0.12.5", "portable-atomic", "quanta", "rand 0.9.1", @@ -2306,7 +2357,25 @@ checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" dependencies = [ "allocator-api2", "equivalent", - "foldhash", + "foldhash 0.1.5", +] + +[[package]] +name = "hashbrown" +version = "0.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" +dependencies = [ + "foldhash 0.2.0", +] + +[[package]] +name = "hashlink" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea0b22561a9c04a7cb1a302c013e0259cd3b4bb619f145b32f72b8b4bcbed230" +dependencies = [ + "hashbrown 0.16.1", ] [[package]] @@ -2794,9 +2863,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.77" +version = "0.3.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" +checksum = "464a3709c7f55f1f721e5389aa6ea4e3bc6aba669353300af094b29ffbdde1d8" dependencies = [ "once_cell", "wasm-bindgen", @@ -2891,6 +2960,23 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "libm" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" + +[[package]] +name = "libsqlite3-sys" +version = "0.36.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95b4103cffefa72eb8428cb6b47d6627161e51c2739fc5e3b734584157bc642a" +dependencies = [ + "cc", + "pkg-config", + "vcpkg", +] + [[package]] name = "light-client-db-common" version = 
"0.1.0" @@ -2933,6 +3019,7 @@ dependencies = [ "ckb-chain-spec", "ckb-jsonrpc-types", "ckb-light-client-lib", + "ckb-light-client-rpc", "ckb-network", "ckb-resource", "ckb-stop-handler", @@ -2974,11 +3061,10 @@ checksum = "4ee93343901ab17bd981295f2cf0026d4ad018c7c31ba84549a4ddbb47a45104" [[package]] name = "lock_api" -version = "0.4.12" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" +checksum = "224399e74b87b5f3557511d98dff8b14089b3dadafcab6bb93eab67d3aace965" dependencies = [ - "autocfg", "scopeguard", ] @@ -3176,6 +3262,15 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38bf9645c8b145698bb0b18a4637dcacbc421ea49bef2317e4fd8065a387cf21" +[[package]] +name = "nu-ansi-term" +version = "0.50.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" +dependencies = [ + "windows-sys 0.59.0", +] + [[package]] name = "num-conv" version = "0.1.0" @@ -3189,6 +3284,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ "autocfg", + "libm", ] [[package]] @@ -3277,6 +3373,12 @@ version = "1.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" +[[package]] +name = "oorandom" +version = "11.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" + [[package]] name = "opaque-debug" version = "0.3.1" @@ -3354,12 +3456,12 @@ dependencies = [ [[package]] name = "parking_lot" -version = "0.12.3" +version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" +checksum = "93857453250e3077bd71ff98b6a65ea6621a19bb0f559a85248955ac12c45a1a" dependencies = [ "lock_api", - "parking_lot_core 0.9.10", + "parking_lot_core 0.9.12", ] [[package]] @@ -3378,15 +3480,15 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.10" +version = "0.9.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" +checksum = "2621685985a2ebf1c516881c026032ac7deafcda1a2c9b7850dc81e3dfcb64c1" dependencies = [ "cfg-if 1.0.0", "libc", "redox_syscall 0.5.8", "smallvec", - "windows-targets 0.52.6", + "windows-link", ] [[package]] @@ -3612,7 +3714,7 @@ dependencies = [ "fnv", "lazy_static", "memchr", - "parking_lot 0.12.3", + "parking_lot 0.12.5", "protobuf", "thiserror 1.0.69", ] @@ -3877,6 +3979,21 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "rusqlite" +version = "0.38.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1c93dd1c9683b438c392c492109cb702b8090b2bfc8fed6f6e4eb4523f17af3" +dependencies = [ + "bitflags 2.9.1", + "fallible-iterator", + "fallible-streaming-iterator", + "hashlink", + "libsqlite3-sys", + "smallvec", + "sqlite-wasm-rs", +] + [[package]] name = "rust-ini" version = "0.19.0" @@ -4191,6 +4308,19 @@ dependencies = [ "lock_api", ] +[[package]] +name = "sqlite-wasm-rs" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05e98301bf8b0540c7de45ecd760539b9c62f5772aed172f08efba597c11cd5d" +dependencies = [ + "cc", + "hashbrown 0.16.1", + "js-sys", + "thiserror 2.0.17", + "wasm-bindgen", +] + [[package]] name = "ssri" version = "9.2.0" @@ -4312,7 +4442,7 @@ dependencies = [ "log", "molecule", "nohash-hasher", - "parking_lot 0.12.3", + "parking_lot 0.12.5", "rand 0.8.5", "socket2", "tentacle-multiaddr", @@ -4382,11 +4512,11 @@ dependencies = [ [[package]] name = 
"thiserror" -version = "2.0.11" +version = "2.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d452f284b73e6d76dd36758a0c8684b1d5be31f92b89d07fd5822175732206fc" +checksum = "f63587ca0f12b72a0600bcba1d40081f830876000bb46dd2337a3051618f4fc8" dependencies = [ - "thiserror-impl 2.0.11", + "thiserror-impl 2.0.17", ] [[package]] @@ -4402,9 +4532,9 @@ dependencies = [ [[package]] name = "thiserror-impl" -version = "2.0.11" +version = "2.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26afc1baea8a989337eeb52b6e72a039780ce45c3edfcc9c5b9d112feeb173c2" +checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" dependencies = [ "proc-macro2", "quote", @@ -4686,7 +4816,7 @@ dependencies = [ "log", "rand 0.9.1", "sha1", - "thiserror 2.0.11", + "thiserror 2.0.17", "utf-8", ] @@ -4890,35 +5020,22 @@ dependencies = [ [[package]] name = "wasm-bindgen" -version = "0.2.100" +version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" +checksum = "0d759f433fa64a2d763d1340820e46e111a7a5ab75f993d1852d70b03dbb80fd" dependencies = [ "cfg-if 1.0.0", "once_cell", "rustversion", "wasm-bindgen-macro", -] - -[[package]] -name = "wasm-bindgen-backend" -version = "0.2.100" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" -dependencies = [ - "bumpalo", - "log", - "proc-macro2", - "quote", - "syn 2.0.96", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.50" +version = "0.4.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61" +checksum = "836d9622d604feee9e5de25ac10e3ea5f2d65b41eac0d9ce72eb5deae707ce7c" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -4929,9 +5046,9 @@ dependencies = [ 
[[package]] name = "wasm-bindgen-macro" -version = "0.2.100" +version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" +checksum = "48cb0d2638f8baedbc542ed444afc0644a29166f1595371af4fecf8ce1e7eeb3" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -4939,34 +5056,42 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.100" +version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" +checksum = "cefb59d5cd5f92d9dcf80e4683949f15ca4b511f4ac0a6e14d4e1ac60c6ecd40" dependencies = [ + "bumpalo", "proc-macro2", "quote", "syn 2.0.96", - "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.100" +version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" +checksum = "cbc538057e648b67f72a982e708d485b2efa771e1ac05fec311f9f63e5800db4" dependencies = [ "unicode-ident", ] [[package]] name = "wasm-bindgen-test" -version = "0.3.50" +version = "0.3.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66c8d5e33ca3b6d9fa3b4676d774c5778031d27a578c2b007f905acf816152c3" +checksum = "25e90e66d265d3a1efc0e72a54809ab90b9c0c515915c67cdf658689d2c22c6c" dependencies = [ + "async-trait", + "cast", "js-sys", + "libm", "minicov", + "nu-ansi-term", + "num-traits", + "oorandom", + "serde", + "serde_json", "wasm-bindgen", "wasm-bindgen-futures", "wasm-bindgen-test-macro", @@ -4974,9 +5099,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-test-macro" -version = "0.3.50" +version = "0.3.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17d5042cc5fa009658f9a7333ef24291b1291a25b6382dd68862a7f3b969f69b" +checksum = 
"7150335716dce6028bead2b848e72f47b45e7b9422f64cccdc23bedca89affc1" dependencies = [ "proc-macro2", "quote", @@ -4996,9 +5121,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.77" +version = "0.3.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" +checksum = "9b32828d774c412041098d182a8b38b16ea816958e07cf40eec2bc080ae137ac" dependencies = [ "js-sys", "wasm-bindgen", diff --git a/Cargo.toml b/Cargo.toml index acf5a637..1d590521 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,7 +4,8 @@ members = [ "light-client-bin", "./wasm/light-client-wasm", "./wasm/light-client-db-worker", - "./wasm/light-client-db-common", + "./wasm/light-client-db-common", + "light-client-rpc", ] resolver = "2" diff --git a/Makefile b/Makefile index 0782b632..f47f912d 100644 --- a/Makefile +++ b/Makefile @@ -11,7 +11,7 @@ fmt: clippy: # cargo clippy --workspace --locked -- --deny warnings # Run clippy for wasm targets - cargo clippy --target wasm32-unknown-unknown -p light-client-wasm -p ckb-light-client-lib -p light-client-db-common -p light-client-db-worker --locked -- --deny warnings + cargo clippy --target wasm32-unknown-unknown -p light-client-wasm -p ckb-light-client-lib -p light-client-db-common -p light-client-db-worker -p ckb-light-client-rpc --locked -- --deny warnings # Run clippy for native targets cargo clippy -p ckb-light-client --locked -- --deny warnings build: diff --git a/light-client-bin/Cargo.toml b/light-client-bin/Cargo.toml index 6772c556..03832784 100644 --- a/light-client-bin/Cargo.toml +++ b/light-client-bin/Cargo.toml @@ -1,44 +1,47 @@ [package] -name = "ckb-light-client" -version = "0.5.4" authors = ["Nervos Core Dev "] -edition = "2021" -license = "MIT" description = "A CKB light client based on FlyClient." 
+edition = "2021" homepage = "https://github.com/nervosnetwork/ckb-light-client" +license = "MIT" +name = "ckb-light-client" repository = "https://github.com/nervosnetwork/ckb-light-client" +version = "0.5.4" [dependencies] ckb-async-runtime = "1" ckb-chain-spec = "1" +ckb-jsonrpc-types = "1" ckb-network = "1" -ckb-stop-handler = "1" ckb-resource = "1" -ckb-jsonrpc-types = "1" -ckb-types = "1" -ckb-traits = "1" +ckb-stop-handler = "1" ckb-systemtime = "1" +ckb-traits = "1" +ckb-types = "1" -ckb-light-client-lib = { path = "../light-client-lib" } -clap = { version = "4", features = ["cargo"] } -log = "0.4" -ctrlc = { version = "3.2.1", features = ["termination"] } +ckb-light-client-lib = {path = "../light-client-lib", default-features = false} +ckb-light-client-rpc = {path = "../light-client-rpc", default-features = false} +clap = {version = "4", features = ["cargo"]} +ctrlc = {version = "3.2.1", features = ["termination"]} jsonrpc-core = "18.0" jsonrpc-derive = "18.0" jsonrpc-http-server = "18.0" jsonrpc-server-utils = "18.0" -rocksdb = { package = "ckb-rocksdb", version = "=0.21.1", features = [ - "snappy", -], default-features = false } -env_logger = "0.11" +log = "0.4" +rocksdb = {package = "ckb-rocksdb", version = "=0.21.1", features = [ + "snappy", +], default-features = false, optional = true} +rusqlite = {version = "0.38.0", features = ["bundled"], optional = true} + anyhow = "1.0.56" +env_logger = "0.11" [target.'cfg(not(target_env = "msvc"))'.dependencies] tikv-jemallocator = "0.6" [build-dependencies] -vergen-gitcl = { version = "1", default-features = false } chrono = "0.4" +vergen-gitcl = {version = "1", default-features = false} [dev-dependencies] rand = "0.8" @@ -46,12 +49,13 @@ serde_json = "1.0" tempfile = "3.0" [features] -default = [] -portable = ["rocksdb/portable"] +default = ["rocksdb"] march-native = ["rocksdb/march-native"] - +portable = ["rocksdb/portable"] +rocksdb = ["dep:rocksdb", "ckb-light-client-lib/rocksdb", 
"ckb-light-client-rpc/rocksdb"] +rusqlite = ["ckb-light-client-lib/rusqlite", "ckb-light-client-rpc/rusqlite", "dep:rusqlite"] # [profile.release] # overflow-checks = true [badges] -maintenance = { status = "experimental" } +maintenance = {status = "experimental"} diff --git a/light-client-bin/src/main.rs b/light-client-bin/src/main.rs index 4858383d..a7b66f0d 100644 --- a/light-client-bin/src/main.rs +++ b/light-client-bin/src/main.rs @@ -1,5 +1,11 @@ mod cli; mod rpc; +#[cfg(any( + all(feature = "rocksdb", feature = "rusqlite"), + not(any(feature = "rocksdb", feature = "rusqlite")) +))] +compile_error!("Exact one of features `rocksdb` and `rusqlite` can be selected"); + mod subcmds; #[cfg(test)] diff --git a/light-client-bin/src/rpc/mod.rs b/light-client-bin/src/rpc/mod.rs new file mode 100644 index 00000000..8c682b6f --- /dev/null +++ b/light-client-bin/src/rpc/mod.rs @@ -0,0 +1,281 @@ +use ckb_light_client_lib::{ + protocols::PendingTxs, + storage::{ + db::{ + StorageGeneralOperations, StorageGetPinnedRelatedOperations, StorageHighLevelOperations, + }, + Storage, + }, + types::RwLock, +}; +use ckb_light_client_rpc::{ + BlockFilterRpcImpl, BlockFilterRpcMethods, ChainRpcImpl, ChainRpcMethods, NetRpcImpl, + NetRpcMethods, TransactionRpcImpl, TransactionRpcMethods, +}; +use jsonrpc_core::{Error, IoHandler, Result}; +use jsonrpc_http_server::{AccessControlAllowOrigin, DomainsValidation, Server, ServerBuilder}; +use std::{net::ToSocketAddrs, sync::Arc}; + +use ckb_chain_spec::consensus::Consensus; +use ckb_jsonrpc_types::{BlockView, EstimateCycles, HeaderView, JsonBytes, Transaction, Uint32}; +use ckb_light_client_lib::{ + protocols::Peers, + service::{ + Cell, CellsCapacity, FetchStatus, LocalNode, Order, Pagination, RemoteNode, ScriptStatus, + SearchKey, SetScriptsCommand, TransactionWithStatus, Tx, + }, + storage::StorageWithChainData, +}; +use ckb_network::NetworkController; +use ckb_traits::CellDataProvider; +use ckb_types::H256; +use jsonrpc_derive::rpc; + 
+#[rpc(server)] +pub trait BlockFilterRpc { + /// curl http://localhost:9000/ -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0", "method":"set_scripts", "params": [{"script": {"code_hash": "0x9bd7e06f3ecf4be0f2fcd2188b23f1b9fcc88e5d4b65a8637b17723bbda3cce8", "hash_type": "type", "args": "0x50878ce52a68feb47237c29574d82288f58b5d21"}, "block_number": "0x59F74D"}], "id": 1}' + #[rpc(name = "set_scripts")] + fn set_scripts( + &self, + scripts: Vec, + command: Option, + ) -> Result<()>; + + #[rpc(name = "get_scripts")] + fn get_scripts(&self) -> Result>; + + #[rpc(name = "get_cells")] + fn get_cells( + &self, + search_key: SearchKey, + order: Order, + limit: Uint32, + after: Option, + ) -> Result>; + + #[rpc(name = "get_transactions")] + fn get_transactions( + &self, + search_key: SearchKey, + order: Order, + limit: Uint32, + after: Option, + ) -> Result>; + + #[rpc(name = "get_cells_capacity")] + fn get_cells_capacity(&self, search_key: SearchKey) -> Result; +} + +#[rpc(server)] +pub trait TransactionRpc { + #[rpc(name = "send_transaction")] + fn send_transaction(&self, tx: Transaction) -> Result; + + #[rpc(name = "get_transaction")] + fn get_transaction(&self, tx_hash: H256) -> Result; + + #[rpc(name = "fetch_transaction")] + fn fetch_transaction(&self, tx_hash: H256) -> Result>; +} + +#[rpc(server)] +pub trait ChainRpc { + #[rpc(name = "get_tip_header")] + fn get_tip_header(&self) -> Result; + + #[rpc(name = "get_genesis_block")] + fn get_genesis_block(&self) -> Result; + + #[rpc(name = "get_header")] + fn get_header(&self, block_hash: H256) -> Result>; + + #[rpc(name = "fetch_header")] + fn fetch_header( + &self, + block_hash: H256, + ) -> Result>; + + #[rpc(name = "estimate_cycles")] + fn estimate_cycles(&self, tx: Transaction) -> Result; +} + +#[rpc(server)] +pub trait NetRpc { + #[rpc(name = "local_node_info")] + fn local_node_info(&self) -> Result; + + #[rpc(name = "get_peers")] + fn get_peers(&self) -> Result>; +} + +impl BlockFilterRpc + for 
BlockFilterRpcImpl +{ + fn set_scripts( + &self, + scripts: Vec, + command: Option, + ) -> Result<()> { + BlockFilterRpcMethods::set_scripts(self, scripts, command) + .map_err(|e| Error::invalid_params(e.to_string())) + } + + fn get_scripts(&self) -> Result> { + BlockFilterRpcMethods::get_scripts(self).map_err(|e| Error::invalid_params(e.to_string())) + } + + fn get_cells( + &self, + search_key: SearchKey, + order: Order, + limit: Uint32, + after: Option, + ) -> Result> { + BlockFilterRpcMethods::get_cells(self, search_key, order, limit, after) + .map_err(|e| Error::invalid_params(e.to_string())) + } + + fn get_transactions( + &self, + search_key: SearchKey, + order: Order, + limit: Uint32, + after: Option, + ) -> Result> { + BlockFilterRpcMethods::get_transactions(self, search_key, order, limit, after) + .map_err(|e| Error::invalid_params(e.to_string())) + } + + fn get_cells_capacity(&self, search_key: SearchKey) -> Result { + BlockFilterRpcMethods::get_cells_capacity(self, search_key) + .map_err(|e| Error::invalid_params(e.to_string())) + } +} + +impl< + S: StorageHighLevelOperations + + StorageGetPinnedRelatedOperations + + CellDataProvider + + Send + + Sync + + Clone + + 'static, + > TransactionRpc for TransactionRpcImpl +{ + fn send_transaction(&self, tx: Transaction) -> Result { + TransactionRpcMethods::send_transaction(self, tx) + .map_err(|e| Error::invalid_params(e.to_string())) + } + + fn get_transaction(&self, tx_hash: H256) -> Result { + TransactionRpcMethods::get_transaction(self, tx_hash) + .map_err(|e| Error::invalid_params(e.to_string())) + } + + fn fetch_transaction(&self, tx_hash: H256) -> Result> { + TransactionRpcMethods::fetch_transaction(self, tx_hash) + .map_err(|e| Error::invalid_params(e.to_string())) + } +} + +impl< + S: StorageHighLevelOperations + + StorageGeneralOperations + + StorageGetPinnedRelatedOperations + + CellDataProvider + + Send + + Sync + + Clone + + 'static, + > ChainRpc for ChainRpcImpl +{ + fn get_tip_header(&self) -> 
Result { + ChainRpcMethods::get_tip_header(self).map_err(|e| Error::invalid_params(e.to_string())) + } + + fn get_genesis_block(&self) -> Result { + ChainRpcMethods::get_genesis_block(self).map_err(|e| Error::invalid_params(e.to_string())) + } + + fn get_header(&self, block_hash: H256) -> Result> { + ChainRpcMethods::get_header(self, block_hash) + .map_err(|e| Error::invalid_params(e.to_string())) + } + + fn fetch_header( + &self, + block_hash: H256, + ) -> Result> { + ChainRpcMethods::fetch_header(self, block_hash) + .map_err(|e| Error::invalid_params(e.to_string())) + } + + fn estimate_cycles(&self, tx: Transaction) -> Result { + ChainRpcMethods::estimate_cycles(self, tx).map_err(|e| Error::invalid_params(e.to_string())) + } +} + +impl NetRpc for NetRpcImpl { + fn local_node_info(&self) -> Result { + NetRpcMethods::local_node_info(self).map_err(|e| Error::invalid_params(e.to_string())) + } + + fn get_peers(&self) -> Result> { + NetRpcMethods::get_peers(self).map_err(|e| Error::invalid_params(e.to_string())) + } +} + +pub struct Service { + listen_address: String, +} + +impl Service { + pub fn new(listen_address: &str) -> Self { + Self { + listen_address: listen_address.to_string(), + } + } + + pub fn start( + &self, + network_controller: NetworkController, + storage: Storage, + peers: Arc, + pending_txs: Arc>, + consensus: Consensus, + ) -> Server { + let mut io_handler = IoHandler::new(); + let swc = StorageWithChainData::new(storage, Arc::clone(&peers), Arc::clone(&pending_txs)); + let consensus = Arc::new(consensus); + let block_filter_rpc_impl = BlockFilterRpcImpl { swc: swc.clone() }; + let chain_rpc_impl = ChainRpcImpl { + swc: swc.clone(), + consensus: Arc::clone(&consensus), + }; + let transaction_rpc_impl = TransactionRpcImpl { swc, consensus }; + let net_rpc_impl = NetRpcImpl { + network_controller, + peers, + }; + io_handler.extend_with(block_filter_rpc_impl.to_delegate()); + io_handler.extend_with(chain_rpc_impl.to_delegate()); + 
io_handler.extend_with(transaction_rpc_impl.to_delegate()); + io_handler.extend_with(net_rpc_impl.to_delegate()); + + ServerBuilder::new(io_handler) + .cors(DomainsValidation::AllowOnly(vec![ + AccessControlAllowOrigin::Null, + AccessControlAllowOrigin::Any, + ])) + .health_api(("/ping", "ping")) + .start_http( + &self + .listen_address + .to_socket_addrs() + .expect("config listen_address parsed") + .next() + .expect("config listen_address parsed"), + ) + .expect("Start Jsonrpc HTTP service") + } +} diff --git a/light-client-bin/src/subcmds.rs b/light-client-bin/src/subcmds.rs index 2441c9ef..d9a674af 100644 --- a/light-client-bin/src/subcmds.rs +++ b/light-client-bin/src/subcmds.rs @@ -16,7 +16,10 @@ use ckb_light_client_lib::{ FilterProtocol, LightClientProtocol, Peers, PendingTxs, RelayProtocol, SyncProtocol, BAD_MESSAGE_ALLOWED_EACH_HOUR, CHECK_POINT_INTERVAL, }, - storage::Storage, + storage::{ + db::{StorageBatchRelatedOperations, StorageGeneralOperations}, + Storage, + }, utils, }; diff --git a/light-client-bin/src/tests/mod.rs b/light-client-bin/src/tests/mod.rs index 043776df..1015bdd4 100644 --- a/light-client-bin/src/tests/mod.rs +++ b/light-client-bin/src/tests/mod.rs @@ -3,15 +3,16 @@ mod service; use ckb_chain_spec::{consensus::Consensus, ChainSpec}; use ckb_light_client_lib::{ protocols::{Peers, CHECK_POINT_INTERVAL}, - storage::Storage, + storage::{db::StorageBatchRelatedOperations, Storage}, }; use ckb_resource::Resource; +use tempfile::TempDir; use std::sync::Arc; -pub(crate) fn new_storage(prefix: &str) -> Storage { +pub(crate) fn new_storage(prefix: &str) -> (Storage, TempDir) { let tmp_dir = tempfile::Builder::new().prefix(prefix).tempdir().unwrap(); - Storage::new(tmp_dir.path().to_str().unwrap()) + (Storage::new(tmp_dir.path().to_str().unwrap()), tmp_dir) } pub(crate) fn create_peers() -> Arc { @@ -30,6 +31,8 @@ pub(crate) fn create_peers() -> Arc { pub(crate) struct MockChain { storage: Storage, consensus: Consensus, + #[allow(unused)] + 
temp_dir: TempDir, } impl MockChain { @@ -41,7 +44,11 @@ impl MockChain { .build_consensus() .expect("build consensus should be OK"); storage.init_genesis_block(consensus.genesis_block().data()); - MockChain { storage, consensus } + MockChain { + storage, + consensus, + temp_dir: tmp_dir, + } } pub(crate) fn new_with_default_pow(prefix: &str) -> Self { diff --git a/light-client-bin/src/tests/service.rs b/light-client-bin/src/tests/service.rs index a1cf2b28..a41b177a 100644 --- a/light-client-bin/src/tests/service.rs +++ b/light-client-bin/src/tests/service.rs @@ -1,6 +1,7 @@ use std::sync::Arc; use ckb_chain_spec::consensus::Consensus; +use ckb_light_client_rpc::{BlockFilterRpcImpl, ChainRpcImpl, TransactionRpcImpl}; use ckb_network::PeerIndex; use ckb_types::{ bytes::Bytes, @@ -15,28 +16,32 @@ use ckb_types::{ H256, U256, }; +use crate::{ + rpc::{BlockFilterRpc, ChainRpc, TransactionRpc}, + // tests::prelude::*, + // tests::utils::{create_peers, new_storage, MockChain}, + tests::{create_peers, new_storage, MockChain}, +}; +use ckb_light_client_lib::storage::db::StorageHighLevelOperations; use ckb_light_client_lib::{ protocols::{FetchInfo, LastState, ProveRequest, ProveState}, service::{ FetchStatus, Order, ScriptStatus, ScriptType, SearchKey, SearchKeyFilter, SetScriptsCommand, Status, TransactionWithStatus, TxStatus, }, - storage::{self, HeaderWithExtension, StorageWithChainData}, -}; - -use crate::{ - rpc::{ - BlockFilterRpc, BlockFilterRpcImpl, ChainRpc, ChainRpcImpl, TransactionRpc, - TransactionRpcImpl, + storage::{ + self, + db::{ + StorageBatchRelatedOperations, StorageGeneralOperations, + StorageGetPinnedRelatedOperations, + }, + HeaderWithExtension, StorageWithChainData, }, - // tests::prelude::*, - // tests::utils::{create_peers, new_storage, MockChain}, - tests::{create_peers, new_storage, MockChain}, }; #[test] fn rpc() { - let storage = new_storage("rpc"); + let (storage, _tmpdir) = new_storage("rpc"); let swc = 
StorageWithChainData::new(storage.clone(), create_peers(), Default::default()); let rpc = BlockFilterRpcImpl { swc }; @@ -1068,7 +1073,7 @@ fn rpc() { #[test] fn get_cells_capacity_bug() { - let storage = new_storage("get_cells_capacity_bug"); + let (storage, _tmpdir) = new_storage("get_cells_capacity_bug"); let swc = StorageWithChainData::new(storage.clone(), create_peers(), Default::default()); let rpc = BlockFilterRpcImpl { swc }; @@ -1193,7 +1198,7 @@ fn get_cells_capacity_bug() { #[test] fn get_cells_after_rollback_bug() { - let storage = new_storage("get_cells_after_rollback_bug"); + let (storage, _tmpdir) = new_storage("get_cells_after_rollback_bug"); let swc = StorageWithChainData::new(storage.clone(), create_peers(), Default::default()); let rpc = BlockFilterRpcImpl { swc }; @@ -1386,7 +1391,7 @@ fn get_cells_after_rollback_bug() { #[test] fn test_set_scripts_clear_matched_blocks() { - let storage = new_storage("set-scripts-clear-matched-blocks"); + let (storage, _tmpdir) = new_storage("set-scripts-clear-matched-blocks"); let peers = create_peers(); let swc = StorageWithChainData::new(storage.clone(), Arc::clone(&peers), Default::default()); let rpc = BlockFilterRpcImpl { swc }; @@ -1436,7 +1441,7 @@ fn test_set_scripts_clear_matched_blocks() { #[test] fn test_set_scripts_command() { - let storage = new_storage("set-scripts-command"); + let (storage, _tmpdir) = new_storage("set-scripts-command"); let peers = create_peers(); let swc = StorageWithChainData::new(storage.clone(), Arc::clone(&peers), Default::default()); let rpc = BlockFilterRpcImpl { swc }; @@ -1523,7 +1528,7 @@ fn test_set_scripts_command() { #[test] fn test_set_scripts_partial_min_filtered_block_number_bug() { - let storage = new_storage("set_scripts_partial_min_filtered_block_number_bug"); + let (storage, _tmpdir) = new_storage("set_scripts_partial_min_filtered_block_number_bug"); let peers = create_peers(); let swc = StorageWithChainData::new(storage.clone(), Arc::clone(&peers), 
Default::default()); let rpc = BlockFilterRpcImpl { swc }; @@ -1573,7 +1578,7 @@ fn test_set_scripts_partial_min_filtered_block_number_bug() { #[test] fn test_set_scripts_delete_min_filtered_block_number_bug() { - let storage = new_storage("set_scripts_delete_min_filtered_block_number_bug"); + let (storage, _tmpdir) = new_storage("set_scripts_delete_min_filtered_block_number_bug"); let peers = create_peers(); let swc = StorageWithChainData::new(storage.clone(), Arc::clone(&peers), Default::default()); let rpc = BlockFilterRpcImpl { swc }; @@ -1620,7 +1625,7 @@ fn test_set_scripts_delete_min_filtered_block_number_bug() { #[test] fn test_chain_txs_in_same_block_bug() { - let storage = new_storage("chain_txs_in_same_block_bug"); + let (storage, _tmpdir) = new_storage("chain_txs_in_same_block_bug"); let swc = StorageWithChainData::new(storage.clone(), create_peers(), Default::default()); let rpc = BlockFilterRpcImpl { swc }; diff --git a/light-client-lib/Cargo.toml b/light-client-lib/Cargo.toml index 9860b851..4faaefe2 100644 --- a/light-client-lib/Cargo.toml +++ b/light-client-lib/Cargo.toml @@ -1,81 +1,86 @@ [package] -name = "ckb-light-client-lib" -version = "0.5.4" authors = ["Nervos Core Dev "] -edition = "2021" -license = "MIT" description = "A CKB light client based on FlyClient." 
+edition = "2021" homepage = "https://github.com/nervosnetwork/ckb-light-client" +license = "MIT" +name = "ckb-light-client-lib" repository = "https://github.com/nervosnetwork/ckb-light-client" +version = "0.5.4" [lib] crate-type = ["cdylib", "rlib"] [dependencies] ckb-app-config = "1" +ckb-chain-spec = "1" ckb-constant = "1" -ckb-types = "1" -ckb-network = "1" -ckb-jsonrpc-types = "1" +ckb-dao = "1" +ckb-dao-utils = "1" ckb-error = "1" +ckb-hash = "1" +ckb-jsonrpc-types = "1" +ckb-network = "1" +ckb-resource = "1" ckb-script = "1" -ckb-chain-spec = "1" +ckb-systemtime = "1" ckb-traits = "1" -ckb-resource = "1" +ckb-types = "1" ckb-verification = "1" -ckb-hash = "1" -ckb-systemtime = "1" -ckb-dao-utils = "1" -ckb-dao = "1" +ckb-merkle-mountain-range = "0.5.1" +dashmap = "5.3" +golomb-coded-set = "0.2.1" +governor = "0.6.3" +linked-hash-map = "0.5.6" log = "0.4.14" +numext-fixed-uint = {version = "0.1", features = [ + "support_rand", + "support_heapsize", + "support_serde", +]} path-clean = "0.1.0" rand = "0.8.5" -dashmap = "5.3" -linked-hash-map = "0.5.6" -governor = "0.6.3" -serde = { version = "1.0.136", features = ["derive"] } -ckb-merkle-mountain-range = "0.5.1" -golomb-coded-set = "0.2.1" -numext-fixed-uint = { version = "0.1", features = [ - "support_rand", - "support_heapsize", - "support_serde", -] } +serde = {version = "1.0.136", features = ["derive"]} anyhow = "1.0.56" thiserror = "1.0.30" +tokio = {version = "1.20"} toml = "0.5.8" -tokio = { version = "1.20" } +parking_lot = "0.12.5" + +[features] +default = ["rocksdb"] [target.'cfg(not(target_arch = "wasm32"))'.dependencies] -rocksdb = { package = "ckb-rocksdb", version = "=0.21.1", features = [ - "snappy", -], default-features = false } +rocksdb = {package = "ckb-rocksdb", version = "=0.21.1", features = [ + "snappy", +], default-features = false, optional = true} +rusqlite = {version = "0.38.0", features = ["bundled"], optional = true} [target.'cfg(target_arch = "wasm32")'.dependencies] -web-time = 
"1.1.0" -wasm-bindgen = "0.2.63" +console_error_panic_hook = {version = "0.1.7"} idb = "0.6" +light-client-db-common = {path = "../wasm/light-client-db-common"} serde-wasm-bindgen = "0.6.5" -light-client-db-common = { path = "../wasm/light-client-db-common" } -web-sys = "0.3.72" -console_error_panic_hook = { version = "0.1.7" } serde_json = "1.0.134" +wasm-bindgen = "0.2.63" +web-sys = "0.3.72" +web-time = "1.1.0" [target.'cfg(target_arch = "wasm32")'.dev-dependencies] wasm-bindgen-test = "0.3.45" [dev-dependencies] -ckb-shared = "1" ckb-chain = "1" -ckb-tx-pool = "1" +ckb-db = "1" +ckb-db-schema = "1" +ckb-shared = "1" ckb-store = "1" -ckb-systemtime = { version = "1", features = ["enable_faketime"] } -tempfile = "3.0" +ckb-systemtime = {version = "1", features = ["enable_faketime"]} +ckb-tx-pool = "1" +env_logger = "0.11" +lazy_static = "1.3.0" rand = "0.8" serde_json = "1.0" -lazy_static = "1.3.0" -env_logger = "0.11" -ckb-db = "1" -ckb-db-schema = "1" +tempfile = "3.0" diff --git a/light-client-lib/src/error.rs b/light-client-lib/src/error.rs index 45ef316b..fe93f2dd 100644 --- a/light-client-lib/src/error.rs +++ b/light-client-lib/src/error.rs @@ -11,10 +11,14 @@ pub enum Error { #[error("runtime error: {0}")] Runtime(String), - #[cfg(not(target_arch = "wasm32"))] + #[cfg(all(not(target_arch = "wasm32"), feature = "rocksdb"))] #[error("db error: {0}")] Db(#[from] rocksdb::Error), + #[cfg(all(not(target_arch = "wasm32"), feature = "rusqlite"))] + #[error("db error: {0}")] + Db(#[from] rusqlite::Error), + #[cfg(target_arch = "wasm32")] #[error("db error: {0}")] Indexdb(String), diff --git a/light-client-lib/src/lib.rs b/light-client-lib/src/lib.rs index 3e2e9406..b7213506 100644 --- a/light-client-lib/src/lib.rs +++ b/light-client-lib/src/lib.rs @@ -11,3 +11,9 @@ pub mod storage; pub mod types; pub mod utils; pub mod verify; + +#[cfg(any( + all(feature = "rocksdb", feature = "rusqlite"), + not(any(feature = "rocksdb", feature = "rusqlite")) +))] 
+compile_error!("Exactly one of features `rocksdb` and `rusqlite` can be selected"); diff --git a/light-client-lib/src/protocols/filter/block_filter.rs b/light-client-lib/src/protocols/filter/block_filter.rs index 376a21ff..45cd0c29 100644 --- a/light-client-lib/src/protocols/filter/block_filter.rs +++ b/light-client-lib/src/protocols/filter/block_filter.rs @@ -1,5 +1,8 @@ use super::{components, BAD_MESSAGE_BAN_TIME}; use crate::protocols::{Peers, Status, StatusCode}; +use crate::storage::db::{ + StorageGeneralOperations, StorageGetPinnedRelatedOperations, StorageHighLevelOperations, +}; use crate::storage::Storage; use crate::types::{Duration, Instant, RwLock}; use crate::utils::network::prove_or_download_matched_blocks; diff --git a/light-client-lib/src/protocols/filter/components/block_filter_hashes_process.rs b/light-client-lib/src/protocols/filter/components/block_filter_hashes_process.rs index aef8117e..04b037d0 100644 --- a/light-client-lib/src/protocols/filter/components/block_filter_hashes_process.rs +++ b/light-client-lib/src/protocols/filter/components/block_filter_hashes_process.rs @@ -1,10 +1,10 @@ +use crate::protocols::{FilterProtocol, Status, StatusCode}; +use crate::storage::db::{StorageGeneralOperations, StorageHighLevelOperations}; use ckb_network::{BoxedCKBProtocolContext, PeerIndex}; use ckb_types::{core::BlockNumber, packed, prelude::*}; use log::{debug, trace}; use rand::seq::SliceRandom as _; -use crate::protocols::{FilterProtocol, Status, StatusCode}; - pub struct BlockFilterHashesProcess<'a> { message: packed::BlockFilterHashesReader<'a>, protocol: &'a FilterProtocol, diff --git a/light-client-lib/src/protocols/filter/components/block_filters_process.rs b/light-client-lib/src/protocols/filter/components/block_filters_process.rs index 2eed9778..f13c5015 100644 --- a/light-client-lib/src/protocols/filter/components/block_filters_process.rs +++ b/light-client-lib/src/protocols/filter/components/block_filters_process.rs @@ -1,5 +1,8 @@ use 
crate::protocols::FilterProtocol; use crate::protocols::{Status, StatusCode}; +use crate::storage::db::{ + StorageGeneralOperations, StorageGetPinnedRelatedOperations, StorageHighLevelOperations, +}; use crate::utils::network::prove_or_download_matched_blocks; use ckb_constant::sync::INIT_BLOCKS_IN_TRANSIT_PER_PEER; use ckb_network::{BoxedCKBProtocolContext, PeerIndex}; diff --git a/light-client-lib/src/protocols/light_client/components/send_blocks_proof.rs b/light-client-lib/src/protocols/light_client/components/send_blocks_proof.rs index b06d83df..4064c79b 100644 --- a/light-client-lib/src/protocols/light_client/components/send_blocks_proof.rs +++ b/light-client-lib/src/protocols/light_client/components/send_blocks_proof.rs @@ -8,7 +8,7 @@ use ckb_types::{ use log::{debug, error, info}; use rand::seq::SliceRandom; -use crate::storage::HeaderWithExtension; +use crate::storage::{db::StorageBatchRelatedOperations, HeaderWithExtension}; use super::{ super::{LightClientProtocol, Status, StatusCode}, diff --git a/light-client-lib/src/protocols/light_client/components/send_transactions_proof.rs b/light-client-lib/src/protocols/light_client/components/send_transactions_proof.rs index 91bf2ccd..b0e8b3a5 100644 --- a/light-client-lib/src/protocols/light_client/components/send_transactions_proof.rs +++ b/light-client-lib/src/protocols/light_client/components/send_transactions_proof.rs @@ -6,7 +6,10 @@ use ckb_types::{ }; use log::{debug, error}; -use crate::{protocols::light_client::components::verify_extra_hash, storage::HeaderWithExtension}; +use crate::{ + protocols::light_client::components::verify_extra_hash, + storage::{db::StorageBatchRelatedOperations, HeaderWithExtension}, +}; use super::{ super::{LightClientProtocol, Status, StatusCode}, diff --git a/light-client-lib/src/protocols/light_client/mod.rs b/light-client-lib/src/protocols/light_client/mod.rs index 36e7cd42..6b4b2f1b 100644 --- a/light-client-lib/src/protocols/light_client/mod.rs +++ 
b/light-client-lib/src/protocols/light_client/mod.rs @@ -44,6 +44,10 @@ use super::{ status::{Status, StatusCode}, BAD_MESSAGE_BAN_TIME, }; +use crate::storage::db::{ + StorageBatchRelatedOperations, StorageGeneralOperations, StorageGetPinnedRelatedOperations, + StorageHighLevelOperations, +}; use crate::protocols::{GET_BLOCKS_PROOF_LIMIT, GET_TRANSACTIONS_PROOF_LIMIT, LAST_N_BLOCKS}; use crate::storage::Storage; diff --git a/light-client-lib/src/protocols/relayer.rs b/light-client-lib/src/protocols/relayer.rs index b007c66c..feda105b 100644 --- a/light-client-lib/src/protocols/relayer.rs +++ b/light-client-lib/src/protocols/relayer.rs @@ -10,6 +10,7 @@ use std::collections::{HashMap, HashSet}; use std::sync::Arc; use crate::protocols::{Peers, BAD_MESSAGE_BAN_TIME}; +use crate::storage::db::StorageGetPinnedRelatedOperations; use crate::storage::Storage; use crate::types::{Duration, Instant, RwLock}; use crate::{read_lock, write_lock}; diff --git a/light-client-lib/src/protocols/synchronizer.rs b/light-client-lib/src/protocols/synchronizer.rs index e3309d3c..8495dadd 100644 --- a/light-client-lib/src/protocols/synchronizer.rs +++ b/light-client-lib/src/protocols/synchronizer.rs @@ -9,6 +9,9 @@ use std::sync::Arc; use super::BAD_MESSAGE_BAN_TIME; use crate::protocols::Peers; +use crate::storage::db::{ + StorageBatchRelatedOperations, StorageGeneralOperations, StorageHighLevelOperations, +}; use crate::storage::Storage; use crate::utils::network::prove_or_download_matched_blocks; diff --git a/light-client-lib/src/storage/db/browser.rs b/light-client-lib/src/storage/db/browser.rs index 3b1048be..f3ca8abc 100644 --- a/light-client-lib/src/storage/db/browser.rs +++ b/light-client-lib/src/storage/db/browser.rs @@ -1,43 +1,36 @@ -use std::{ - cell::RefCell, - collections::{HashMap, HashSet}, - path::Path, - sync::atomic::AtomicBool, -}; +use std::{cell::RefCell, path::Path, sync::atomic::AtomicBool}; use super::super::{ BlockNumber, Byte32, CellType, Script, ScriptStatus, 
ScriptType, SetScriptsCommand, }; use anyhow::{anyhow, bail, Context}; +use ckb_traits::CellDataProvider; +use ckb_types::bytes::Bytes; +use crate::storage::db::StorageBatchRelatedOperations; +use crate::storage::db::{StorageGeneralOperations, StorageGetPinnedRelatedOperations}; use ckb_types::{ core::{ cell::{CellMeta, CellStatus}, HeaderView, TransactionInfo, }, - packed::{self, Block, CellOutput, Header, OutPoint, Transaction}, - prelude::{ - Builder, Entity, FromSliceShouldBeOk, IntoBlockView, IntoHeaderView, IntoTransactionView, - Pack, PackVec, Reader, Unpack, - }, - utilities::{build_filter_data, calc_filter_hash}, - U256, + packed::{self, CellOutput, Header, OutPoint}, + prelude::{Entity, FromSliceShouldBeOk, IntoHeaderView, IntoTransactionView, Reader, Unpack}, }; pub use idb::CursorDirection; use light_client_db_common::{ idb_cursor_direction_to_ckb, read_command_payload, write_command_with_payload, DbCommandRequest, DbCommandResponse, InputCommand, OutputCommand, KV, }; - use log::debug; use crate::{ error::{Error, Result}, storage::{ - extract_raw_data, parse_matched_blocks, CellIndex, CpIndex, HeaderWithExtension, Key, - KeyPrefix, MatchedBlock, MatchedBlocks, OutputIndex, TxIndex, Value, WrappedBlockView, - FILTER_SCRIPTS_KEY, GENESIS_BLOCK_KEY, LAST_N_HEADERS_KEY, LAST_STATE_KEY, - MATCHED_FILTER_BLOCKS_KEY, MAX_CHECK_POINT_INDEX, MIN_FILTERED_BLOCK_NUMBER, + db::{GeneralDirection, StorageHighLevelOperations}, + extract_raw_data, parse_matched_blocks, CellIndex, CpIndex, Key, KeyPrefix, MatchedBlock, + MatchedBlocks, TxIndex, FILTER_SCRIPTS_KEY, MATCHED_FILTER_BLOCKS_KEY, + MIN_FILTERED_BLOCK_NUMBER, }, }; use wasm_bindgen::{prelude::wasm_bindgen, JsCast, JsValue}; @@ -317,14 +310,14 @@ impl Storage { Self { channel: chan } } - fn batch(&self) -> Batch { + pub fn batch(&self) -> Batch { Batch { add: vec![], delete: vec![], comm_arrays: self.channel.clone(), } } - fn put(&self, key: K, value: V) -> Result<()> + pub fn put(&self, key: K, value: V) -> 
Result<()> where K: AsRef<[u8]>, V: AsRef<[u8]>, @@ -352,13 +345,13 @@ impl Storage { } } #[allow(clippy::needless_lifetimes)] - fn get_pinned<'a, K>(&'a self, key: K) -> Result>> + pub fn get_pinned<'a, K>(&'a self, key: K) -> Result>> where K: AsRef<[u8]>, { self.get(key) } - fn delete>(&self, key: K) -> Result<()> { + pub fn delete>(&self, key: K) -> Result<()> { self.channel .dispatch_database_command(CommandRequestWithTakeWhileAndFilterMap::Delete { keys: vec![key.as_ref().to_vec()], @@ -366,7 +359,64 @@ impl Storage { .map(|_| ()) .map_err(|e| Error::Indexdb(format!("{:?}", e))) } - pub fn is_filter_scripts_empty(&self) -> bool { +} + +impl StorageHighLevelOperations for Storage { + fn get_header(&self, hash: &Byte32) -> Option { + self.get(Key::BlockHash(hash).into_vec()) + .map(|v| { + v.map(|v| { + Header::from_slice(&v[..Header::TOTAL_SIZE]) + .expect("stored Header") + .into_view() + }) + }) + .expect("db get should be ok") + } + fn put(&self, key: K, value: V) -> Result<()> + where + K: AsRef<[u8]>, + V: AsRef<[u8]>, + { + self.put(key, value) + } + + fn delete>(&self, key: K) -> Result<()> { + self.delete(key) + } + fn get>(&self, key: K) -> Result>> { + self.get(key) + } + fn collect_iterator( + &self, + start_key_bound: Vec, + order: GeneralDirection, + take_while: Box bool + Send + 'static>, + filter_map: Box Option> + Send + 'static>, + limit: usize, + skip: usize, + ) -> Vec<(Vec, Vec)> { + let value = self + .channel + .dispatch_database_command(CommandRequestWithTakeWhileAndFilterMap::Iterator { + start_key_bound, + order: match order { + GeneralDirection::Forward => CursorDirection::NextUnique, + GeneralDirection::Reverse => CursorDirection::PrevUnique, + }, + take_while, + filter_map, + limit, + skip, + }) + .unwrap(); + if let DbCommandResponse::Iterator { kvs } = value { + kvs.into_iter().map(|x| (x.key, x.value)).collect() + } else { + unreachable!() + } + } + fn is_filter_scripts_empty(&self) -> bool { let key_prefix = 
Key::Meta(FILTER_SCRIPTS_KEY).into_vec(); let value = match self.channel.dispatch_database_command( @@ -389,7 +439,7 @@ impl Storage { unreachable!() } } - pub fn get_filter_scripts(&self) -> Vec { + fn get_filter_scripts(&self) -> Vec { let key_prefix = Key::Meta(FILTER_SCRIPTS_KEY).into_vec(); let key_prefix_clone = key_prefix.clone(); let value = self @@ -431,7 +481,7 @@ impl Storage { unreachable!() } } - pub fn update_filter_scripts(&self, scripts: Vec, command: SetScriptsCommand) { + fn update_filter_scripts(&self, scripts: Vec, command: SetScriptsCommand) { let mut should_filter_genesis_block = false; let mut batch = self.batch(); let key_prefix = Key::Meta(FILTER_SCRIPTS_KEY).into_vec(); @@ -565,7 +615,7 @@ impl Storage { unreachable!() } } - pub fn get_scripts_hash(&self, block_number: BlockNumber) -> Vec { + fn get_scripts_hash(&self, block_number: BlockNumber) -> Vec { let key_prefix = Key::Meta(FILTER_SCRIPTS_KEY).into_vec(); let key_prefix_clone = key_prefix.clone(); @@ -630,16 +680,15 @@ impl Storage { unreachable!() } } - fn get_matched_blocks(&self, direction: CursorDirection) -> Option { + fn get_matched_blocks(&self, direction: GeneralDirection) -> Option { let key_prefix = Key::Meta(MATCHED_FILTER_BLOCKS_KEY).into_vec(); let iter_from = match direction { - CursorDirection::NextUnique => key_prefix.clone(), - CursorDirection::PrevUnique => { + GeneralDirection::Forward => key_prefix.clone(), + GeneralDirection::Reverse => { let mut key = key_prefix.clone(); key.extend(u64::MAX.to_be_bytes()); key } - _ => panic!("Invalid direction"), }; let key_prefix_clone = key_prefix.clone(); @@ -648,7 +697,10 @@ impl Storage { .channel .dispatch_database_command(CommandRequestWithTakeWhileAndFilterMap::Iterator { start_key_bound: iter_from, - order: CursorDirection::NextUnique, + order: match direction { + GeneralDirection::Forward => CursorDirection::NextUnique, + GeneralDirection::Reverse => CursorDirection::PrevUnique, + }, take_while: Box::new(move 
|raw_key: &[u8]| raw_key.starts_with(&key_prefix_clone)), filter_map: Box::new(|s| Some(s.to_vec())), limit: 1, @@ -679,8 +731,8 @@ impl Storage { } } - pub fn get_earliest_matched_blocks(&self) -> Option { - let result = self.get_matched_blocks(CursorDirection::NextUnique); + fn get_earliest_matched_blocks(&self) -> Option { + let result = self.get_matched_blocks(GeneralDirection::Forward); debug!( "Called get earliest matched blocks: {:?}, task id {:?}", result, @@ -689,10 +741,10 @@ impl Storage { result } - pub fn get_latest_matched_blocks(&self) -> Option { - self.get_matched_blocks(CursorDirection::PrevUnique) + fn get_latest_matched_blocks(&self) -> Option { + self.get_matched_blocks(GeneralDirection::Reverse) } - pub fn get_check_points(&self, start_index: CpIndex, limit: usize) -> Vec { + fn get_check_points(&self, start_index: CpIndex, limit: usize) -> Vec { let start_key = Key::CheckPointIndex(start_index).into_vec(); let key_prefix = [KeyPrefix::CheckPointIndex as u8]; @@ -717,7 +769,7 @@ impl Storage { unreachable!() } } - pub fn update_block_number(&self, block_number: BlockNumber) { + fn update_block_number(&self, block_number: BlockNumber) { let key_prefix = Key::Meta(FILTER_SCRIPTS_KEY).into_vec(); let mut batch = self.batch(); @@ -751,7 +803,7 @@ impl Storage { batch.commit().expect("batch commit should be ok"); } } - pub fn rollback_to_block(&self, to_number: BlockNumber) { + fn rollback_to_block(&self, to_number: BlockNumber) { let scripts = self.get_filter_scripts(); let mut batch = self.batch(); @@ -770,7 +822,7 @@ impl Storage { let value = self .channel .dispatch_database_command(CommandRequestWithTakeWhileAndFilterMap::Iterator { - start_key_bound: key_prefix.clone(), + start_key_bound: start_key.clone(), order: CursorDirection::PrevUnique, take_while: Box::new(move |raw_key: &[u8]| { raw_key.starts_with(&key_prefix) @@ -917,6 +969,52 @@ impl Storage { batch.commit().expect("batch commit should be ok"); } + fn cell(&self, out_point: 
&OutPoint, _eager_load: bool) -> CellStatus { + if let Some((block_number, tx_index, tx)) = self.get_transaction(&out_point.tx_hash()) { + let block_hash = Byte32::from_slice( + &self + .get(Key::BlockNumber(block_number).into_vec()) + .expect("db get should be ok") + .expect("stored block number / hash mapping"), + ) + .expect("stored block hash should be OK"); + + let header = Header::from_slice( + &self + .get(Key::BlockHash(&block_hash).into_vec()) + .expect("db get should be ok") + .expect("stored block hash / header mapping")[..Header::TOTAL_SIZE], + ) + .expect("stored header should be OK") + .into_view(); + + let output_index = out_point.index().unpack(); + let tx = tx.into_view(); + if let Some(cell_output) = tx.outputs().get(output_index) { + let output_data = tx + .outputs_data() + .get(output_index) + .expect("output_data's index should be same as output") + .raw_data(); + let output_data_data_hash = CellOutput::calc_data_hash(&output_data); + let cell_meta = CellMeta { + out_point: out_point.clone(), + cell_output, + transaction_info: Some(TransactionInfo { + block_hash, + block_epoch: header.epoch(), + block_number, + index: tx_index as usize, + }), + data_bytes: output_data.len() as u64, + mem_cell_data: Some(output_data), + mem_cell_data_hash: Some(output_data_data_hash), + }; + return CellStatus::Live(cell_meta); + } + } + CellStatus::Unknown + } } pub struct Batch { @@ -926,7 +1024,7 @@ pub struct Batch { } impl Batch { - fn put_kv>, V: Into>>(&mut self, key: K, value: V) -> Result<()> { + pub fn put_kv>, V: Into>>(&mut self, key: K, value: V) -> Result<()> { self.add.push(KV { key: key.into(), value: value.into(), @@ -934,7 +1032,7 @@ impl Batch { Ok(()) } - fn put, V: AsRef<[u8]>>(&mut self, key: K, value: V) -> Result<()> { + pub fn put, V: AsRef<[u8]>>(&mut self, key: K, value: V) -> Result<()> { self.add.push(KV { key: key.as_ref().to_vec(), value: value.as_ref().to_vec(), @@ -942,19 +1040,19 @@ impl Batch { Ok(()) } - fn delete>(&mut self, 
key: K) -> Result<()> { + pub fn delete>(&mut self, key: K) -> Result<()> { self.delete.push(key.as_ref().to_vec()); Ok(()) } - fn delete_many(&mut self, keys: Vec>) -> Result<()> { + pub fn delete_many(&mut self, keys: Vec>) -> Result<()> { self.comm_arrays .dispatch_database_command(CommandRequestWithTakeWhileAndFilterMap::Delete { keys }) .map(|_| ()) .map_err(|e| Error::Indexdb(format!("{:?}", e))) } - fn commit(self) -> Result<()> { + pub fn commit(self) -> Result<()> { if !self.add.is_empty() { self.comm_arrays .dispatch_database_command(CommandRequestWithTakeWhileAndFilterMap::Put { @@ -977,620 +1075,13 @@ impl Batch { } } -impl Storage { - #[allow(clippy::type_complexity)] - pub fn collect_iterator( - &self, - start_key_bound: Vec, - order: CursorDirection, - take_while: Box bool + Send + 'static>, - filter_map: Box Option> + Send + 'static>, - limit: usize, - skip: usize, - ) -> Vec { - let value = self - .channel - .dispatch_database_command(CommandRequestWithTakeWhileAndFilterMap::Iterator { - start_key_bound, - order, - take_while, - filter_map, - limit, - skip, - }) - .unwrap(); - if let DbCommandResponse::Iterator { kvs } = value { - kvs - } else { - unreachable!() - } - } - pub fn init_genesis_block(&self, block: Block) { - let genesis_hash = block.calc_header_hash(); - let genesis_block_key = Key::Meta(GENESIS_BLOCK_KEY).into_vec(); - if let Some(stored_genesis_hash) = self - .get(genesis_block_key.as_slice()) - .expect("get genesis block") - .map(|v| v[0..32].to_vec()) - { - if genesis_hash.as_slice() != stored_genesis_hash.as_slice() { - panic!( - "genesis hash mismatch: stored={:#?}, new={}", - stored_genesis_hash, genesis_hash - ); - } - } else { - let mut batch = self.batch(); - let block_hash = block.calc_header_hash(); - batch - .put_kv(Key::Meta(LAST_STATE_KEY), block.header().as_slice()) - .expect("batch put should be ok"); - batch - .put_kv(Key::BlockHash(&block_hash), block.header().as_slice()) - .expect("batch put should be ok"); - batch 
- .put_kv(Key::BlockNumber(0), block_hash.as_slice()) - .expect("batch put should be ok"); - let mut genesis_hash_and_txs_hash = genesis_hash.as_slice().to_vec(); - block - .transactions() - .into_iter() - .enumerate() - .for_each(|(tx_index, tx)| { - let tx_hash = tx.calc_tx_hash(); - genesis_hash_and_txs_hash.extend_from_slice(tx_hash.as_slice()); - let key = Key::TxHash(&tx_hash).into_vec(); - let value = Value::Transaction(0, tx_index as TxIndex, &tx); - batch.put_kv(key, value).expect("batch put should be ok"); - }); - batch - .put_kv(genesis_block_key, genesis_hash_and_txs_hash.as_slice()) - .expect("batch put should be ok"); - batch.commit().expect("batch commit should be ok"); - self.update_last_state(&U256::zero(), &block.header(), &[]); - let genesis_block_filter_hash: Byte32 = { - let block_view = block.into_view(); - let provider: WrappedBlockView<'_> = WrappedBlockView::new(&block_view); - let parent_block_filter_hash = Byte32::zero(); - let (genesis_block_filter_vec, missing_out_points) = - build_filter_data(provider, &block_view.transactions()); - if !missing_out_points.is_empty() { - panic!("Genesis block shouldn't missing any out points."); - } - let genesis_block_filter_data = genesis_block_filter_vec.pack(); - calc_filter_hash(&parent_block_filter_hash, &genesis_block_filter_data).pack() - }; - self.update_max_check_point_index(0); - self.update_check_points(0, &[genesis_block_filter_hash]); - self.update_min_filtered_block_number(0); - } +impl CellDataProvider for Storage { + // we load all cells data eagerly in Storage's CellProivder impl + fn get_cell_data(&self, _out_point: &OutPoint) -> Option { + unreachable!() } - pub fn get_genesis_block(&self) -> Block { - let genesis_hash_and_txs_hash = self - .get(Key::Meta(GENESIS_BLOCK_KEY).into_vec()) - .expect("get genesis block") - .expect("inited storage"); - let genesis_hash = Byte32::from_slice(&genesis_hash_and_txs_hash[0..32]) - .expect("stored genesis block hash"); - let genesis_header = 
Header::from_slice( - &self - .get(Key::BlockHash(&genesis_hash).into_vec()) - .expect("db get should be ok") - .expect("stored block hash / header mapping"), - ) - .expect("stored header should be OK"); - - let mut transactions: Vec = Vec::new(); - for tx_hash in genesis_hash_and_txs_hash[32..].chunks_exact(32) { - transactions.push( - Transaction::from_slice( - &self - .get( - Key::TxHash( - &Byte32::from_slice(tx_hash).expect("stored genesis block tx hash"), - ) - .into_vec(), - ) - .expect("db get should be ok") - .expect("stored genesis block tx")[12..], - ) - .expect("stored Transaction"), - ) - } - Block::new_builder() - .header(genesis_header) - .transactions(transactions.pack()) - .build() - } - pub fn update_last_state( - &self, - total_difficulty: &U256, - tip_header: &Header, - last_n_headers: &[HeaderView], - ) { - let key = Key::Meta(LAST_STATE_KEY).into_vec(); - let mut value = total_difficulty.to_le_bytes().to_vec(); - value.extend(tip_header.as_slice()); - self.put(key, &value) - .expect("db put last state should be ok"); - self.update_last_n_headers(last_n_headers); - } - pub fn get_last_state(&self) -> (U256, Header) { - let key = Key::Meta(LAST_STATE_KEY).into_vec(); - self.get_pinned(&key) - .expect("db get last state should be ok") - .map(|data| { - let mut total_difficulty_bytes = [0u8; 32]; - total_difficulty_bytes.copy_from_slice(&data[0..32]); - let total_difficulty = U256::from_le_bytes(&total_difficulty_bytes); - let header = packed::HeaderReader::from_slice_should_be_ok(&data[32..]).to_entity(); - (total_difficulty, header) - }) - .expect("tip header should be inited") - } - fn update_last_n_headers(&self, headers: &[HeaderView]) { - let key = Key::Meta(LAST_N_HEADERS_KEY).into_vec(); - let mut value: Vec = Vec::with_capacity(headers.len() * 40); - for header in headers { - value.extend(header.number().to_le_bytes()); - - value.extend(header.hash().as_slice()); - } - self.put(key, &value) - .expect("db put last n headers should be ok"); 
- } - pub fn get_last_n_headers(&self) -> Vec<(u64, Byte32)> { - let key = Key::Meta(LAST_N_HEADERS_KEY).into_vec(); - self.get_pinned(&key) - .expect("db get last n headers should be ok") - .map(|data| { - assert!(data.len() % 40 == 0); - let mut headers = Vec::with_capacity(data.len() / 40); - for part in data.chunks(40) { - let number = u64::from_le_bytes(part[0..8].try_into().unwrap()); - let hash = Byte32::from_slice(&part[8..]).expect("byte32 block hash"); - headers.push((number, hash)); - } - headers - }) - .expect("last n headers should be inited") - } - pub fn remove_matched_blocks(&self, start_number: u64) { - let mut key = Key::Meta(MATCHED_FILTER_BLOCKS_KEY).into_vec(); - key.extend(start_number.to_be_bytes()); - self.delete(&key).expect("delete matched blocks"); - } - pub fn add_matched_blocks( - &self, - start_number: u64, - blocks_count: u64, - // (block-hash, proved) - matched_blocks: Vec<(Byte32, bool)>, - ) { - debug!( - "Adding matched blocks: ({:?}, {:?}, {:?}), task_id={:?}", - start_number, - blocks_count, - matched_blocks, - tokio::task::try_id() - ); - assert!(!matched_blocks.is_empty()); - let mut key = Key::Meta(MATCHED_FILTER_BLOCKS_KEY).into_vec(); - key.extend(start_number.to_be_bytes()); - - let mut value = blocks_count.to_le_bytes().to_vec(); - for (block_hash, proved) in matched_blocks { - value.extend(block_hash.as_slice()); - value.push(u8::from(proved)); - } - self.put(key, &value) - .expect("db put matched blocks should be ok"); - } - - pub fn cleanup_invalid_matched_blocks(&self) { - use ckb_types::prelude::Unpack; - use log::warn; - - let tip_number: u64 = self.get_tip_header().raw().number().unpack(); - - loop { - let entry = self.get_earliest_matched_blocks(); - if entry.is_none() { - break; - } - - let matched_blocks = entry.unwrap(); - let start_number = matched_blocks.start_number; - let blocks_count = matched_blocks.blocks_count; - let mut should_remove = false; - - for block in &matched_blocks.blocks { - if let 
Some(header) = self.get_header(&block.hash) { - let stored_number: u64 = header.number(); - if stored_number < start_number || stored_number >= start_number + blocks_count - { - warn!( - "Invalid matched block {:#x} at number {} outside expected range [{}, {}), removing entry at start_number={}", - block.hash, stored_number, start_number, start_number + blocks_count, start_number - ); - should_remove = true; - break; - } - } else if start_number + 1000 < tip_number { - warn!( - "Matched block {:#x} not found in storage, entry at start_number={} is {} blocks behind tip, removing", - block.hash, start_number, tip_number - start_number - ); - should_remove = true; - break; - } - } - - if should_remove { - self.remove_matched_blocks(start_number); - } else { - break; - } - } - } - - pub fn add_fetched_header(&self, hwe: &HeaderWithExtension) { - let mut batch = self.batch(); - let block_hash = hwe.header.calc_header_hash(); - batch - .put(Key::BlockHash(&block_hash).into_vec(), hwe.to_vec()) - .expect("batch put should be ok"); - batch - .put( - Key::BlockNumber(hwe.header.raw().number().unpack()).into_vec(), - block_hash.as_slice(), - ) - .expect("batch put should be ok"); - batch.commit().expect("batch commit should be ok"); - } - pub fn add_fetched_tx(&self, tx: &Transaction, hwe: &HeaderWithExtension) { - let mut batch = self.batch(); - let block_hash = hwe.header.calc_header_hash(); - let block_number: u64 = hwe.header.raw().number().unpack(); - batch - .put(Key::BlockHash(&block_hash).into_vec(), hwe.to_vec()) - .expect("batch put should be ok"); - batch - .put( - Key::BlockNumber(block_number).into_vec(), - block_hash.as_slice(), - ) - .expect("batch put should be ok"); - let tx_hash = tx.calc_tx_hash(); - let tx_index = u32::MAX; - let key = Key::TxHash(&tx_hash).into_vec(); - let value = Value::Transaction(block_number, tx_index as TxIndex, tx); - batch.put_kv(key, value).expect("batch put should be ok"); - batch.commit().expect("batch commit should be ok"); - 
} - - pub fn get_tip_header(&self) -> Header { - self.get_last_state().1 - } - - pub fn get_min_filtered_block_number(&self) -> BlockNumber { - let key = Key::Meta(MIN_FILTERED_BLOCK_NUMBER).into_vec(); - self.get_pinned(&key) - .expect("db get min filtered block number should be ok") - .map(|data| u64::from_le_bytes(AsRef::<[u8]>::as_ref(&data).try_into().unwrap())) - .unwrap_or_default() - } - pub fn update_min_filtered_block_number(&self, block_number: BlockNumber) { - let key = Key::Meta(MIN_FILTERED_BLOCK_NUMBER).into_vec(); - let value = block_number.to_le_bytes(); - self.put(key, value) - .expect("db put min filtered block number should be ok"); - } - pub fn get_last_check_point(&self) -> (CpIndex, Byte32) { - let index = self.get_max_check_point_index(); - let hash = self - .get_check_points(index, 1) - .first() - .cloned() - .expect("db get last check point should be ok"); - (index, hash) - } - pub fn get_max_check_point_index(&self) -> CpIndex { - let key = Key::Meta(MAX_CHECK_POINT_INDEX).into_vec(); - self.get_pinned(&key) - .expect("db get max check point index should be ok") - .map(|data| CpIndex::from_be_bytes(AsRef::<[u8]>::as_ref(&data).try_into().unwrap())) - .expect("db get max check point index should be ok 1") - } - pub fn update_max_check_point_index(&self, index: CpIndex) { - let key = Key::Meta(MAX_CHECK_POINT_INDEX).into_vec(); - let value = index.to_be_bytes(); - self.put(key, value) - .expect("db put max check point index should be ok"); - } - pub fn update_check_points(&self, start_index: CpIndex, check_points: &[Byte32]) { - let mut index = start_index; - let mut batch = self.batch(); - for cp in check_points { - let key = Key::CheckPointIndex(index).into_vec(); - let value = Value::BlockFilterHash(cp); - batch.put_kv(key, value).expect("batch put should be ok"); - index += 1; - } - batch.commit().expect("batch commit should be ok"); - } - pub fn filter_block(&self, block: Block) { - let scripts: HashSet<(Script, ScriptType)> = self - 
.get_filter_scripts() - .into_iter() - .map(|ss| (ss.script, ss.script_type)) - .collect(); - let block_number: BlockNumber = block.header().raw().number().unpack(); - let mut filter_matched = false; - let mut batch = self.batch(); - let mut txs: HashMap = HashMap::new(); - for (tx_index, tx) in block.transactions().into_iter().enumerate() { - for (input_index, input) in tx.raw().inputs().into_iter().enumerate() { - let previous_tx_hash = input.previous_output().tx_hash(); - if let Some((generated_by_block_number, generated_by_tx_index, previous_tx)) = - self.get_transaction(&previous_tx_hash).or(txs - .get(&previous_tx_hash) - .map(|(tx_index, tx)| (block_number, *tx_index, tx.clone()))) - { - let previous_output_index = input.previous_output().index().unpack(); - if let Some(previous_output) = - previous_tx.raw().outputs().get(previous_output_index) - { - let script = previous_output.lock(); - if scripts.contains(&(script.clone(), ScriptType::Lock)) { - filter_matched = true; - // delete utxo - let key = Key::CellLockScript( - &script, - generated_by_block_number, - generated_by_tx_index, - previous_output_index as OutputIndex, - ) - .into_vec(); - batch.delete(key).expect("batch delete should be ok"); - // insert tx history - let key = Key::TxLockScript( - &script, - block_number, - tx_index as TxIndex, - input_index as CellIndex, - CellType::Input, - ) - .into_vec(); - let tx_hash = tx.calc_tx_hash(); - batch - .put(key, tx_hash.as_slice()) - .expect("batch put should be ok"); - // insert tx - let key = Key::TxHash(&tx_hash).into_vec(); - let value = Value::Transaction(block_number, tx_index as TxIndex, &tx); - batch.put_kv(key, value).expect("batch put should be ok"); - } - if let Some(script) = previous_output.type_().to_opt() { - if scripts.contains(&(script.clone(), ScriptType::Type)) { - filter_matched = true; - // delete utxo - let key = Key::CellTypeScript( - &script, - generated_by_block_number, - generated_by_tx_index, - previous_output_index as 
OutputIndex, - ) - .into_vec(); - batch.delete(key).expect("batch delete should be ok"); - // insert tx history - let key = Key::TxTypeScript( - &script, - block_number, - tx_index as TxIndex, - input_index as CellIndex, - CellType::Input, - ) - .into_vec(); - let tx_hash = tx.calc_tx_hash(); - batch - .put(key, tx_hash.as_slice()) - .expect("batch put should be ok"); - // insert tx - let key = Key::TxHash(&tx_hash).into_vec(); - let value = - Value::Transaction(block_number, tx_index as TxIndex, &tx); - batch.put_kv(key, value).expect("batch put should be ok"); - } - } - } - } - } - - tx.raw() - .outputs() - .into_iter() - .enumerate() - .for_each(|(output_index, output)| { - let script = output.lock(); - if scripts.contains(&(script.clone(), ScriptType::Lock)) { - filter_matched = true; - let tx_hash = tx.calc_tx_hash(); - // insert utxo - let key = Key::CellLockScript( - &script, - block_number, - tx_index as TxIndex, - output_index as OutputIndex, - ) - .into_vec(); - batch - .put(key, tx_hash.as_slice()) - .expect("batch put should be ok"); - // insert tx history - let key = Key::TxLockScript( - &script, - block_number, - tx_index as TxIndex, - output_index as CellIndex, - CellType::Output, - ) - .into_vec(); - batch - .put(key, tx_hash.as_slice()) - .expect("batch put should be ok"); - // insert tx - let key = Key::TxHash(&tx_hash).into_vec(); - let value = Value::Transaction(block_number, tx_index as TxIndex, &tx); - batch.put_kv(key, value).expect("batch put should be ok"); - } - if let Some(script) = output.type_().to_opt() { - if scripts.contains(&(script.clone(), ScriptType::Type)) { - filter_matched = true; - let tx_hash = tx.calc_tx_hash(); - // insert utxo - let key = Key::CellTypeScript( - &script, - block_number, - tx_index as TxIndex, - output_index as OutputIndex, - ) - .into_vec(); - batch - .put(key, tx_hash.as_slice()) - .expect("batch put should be ok"); - // insert tx history - let key = Key::TxTypeScript( - &script, - block_number, - 
tx_index as TxIndex, - output_index as CellIndex, - CellType::Output, - ) - .into_vec(); - batch - .put(key, tx_hash.as_slice()) - .expect("batch put should be ok"); - // insert tx - let key = Key::TxHash(&tx_hash).into_vec(); - let value = Value::Transaction(block_number, tx_index as TxIndex, &tx); - batch.put_kv(key, value).expect("batch put should be ok"); - } - } - }); - - txs.insert(tx.calc_tx_hash(), (tx_index as u32, tx)); - } - if filter_matched { - let block_hash = block.calc_header_hash(); - let hwe = HeaderWithExtension { - header: block.header(), - extension: block.extension(), - }; - batch - .put(Key::BlockHash(&block_hash).into_vec(), hwe.to_vec()) - .expect("batch put should be ok"); - batch - .put( - Key::BlockNumber(block.header().raw().number().unpack()).into_vec(), - block_hash.as_slice(), - ) - .expect("batch put should be ok"); - } - batch.commit().expect("batch commit should be ok"); - } - fn get_transaction(&self, tx_hash: &Byte32) -> Option<(BlockNumber, TxIndex, Transaction)> { - self.get(Key::TxHash(tx_hash).into_vec()) - .map(|v| { - v.map(|v| { - ( - BlockNumber::from_be_bytes(v[0..8].try_into().expect("stored BlockNumber")), - TxIndex::from_be_bytes(v[8..12].try_into().expect("stored TxIndex")), - Transaction::from_slice(&v[12..]).expect("stored Transaction"), - ) - }) - }) - .expect("db get should be ok") - } - pub fn get_transaction_with_header(&self, tx_hash: &Byte32) -> Option<(Transaction, Header)> { - match self.get_transaction(tx_hash) { - Some((block_number, _tx_index, tx)) => { - let block_hash = Byte32::from_slice( - &self - .get(Key::BlockNumber(block_number).into_vec()) - .expect("db get should be ok") - .expect("stored block number / hash mapping"), - ) - .expect("stored block hash should be OK"); - - let header = Header::from_slice( - &self - .get(Key::BlockHash(&block_hash).into_vec()) - .expect("db get should be ok") - .expect("stored block hash / header mapping")[..Header::TOTAL_SIZE], - ) - .expect("stored header 
should be OK"); - Some((tx, header)) - } - None => None, - } - } - pub fn cell(&self, out_point: &OutPoint, _eager_load: bool) -> CellStatus { - if let Some((block_number, tx_index, tx)) = self.get_transaction(&out_point.tx_hash()) { - let block_hash = Byte32::from_slice( - &self - .get(Key::BlockNumber(block_number).into_vec()) - .expect("db get should be ok") - .expect("stored block number / hash mapping"), - ) - .expect("stored block hash should be OK"); - - let header = Header::from_slice( - &self - .get(Key::BlockHash(&block_hash).into_vec()) - .expect("db get should be ok") - .expect("stored block hash / header mapping")[..Header::TOTAL_SIZE], - ) - .expect("stored header should be OK") - .into_view(); - - let output_index = out_point.index().unpack(); - let tx = tx.into_view(); - if let Some(cell_output) = tx.outputs().get(output_index) { - let output_data = tx - .outputs_data() - .get(output_index) - .expect("output_data's index should be same as output") - .raw_data(); - let output_data_data_hash = CellOutput::calc_data_hash(&output_data); - let cell_meta = CellMeta { - out_point: out_point.clone(), - cell_output, - transaction_info: Some(TransactionInfo { - block_hash, - block_epoch: header.epoch(), - block_number, - index: tx_index as usize, - }), - data_bytes: output_data.len() as u64, - mem_cell_data: Some(output_data), - mem_cell_data_hash: Some(output_data_data_hash), - }; - return CellStatus::Live(cell_meta); - } - } - CellStatus::Unknown - } - pub fn get_header(&self, hash: &Byte32) -> Option { - self.get(Key::BlockHash(hash).into_vec()) - .map(|v| { - v.map(|v| { - Header::from_slice(&v[..Header::TOTAL_SIZE]) - .expect("stored Header") - .into_view() - }) - }) - .expect("db get should be ok") + fn get_cell_data_hash(&self, _out_point: &OutPoint) -> Option { + unreachable!() } } diff --git a/light-client-lib/src/storage/db/mod.rs b/light-client-lib/src/storage/db/mod.rs index fad06c7f..c1be22e6 100644 --- a/light-client-lib/src/storage/db/mod.rs 
+++ b/light-client-lib/src/storage/db/mod.rs @@ -1,9 +1,702 @@ -#[cfg(not(target_arch = "wasm32"))] -mod native; -#[cfg(not(target_arch = "wasm32"))] -pub use native::{Batch, Storage}; +#[cfg(all(not(target_arch = "wasm32"), feature = "rocksdb"))] +mod native_rocksdb; +use ckb_types::core::cell::CellStatus; +use ckb_types::packed::OutPoint; +#[cfg(all(not(target_arch = "wasm32"), feature = "rocksdb"))] +pub use native_rocksdb::{Batch, Storage}; + +#[cfg(all(not(target_arch = "wasm32"), feature = "rusqlite"))] +mod native_rusqlite; +#[cfg(all(not(target_arch = "wasm32"), feature = "rusqlite"))] +pub use native_rusqlite::{Batch, Storage, KV}; #[cfg(target_arch = "wasm32")] mod browser; #[cfg(target_arch = "wasm32")] -pub use browser::{Batch, CursorDirection, Storage}; +pub use browser::{Batch, Storage}; + +use crate::error::Result; +use ckb_types::prelude::Reader; +use ckb_types::prelude::{Builder, FromSliceShouldBeOk}; +use ckb_types::{ + core::{BlockNumber, HeaderView}, + packed::{self, Block, Byte32, Header, Script, Transaction}, + prelude::{IntoBlockView, Pack, PackVec, Unpack}, + utilities::{build_filter_data, calc_filter_hash}, +}; +use std::collections::{HashMap, HashSet}; + +use crate::storage::{ + CellIndex, CellType, CpIndex, HeaderWithExtension, Key, MatchedBlocks, OutputIndex, + ScriptStatus, ScriptType, SetScriptsCommand, TxIndex, Value, WrappedBlockView, + GENESIS_BLOCK_KEY, LAST_N_HEADERS_KEY, LAST_STATE_KEY, MATCHED_FILTER_BLOCKS_KEY, + MAX_CHECK_POINT_INDEX, MIN_FILTERED_BLOCK_NUMBER, +}; +use ckb_types::prelude::Entity; +use ckb_types::U256; + +#[derive(Clone, Copy)] +pub enum GeneralDirection { + Forward, + Reverse, +} + +/** + * These functions will be implemented in each database implementation + */ +pub trait StorageHighLevelOperations { + fn is_filter_scripts_empty(&self) -> bool; + fn get_filter_scripts(&self) -> Vec; + fn update_filter_scripts(&self, scripts: Vec, command: SetScriptsCommand); + fn 
update_min_filtered_block_number_by_scripts(&self); + // get scripts hash that should be filtered below the given block number + fn get_scripts_hash(&self, block_number: BlockNumber) -> Vec; + fn clear_matched_blocks(&self); + fn get_matched_blocks(&self, direction: GeneralDirection) -> Option; + fn get_earliest_matched_blocks(&self) -> Option; + fn get_latest_matched_blocks(&self) -> Option; + fn get_check_points(&self, start_index: CpIndex, limit: usize) -> Vec; + fn update_block_number(&self, block_number: BlockNumber); + /// Rollback filtered block data to specified block number + /// + /// N.B. The specified block will be removed. + fn rollback_to_block(&self, to_number: BlockNumber); + #[allow(clippy::type_complexity)] + fn collect_iterator( + &self, + start_key_bound: Vec, + order: GeneralDirection, + take_while: Box bool + Send + 'static>, + filter_map: Box Option> + Send + 'static>, + limit: usize, + skip: usize, + ) -> Vec<(Vec, Vec)>; + fn cell(&self, out_point: &OutPoint, eager_load: bool) -> CellStatus; + fn get>(&self, key: K) -> Result>>; + fn put(&self, key: K, value: V) -> Result<()> + where + K: AsRef<[u8]>, + V: AsRef<[u8]>; + fn delete>(&self, key: K) -> Result<()>; + fn get_header(&self, hash: &Byte32) -> Option; +} + +/** + * These functions can be automatically implemented based on StorageHighLevelOperations + */ +pub trait StorageGeneralOperations { + fn get_genesis_block(&self) -> Block; + fn update_last_state( + &self, + total_difficulty: &U256, + tip_header: &Header, + last_n_headers: &[HeaderView], + ); + fn update_last_n_headers(&self, headers: &[HeaderView]); + /// 0 all blocks downloaded and inserted into storage call this function. 
+ fn remove_matched_blocks(&self, start_number: u64); + /// the matched blocks must not empty + fn add_matched_blocks( + &self, + start_number: u64, + blocks_count: u64, + // (block-hash, proved) + matched_blocks: Vec<(Byte32, bool)>, + ); + fn cleanup_invalid_matched_blocks(&self); + + fn get_tip_header(&self) -> Header; + fn update_min_filtered_block_number(&self, block_number: BlockNumber); + fn get_last_check_point(&self) -> (CpIndex, Byte32); + fn update_max_check_point_index(&self, index: CpIndex); + + fn get_transaction(&self, tx_hash: &Byte32) -> Option<(BlockNumber, TxIndex, Transaction)>; + fn get_transaction_with_header(&self, tx_hash: &Byte32) -> Option<(Transaction, Header)>; +} + +/** + * These functions will call `Storage::get_pinned`, which has different return value type between RocksDB and other implementations + * This trait will be implemented in this module file. + */ +pub trait StorageGetPinnedRelatedOperations { + fn get_last_state(&self) -> (U256, Header); + fn get_last_n_headers(&self) -> Vec<(u64, Byte32)>; + fn get_min_filtered_block_number(&self) -> BlockNumber; + fn get_max_check_point_index(&self) -> CpIndex; +} + +/** + * These functions will use `Batch`, which has different implementations among database implementations. + * This trait will be implemented in this module file. 
+ */ +pub trait StorageBatchRelatedOperations { + fn init_genesis_block(&self, block: Block); + fn add_fetched_header(&self, hwe: &HeaderWithExtension); + fn add_fetched_tx(&self, tx: &Transaction, hwe: &HeaderWithExtension); + fn update_check_points(&self, start_index: CpIndex, check_points: &[Byte32]); + fn filter_block(&self, block: Block); +} + +impl StorageGetPinnedRelatedOperations for Storage { + fn get_max_check_point_index(&self) -> CpIndex { + let key = Key::Meta(MAX_CHECK_POINT_INDEX).into_vec(); + self.get_pinned(&key) + .expect("db get max check point index should be ok") + .map(|data| CpIndex::from_be_bytes(AsRef::<[u8]>::as_ref(&data).try_into().unwrap())) + .expect("db get max check point index should be ok") + } + fn get_min_filtered_block_number(&self) -> BlockNumber { + let key = Key::Meta(MIN_FILTERED_BLOCK_NUMBER).into_vec(); + self.get_pinned(&key) + .expect("db get min filtered block number should be ok") + .map(|data| u64::from_le_bytes(AsRef::<[u8]>::as_ref(&data).try_into().unwrap())) + .unwrap_or_default() + } + + fn get_last_n_headers(&self) -> Vec<(u64, Byte32)> { + let key = Key::Meta(LAST_N_HEADERS_KEY).into_vec(); + self.get_pinned(&key) + .expect("db get last n headers should be ok") + .map(|data| { + assert!(AsRef::<[u8]>::as_ref(&data).len().is_multiple_of(40)); + let mut headers = Vec::with_capacity(&AsRef::<[u8]>::as_ref(&data).len() / 40); + for part in AsRef::<[u8]>::as_ref(&data).chunks(40) { + let number = u64::from_le_bytes(part[0..8].try_into().unwrap()); + let hash = Byte32::from_slice(&part[8..]).expect("byte32 block hash"); + headers.push((number, hash)); + } + headers + }) + .expect("last n headers should be inited") + } + + fn get_last_state(&self) -> (U256, Header) { + let key = Key::Meta(LAST_STATE_KEY).into_vec(); + self.get_pinned(&key) + .expect("db get last state should be ok") + .map(|data| { + let mut total_difficulty_bytes = [0u8; 32]; + 
total_difficulty_bytes.copy_from_slice(&AsRef::<[u8]>::as_ref(&data)[0..32]); + let total_difficulty = U256::from_le_bytes(&total_difficulty_bytes); + let header = packed::HeaderReader::from_slice_should_be_ok( + &AsRef::<[u8]>::as_ref(&data)[32..], + ) + .to_entity(); + (total_difficulty, header) + }) + .expect("tip header should be inited") + } +} + +impl StorageBatchRelatedOperations for Storage { + fn init_genesis_block(&self, block: Block) { + let genesis_hash = block.calc_header_hash(); + let genesis_block_key = Key::Meta(GENESIS_BLOCK_KEY).into_vec(); + if let Some(stored_genesis_hash) = self + .get(genesis_block_key.as_slice()) + .expect("get genesis block") + .map(|v| v[0..32].to_vec()) + { + if genesis_hash.as_slice() != stored_genesis_hash.as_slice() { + panic!( + "genesis hash mismatch: stored={:#?}, new={}", + stored_genesis_hash, genesis_hash + ); + } + } else { + let mut batch = self.batch(); + let block_hash = block.calc_header_hash(); + batch + .put_kv(Key::Meta(LAST_STATE_KEY), block.header().as_slice()) + .expect("batch put should be ok"); + batch + .put_kv(Key::BlockHash(&block_hash), block.header().as_slice()) + .expect("batch put should be ok"); + batch + .put_kv(Key::BlockNumber(0), block_hash.as_slice()) + .expect("batch put should be ok"); + let mut genesis_hash_and_txs_hash = genesis_hash.as_slice().to_vec(); + block + .transactions() + .into_iter() + .enumerate() + .for_each(|(tx_index, tx)| { + let tx_hash = tx.calc_tx_hash(); + genesis_hash_and_txs_hash.extend_from_slice(tx_hash.as_slice()); + let key = Key::TxHash(&tx_hash).into_vec(); + let value = Value::Transaction(0, tx_index as TxIndex, &tx); + batch.put_kv(key, value).expect("batch put should be ok"); + }); + batch + .put_kv(genesis_block_key, genesis_hash_and_txs_hash.as_slice()) + .expect("batch put should be ok"); + batch.commit().expect("batch commit should be ok"); + self.update_last_state(&U256::zero(), &block.header(), &[]); + let genesis_block_filter_hash: Byte32 = { + 
let block_view = block.into_view(); + let provider = WrappedBlockView::new(&block_view); + let parent_block_filter_hash = Byte32::zero(); + let (genesis_block_filter_vec, missing_out_points) = + build_filter_data(provider, &block_view.transactions()); + if !missing_out_points.is_empty() { + panic!("Genesis block shouldn't missing any out points."); + } + let genesis_block_filter_data = genesis_block_filter_vec.pack(); + calc_filter_hash(&parent_block_filter_hash, &genesis_block_filter_data).pack() + }; + self.update_max_check_point_index(0); + self.update_check_points(0, &[genesis_block_filter_hash]); + self.update_min_filtered_block_number(0); + } + } + fn add_fetched_header(&self, hwe: &HeaderWithExtension) { + let mut batch = self.batch(); + let block_hash = hwe.header.calc_header_hash(); + batch + .put(Key::BlockHash(&block_hash).into_vec(), hwe.to_vec()) + .expect("batch put should be ok"); + batch + .put( + Key::BlockNumber(hwe.header.raw().number().unpack()).into_vec(), + block_hash.as_slice(), + ) + .expect("batch put should be ok"); + batch.commit().expect("batch commit should be ok"); + } + + fn add_fetched_tx(&self, tx: &Transaction, hwe: &HeaderWithExtension) { + let mut batch = self.batch(); + let block_hash = hwe.header.calc_header_hash(); + let block_number: u64 = hwe.header.raw().number().unpack(); + batch + .put(Key::BlockHash(&block_hash).into_vec(), hwe.to_vec()) + .expect("batch put should be ok"); + batch + .put( + Key::BlockNumber(block_number).into_vec(), + block_hash.as_slice(), + ) + .expect("batch put should be ok"); + let tx_hash = tx.calc_tx_hash(); + let tx_index = u32::MAX; + let key = Key::TxHash(&tx_hash).into_vec(); + let value = Value::Transaction(block_number, tx_index as TxIndex, tx); + batch.put_kv(key, value).expect("batch put should be ok"); + batch.commit().expect("batch commit should be ok"); + } + fn update_check_points(&self, start_index: CpIndex, check_points: &[Byte32]) { + let mut index = start_index; + let mut batch = 
self.batch(); + for cp in check_points { + let key = Key::CheckPointIndex(index).into_vec(); + let value = Value::BlockFilterHash(cp); + batch.put_kv(key, value).expect("batch put should be ok"); + index += 1; + } + batch.commit().expect("batch commit should be ok"); + } + + fn filter_block(&self, block: Block) { + let scripts: HashSet<(Script, ScriptType)> = self + .get_filter_scripts() + .into_iter() + .map(|ss| (ss.script, ss.script_type)) + .collect(); + let block_number: BlockNumber = block.header().raw().number().unpack(); + let mut filter_matched = false; + let mut batch = self.batch(); + let mut txs: HashMap = HashMap::new(); + block + .transactions() + .into_iter() + .enumerate() + .for_each(|(tx_index, tx)| { + tx.raw() + .inputs() + .into_iter() + .enumerate() + .for_each(|(input_index, input)| { + let previous_tx_hash = input.previous_output().tx_hash(); + if let Some(( + generated_by_block_number, + generated_by_tx_index, + previous_tx, + )) = self.get_transaction(&previous_tx_hash).or(txs + .get(&previous_tx_hash) + .map(|(tx_index, tx)| (block_number, *tx_index, tx.clone()))) + { + let previous_output_index = input.previous_output().index().unpack(); + if let Some(previous_output) = + previous_tx.raw().outputs().get(previous_output_index) + { + let script = previous_output.lock(); + if scripts.contains(&(script.clone(), ScriptType::Lock)) { + filter_matched = true; + // delete utxo + let key = Key::CellLockScript( + &script, + generated_by_block_number, + generated_by_tx_index, + previous_output_index as OutputIndex, + ) + .into_vec(); + batch.delete(key).expect("batch delete should be ok"); + // insert tx history + let key = Key::TxLockScript( + &script, + block_number, + tx_index as TxIndex, + input_index as CellIndex, + CellType::Input, + ) + .into_vec(); + let tx_hash = tx.calc_tx_hash(); + batch + .put(key, tx_hash.as_slice()) + .expect("batch put should be ok"); + // insert tx + let key = Key::TxHash(&tx_hash).into_vec(); + let value = + 
Value::Transaction(block_number, tx_index as TxIndex, &tx); + batch.put_kv(key, value).expect("batch put should be ok"); + } + if let Some(script) = previous_output.type_().to_opt() { + if scripts.contains(&(script.clone(), ScriptType::Type)) { + filter_matched = true; + // delete utxo + let key = Key::CellTypeScript( + &script, + generated_by_block_number, + generated_by_tx_index, + previous_output_index as OutputIndex, + ) + .into_vec(); + batch.delete(key).expect("batch delete should be ok"); + // insert tx history + let key = Key::TxTypeScript( + &script, + block_number, + tx_index as TxIndex, + input_index as CellIndex, + CellType::Input, + ) + .into_vec(); + let tx_hash = tx.calc_tx_hash(); + batch + .put(key, tx_hash.as_slice()) + .expect("batch put should be ok"); + // insert tx + let key = Key::TxHash(&tx_hash).into_vec(); + let value = Value::Transaction( + block_number, + tx_index as TxIndex, + &tx, + ); + batch.put_kv(key, value).expect("batch put should be ok"); + } + } + } + } + }); + + tx.raw() + .outputs() + .into_iter() + .enumerate() + .for_each(|(output_index, output)| { + let script = output.lock(); + if scripts.contains(&(script.clone(), ScriptType::Lock)) { + filter_matched = true; + let tx_hash = tx.calc_tx_hash(); + // insert utxo + let key = Key::CellLockScript( + &script, + block_number, + tx_index as TxIndex, + output_index as OutputIndex, + ) + .into_vec(); + batch + .put(key, tx_hash.as_slice()) + .expect("batch put should be ok"); + // insert tx history + let key = Key::TxLockScript( + &script, + block_number, + tx_index as TxIndex, + output_index as CellIndex, + CellType::Output, + ) + .into_vec(); + batch + .put(key, tx_hash.as_slice()) + .expect("batch put should be ok"); + // insert tx + let key = Key::TxHash(&tx_hash).into_vec(); + let value = Value::Transaction(block_number, tx_index as TxIndex, &tx); + batch.put_kv(key, value).expect("batch put should be ok"); + } + if let Some(script) = output.type_().to_opt() { + if 
scripts.contains(&(script.clone(), ScriptType::Type)) { + filter_matched = true; + let tx_hash = tx.calc_tx_hash(); + // insert utxo + let key = Key::CellTypeScript( + &script, + block_number, + tx_index as TxIndex, + output_index as OutputIndex, + ) + .into_vec(); + batch + .put(key, tx_hash.as_slice()) + .expect("batch put should be ok"); + // insert tx history + let key = Key::TxTypeScript( + &script, + block_number, + tx_index as TxIndex, + output_index as CellIndex, + CellType::Output, + ) + .into_vec(); + batch + .put(key, tx_hash.as_slice()) + .expect("batch put should be ok"); + // insert tx + let key = Key::TxHash(&tx_hash).into_vec(); + let value = + Value::Transaction(block_number, tx_index as TxIndex, &tx); + batch.put_kv(key, value).expect("batch put should be ok"); + } + } + }); + + txs.insert(tx.calc_tx_hash(), (tx_index as u32, tx)); + }); + if filter_matched { + let block_hash = block.calc_header_hash(); + let hwe = HeaderWithExtension { + header: block.header(), + extension: block.extension(), + }; + batch + .put(Key::BlockHash(&block_hash).into_vec(), hwe.to_vec()) + .expect("batch put should be ok"); + batch + .put( + Key::BlockNumber(block.header().raw().number().unpack()).into_vec(), + block_hash.as_slice(), + ) + .expect("batch put should be ok"); + } + batch.commit().expect("batch commit should be ok"); + } +} + +impl StorageGeneralOperations + for T +{ + fn get_genesis_block(&self) -> Block { + let genesis_hash_and_txs_hash = self + .get(Key::Meta(GENESIS_BLOCK_KEY).into_vec()) + .expect("get genesis block") + .expect("inited storage"); + let genesis_hash = Byte32::from_slice(&genesis_hash_and_txs_hash[0..32]) + .expect("stored genesis block hash"); + let genesis_header = Header::from_slice( + &self + .get(Key::BlockHash(&genesis_hash).into_vec()) + .expect("db get should be ok") + .expect("stored block hash / header mapping"), + ) + .expect("stored header should be OK"); + + let transactions: Vec = genesis_hash_and_txs_hash[32..] 
+ .chunks_exact(32) + .map(|tx_hash| { + Transaction::from_slice( + &self + .get( + Key::TxHash( + &Byte32::from_slice(tx_hash).expect("stored genesis block tx hash"), + ) + .into_vec(), + ) + .expect("db get should be ok") + .expect("stored genesis block tx")[12..], + ) + .expect("stored Transaction") + }) + .collect(); + + Block::new_builder() + .header(genesis_header) + .transactions(transactions.pack()) + .build() + } + + fn update_last_state( + &self, + total_difficulty: &U256, + tip_header: &Header, + last_n_headers: &[HeaderView], + ) { + let key = Key::Meta(LAST_STATE_KEY).into_vec(); + let mut value = total_difficulty.to_le_bytes().to_vec(); + value.extend(tip_header.as_slice()); + self.put(key, &value) + .expect("db put last state should be ok"); + self.update_last_n_headers(last_n_headers); + } + + fn update_last_n_headers(&self, headers: &[HeaderView]) { + let key = Key::Meta(LAST_N_HEADERS_KEY).into_vec(); + let mut value: Vec = Vec::with_capacity(headers.len() * 40); + for header in headers { + value.extend(header.number().to_le_bytes()); + value.extend(header.hash().as_slice()); + } + self.put(key, &value) + .expect("db put last n headers should be ok"); + } + + /// 0 all blocks downloaded and inserted into storage call this function. 
+ fn remove_matched_blocks(&self, start_number: u64) { + let mut key = Key::Meta(MATCHED_FILTER_BLOCKS_KEY).into_vec(); + key.extend(start_number.to_be_bytes()); + self.delete(&key).expect("delete matched blocks"); + } + + /// the matched blocks must not empty + fn add_matched_blocks( + &self, + start_number: u64, + blocks_count: u64, + // (block-hash, proved) + matched_blocks: Vec<(Byte32, bool)>, + ) { + assert!(!matched_blocks.is_empty()); + let mut key = Key::Meta(MATCHED_FILTER_BLOCKS_KEY).into_vec(); + key.extend(start_number.to_be_bytes()); + + let mut value = blocks_count.to_le_bytes().to_vec(); + for (block_hash, proved) in matched_blocks { + value.extend(block_hash.as_slice()); + value.push(u8::from(proved)); + } + self.put(key, &value) + .expect("db put matched blocks should be ok"); + } + + fn cleanup_invalid_matched_blocks(&self) { + use ckb_types::prelude::Unpack; + use log::warn; + + let tip_number: u64 = self.get_tip_header().raw().number().unpack(); + + loop { + let entry = self.get_earliest_matched_blocks(); + if entry.is_none() { + break; + } + + let matched_blocks = entry.unwrap(); + let start_number = matched_blocks.start_number; + let blocks_count = matched_blocks.blocks_count; + let mut should_remove = false; + + for block in &matched_blocks.blocks { + if let Some(header) = self.get_header(&block.hash) { + let stored_number: u64 = header.number(); + if stored_number < start_number || stored_number >= start_number + blocks_count + { + warn!( + "Invalid matched block {:#x} at number {} outside expected range [{}, {}), removing entry at start_number={}", + block.hash, stored_number, start_number, start_number + blocks_count, start_number + ); + should_remove = true; + break; + } + } else if start_number + 1000 < tip_number { + warn!( + "Matched block {:#x} not found in storage, entry at start_number={} is {} blocks behind tip, removing", + block.hash, start_number, tip_number - start_number + ); + should_remove = true; + break; + } + } + + if 
should_remove { + self.remove_matched_blocks(start_number); + } else { + break; + } + } + } + + fn get_tip_header(&self) -> Header { + self.get_last_state().1 + } + + fn update_min_filtered_block_number(&self, block_number: BlockNumber) { + let key = Key::Meta(MIN_FILTERED_BLOCK_NUMBER).into_vec(); + let value = block_number.to_le_bytes(); + self.put(key, value) + .expect("db put min filtered block number should be ok"); + } + + fn get_last_check_point(&self) -> (CpIndex, Byte32) { + let index = self.get_max_check_point_index(); + let hash = self + .get_check_points(index, 1) + .first() + .cloned() + .expect("db get last check point should be ok"); + (index, hash) + } + + fn update_max_check_point_index(&self, index: CpIndex) { + let key = Key::Meta(MAX_CHECK_POINT_INDEX).into_vec(); + let value = index.to_be_bytes(); + self.put(key, value) + .expect("db put max check point index should be ok"); + } + + fn get_transaction(&self, tx_hash: &Byte32) -> Option<(BlockNumber, TxIndex, Transaction)> { + self.get(Key::TxHash(tx_hash).into_vec()) + .map(|v| { + v.map(|v| { + ( + BlockNumber::from_be_bytes(v[0..8].try_into().expect("stored BlockNumber")), + TxIndex::from_be_bytes(v[8..12].try_into().expect("stored TxIndex")), + Transaction::from_slice(&v[12..]).expect("stored Transaction"), + ) + }) + }) + .expect("db get should be ok") + } + + fn get_transaction_with_header(&self, tx_hash: &Byte32) -> Option<(Transaction, Header)> { + self.get_transaction(tx_hash) + .map(|(block_number, _tx_index, tx)| { + let block_hash = Byte32::from_slice( + &self + .get(Key::BlockNumber(block_number).into_vec()) + .expect("db get should be ok") + .expect("stored block number / hash mapping"), + ) + .expect("stored block hash should be OK"); + + let header = Header::from_slice( + &self + .get(Key::BlockHash(&block_hash).into_vec()) + .expect("db get should be ok") + .expect("stored block hash / header mapping")[..Header::TOTAL_SIZE], + ) + .expect("stored header should be OK"); + (tx, 
header) + }) + } +} diff --git a/light-client-lib/src/storage/db/native.rs b/light-client-lib/src/storage/db/native.rs deleted file mode 100644 index 5e0d59e1..00000000 --- a/light-client-lib/src/storage/db/native.rs +++ /dev/null @@ -1,1172 +0,0 @@ -use super::super::{ - extract_raw_data, parse_matched_blocks, BlockNumber, Byte32, CellIndex, CellType, CpIndex, - HeaderWithExtension, Key, KeyPrefix, MatchedBlock, MatchedBlocks, OutputIndex, Script, - ScriptStatus, ScriptType, SetScriptsCommand, TxIndex, Value, WrappedBlockView, - FILTER_SCRIPTS_KEY, GENESIS_BLOCK_KEY, LAST_N_HEADERS_KEY, LAST_STATE_KEY, - MATCHED_FILTER_BLOCKS_KEY, MAX_CHECK_POINT_INDEX, MIN_FILTERED_BLOCK_NUMBER, -}; -use crate::error::Result; -use ckb_traits::{CellDataProvider, HeaderProvider}; -use ckb_types::{ - bytes::Bytes, - core::{ - cell::{CellMeta, CellProvider, CellStatus}, - HeaderView, TransactionInfo, - }, - packed::{self, Block, CellOutput, Header, OutPoint, Transaction}, - prelude::*, - utilities::{build_filter_data, calc_filter_hash}, - U256, -}; -use rocksdb::{ - ops::{Delete, GetPinned}, - prelude::{Get, Iterate, Open, Put, WriteOps}, - DBPinnableSlice, Direction, IteratorMode, Options, Snapshot, WriteBatch, DB, -}; -use std::{ - collections::{HashMap, HashSet}, - path::Path, - sync::Arc, -}; - -pub struct PinnedSlice<'a> { - inner: rocksdb::DBPinnableSlice<'a>, -} - -impl<'a> From> for PinnedSlice<'a> { - fn from(value: DBPinnableSlice<'a>) -> Self { - PinnedSlice { inner: value } - } -} - -impl AsRef<[u8]> for PinnedSlice<'_> { - fn as_ref(&self) -> &[u8] { - self.inner.as_ref() - } -} - -#[derive(Clone)] -pub struct Storage { - pub(crate) db: Arc, -} - -impl Storage { - pub fn new>(path: P) -> Self { - let mut opts = Options::default(); - opts.create_if_missing(true); - opts.set_max_total_wal_size(128 * 1024 * 1024); - opts.set_write_buffer_size(128 * 1024 * 1024); - opts.set_max_write_buffer_number(2); - let db = Arc::new(DB::open(&opts, path).expect("Failed to open 
rocksdb")); - Self { db } - } - - fn batch(&self) -> Batch { - Batch { - db: Arc::clone(&self.db), - wb: WriteBatch::default(), - } - } - - pub fn snapshot(&self) -> Snapshot<'_> { - self.db.snapshot() - } - - fn put(&self, key: K, value: V) -> Result<()> - where - K: AsRef<[u8]>, - V: AsRef<[u8]>, - { - self.db.put(key, value).map_err(Into::into) - } - - pub fn get>(&self, key: K) -> Result>> { - self.db - .get(key.as_ref()) - .map(|v| v.map(|vi| vi.to_vec())) - .map_err(Into::into) - } - - fn get_pinned(&self, key: K) -> Result>> - where - K: AsRef<[u8]>, - { - self.db - .get_pinned(key) - .map_err(Into::into) - .map(|a| a.map(Into::into)) - } - - fn delete>(&self, key: K) -> Result<()> { - self.db.delete(key).map_err(Into::into) - } - - pub fn is_filter_scripts_empty(&self) -> bool { - let key_prefix = Key::Meta(FILTER_SCRIPTS_KEY).into_vec(); - let mode = IteratorMode::From(key_prefix.as_ref(), Direction::Forward); - self.db - .iterator(mode) - .take_while(|(key, _value)| key.starts_with(&key_prefix)) - .next() - .is_none() - } - - pub fn get_filter_scripts(&self) -> Vec { - let key_prefix = Key::Meta(FILTER_SCRIPTS_KEY).into_vec(); - let mode = IteratorMode::From(key_prefix.as_ref(), Direction::Forward); - - self.db - .iterator(mode) - .take_while(|(key, _value)| key.starts_with(&key_prefix)) - .map(|(key, value)| { - let script = Script::from_slice(&key[key_prefix.len()..key.len() - 1]) - .expect("stored Script"); - let script_type = match key[key.len() - 1] { - 0 => ScriptType::Lock, - 1 => ScriptType::Type, - _ => panic!("invalid script type"), - }; - let block_number = BlockNumber::from_be_bytes( - value.as_ref().try_into().expect("stored BlockNumber"), - ); - ScriptStatus { - script, - script_type, - block_number, - } - }) - .collect() - } - - pub fn update_filter_scripts(&self, scripts: Vec, command: SetScriptsCommand) { - let mut should_filter_genesis_block = false; - let mut batch = self.batch(); - let key_prefix = 
Key::Meta(FILTER_SCRIPTS_KEY).into_vec(); - - match command { - SetScriptsCommand::All => { - should_filter_genesis_block = scripts.iter().any(|ss| ss.block_number == 0); - let mode = IteratorMode::From(key_prefix.as_ref(), Direction::Forward); - - self.db - .iterator(mode) - .take_while(|(key, _value)| key.starts_with(&key_prefix)) - .for_each(|(key, _value)| { - batch.delete(key).expect("batch delete should be ok"); - }); - - for ss in scripts { - let key = [ - key_prefix.as_ref(), - ss.script.as_slice(), - match ss.script_type { - ScriptType::Lock => &[0], - ScriptType::Type => &[1], - }, - ] - .concat(); - batch - .put(key, ss.block_number.to_be_bytes()) - .expect("batch put should be ok"); - } - } - SetScriptsCommand::Partial => { - if scripts.is_empty() { - return; - } - let min_script_block_number = scripts.iter().map(|ss| ss.block_number).min(); - should_filter_genesis_block = min_script_block_number == Some(0); - - for ss in scripts { - let key = [ - key_prefix.as_ref(), - ss.script.as_slice(), - match ss.script_type { - ScriptType::Lock => &[0], - ScriptType::Type => &[1], - }, - ] - .concat(); - batch - .put(key, ss.block_number.to_be_bytes()) - .expect("batch put should be ok"); - } - } - SetScriptsCommand::Delete => { - if scripts.is_empty() { - return; - } - - for ss in scripts { - let key = [ - key_prefix.as_ref(), - ss.script.as_slice(), - match ss.script_type { - ScriptType::Lock => &[0], - ScriptType::Type => &[1], - }, - ] - .concat(); - batch.delete(key).expect("batch delete should be ok"); - } - } - } - - batch.commit().expect("batch commit should be ok"); - - self.update_min_filtered_block_number_by_scripts(); - self.clear_matched_blocks(); - - if should_filter_genesis_block { - let block = self.get_genesis_block(); - self.filter_block(block); - } - } - - fn update_min_filtered_block_number_by_scripts(&self) { - let key_prefix = Key::Meta(FILTER_SCRIPTS_KEY).into_vec(); - let mode = IteratorMode::From(key_prefix.as_ref(), Direction::Forward); 
- - let min_block_number = self - .db - .iterator(mode) - .take_while(|(key, _value)| key.starts_with(&key_prefix)) - .map(|(_key, value)| { - BlockNumber::from_be_bytes(value.as_ref().try_into().expect("stored BlockNumber")) - }) - .min(); - - if let Some(n) = min_block_number { - self.update_min_filtered_block_number(n); - } - } - - // get scripts hash that should be filtered below the given block number - pub fn get_scripts_hash(&self, block_number: BlockNumber) -> Vec { - let key_prefix = Key::Meta(FILTER_SCRIPTS_KEY).into_vec(); - let mode = IteratorMode::From(key_prefix.as_ref(), Direction::Forward); - - self.db - .iterator(mode) - .take_while(|(key, _value)| key.starts_with(&key_prefix)) - .filter_map(|(key, value)| { - let stored_block_number = BlockNumber::from_be_bytes( - value.as_ref().try_into().expect("stored BlockNumber"), - ); - if stored_block_number < block_number { - let script = Script::from_slice(&key[key_prefix.len()..key.len() - 1]) - .expect("stored Script"); - Some(script.calc_script_hash()) - } else { - None - } - }) - .collect() - } - - fn clear_matched_blocks(&self) { - let key_prefix = Key::Meta(MATCHED_FILTER_BLOCKS_KEY).into_vec(); - let mode = IteratorMode::From(key_prefix.as_ref(), Direction::Forward); - let mut batch = self.batch(); - for (key, _) in self - .db - .iterator(mode) - .take_while(|(key, _value)| key.starts_with(&key_prefix)) - { - batch.delete(key).expect("batch delete should be ok"); - } - batch.commit().expect("batch commit should be ok"); - } - - fn get_matched_blocks(&self, direction: Direction) -> Option { - let key_prefix = Key::Meta(MATCHED_FILTER_BLOCKS_KEY).into_vec(); - let iter_from = match direction { - Direction::Forward => key_prefix.clone(), - Direction::Reverse => { - let mut key = key_prefix.clone(); - key.extend(u64::MAX.to_be_bytes()); - key - } - }; - let mode = IteratorMode::From(iter_from.as_ref(), direction); - self.db - .iterator(mode) - .take_while(|(key, _value)| key.starts_with(&key_prefix)) - 
.map(|(key, value)| { - let mut u64_bytes = [0u8; 8]; - u64_bytes.copy_from_slice(&key[key_prefix.len()..]); - let start_number = u64::from_be_bytes(u64_bytes); - let (blocks_count, raw_blocks) = parse_matched_blocks(&value); - let blocks = raw_blocks - .into_iter() - .map(|(hash, proved)| MatchedBlock { hash, proved }) - .collect(); - MatchedBlocks { - start_number, - blocks_count, - blocks, - } - }) - .next() - } - - pub fn get_earliest_matched_blocks(&self) -> Option { - self.get_matched_blocks(Direction::Forward) - } - - pub fn get_latest_matched_blocks(&self) -> Option { - self.get_matched_blocks(Direction::Reverse) - } - - pub fn get_check_points(&self, start_index: CpIndex, limit: usize) -> Vec { - let start_key = Key::CheckPointIndex(start_index).into_vec(); - let key_prefix = [KeyPrefix::CheckPointIndex as u8]; - let mode = IteratorMode::From(start_key.as_ref(), Direction::Forward); - self.db - .iterator(mode) - .take_while(|(key, _value)| key.starts_with(&key_prefix)) - .take(limit) - .map(|(_key, value)| Byte32::from_slice(&value).expect("stored block filter hash")) - .collect() - } - - pub fn update_block_number(&self, block_number: BlockNumber) { - let key_prefix = Key::Meta(FILTER_SCRIPTS_KEY).into_vec(); - let mode = IteratorMode::From(key_prefix.as_ref(), Direction::Forward); - - let mut batch = self.batch(); - self.db - .iterator(mode) - .take_while(|(key, _value)| key.starts_with(&key_prefix)) - .for_each(|(key, value)| { - let stored_block_number = BlockNumber::from_be_bytes( - value.as_ref().try_into().expect("stored BlockNumber"), - ); - if stored_block_number < block_number { - batch - .put(key, block_number.to_be_bytes()) - .expect("batch put should be ok") - } - }); - batch.commit().expect("batch commit should be ok"); - } - - /// Rollback filtered block data to specified block number - /// - /// N.B. The specified block will be removed. 
- pub fn rollback_to_block(&self, to_number: BlockNumber) { - let scripts = self.get_filter_scripts(); - let mut batch = self.batch(); - - for ss in scripts { - if ss.block_number >= to_number { - let script = ss.script; - let mut key_prefix = vec![match ss.script_type { - ScriptType::Lock => KeyPrefix::TxLockScript as u8, - ScriptType::Type => KeyPrefix::TxTypeScript as u8, - }]; - key_prefix.extend_from_slice(&extract_raw_data(&script)); - let mut start_key = key_prefix.clone(); - start_key.extend_from_slice(BlockNumber::MAX.to_be_bytes().as_ref()); - let mode = IteratorMode::From(start_key.as_ref(), Direction::Reverse); - let key_prefix_len = key_prefix.len(); - - self.db - .iterator(mode) - .take_while(|(key, _value)| { - key.starts_with(&key_prefix) - && BlockNumber::from_be_bytes( - key[key_prefix_len..key_prefix_len + 8] - .try_into() - .expect("stored BlockNumber"), - ) >= to_number - }) - .for_each(|(key, value)| { - let block_number = BlockNumber::from_be_bytes( - key[key_prefix_len..key_prefix_len + 8] - .try_into() - .expect("stored BlockNumber"), - ); - log::debug!("rollback {}", block_number); - let tx_index = TxIndex::from_be_bytes( - key[key_prefix_len + 8..key_prefix_len + 12] - .try_into() - .expect("stored TxIndex"), - ); - let cell_index = CellIndex::from_be_bytes( - key[key_prefix_len + 12..key_prefix_len + 16] - .try_into() - .expect("stored CellIndex"), - ); - let tx_hash = - packed::Byte32Reader::from_slice_should_be_ok(&value).to_entity(); - if key[key_prefix_len + 16] == 0 { - let (_, _, tx) = self - .get_transaction(&tx_hash) - .expect("stored transaction history"); - let input = tx.raw().inputs().get(cell_index as usize).unwrap(); - if let Some(( - generated_by_block_number, - generated_by_tx_index, - _previous_tx, - )) = self.get_transaction(&input.previous_output().tx_hash()) - { - let key = match ss.script_type { - ScriptType::Lock => Key::CellLockScript( - &script, - generated_by_block_number, - generated_by_tx_index, - 
input.previous_output().index().unpack(), - ), - ScriptType::Type => Key::CellTypeScript( - &script, - generated_by_block_number, - generated_by_tx_index, - input.previous_output().index().unpack(), - ), - }; - batch - .put_kv(key, input.previous_output().tx_hash().as_slice()) - .expect("batch put should be ok"); - }; - // delete tx history - let key = match ss.script_type { - ScriptType::Lock => Key::TxLockScript( - &script, - block_number, - tx_index, - cell_index, - CellType::Input, - ), - ScriptType::Type => Key::TxTypeScript( - &script, - block_number, - tx_index, - cell_index, - CellType::Input, - ), - } - .into_vec(); - batch.delete(key).expect("batch delete should be ok"); - } else { - // delete utxo - let key = match ss.script_type { - ScriptType::Lock => { - Key::CellLockScript(&script, block_number, tx_index, cell_index) - } - ScriptType::Type => { - Key::CellTypeScript(&script, block_number, tx_index, cell_index) - } - } - .into_vec(); - batch.delete(key).expect("batch delete should be ok"); - - // delete tx history - let key = match ss.script_type { - ScriptType::Lock => Key::TxLockScript( - &script, - block_number, - tx_index, - cell_index, - CellType::Output, - ), - ScriptType::Type => Key::TxTypeScript( - &script, - block_number, - tx_index, - cell_index, - CellType::Output, - ), - } - .into_vec(); - batch.delete(key).expect("batch delete should be ok"); - }; - }); - - // update script filter block number - { - let mut key = Key::Meta(FILTER_SCRIPTS_KEY).into_vec(); - key.extend_from_slice(script.as_slice()); - key.extend_from_slice(match ss.script_type { - ScriptType::Lock => &[0], - ScriptType::Type => &[1], - }); - let value = to_number.to_be_bytes().to_vec(); - batch.put(key, value).expect("batch put should be ok"); - } - } - } - - // we should also sync block filters again - if self.get_min_filtered_block_number() >= to_number { - batch - .put( - Key::Meta(MIN_FILTERED_BLOCK_NUMBER).into_vec(), - to_number.saturating_sub(1).to_le_bytes(), - ) - 
.expect("batch put should be ok"); - } - - batch.commit().expect("batch commit should be ok"); - } -} - -pub struct Batch { - db: Arc, - wb: WriteBatch, -} - -impl Batch { - fn put_kv>, V: Into>>(&mut self, key: K, value: V) -> Result<()> { - self.put(Into::>::into(key), Into::>::into(value)) - } - - fn put, V: AsRef<[u8]>>(&mut self, key: K, value: V) -> Result<()> { - self.wb.put(key, value)?; - Ok(()) - } - - fn delete>(&mut self, key: K) -> Result<()> { - self.wb.delete(key.as_ref())?; - Ok(()) - } - - fn commit(self) -> Result<()> { - self.db.write(&self.wb)?; - Ok(()) - } -} - -impl Storage { - pub fn init_genesis_block(&self, block: Block) { - let genesis_hash = block.calc_header_hash(); - let genesis_block_key = Key::Meta(GENESIS_BLOCK_KEY).into_vec(); - if let Some(stored_genesis_hash) = self - .get(genesis_block_key.as_slice()) - .expect("get genesis block") - .map(|v| v[0..32].to_vec()) - { - if genesis_hash.as_slice() != stored_genesis_hash.as_slice() { - panic!( - "genesis hash mismatch: stored={:#?}, new={}", - stored_genesis_hash, genesis_hash - ); - } - } else { - let mut batch = self.batch(); - let block_hash = block.calc_header_hash(); - batch - .put_kv(Key::Meta(LAST_STATE_KEY), block.header().as_slice()) - .expect("batch put should be ok"); - batch - .put_kv(Key::BlockHash(&block_hash), block.header().as_slice()) - .expect("batch put should be ok"); - batch - .put_kv(Key::BlockNumber(0), block_hash.as_slice()) - .expect("batch put should be ok"); - let mut genesis_hash_and_txs_hash = genesis_hash.as_slice().to_vec(); - block - .transactions() - .into_iter() - .enumerate() - .for_each(|(tx_index, tx)| { - let tx_hash = tx.calc_tx_hash(); - genesis_hash_and_txs_hash.extend_from_slice(tx_hash.as_slice()); - let key = Key::TxHash(&tx_hash).into_vec(); - let value = Value::Transaction(0, tx_index as TxIndex, &tx); - batch.put_kv(key, value).expect("batch put should be ok"); - }); - batch - .put_kv(genesis_block_key, 
genesis_hash_and_txs_hash.as_slice()) - .expect("batch put should be ok"); - batch.commit().expect("batch commit should be ok"); - self.update_last_state(&U256::zero(), &block.header(), &[]); - let genesis_block_filter_hash: Byte32 = { - let block_view = block.into_view(); - let provider = WrappedBlockView::new(&block_view); - let parent_block_filter_hash = Byte32::zero(); - let (genesis_block_filter_vec, missing_out_points) = - build_filter_data(provider, &block_view.transactions()); - if !missing_out_points.is_empty() { - panic!("Genesis block shouldn't missing any out points."); - } - let genesis_block_filter_data = genesis_block_filter_vec.pack(); - calc_filter_hash(&parent_block_filter_hash, &genesis_block_filter_data).pack() - }; - self.update_max_check_point_index(0); - self.update_check_points(0, &[genesis_block_filter_hash]); - self.update_min_filtered_block_number(0); - } - } - - pub fn get_genesis_block(&self) -> Block { - let genesis_hash_and_txs_hash = self - .get(Key::Meta(GENESIS_BLOCK_KEY).into_vec()) - .expect("get genesis block") - .expect("inited storage"); - let genesis_hash = Byte32::from_slice(&genesis_hash_and_txs_hash[0..32]) - .expect("stored genesis block hash"); - let genesis_header = Header::from_slice( - &self - .get(Key::BlockHash(&genesis_hash).into_vec()) - .expect("db get should be ok") - .expect("stored block hash / header mapping"), - ) - .expect("stored header should be OK"); - - let transactions: Vec = genesis_hash_and_txs_hash[32..] 
- .chunks_exact(32) - .map(|tx_hash| { - Transaction::from_slice( - &self - .get( - Key::TxHash( - &Byte32::from_slice(tx_hash).expect("stored genesis block tx hash"), - ) - .into_vec(), - ) - .expect("db get should be ok") - .expect("stored genesis block tx")[12..], - ) - .expect("stored Transaction") - }) - .collect(); - - Block::new_builder() - .header(genesis_header) - .transactions(transactions.pack()) - .build() - } - - pub fn update_last_state( - &self, - total_difficulty: &U256, - tip_header: &Header, - last_n_headers: &[HeaderView], - ) { - let key = Key::Meta(LAST_STATE_KEY).into_vec(); - let mut value = total_difficulty.to_le_bytes().to_vec(); - value.extend(tip_header.as_slice()); - self.put(key, &value) - .expect("db put last state should be ok"); - self.update_last_n_headers(last_n_headers); - } - - pub fn get_last_state(&self) -> (U256, Header) { - let key = Key::Meta(LAST_STATE_KEY).into_vec(); - self.get_pinned(&key) - .expect("db get last state should be ok") - .map(|data| { - let mut total_difficulty_bytes = [0u8; 32]; - total_difficulty_bytes.copy_from_slice(&data.as_ref()[0..32]); - let total_difficulty = U256::from_le_bytes(&total_difficulty_bytes); - let header = - packed::HeaderReader::from_slice_should_be_ok(&data.as_ref()[32..]).to_entity(); - (total_difficulty, header) - }) - .expect("tip header should be inited") - } - - fn update_last_n_headers(&self, headers: &[HeaderView]) { - let key = Key::Meta(LAST_N_HEADERS_KEY).into_vec(); - let mut value: Vec = Vec::with_capacity(headers.len() * 40); - for header in headers { - value.extend(header.number().to_le_bytes()); - value.extend(header.hash().as_slice()); - } - self.put(key, &value) - .expect("db put last n headers should be ok"); - } - - pub fn get_last_n_headers(&self) -> Vec<(u64, Byte32)> { - let key = Key::Meta(LAST_N_HEADERS_KEY).into_vec(); - self.get_pinned(&key) - .expect("db get last n headers should be ok") - .map(|data| { - assert!(data.as_ref().len() % 40 == 0); - let mut 
headers = Vec::with_capacity(data.as_ref().len() / 40); - for part in data.as_ref().chunks(40) { - let number = u64::from_le_bytes(part[0..8].try_into().unwrap()); - let hash = Byte32::from_slice(&part[8..]).expect("byte32 block hash"); - headers.push((number, hash)); - } - headers - }) - .expect("last n headers should be inited") - } - - /// 0 all blocks downloaded and inserted into storage call this function. - pub fn remove_matched_blocks(&self, start_number: u64) { - let mut key = Key::Meta(MATCHED_FILTER_BLOCKS_KEY).into_vec(); - key.extend(start_number.to_be_bytes()); - self.delete(&key).expect("delete matched blocks"); - } - - /// the matched blocks must not empty - pub fn add_matched_blocks( - &self, - start_number: u64, - blocks_count: u64, - // (block-hash, proved) - matched_blocks: Vec<(Byte32, bool)>, - ) { - assert!(!matched_blocks.is_empty()); - let mut key = Key::Meta(MATCHED_FILTER_BLOCKS_KEY).into_vec(); - key.extend(start_number.to_be_bytes()); - - let mut value = blocks_count.to_le_bytes().to_vec(); - for (block_hash, proved) in matched_blocks { - value.extend(block_hash.as_slice()); - value.push(u8::from(proved)); - } - self.put(key, &value) - .expect("db put matched blocks should be ok"); - } - - pub fn cleanup_invalid_matched_blocks(&self) { - use ckb_types::prelude::Unpack; - use log::warn; - - let tip_number: u64 = self.get_tip_header().raw().number().unpack(); - - loop { - let entry = self.get_earliest_matched_blocks(); - if entry.is_none() { - break; - } - - let matched_blocks = entry.unwrap(); - let start_number = matched_blocks.start_number; - let blocks_count = matched_blocks.blocks_count; - let mut should_remove = false; - - for block in &matched_blocks.blocks { - if let Some(header) = self.get_header(&block.hash) { - let stored_number: u64 = header.number(); - if stored_number < start_number || stored_number >= start_number + blocks_count - { - warn!( - "Invalid matched block {:#x} at number {} outside expected range [{}, {}), 
removing entry at start_number={}", - block.hash, stored_number, start_number, start_number + blocks_count, start_number - ); - should_remove = true; - break; - } - } else if start_number + 1000 < tip_number { - warn!( - "Matched block {:#x} not found in storage, entry at start_number={} is {} blocks behind tip, removing", - block.hash, start_number, tip_number - start_number - ); - should_remove = true; - break; - } - } - - if should_remove { - self.remove_matched_blocks(start_number); - } else { - break; - } - } - } - - pub fn add_fetched_header(&self, hwe: &HeaderWithExtension) { - let mut batch = self.batch(); - let block_hash = hwe.header.calc_header_hash(); - batch - .put(Key::BlockHash(&block_hash).into_vec(), hwe.to_vec()) - .expect("batch put should be ok"); - batch - .put( - Key::BlockNumber(hwe.header.raw().number().unpack()).into_vec(), - block_hash.as_slice(), - ) - .expect("batch put should be ok"); - batch.commit().expect("batch commit should be ok"); - } - - pub fn add_fetched_tx(&self, tx: &Transaction, hwe: &HeaderWithExtension) { - let mut batch = self.batch(); - let block_hash = hwe.header.calc_header_hash(); - let block_number: u64 = hwe.header.raw().number().unpack(); - batch - .put(Key::BlockHash(&block_hash).into_vec(), hwe.to_vec()) - .expect("batch put should be ok"); - batch - .put( - Key::BlockNumber(block_number).into_vec(), - block_hash.as_slice(), - ) - .expect("batch put should be ok"); - let tx_hash = tx.calc_tx_hash(); - let tx_index = u32::MAX; - let key = Key::TxHash(&tx_hash).into_vec(); - let value = Value::Transaction(block_number, tx_index as TxIndex, tx); - batch.put_kv(key, value).expect("batch put should be ok"); - batch.commit().expect("batch commit should be ok"); - } - - pub fn get_tip_header(&self) -> Header { - self.get_last_state().1 - } - - pub fn get_min_filtered_block_number(&self) -> BlockNumber { - let key = Key::Meta(MIN_FILTERED_BLOCK_NUMBER).into_vec(); - self.get_pinned(&key) - .expect("db get min filtered 
block number should be ok") - .map(|data| u64::from_le_bytes(data.as_ref().try_into().unwrap())) - .unwrap_or_default() - } - - pub fn update_min_filtered_block_number(&self, block_number: BlockNumber) { - let key = Key::Meta(MIN_FILTERED_BLOCK_NUMBER).into_vec(); - let value = block_number.to_le_bytes(); - self.put(key, value) - .expect("db put min filtered block number should be ok"); - } - - pub fn get_last_check_point(&self) -> (CpIndex, Byte32) { - let index = self.get_max_check_point_index(); - let hash = self - .get_check_points(index, 1) - .first() - .cloned() - .expect("db get last check point should be ok"); - (index, hash) - } - - pub fn get_max_check_point_index(&self) -> CpIndex { - let key = Key::Meta(MAX_CHECK_POINT_INDEX).into_vec(); - self.get_pinned(&key) - .expect("db get max check point index should be ok") - .map(|data| CpIndex::from_be_bytes(data.as_ref().try_into().unwrap())) - .expect("db get max check point index should be ok") - } - - pub fn update_max_check_point_index(&self, index: CpIndex) { - let key = Key::Meta(MAX_CHECK_POINT_INDEX).into_vec(); - let value = index.to_be_bytes(); - self.put(key, value) - .expect("db put max check point index should be ok"); - } - - pub fn update_check_points(&self, start_index: CpIndex, check_points: &[Byte32]) { - let mut index = start_index; - let mut batch = self.batch(); - for cp in check_points { - let key = Key::CheckPointIndex(index).into_vec(); - let value = Value::BlockFilterHash(cp); - batch.put_kv(key, value).expect("batch put should be ok"); - index += 1; - } - batch.commit().expect("batch commit should be ok"); - } - - pub fn filter_block(&self, block: Block) { - let scripts: HashSet<(Script, ScriptType)> = self - .get_filter_scripts() - .into_iter() - .map(|ss| (ss.script, ss.script_type)) - .collect(); - let block_number: BlockNumber = block.header().raw().number().unpack(); - let mut filter_matched = false; - let mut batch = self.batch(); - let mut txs: HashMap = HashMap::new(); - 
block - .transactions() - .into_iter() - .enumerate() - .for_each(|(tx_index, tx)| { - tx.raw() - .inputs() - .into_iter() - .enumerate() - .for_each(|(input_index, input)| { - let previous_tx_hash = input.previous_output().tx_hash(); - if let Some(( - generated_by_block_number, - generated_by_tx_index, - previous_tx, - )) = self.get_transaction(&previous_tx_hash).or(txs - .get(&previous_tx_hash) - .map(|(tx_index, tx)| (block_number, *tx_index, tx.clone()))) - { - let previous_output_index = input.previous_output().index().unpack(); - if let Some(previous_output) = - previous_tx.raw().outputs().get(previous_output_index) - { - let script = previous_output.lock(); - if scripts.contains(&(script.clone(), ScriptType::Lock)) { - filter_matched = true; - // delete utxo - let key = Key::CellLockScript( - &script, - generated_by_block_number, - generated_by_tx_index, - previous_output_index as OutputIndex, - ) - .into_vec(); - batch.delete(key).expect("batch delete should be ok"); - // insert tx history - let key = Key::TxLockScript( - &script, - block_number, - tx_index as TxIndex, - input_index as CellIndex, - CellType::Input, - ) - .into_vec(); - let tx_hash = tx.calc_tx_hash(); - batch - .put(key, tx_hash.as_slice()) - .expect("batch put should be ok"); - // insert tx - let key = Key::TxHash(&tx_hash).into_vec(); - let value = - Value::Transaction(block_number, tx_index as TxIndex, &tx); - batch.put_kv(key, value).expect("batch put should be ok"); - } - if let Some(script) = previous_output.type_().to_opt() { - if scripts.contains(&(script.clone(), ScriptType::Type)) { - filter_matched = true; - // delete utxo - let key = Key::CellTypeScript( - &script, - generated_by_block_number, - generated_by_tx_index, - previous_output_index as OutputIndex, - ) - .into_vec(); - batch.delete(key).expect("batch delete should be ok"); - // insert tx history - let key = Key::TxTypeScript( - &script, - block_number, - tx_index as TxIndex, - input_index as CellIndex, - 
CellType::Input, - ) - .into_vec(); - let tx_hash = tx.calc_tx_hash(); - batch - .put(key, tx_hash.as_slice()) - .expect("batch put should be ok"); - // insert tx - let key = Key::TxHash(&tx_hash).into_vec(); - let value = Value::Transaction( - block_number, - tx_index as TxIndex, - &tx, - ); - batch.put_kv(key, value).expect("batch put should be ok"); - } - } - } - } - }); - - tx.raw() - .outputs() - .into_iter() - .enumerate() - .for_each(|(output_index, output)| { - let script = output.lock(); - if scripts.contains(&(script.clone(), ScriptType::Lock)) { - filter_matched = true; - let tx_hash = tx.calc_tx_hash(); - // insert utxo - let key = Key::CellLockScript( - &script, - block_number, - tx_index as TxIndex, - output_index as OutputIndex, - ) - .into_vec(); - batch - .put(key, tx_hash.as_slice()) - .expect("batch put should be ok"); - // insert tx history - let key = Key::TxLockScript( - &script, - block_number, - tx_index as TxIndex, - output_index as CellIndex, - CellType::Output, - ) - .into_vec(); - batch - .put(key, tx_hash.as_slice()) - .expect("batch put should be ok"); - // insert tx - let key = Key::TxHash(&tx_hash).into_vec(); - let value = Value::Transaction(block_number, tx_index as TxIndex, &tx); - batch.put_kv(key, value).expect("batch put should be ok"); - } - if let Some(script) = output.type_().to_opt() { - if scripts.contains(&(script.clone(), ScriptType::Type)) { - filter_matched = true; - let tx_hash = tx.calc_tx_hash(); - // insert utxo - let key = Key::CellTypeScript( - &script, - block_number, - tx_index as TxIndex, - output_index as OutputIndex, - ) - .into_vec(); - batch - .put(key, tx_hash.as_slice()) - .expect("batch put should be ok"); - // insert tx history - let key = Key::TxTypeScript( - &script, - block_number, - tx_index as TxIndex, - output_index as CellIndex, - CellType::Output, - ) - .into_vec(); - batch - .put(key, tx_hash.as_slice()) - .expect("batch put should be ok"); - // insert tx - let key = 
Key::TxHash(&tx_hash).into_vec(); - let value = - Value::Transaction(block_number, tx_index as TxIndex, &tx); - batch.put_kv(key, value).expect("batch put should be ok"); - } - } - }); - - txs.insert(tx.calc_tx_hash(), (tx_index as u32, tx)); - }); - if filter_matched { - let block_hash = block.calc_header_hash(); - let hwe = HeaderWithExtension { - header: block.header(), - extension: block.extension(), - }; - batch - .put(Key::BlockHash(&block_hash).into_vec(), hwe.to_vec()) - .expect("batch put should be ok"); - batch - .put( - Key::BlockNumber(block.header().raw().number().unpack()).into_vec(), - block_hash.as_slice(), - ) - .expect("batch put should be ok"); - } - batch.commit().expect("batch commit should be ok"); - } - - fn get_transaction(&self, tx_hash: &Byte32) -> Option<(BlockNumber, TxIndex, Transaction)> { - self.get(Key::TxHash(tx_hash).into_vec()) - .map(|v| { - v.map(|v| { - ( - BlockNumber::from_be_bytes(v[0..8].try_into().expect("stored BlockNumber")), - TxIndex::from_be_bytes(v[8..12].try_into().expect("stored TxIndex")), - Transaction::from_slice(&v[12..]).expect("stored Transaction"), - ) - }) - }) - .expect("db get should be ok") - } - - pub fn get_transaction_with_header(&self, tx_hash: &Byte32) -> Option<(Transaction, Header)> { - self.get_transaction(tx_hash) - .map(|(block_number, _tx_index, tx)| { - let block_hash = Byte32::from_slice( - &self - .get(Key::BlockNumber(block_number).into_vec()) - .expect("db get should be ok") - .expect("stored block number / hash mapping"), - ) - .expect("stored block hash should be OK"); - - let header = Header::from_slice( - &self - .get(Key::BlockHash(&block_hash).into_vec()) - .expect("db get should be ok") - .expect("stored block hash / header mapping")[..Header::TOTAL_SIZE], - ) - .expect("stored header should be OK"); - (tx, header) - }) - } -} - -impl CellProvider for Storage { - // assume all cells are live and load data eagerly - fn cell(&self, out_point: &OutPoint, _eager_load: bool) -> 
CellStatus { - if let Some((block_number, tx_index, tx)) = self.get_transaction(&out_point.tx_hash()) { - let block_hash = Byte32::from_slice( - &self - .get(Key::BlockNumber(block_number).into_vec()) - .expect("db get should be ok") - .expect("stored block number / hash mapping"), - ) - .expect("stored block hash should be OK"); - - let header = Header::from_slice( - &self - .get(Key::BlockHash(&block_hash).into_vec()) - .expect("db get should be ok") - .expect("stored block hash / header mapping")[..Header::TOTAL_SIZE], - ) - .expect("stored header should be OK") - .into_view(); - - let output_index = out_point.index().unpack(); - let tx = tx.into_view(); - if let Some(cell_output) = tx.outputs().get(output_index) { - let output_data = tx - .outputs_data() - .get(output_index) - .expect("output_data's index should be same as output") - .raw_data(); - let output_data_data_hash = CellOutput::calc_data_hash(&output_data); - let cell_meta = CellMeta { - out_point: out_point.clone(), - cell_output, - transaction_info: Some(TransactionInfo { - block_hash, - block_epoch: header.epoch(), - block_number, - index: tx_index as usize, - }), - data_bytes: output_data.len() as u64, - mem_cell_data: Some(output_data), - mem_cell_data_hash: Some(output_data_data_hash), - }; - return CellStatus::Live(cell_meta); - } - } - CellStatus::Unknown - } -} - -impl CellDataProvider for Storage { - // we load all cells data eagerly in Storage's CellProivder impl - fn get_cell_data(&self, _out_point: &OutPoint) -> Option { - unreachable!() - } - - fn get_cell_data_hash(&self, _out_point: &OutPoint) -> Option { - unreachable!() - } -} - -impl HeaderProvider for Storage { - fn get_header(&self, hash: &Byte32) -> Option { - self.get(Key::BlockHash(hash).into_vec()) - .map(|v| { - v.map(|v| { - Header::from_slice(&v[..Header::TOTAL_SIZE]) - .expect("stored Header") - .into_view() - }) - }) - .expect("db get should be ok") - } -} diff --git a/light-client-lib/src/storage/db/native_rocksdb.rs 
b/light-client-lib/src/storage/db/native_rocksdb.rs new file mode 100644 index 00000000..a7b0341f --- /dev/null +++ b/light-client-lib/src/storage/db/native_rocksdb.rs @@ -0,0 +1,670 @@ +use super::super::{ + extract_raw_data, parse_matched_blocks, BlockNumber, Byte32, CellIndex, CellType, CpIndex, Key, + KeyPrefix, MatchedBlock, MatchedBlocks, Script, FILTER_SCRIPTS_KEY, MATCHED_FILTER_BLOCKS_KEY, + MIN_FILTERED_BLOCK_NUMBER, +}; +use crate::{ + error::Result, + storage::{ + db::{ + GeneralDirection, StorageBatchRelatedOperations, StorageGeneralOperations, + StorageGetPinnedRelatedOperations, StorageHighLevelOperations, + }, + ScriptStatus, ScriptType, SetScriptsCommand, TxIndex, + }, +}; +use ckb_traits::{CellDataProvider, HeaderProvider}; +use ckb_types::{ + bytes::Bytes, + core::{ + cell::{CellMeta, CellProvider, CellStatus}, + HeaderView, TransactionInfo, + }, + packed::{self, CellOutput, Header, OutPoint}, + prelude::*, +}; +use rocksdb::{ + ops::{Delete, GetPinned}, + prelude::{Get, Iterate, Open, Put, WriteOps}, + DBPinnableSlice, Direction, IteratorMode, Options, Snapshot, WriteBatch, DB, +}; +use std::{path::Path, sync::Arc}; + +pub struct PinnedSlice<'a> { + inner: rocksdb::DBPinnableSlice<'a>, +} + +impl<'a> From> for PinnedSlice<'a> { + fn from(value: DBPinnableSlice<'a>) -> Self { + PinnedSlice { inner: value } + } +} + +impl AsRef<[u8]> for PinnedSlice<'_> { + fn as_ref(&self) -> &[u8] { + self.inner.as_ref() + } +} + +#[derive(Clone)] +pub struct Storage { + pub(crate) db: Arc, +} + +impl Storage { + pub fn new>(path: P) -> Self { + let mut opts = Options::default(); + opts.create_if_missing(true); + opts.set_max_total_wal_size(128 * 1024 * 1024); + opts.set_write_buffer_size(128 * 1024 * 1024); + opts.set_max_write_buffer_number(2); + let db = Arc::new(DB::open(&opts, path).expect("Failed to open rocksdb")); + Self { db } + } + + pub fn batch(&self) -> Batch { + Batch { + db: Arc::clone(&self.db), + wb: WriteBatch::default(), + } + } + + pub fn 
snapshot(&self) -> Snapshot<'_> { + self.db.snapshot() + } + + pub fn put(&self, key: K, value: V) -> Result<()> + where + K: AsRef<[u8]>, + V: AsRef<[u8]>, + { + self.db.put(key, value).map_err(Into::into) + } + + pub fn get>(&self, key: K) -> Result>> { + self.db + .get(key.as_ref()) + .map(|v| v.map(|vi| vi.to_vec())) + .map_err(Into::into) + } + + pub fn get_pinned(&self, key: K) -> Result>> + where + K: AsRef<[u8]>, + { + self.db + .get_pinned(key) + .map_err(Into::into) + .map(|a| a.map(Into::into)) + } + + pub fn delete>(&self, key: K) -> Result<()> { + self.db.delete(key).map_err(Into::into) + } +} +impl StorageHighLevelOperations for Storage { + fn get>(&self, key: K) -> Result>> { + self.get(key) + } + fn is_filter_scripts_empty(&self) -> bool { + let key_prefix = Key::Meta(FILTER_SCRIPTS_KEY).into_vec(); + let mode = IteratorMode::From(key_prefix.as_ref(), Direction::Forward); + self.db + .iterator(mode) + .take_while(|(key, _value)| key.starts_with(&key_prefix)) + .next() + .is_none() + } + + fn get_filter_scripts(&self) -> Vec { + let key_prefix = Key::Meta(FILTER_SCRIPTS_KEY).into_vec(); + let mode = IteratorMode::From(key_prefix.as_ref(), Direction::Forward); + + self.db + .iterator(mode) + .take_while(|(key, _value)| key.starts_with(&key_prefix)) + .map(|(key, value)| { + let script = Script::from_slice(&key[key_prefix.len()..key.len() - 1]) + .expect("stored Script"); + let script_type = match key[key.len() - 1] { + 0 => ScriptType::Lock, + 1 => ScriptType::Type, + _ => panic!("invalid script type"), + }; + let block_number = BlockNumber::from_be_bytes( + value.as_ref().try_into().expect("stored BlockNumber"), + ); + ScriptStatus { + script, + script_type, + block_number, + } + }) + .collect() + } + + fn update_filter_scripts(&self, scripts: Vec, command: SetScriptsCommand) { + let mut should_filter_genesis_block = false; + let mut batch = self.batch(); + let key_prefix = Key::Meta(FILTER_SCRIPTS_KEY).into_vec(); + + match command { + 
SetScriptsCommand::All => { + should_filter_genesis_block = scripts.iter().any(|ss| ss.block_number == 0); + let mode = IteratorMode::From(key_prefix.as_ref(), Direction::Forward); + + self.db + .iterator(mode) + .take_while(|(key, _value)| key.starts_with(&key_prefix)) + .for_each(|(key, _value)| { + batch.delete(key).expect("batch delete should be ok"); + }); + + for ss in scripts { + let key = [ + key_prefix.as_ref(), + ss.script.as_slice(), + match ss.script_type { + ScriptType::Lock => &[0], + ScriptType::Type => &[1], + }, + ] + .concat(); + batch + .put(key, ss.block_number.to_be_bytes()) + .expect("batch put should be ok"); + } + } + SetScriptsCommand::Partial => { + if scripts.is_empty() { + return; + } + let min_script_block_number = scripts.iter().map(|ss| ss.block_number).min(); + should_filter_genesis_block = min_script_block_number == Some(0); + + for ss in scripts { + let key = [ + key_prefix.as_ref(), + ss.script.as_slice(), + match ss.script_type { + ScriptType::Lock => &[0], + ScriptType::Type => &[1], + }, + ] + .concat(); + batch + .put(key, ss.block_number.to_be_bytes()) + .expect("batch put should be ok"); + } + } + SetScriptsCommand::Delete => { + if scripts.is_empty() { + return; + } + + for ss in scripts { + let key = [ + key_prefix.as_ref(), + ss.script.as_slice(), + match ss.script_type { + ScriptType::Lock => &[0], + ScriptType::Type => &[1], + }, + ] + .concat(); + batch.delete(key).expect("batch delete should be ok"); + } + } + } + + batch.commit().expect("batch commit should be ok"); + + self.update_min_filtered_block_number_by_scripts(); + self.clear_matched_blocks(); + + if should_filter_genesis_block { + let block = self.get_genesis_block(); + self.filter_block(block); + } + } + + fn update_min_filtered_block_number_by_scripts(&self) { + let key_prefix = Key::Meta(FILTER_SCRIPTS_KEY).into_vec(); + let mode = IteratorMode::From(key_prefix.as_ref(), Direction::Forward); + + let min_block_number = self + .db + .iterator(mode) + 
.take_while(|(key, _value)| key.starts_with(&key_prefix)) + .map(|(_key, value)| { + BlockNumber::from_be_bytes(value.as_ref().try_into().expect("stored BlockNumber")) + }) + .min(); + + if let Some(n) = min_block_number { + self.update_min_filtered_block_number(n); + } + } + + // get scripts hash that should be filtered below the given block number + fn get_scripts_hash(&self, block_number: BlockNumber) -> Vec { + let key_prefix = Key::Meta(FILTER_SCRIPTS_KEY).into_vec(); + let mode = IteratorMode::From(key_prefix.as_ref(), Direction::Forward); + + self.db + .iterator(mode) + .take_while(|(key, _value)| key.starts_with(&key_prefix)) + .filter_map(|(key, value)| { + let stored_block_number = BlockNumber::from_be_bytes( + value.as_ref().try_into().expect("stored BlockNumber"), + ); + if stored_block_number < block_number { + let script = Script::from_slice(&key[key_prefix.len()..key.len() - 1]) + .expect("stored Script"); + Some(script.calc_script_hash()) + } else { + None + } + }) + .collect() + } + + fn clear_matched_blocks(&self) { + let key_prefix = Key::Meta(MATCHED_FILTER_BLOCKS_KEY).into_vec(); + let mode = IteratorMode::From(key_prefix.as_ref(), Direction::Forward); + let mut batch = self.batch(); + for (key, _) in self + .db + .iterator(mode) + .take_while(|(key, _value)| key.starts_with(&key_prefix)) + { + batch.delete(key).expect("batch delete should be ok"); + } + batch.commit().expect("batch commit should be ok"); + } + + fn get_matched_blocks(&self, direction: GeneralDirection) -> Option { + let key_prefix = Key::Meta(MATCHED_FILTER_BLOCKS_KEY).into_vec(); + let iter_from = match direction { + GeneralDirection::Forward => key_prefix.clone(), + GeneralDirection::Reverse => { + let mut key = key_prefix.clone(); + key.extend(u64::MAX.to_be_bytes()); + key + } + }; + let mode = IteratorMode::From( + iter_from.as_ref(), + match direction { + GeneralDirection::Forward => Direction::Forward, + GeneralDirection::Reverse => Direction::Reverse, + }, + ); + 
self.db + .iterator(mode) + .take_while(|(key, _value)| key.starts_with(&key_prefix)) + .map(|(key, value)| { + let mut u64_bytes = [0u8; 8]; + u64_bytes.copy_from_slice(&key[key_prefix.len()..]); + let start_number = u64::from_be_bytes(u64_bytes); + let (blocks_count, raw_blocks) = parse_matched_blocks(&value); + let blocks = raw_blocks + .into_iter() + .map(|(hash, proved)| MatchedBlock { hash, proved }) + .collect(); + MatchedBlocks { + start_number, + blocks_count, + blocks, + } + }) + .next() + } + + fn get_earliest_matched_blocks(&self) -> Option { + self.get_matched_blocks(GeneralDirection::Forward) + } + + fn get_latest_matched_blocks(&self) -> Option { + self.get_matched_blocks(GeneralDirection::Reverse) + } + + fn get_check_points(&self, start_index: CpIndex, limit: usize) -> Vec { + let start_key = Key::CheckPointIndex(start_index).into_vec(); + let key_prefix = [KeyPrefix::CheckPointIndex as u8]; + let mode = IteratorMode::From(start_key.as_ref(), Direction::Forward); + self.db + .iterator(mode) + .take_while(|(key, _value)| key.starts_with(&key_prefix)) + .take(limit) + .map(|(_key, value)| Byte32::from_slice(&value).expect("stored block filter hash")) + .collect() + } + + fn update_block_number(&self, block_number: BlockNumber) { + let key_prefix = Key::Meta(FILTER_SCRIPTS_KEY).into_vec(); + let mode = IteratorMode::From(key_prefix.as_ref(), Direction::Forward); + + let mut batch = self.batch(); + self.db + .iterator(mode) + .take_while(|(key, _value)| key.starts_with(&key_prefix)) + .for_each(|(key, value)| { + let stored_block_number = BlockNumber::from_be_bytes( + value.as_ref().try_into().expect("stored BlockNumber"), + ); + if stored_block_number < block_number { + batch + .put(key, block_number.to_be_bytes()) + .expect("batch put should be ok") + } + }); + batch.commit().expect("batch commit should be ok"); + } + + /// Rollback filtered block data to specified block number + /// + /// N.B. The specified block will be removed. 
+ fn rollback_to_block(&self, to_number: BlockNumber) { + let scripts = self.get_filter_scripts(); + let mut batch = self.batch(); + + for ss in scripts { + if ss.block_number >= to_number { + let script = ss.script; + let mut key_prefix = vec![match ss.script_type { + ScriptType::Lock => KeyPrefix::TxLockScript as u8, + ScriptType::Type => KeyPrefix::TxTypeScript as u8, + }]; + key_prefix.extend_from_slice(&extract_raw_data(&script)); + let mut start_key = key_prefix.clone(); + start_key.extend_from_slice(BlockNumber::MAX.to_be_bytes().as_ref()); + let mode = IteratorMode::From(start_key.as_ref(), Direction::Reverse); + let key_prefix_len = key_prefix.len(); + + self.db + .iterator(mode) + .take_while(|(key, _value)| { + key.starts_with(&key_prefix) + && BlockNumber::from_be_bytes( + key[key_prefix_len..key_prefix_len + 8] + .try_into() + .expect("stored BlockNumber"), + ) >= to_number + }) + .for_each(|(key, value)| { + let block_number = BlockNumber::from_be_bytes( + key[key_prefix_len..key_prefix_len + 8] + .try_into() + .expect("stored BlockNumber"), + ); + log::debug!("rollback {}", block_number); + let tx_index = TxIndex::from_be_bytes( + key[key_prefix_len + 8..key_prefix_len + 12] + .try_into() + .expect("stored TxIndex"), + ); + let cell_index = CellIndex::from_be_bytes( + key[key_prefix_len + 12..key_prefix_len + 16] + .try_into() + .expect("stored CellIndex"), + ); + let tx_hash = + packed::Byte32Reader::from_slice_should_be_ok(&value).to_entity(); + if key[key_prefix_len + 16] == 0 { + let (_, _, tx) = self + .get_transaction(&tx_hash) + .expect("stored transaction history"); + let input = tx.raw().inputs().get(cell_index as usize).unwrap(); + if let Some(( + generated_by_block_number, + generated_by_tx_index, + _previous_tx, + )) = self.get_transaction(&input.previous_output().tx_hash()) + { + let key = match ss.script_type { + ScriptType::Lock => Key::CellLockScript( + &script, + generated_by_block_number, + generated_by_tx_index, + 
input.previous_output().index().unpack(), + ), + ScriptType::Type => Key::CellTypeScript( + &script, + generated_by_block_number, + generated_by_tx_index, + input.previous_output().index().unpack(), + ), + }; + batch + .put_kv(key, input.previous_output().tx_hash().as_slice()) + .expect("batch put should be ok"); + }; + // delete tx history + let key = match ss.script_type { + ScriptType::Lock => Key::TxLockScript( + &script, + block_number, + tx_index, + cell_index, + CellType::Input, + ), + ScriptType::Type => Key::TxTypeScript( + &script, + block_number, + tx_index, + cell_index, + CellType::Input, + ), + } + .into_vec(); + batch.delete(key).expect("batch delete should be ok"); + } else { + // delete utxo + let key = match ss.script_type { + ScriptType::Lock => { + Key::CellLockScript(&script, block_number, tx_index, cell_index) + } + ScriptType::Type => { + Key::CellTypeScript(&script, block_number, tx_index, cell_index) + } + } + .into_vec(); + batch.delete(key).expect("batch delete should be ok"); + + // delete tx history + let key = match ss.script_type { + ScriptType::Lock => Key::TxLockScript( + &script, + block_number, + tx_index, + cell_index, + CellType::Output, + ), + ScriptType::Type => Key::TxTypeScript( + &script, + block_number, + tx_index, + cell_index, + CellType::Output, + ), + } + .into_vec(); + batch.delete(key).expect("batch delete should be ok"); + }; + }); + + // update script filter block number + { + let mut key = Key::Meta(FILTER_SCRIPTS_KEY).into_vec(); + key.extend_from_slice(script.as_slice()); + key.extend_from_slice(match ss.script_type { + ScriptType::Lock => &[0], + ScriptType::Type => &[1], + }); + let value = to_number.to_be_bytes().to_vec(); + batch.put(key, value).expect("batch put should be ok"); + } + } + } + + // we should also sync block filters again + if self.get_min_filtered_block_number() >= to_number { + batch + .put( + Key::Meta(MIN_FILTERED_BLOCK_NUMBER).into_vec(), + to_number.saturating_sub(1).to_le_bytes(), + ) + 
.expect("batch put should be ok"); + } + + batch.commit().expect("batch commit should be ok"); + } + + fn collect_iterator( + &self, + start_key_bound: Vec, + order: GeneralDirection, + take_while: Box bool + Send + 'static>, + filter_map: Box Option> + Send + 'static>, + limit: usize, + skip: usize, + ) -> Vec<(Vec, Vec)> { + self.db + .snapshot() + .iterator(IteratorMode::From( + &start_key_bound, + match order { + GeneralDirection::Forward => Direction::Forward, + GeneralDirection::Reverse => Direction::Reverse, + }, + )) + .take_while(|(key, _)| take_while(key)) + .filter_map(|(key, value)| filter_map(&key).map(|v| (v.into_boxed_slice(), value))) + .skip(skip) + .take(limit) + .map(|(key, value)| (key.to_vec(), value.to_vec())) + .collect() + } + + fn cell(&self, out_point: &OutPoint, eager_load: bool) -> CellStatus { + CellProvider::cell(self, out_point, eager_load) + } + + fn put(&self, key: K, value: V) -> Result<()> + where + K: AsRef<[u8]>, + V: AsRef<[u8]>, + { + self.put(key, value) + } + + fn delete>(&self, key: K) -> Result<()> { + self.delete(key) + } + + fn get_header(&self, hash: &Byte32) -> Option { + HeaderProvider::get_header(self, hash) + } +} + +pub struct Batch { + db: Arc, + wb: WriteBatch, +} + +impl Batch { + pub fn put_kv>, V: Into>>(&mut self, key: K, value: V) -> Result<()> { + self.put(Into::>::into(key), Into::>::into(value)) + } + + pub fn put, V: AsRef<[u8]>>(&mut self, key: K, value: V) -> Result<()> { + self.wb.put(key, value)?; + Ok(()) + } + + pub fn delete>(&mut self, key: K) -> Result<()> { + self.wb.delete(key.as_ref())?; + Ok(()) + } + + pub fn commit(self) -> Result<()> { + self.db.write(&self.wb)?; + Ok(()) + } +} + +impl CellProvider for Storage { + // assume all cells are live and load data eagerly + fn cell(&self, out_point: &OutPoint, _eager_load: bool) -> CellStatus { + if let Some((block_number, tx_index, tx)) = self.get_transaction(&out_point.tx_hash()) { + let block_hash = Byte32::from_slice( + &self + 
.get(Key::BlockNumber(block_number).into_vec()) + .expect("db get should be ok") + .expect("stored block number / hash mapping"), + ) + .expect("stored block hash should be OK"); + + let header = Header::from_slice( + &self + .get(Key::BlockHash(&block_hash).into_vec()) + .expect("db get should be ok") + .expect("stored block hash / header mapping")[..Header::TOTAL_SIZE], + ) + .expect("stored header should be OK") + .into_view(); + + let output_index = out_point.index().unpack(); + let tx = tx.into_view(); + if let Some(cell_output) = tx.outputs().get(output_index) { + let output_data = tx + .outputs_data() + .get(output_index) + .expect("output_data's index should be same as output") + .raw_data(); + let output_data_data_hash = CellOutput::calc_data_hash(&output_data); + let cell_meta = CellMeta { + out_point: out_point.clone(), + cell_output, + transaction_info: Some(TransactionInfo { + block_hash, + block_epoch: header.epoch(), + block_number, + index: tx_index as usize, + }), + data_bytes: output_data.len() as u64, + mem_cell_data: Some(output_data), + mem_cell_data_hash: Some(output_data_data_hash), + }; + return CellStatus::Live(cell_meta); + } + } + CellStatus::Unknown + } +} + +impl CellDataProvider for Storage { + // we load all cells data eagerly in Storage's CellProivder impl + fn get_cell_data(&self, _out_point: &OutPoint) -> Option { + unreachable!() + } + + fn get_cell_data_hash(&self, _out_point: &OutPoint) -> Option { + unreachable!() + } +} + +impl HeaderProvider for Storage { + fn get_header(&self, hash: &Byte32) -> Option { + self.get(Key::BlockHash(hash).into_vec()) + .map(|v| { + v.map(|v| { + Header::from_slice(&v[..Header::TOTAL_SIZE]) + .expect("stored Header") + .into_view() + }) + }) + .expect("db get should be ok") + } +} diff --git a/light-client-lib/src/storage/db/native_rusqlite.rs b/light-client-lib/src/storage/db/native_rusqlite.rs new file mode 100644 index 00000000..d946cc0c --- /dev/null +++ 
b/light-client-lib/src/storage/db/native_rusqlite.rs @@ -0,0 +1,927 @@
//! SQLite-backed storage for the native light client, mirroring the RocksDB
//! implementation on top of a single `data(key BLOB PRIMARY KEY, value BLOB)`
//! table. Range iteration is emulated with ordered `key >= ?` / `key <= ?`
//! scans; block numbers inside keys/values are big-endian so BLOB ordering
//! matches numeric ordering.

use super::super::{
    extract_raw_data, parse_matched_blocks, BlockNumber, Byte32, CellIndex, CellType, CpIndex, Key,
    KeyPrefix, MatchedBlock, MatchedBlocks, Script, FILTER_SCRIPTS_KEY, MATCHED_FILTER_BLOCKS_KEY,
    MIN_FILTERED_BLOCK_NUMBER,
};
use crate::storage::db::StorageBatchRelatedOperations;
use crate::storage::db::StorageGetPinnedRelatedOperations;
use crate::{
    error::Result,
    storage::{
        db::{GeneralDirection, StorageGeneralOperations, StorageHighLevelOperations},
        ScriptStatus, ScriptType, SetScriptsCommand, TxIndex,
    },
};
use ckb_traits::{CellDataProvider, HeaderProvider};
use ckb_types::{
    bytes::Bytes,
    core::{
        cell::{CellMeta, CellProvider, CellStatus},
        HeaderView, TransactionInfo,
    },
    packed::{self, CellOutput, Header, OutPoint},
    prelude::*,
};

use parking_lot::ReentrantMutex;
use rusqlite::{params, Connection};
use std::{cell::RefCell, path::Path, sync::Arc};

/// Handle to the SQLite connection. `ReentrantMutex` serializes access across
/// threads while allowing the same thread to re-enter (several high-level
/// operations call back into `collect_iterator`).
#[derive(Clone)]
pub struct Storage {
    pub(crate) conn: Arc<ReentrantMutex<RefCell<Connection>>>,
}

/// One row of the `data` table, both columns copied out.
pub struct KV {
    pub key: Vec<u8>,
    pub value: Vec<u8>,
}

/// Scan order for `collect_iterator`.
#[derive(Clone, Copy)]
pub enum CursorDirection {
    Ascending,
    Descending,
}

/// Identity row mapper for `collect_iterator`: keeps every row, copying both
/// columns. Shared by all callers that do their filtering after the scan.
fn kv_clone(key: &[u8], value: &[u8]) -> Option<KV> {
    Some(KV {
        key: key.to_vec(),
        value: value.to_vec(),
    })
}

/// Builds the meta key under which a script's filter status is stored:
/// `key_prefix ++ serialized script ++ one type byte (0 = lock, 1 = type)`.
fn filter_script_key(key_prefix: &[u8], script: &Script, script_type: &ScriptType) -> Vec<u8> {
    let type_byte: &[u8] = match script_type {
        ScriptType::Lock => &[0],
        ScriptType::Type => &[1],
    };
    [key_prefix, script.as_slice(), type_byte].concat()
}

impl Storage {
    /// Opens (creating if missing) `light-client.db` under `raw_path` and
    /// ensures the `data` table exists.
    ///
    /// # Panics
    /// Panics if the directory or database cannot be created/opened.
    pub fn new<P: AsRef<Path>>(raw_path: P) -> Self {
        if !raw_path.as_ref().exists() {
            std::fs::create_dir_all(raw_path.as_ref())
                .expect("Unable to create directory for database");
        }
        let path = raw_path.as_ref().join("light-client.db");

        let conn = Connection::open(path).expect("Unable to open database");
        conn.execute_batch(
            r"
            CREATE TABLE IF NOT EXISTS data (
                key BLOB PRIMARY KEY,
                value BLOB
            );
            ",
        )
        .expect("Unable to initialize database and create table");
        Self {
            conn: Arc::new(ReentrantMutex::new(RefCell::new(conn))),
        }
    }

    /// Ordered scan starting at `start_key_bound` (inclusive): keep rows while
    /// `take_while(key)` holds, map rows through `filter_map`, then apply
    /// `skip` and `limit` to the mapped rows.
    pub fn collect_iterator(
        &self,
        start_key_bound: &[u8],
        order: CursorDirection,
        take_while: impl Fn(&[u8]) -> bool,
        filter_map: impl Fn(&[u8], &[u8]) -> Option<KV>,
        limit: usize,
        skip: usize,
    ) -> rusqlite::Result<Vec<KV>> {
        let lock_guard = self.conn.lock();
        let conn = lock_guard.borrow();
        let sql = match order {
            CursorDirection::Ascending => {
                "SELECT key, value FROM data WHERE key >= ?1 ORDER BY key ASC"
            }
            CursorDirection::Descending => {
                "SELECT key, value FROM data WHERE key <= ?1 ORDER BY key DESC"
            }
        };

        let mut stmt = conn.prepare(sql)?;
        let mut rows = stmt.query(params![start_key_bound])?;

        let mut results = Vec::new();
        let mut skipped = 0;

        while let Some(row) = rows.next()? {
            let key: Vec<u8> = row.get(0)?;
            let value: Vec<u8> = row.get(1)?;

            // `take_while` failing ends the scan, mirroring the RocksDB
            // iterator's prefix-bounded behaviour.
            if !take_while(&key) {
                break;
            }

            if let Some(kv) = filter_map(&key, &value) {
                // skip/limit count mapped rows only, like Iterator::skip/take.
                if skipped < skip {
                    skipped += 1;
                    continue;
                }

                results.push(kv);

                if results.len() >= limit {
                    break;
                }
            }
        }

        Ok(results)
    }

    /// Starts a new batch; puts and (staged) deletes apply at `commit`.
    pub fn batch(&self) -> Batch {
        Batch {
            db: self.conn.clone(),
            add: vec![],
            delete: vec![],
        }
    }

    /// Inserts or replaces a single key/value pair.
    pub fn put<K, V>(&self, key: K, value: V) -> Result<()>
    where
        K: AsRef<[u8]>,
        V: AsRef<[u8]>,
    {
        let lock_guard = self.conn.lock();
        let guard = lock_guard.borrow();
        // Single atomic upsert. (The original issued SELECT COUNT(*) and then
        // INSERT or UPDATE inside an explicit transaction — three round trips
        // for one write.)
        guard.execute(
            "INSERT INTO data (key, value) VALUES (?1, ?2) \
             ON CONFLICT(key) DO UPDATE SET value = excluded.value",
            [key.as_ref().to_vec(), value.as_ref().to_vec()],
        )?;
        Ok(())
    }

    /// Reads a value; `Ok(None)` when the key is absent.
    pub fn get<K: AsRef<[u8]>>(&self, key: K) -> Result<Option<Vec<u8>>> {
        let lock_guard = self.conn.lock();
        let guard = lock_guard.borrow();
        match guard.query_one(
            "SELECT value FROM data WHERE key = ?1",
            [key.as_ref().to_vec()],
            |row| row.get::<_, Vec<u8>>(0),
        ) {
            Ok(value) => Ok(Some(value)),
            Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None),
            Err(e) => Err(e.into()),
        }
    }

    /// SQLite has no pinned-slice concept; this is a plain owned `get`.
    #[allow(clippy::needless_lifetimes)]
    pub fn get_pinned<K>(&self, key: K) -> Result<Option<Vec<u8>>>
    where
        K: AsRef<[u8]>,
    {
        self.get(key)
    }

    /// Deletes a single key (no-op if absent).
    pub fn delete<K: AsRef<[u8]>>(&self, key: K) -> Result<()> {
        let lock_guard = self.conn.lock();
        let guard = lock_guard.borrow();
        guard.execute("DELETE FROM data WHERE key = ?1", [key.as_ref().to_vec()])?;
        Ok(())
    }
}

impl StorageHighLevelOperations for Storage {
    fn get_header(&self, hash: &Byte32) -> Option<HeaderView> {
        HeaderProvider::get_header(self, hash)
    }

    fn put<K, V>(&self, key: K, value: V) -> Result<()>
    where
        K: AsRef<[u8]>,
        V: AsRef<[u8]>,
    {
        // Inherent method takes precedence over the trait method, so this
        // delegates rather than recursing.
        self.put(key, value)
    }

    fn delete<K: AsRef<[u8]>>(&self, key: K) -> Result<()> {
        self.delete(key)
    }

    fn collect_iterator(
        &self,
        start_key_bound: Vec<u8>,
        order: GeneralDirection,
        take_while: Box<dyn Fn(&[u8]) -> bool + Send + 'static>,
        filter_map: Box<dyn Fn(&[u8]) -> Option<Vec<u8>> + Send + 'static>,
        limit: usize,
        skip: usize,
    ) -> Vec<(Vec<u8>, Vec<u8>)> {
        self.collect_iterator(
            &start_key_bound,
            match order {
                GeneralDirection::Forward => CursorDirection::Ascending,
                GeneralDirection::Reverse => CursorDirection::Descending,
            },
            take_while,
            |key, value| {
                filter_map(key).map(|key| KV {
                    key,
                    value: value.to_vec(),
                })
            },
            limit,
            skip,
        )
        .expect("db iteration should be ok")
        .into_iter()
        .map(|kv| (kv.key, kv.value))
        .collect()
    }

    fn get<K: AsRef<[u8]>>(&self, key: K) -> Result<Option<Vec<u8>>> {
        self.get(key)
    }

    /// True when no script is registered for filtering.
    fn is_filter_scripts_empty(&self) -> bool {
        let key_prefix = Key::Meta(FILTER_SCRIPTS_KEY).into_vec();
        self.collect_iterator(
            &key_prefix,
            CursorDirection::Ascending,
            |key| key.starts_with(&key_prefix),
            kv_clone,
            1,
            0,
        )
        .expect("db iteration should be ok")
        .is_empty()
    }

    /// Returns every registered script with its filtered-up-to block number.
    fn get_filter_scripts(&self) -> Vec<ScriptStatus> {
        let key_prefix = Key::Meta(FILTER_SCRIPTS_KEY).into_vec();

        self.collect_iterator(
            &key_prefix,
            CursorDirection::Ascending,
            |key| key.starts_with(&key_prefix),
            kv_clone,
            usize::MAX,
            0,
        )
        .expect("db iteration should be ok")
        .into_iter()
        .map(|kv| (kv.key, kv.value))
        .map(|(key, value)| {
            // Key layout: prefix ++ serialized Script ++ 1-byte script type.
            let script =
                Script::from_slice(&key[key_prefix.len()..key.len() - 1]).expect("stored Script");
            let script_type = match key[key.len() - 1] {
                0 => ScriptType::Lock,
                1 => ScriptType::Type,
                _ => panic!("invalid script type"),
            };
            let block_number = BlockNumber::from_be_bytes(
                AsRef::<[u8]>::as_ref(&value)
                    .try_into()
                    .expect("stored BlockNumber"),
            );
            ScriptStatus {
                script,
                script_type,
                block_number,
            }
        })
        .collect()
    }

    /// Replaces, merges or removes filter scripts according to `command`.
    ///
    /// After the script set changes, the minimum filtered block number is
    /// recomputed, pending matched blocks are discarded, and the genesis
    /// block is re-filtered if any script starts from block 0.
    fn update_filter_scripts(&self, scripts: Vec<ScriptStatus>, command: SetScriptsCommand) {
        let mut should_filter_genesis_block = false;
        let mut batch = self.batch();
        let key_prefix = Key::Meta(FILTER_SCRIPTS_KEY).into_vec();

        match command {
            SetScriptsCommand::All => {
                should_filter_genesis_block = scripts.iter().any(|ss| ss.block_number == 0);

                let remove_keys = self
                    .collect_iterator(
                        &key_prefix,
                        CursorDirection::Ascending,
                        |x| x.starts_with(&key_prefix),
                        kv_clone,
                        usize::MAX,
                        0,
                    )
                    .expect("db iteration should be ok")
                    .into_iter()
                    .map(|x| x.key)
                    .collect();

                // Applied immediately, BEFORE the puts staged below — see
                // `Batch::delete_many`.
                batch.delete_many(remove_keys).expect("delete_many should be ok");

                for ss in scripts {
                    let key = filter_script_key(&key_prefix, &ss.script, &ss.script_type);
                    batch
                        .put(key, ss.block_number.to_be_bytes())
                        .expect("batch put should be ok");
                }
            }
            SetScriptsCommand::Partial => {
                if scripts.is_empty() {
                    // Nothing to merge: deliberately skip the post-update
                    // bookkeeping below as well.
                    return;
                }
                let min_script_block_number = scripts.iter().map(|ss| ss.block_number).min();
                should_filter_genesis_block = min_script_block_number == Some(0);

                for ss in scripts {
                    let key = filter_script_key(&key_prefix, &ss.script, &ss.script_type);
                    batch
                        .put(key, ss.block_number.to_be_bytes())
                        .expect("batch put should be ok");
                }
            }
            SetScriptsCommand::Delete => {
                if scripts.is_empty() {
                    return;
                }

                for ss in scripts {
                    let key = filter_script_key(&key_prefix, &ss.script, &ss.script_type);
                    batch.delete(key).expect("batch delete should be ok");
                }
            }
        }

        batch.commit().expect("batch commit should be ok");

        self.update_min_filtered_block_number_by_scripts();
        self.clear_matched_blocks();

        if should_filter_genesis_block {
            let block = self.get_genesis_block();
            self.filter_block(block);
        }
    }

    /// Sets the minimum filtered block number to the smallest per-script
    /// filtered block number, if any script is registered.
    fn update_min_filtered_block_number_by_scripts(&self) {
        let key_prefix = Key::Meta(FILTER_SCRIPTS_KEY).into_vec();

        let value = self
            .collect_iterator(
                &key_prefix,
                CursorDirection::Ascending,
                |x| x.starts_with(&key_prefix),
                kv_clone,
                usize::MAX,
                0,
            )
            .expect("db iteration should be ok");

        let min_block_number = value
            .into_iter()
            .map(|kv| kv.value)
            .map(|value| {
                BlockNumber::from_be_bytes(
                    AsRef::<[u8]>::as_ref(&value)
                        .try_into()
                        .expect("stored BlockNumber"),
                )
            })
            .min();

        if let Some(n) = min_block_number {
            self.update_min_filtered_block_number(n);
        }
    }

    // get scripts hash that should be filtered below the given block number
    fn get_scripts_hash(&self, block_number: BlockNumber) -> Vec<Byte32> {
        let key_prefix = Key::Meta(FILTER_SCRIPTS_KEY).into_vec();

        let value = self
            .collect_iterator(
                &key_prefix,
                CursorDirection::Ascending,
                |x| x.starts_with(&key_prefix),
                kv_clone,
                usize::MAX,
                0,
            )
            .expect("db iteration should be ok");

        value
            .into_iter()
            .map(|kv| (kv.key, kv.value))
            .filter_map(|(key, value)| {
                let stored_block_number = BlockNumber::from_be_bytes(
                    AsRef::<[u8]>::as_ref(&value)
                        .try_into()
                        .expect("stored BlockNumber"),
                );
                if stored_block_number < block_number {
                    let script = Script::from_slice(&key[key_prefix.len()..key.len() - 1])
                        .expect("stored Script");
                    Some(script.calc_script_hash())
                } else {
                    None
                }
            })
            .collect()
    }

    /// Removes every pending matched-blocks entry.
    fn clear_matched_blocks(&self) {
        let key_prefix: Vec<u8> = Key::Meta(MATCHED_FILTER_BLOCKS_KEY).into_vec();

        let mut batch = self.batch();

        let value = self
            .collect_iterator(
                &key_prefix,
                CursorDirection::Ascending,
                |x| x.starts_with(&key_prefix),
                kv_clone,
                usize::MAX,
                0,
            )
            .expect("db iteration should be ok")
            .into_iter()
            .map(|x| x.key)
            .collect();
        batch.delete_many(value).expect("delete_many should be ok");
        batch.commit().expect("batch commit should be ok");
    }

    /// Returns the first matched-blocks entry in the given direction, if any.
    fn get_matched_blocks(&self, direction: GeneralDirection) -> Option<MatchedBlocks> {
        let key_prefix = Key::Meta(MATCHED_FILTER_BLOCKS_KEY).into_vec();
        // Reverse iteration needs a start key past every real entry, hence
        // the u64::MAX suffix.
        let iter_from = match direction {
            GeneralDirection::Forward => key_prefix.clone(),
            GeneralDirection::Reverse => {
                let mut key = key_prefix.clone();
                key.extend(u64::MAX.to_be_bytes());
                key
            }
        };

        let value = self
            .collect_iterator(
                &iter_from,
                match direction {
                    GeneralDirection::Forward => CursorDirection::Ascending,
                    GeneralDirection::Reverse => CursorDirection::Descending,
                },
                |x| x.starts_with(&key_prefix),
                kv_clone,
                1,
                0,
            )
            .expect("db iteration should be ok");

        value
            .into_iter()
            .map(|kv| (kv.key, kv.value))
            .map(|(key, value)| {
                let mut u64_bytes = [0u8; 8];
                u64_bytes.copy_from_slice(&key[key_prefix.len()..]);
                let start_number = u64::from_be_bytes(u64_bytes);
                let (blocks_count, raw_blocks) = parse_matched_blocks(&value);
                let blocks = raw_blocks
                    .into_iter()
                    .map(|(hash, proved)| MatchedBlock { hash, proved })
                    .collect();
                MatchedBlocks {
                    start_number,
                    blocks_count,
                    blocks,
                }
            })
            .next()
    }

    fn get_earliest_matched_blocks(&self) -> Option<MatchedBlocks> {
        self.get_matched_blocks(GeneralDirection::Forward)
    }

    fn get_latest_matched_blocks(&self) -> Option<MatchedBlocks> {
        self.get_matched_blocks(GeneralDirection::Reverse)
    }

    /// Returns up to `limit` block-filter hashes starting at `start_index`.
    fn get_check_points(&self, start_index: CpIndex, limit: usize) -> Vec<Byte32> {
        let start_key = Key::CheckPointIndex(start_index).into_vec();
        let key_prefix = [KeyPrefix::CheckPointIndex as u8];

        let value = self
            .collect_iterator(
                &start_key,
                CursorDirection::Ascending,
                |x| x.starts_with(&key_prefix),
                kv_clone,
                limit,
                0,
            )
            .expect("db iteration should be ok");

        value
            .into_iter()
            .map(|kv| kv.value)
            .map(|value| Byte32::from_slice(&value).expect("stored block filter hash"))
            .collect()
    }

    /// Advances every script whose filtered block number lags `block_number`.
    fn update_block_number(&self, block_number: BlockNumber) {
        let key_prefix = Key::Meta(FILTER_SCRIPTS_KEY).into_vec();
        let mut batch = self.batch();

        let value = self
            .collect_iterator(
                &key_prefix,
                CursorDirection::Ascending,
                |x| x.starts_with(&key_prefix),
                kv_clone,
                usize::MAX,
                0,
            )
            .expect("db iteration should be ok");

        value
            .into_iter()
            .map(|kv| (kv.key, kv.value))
            .for_each(|(key, value)| {
                let stored_block_number = BlockNumber::from_be_bytes(
                    AsRef::<[u8]>::as_ref(&value)
                        .try_into()
                        .expect("stored BlockNumber"),
                );
                if stored_block_number < block_number {
                    batch
                        .put(key, block_number.to_be_bytes())
                        .expect("batch put should be ok")
                }
            });
        batch.commit().expect("batch commit should be ok");
    }

    /// Rollback filtered block data to specified block number
    ///
    /// N.B. The specified block will be removed.
    fn rollback_to_block(&self, to_number: BlockNumber) {
        let scripts = self.get_filter_scripts();
        let mut batch = self.batch();

        for ss in scripts {
            if ss.block_number >= to_number {
                let script = ss.script;
                let mut key_prefix = vec![match ss.script_type {
                    ScriptType::Lock => KeyPrefix::TxLockScript as u8,
                    ScriptType::Type => KeyPrefix::TxTypeScript as u8,
                }];
                key_prefix.extend_from_slice(&extract_raw_data(&script));
                let mut start_key = key_prefix.clone();
                start_key.extend_from_slice(BlockNumber::MAX.to_be_bytes().as_ref());
                let key_prefix_len = key_prefix.len();

                // Walk this script's tx-history keys newest-first, stopping
                // once we reach entries older than `to_number`.
                // Key layout after the prefix: 8-byte block number, 4-byte tx
                // index, 4-byte cell index, 1-byte cell type (0 = input).
                let rows = self
                    .collect_iterator(
                        &start_key,
                        CursorDirection::Descending,
                        |raw_key: &[u8]| {
                            raw_key.starts_with(&key_prefix)
                                && BlockNumber::from_be_bytes(
                                    raw_key[key_prefix_len..key_prefix_len + 8]
                                        .try_into()
                                        .expect("stored BlockNumber"),
                                ) >= to_number
                        },
                        kv_clone,
                        usize::MAX,
                        0,
                    )
                    .expect("db iteration should be ok");

                for (key, value) in rows.into_iter().map(|kv| (kv.key, kv.value)) {
                    let block_number = BlockNumber::from_be_bytes(
                        key[key_prefix_len..key_prefix_len + 8]
                            .try_into()
                            .expect("stored BlockNumber"),
                    );
                    log::debug!("rollback {}", block_number);
                    let tx_index = TxIndex::from_be_bytes(
                        key[key_prefix_len + 8..key_prefix_len + 12]
                            .try_into()
                            .expect("stored TxIndex"),
                    );
                    let cell_index = CellIndex::from_be_bytes(
                        key[key_prefix_len + 12..key_prefix_len + 16]
                            .try_into()
                            .expect("stored CellIndex"),
                    );
                    let tx_hash = packed::Byte32Reader::from_slice_should_be_ok(&value).to_entity();
                    if key[key_prefix_len + 16] == 0 {
                        // The rolled-back tx consumed a cell: restore that
                        // cell (if its creating tx is known), then drop the
                        // input history entry.
                        let (_, _, tx) = self
                            .get_transaction(&tx_hash)
                            .expect("stored transaction history");
                        let input = tx.raw().inputs().get(cell_index as usize).unwrap();
                        if let Some((
                            generated_by_block_number,
                            generated_by_tx_index,
                            _previous_tx,
                        )) = self.get_transaction(&input.previous_output().tx_hash())
                        {
                            let key = match ss.script_type {
                                ScriptType::Lock => Key::CellLockScript(
                                    &script,
                                    generated_by_block_number,
                                    generated_by_tx_index,
                                    input.previous_output().index().unpack(),
                                ),
                                ScriptType::Type => Key::CellTypeScript(
                                    &script,
                                    generated_by_block_number,
                                    generated_by_tx_index,
                                    input.previous_output().index().unpack(),
                                ),
                            };
                            batch
                                .put_kv(key, input.previous_output().tx_hash().as_slice())
                                .expect("batch put should be ok");
                        };
                        // delete tx history
                        let key = match ss.script_type {
                            ScriptType::Lock => Key::TxLockScript(
                                &script,
                                block_number,
                                tx_index,
                                cell_index,
                                CellType::Input,
                            ),
                            ScriptType::Type => Key::TxTypeScript(
                                &script,
                                block_number,
                                tx_index,
                                cell_index,
                                CellType::Input,
                            ),
                        }
                        .into_vec();
                        batch.delete(key).expect("batch delete should be ok");
                    } else {
                        // delete utxo
                        let key = match ss.script_type {
                            ScriptType::Lock => {
                                Key::CellLockScript(&script, block_number, tx_index, cell_index)
                            }
                            ScriptType::Type => {
                                Key::CellTypeScript(&script, block_number, tx_index, cell_index)
                            }
                        }
                        .into_vec();
                        batch.delete(key).expect("batch delete should be ok");

                        // delete tx history
                        let key = match ss.script_type {
                            ScriptType::Lock => Key::TxLockScript(
                                &script,
                                block_number,
                                tx_index,
                                cell_index,
                                CellType::Output,
                            ),
                            ScriptType::Type => Key::TxTypeScript(
                                &script,
                                block_number,
                                tx_index,
                                cell_index,
                                CellType::Output,
                            ),
                        }
                        .into_vec();
                        batch.delete(key).expect("batch delete should be ok");
                    };
                }

                // update script filter block number
                {
                    let key = filter_script_key(
                        &Key::Meta(FILTER_SCRIPTS_KEY).into_vec(),
                        &script,
                        &ss.script_type,
                    );
                    let value = to_number.to_be_bytes().to_vec();
                    batch.put(key, value).expect("batch put should be ok");
                }
            }
        }
        // we should also sync block filters again
        if self.get_min_filtered_block_number() >= to_number {
            // NOTE(review): this writes little-endian while every other block
            // number in this store is big-endian — confirm that
            // get_min_filtered_block_number/update_min_filtered_block_number
            // really use LE before touching this.
            batch
                .put(
                    Key::Meta(MIN_FILTERED_BLOCK_NUMBER).into_vec(),
                    to_number.saturating_sub(1).to_le_bytes(),
                )
                .expect("batch put should be ok");
        }

        batch.commit().expect("batch commit should be ok");
    }

    fn cell(&self, out_point: &OutPoint, eager_load: bool) -> CellStatus {
        CellProvider::cell(self, out_point, eager_load)
    }
}

/// Staged write batch. Puts and `delete` calls are buffered and applied by
/// `commit` (puts first, then deletes); `delete_many` applies immediately.
pub struct Batch {
    add: Vec<(Vec<u8>, Vec<u8>)>,
    delete: Vec<Vec<u8>>,
    db: Arc<ReentrantMutex<RefCell<Connection>>>,
}

impl Batch {
    /// `put` for owned-byte producers (e.g. `Key` values).
    pub fn put_kv<K: Into<Vec<u8>>, V: Into<Vec<u8>>>(&mut self, key: K, value: V) -> Result<()> {
        self.add.push((key.into(), value.into()));
        Ok(())
    }

    /// Stages a put; applied at `commit`.
    pub fn put<K: AsRef<[u8]>, V: AsRef<[u8]>>(&mut self, key: K, value: V) -> Result<()> {
        self.add
            .push((key.as_ref().to_vec(), value.as_ref().to_vec()));
        Ok(())
    }

    /// Stages a delete; applied at `commit`, AFTER all staged puts.
    pub fn delete<K: AsRef<[u8]>>(&mut self, key: K) -> Result<()> {
        self.delete.push(key.as_ref().to_vec());
        Ok(())
    }

    /// Deletes `keys` immediately, bypassing the staged delete list.
    ///
    /// NOTE(review): unlike `delete`, this does NOT wait for `commit`;
    /// `update_filter_scripts` relies on that ordering (old keys removed
    /// before replacement puts are committed), so don't fold it into the
    /// staged list.
    pub fn delete_many(&mut self, keys: Vec<Vec<u8>>) -> Result<()> {
        let lock_guard = self.db.lock();
        let guard = lock_guard.borrow();
        let mut stmt = guard.prepare("DELETE FROM data WHERE key = ?1")?;

        for item in keys.into_iter() {
            stmt.execute([item])?;
        }
        Ok(())
    }

    /// Applies every staged operation — puts first, then deletes — inside a
    /// single transaction.
    ///
    /// The original implementation opened one transaction PER inserted key
    /// (SELECT COUNT + INSERT/UPDATE each) and ran deletes in autocommit
    /// mode, so a crash mid-commit could leave a half-applied batch; a single
    /// transaction with one prepared upsert is both atomic and much faster.
    pub fn commit(self) -> Result<()> {
        let Batch { add, delete, db } = self;
        let lock_guard = db.lock();
        let mut guard = lock_guard.borrow_mut();
        let tx = guard.transaction()?;
        {
            if !add.is_empty() {
                let mut upsert = tx.prepare(
                    "INSERT INTO data (key, value) VALUES (?1, ?2) \
                     ON CONFLICT(key) DO UPDATE SET value = excluded.value",
                )?;
                for (key, value) in add.into_iter() {
                    upsert.execute([key, value])?;
                }
            }

            if !delete.is_empty() {
                let mut stmt = tx.prepare("DELETE FROM data WHERE key = ?1")?;
                for key in delete.into_iter() {
                    stmt.execute([key])?;
                }
            }
        }
        tx.commit()?;
        Ok(())
    }
}

impl CellProvider for Storage {
    // assume all cells are live and load data eagerly
    fn cell(&self, out_point: &OutPoint, _eager_load: bool) -> CellStatus {
        if let Some((block_number, tx_index, tx)) = self.get_transaction(&out_point.tx_hash()) {
            let block_hash = Byte32::from_slice(
                &self
                    .get(Key::BlockNumber(block_number).into_vec())
                    .expect("db get should be ok")
                    .expect("stored block number / hash mapping"),
            )
            .expect("stored block hash should be OK");

            // Only the fixed-size header prefix of the stored value is the
            // serialized Header.
            let header = Header::from_slice(
                &self
                    .get(Key::BlockHash(&block_hash).into_vec())
                    .expect("db get should be ok")
                    .expect("stored block hash / header mapping")[..Header::TOTAL_SIZE],
            )
            .expect("stored header should be OK")
            .into_view();

            let output_index = out_point.index().unpack();
            let tx = tx.into_view();
            if let Some(cell_output) = tx.outputs().get(output_index) {
                let output_data = tx
                    .outputs_data()
                    .get(output_index)
                    .expect("output_data's index should be same as output")
                    .raw_data();
                let output_data_data_hash = CellOutput::calc_data_hash(&output_data);
                let cell_meta = CellMeta {
                    out_point: out_point.clone(),
                    cell_output,
                    transaction_info: Some(TransactionInfo {
                        block_hash,
                        block_epoch: header.epoch(),
                        block_number,
                        index: tx_index as usize,
                    }),
                    data_bytes: output_data.len() as u64,
                    mem_cell_data: Some(output_data),
                    mem_cell_data_hash: Some(output_data_data_hash),
                };
                return CellStatus::Live(cell_meta);
            }
        }
        CellStatus::Unknown
    }
}

impl CellDataProvider for Storage {
    // we load all cells data eagerly in Storage's CellProvider impl
    fn get_cell_data(&self, _out_point: &OutPoint) -> Option<Bytes> {
        unreachable!()
    }

    fn get_cell_data_hash(&self, _out_point: &OutPoint) -> Option<Byte32> {
        unreachable!()
    }
}

impl HeaderProvider for Storage {
    fn get_header(&self, hash: &Byte32) -> Option<HeaderView> {
        self.get(Key::BlockHash(hash).into_vec())
            .map(|v| {
                v.map(|v| {
                    Header::from_slice(&v[..Header::TOTAL_SIZE])
                        .expect("stored Header")
                        .into_view()
                })
            })
            .expect("db get should be ok")
    }
}
diff --git a/light-client-lib/src/storage/mod.rs b/light-client-lib/src/storage/mod.rs index 60b262ea..6411a9b8 100644 --- a/light-client-lib/src/storage/mod.rs +++ b/light-client-lib/src/storage/mod.rs @@ -15,16 +15,13 @@ use ckb_types::{ H256, }; -mod db; +pub mod db; -#[cfg(target_arch = "wasm32")] -pub use db::{Batch, CursorDirection, Storage}; - -#[cfg(not(target_arch = "wasm32"))] pub use db::{Batch, Storage}; use crate::{ protocols::{Peers, PendingTxs}, + storage::db::StorageHighLevelOperations, types::RwLock, }; @@ -120,14 +117,14 @@ impl FilterDataProvider for WrappedBlockView<'_> { } #[derive(Clone)] -pub struct StorageWithChainData { - pub(crate) storage: Storage, +pub struct StorageWithChainData { + pub(crate) storage: S, pub(crate) peers: Arc, pending_txs: Arc>, } -impl StorageWithChainData { - pub fn new(storage: Storage, peers: Arc, pending_txs: Arc>) -> Self { +impl StorageWithChainData { + pub fn new(storage: S, peers: Arc, pending_txs: Arc>) -> Self { Self { storage, peers, @@ -135,7 +132,7 @@ impl StorageWithChainData { } } - pub fn storage(&self) -> &Storage { + pub fn storage(&self) -> &S { &self.storage } @@ -203,7 +200,7 @@ impl StorageWithChainData { } } -impl CellProvider for StorageWithChainData { +impl CellProvider for StorageWithChainData { fn cell(&self, out_point: &OutPoint, eager_load: bool) -> CellStatus { match self.storage.cell(out_point, eager_load) { CellStatus::Live(cell_meta) => CellStatus::Live(cell_meta), @@ -216,7 +213,7 @@ impl CellProvider for
StorageWithChainData { } #[cfg(target_arch = "wasm32")] -impl CellDataProvider for StorageWithChainData { +impl CellDataProvider for StorageWithChainData { fn get_cell_data(&self, _out_point: &OutPoint) -> Option { unreachable!() } @@ -227,7 +224,9 @@ impl CellDataProvider for StorageWithChainData { } #[cfg(not(target_arch = "wasm32"))] -impl CellDataProvider for StorageWithChainData { +impl CellDataProvider + for StorageWithChainData +{ fn get_cell_data(&self, out_point: &OutPoint) -> Option { self.storage.get_cell_data(out_point) } @@ -237,7 +236,7 @@ impl CellDataProvider for StorageWithChainData { } } -impl HeaderProvider for StorageWithChainData { +impl HeaderProvider for StorageWithChainData { fn get_header(&self, hash: &Byte32) -> Option { self.storage .get_header(hash) @@ -245,7 +244,7 @@ impl HeaderProvider for StorageWithChainData { } } -impl ExtensionProvider for StorageWithChainData { +impl ExtensionProvider for StorageWithChainData { fn get_block_extension(&self, hash: &Byte32) -> Option { self.storage .get(Key::BlockHash(hash).into_vec()) @@ -266,7 +265,7 @@ impl ExtensionProvider for StorageWithChainData { } } -impl HeaderFieldsProvider for StorageWithChainData { +impl HeaderFieldsProvider for StorageWithChainData { fn get_header_fields(&self, hash: &Byte32) -> Option { self.get_header(hash).map(|header| HeaderFields { hash: header.hash(), diff --git a/light-client-lib/src/tests/prelude.rs b/light-client-lib/src/tests/prelude.rs index 2fb2e19a..222b434c 100644 --- a/light-client-lib/src/tests/prelude.rs +++ b/light-client-lib/src/tests/prelude.rs @@ -19,6 +19,7 @@ use ckb_types::{ }; use log::{error, info}; +use crate::storage::db::StorageGeneralOperations; use crate::{ protocols::{ FilterProtocol, LastState, LightClientProtocol, Peers, ProveRequest, SyncProtocol, diff --git a/light-client-lib/src/tests/protocols/block_filter.rs b/light-client-lib/src/tests/protocols/block_filter.rs index 12cd0c35..25125ad7 100644 --- 
a/light-client-lib/src/tests/protocols/block_filter.rs +++ b/light-client-lib/src/tests/protocols/block_filter.rs @@ -12,6 +12,8 @@ use ckb_types::{ H256, U256, }; +use crate::storage::db::StorageGeneralOperations; +use crate::storage::db::StorageHighLevelOperations; use crate::storage::SetScriptsCommand; use crate::storage::{ScriptStatus, ScriptType}; use crate::{ diff --git a/light-client-lib/src/tests/protocols/light_client/mod.rs b/light-client-lib/src/tests/protocols/light_client/mod.rs index 58d54179..421ae7bb 100644 --- a/light-client-lib/src/tests/protocols/light_client/mod.rs +++ b/light-client-lib/src/tests/protocols/light_client/mod.rs @@ -15,6 +15,7 @@ use crate::{ }, PeerState, BAD_MESSAGE_BAN_TIME, }, + storage::db::StorageGeneralOperations, tests::{ prelude::*, utils::{setup, MockChain, MockNetworkContext}, diff --git a/light-client-lib/src/tests/protocols/light_client/send_last_state_proof.rs b/light-client-lib/src/tests/protocols/light_client/send_last_state_proof.rs index acafebe8..d1cb2e53 100644 --- a/light-client-lib/src/tests/protocols/light_client/send_last_state_proof.rs +++ b/light-client-lib/src/tests/protocols/light_client/send_last_state_proof.rs @@ -1,12 +1,8 @@ use std::{cmp, sync::Arc}; -use ckb_network::{CKBProtocolHandler, PeerIndex, SupportProtocols}; -use ckb_types::{ - core::BlockNumber, packed, prelude::*, utilities::merkle_mountain_range::VerifiableHeader, - H256, U256, +use crate::storage::db::{ + StorageGeneralOperations, StorageGetPinnedRelatedOperations, StorageHighLevelOperations, }; -use log::debug; - use crate::{ protocols::{light_client::prelude::*, LastState, ProveRequest, ProveState, StatusCode}, tests::{ @@ -14,6 +10,12 @@ use crate::{ utils::{setup, MockChain, MockNetworkContext}, }, }; +use ckb_network::{CKBProtocolHandler, PeerIndex, SupportProtocols}; +use ckb_types::{ + core::BlockNumber, packed, prelude::*, utilities::merkle_mountain_range::VerifiableHeader, + H256, U256, +}; +use log::debug; fn 
sampling_between(start_number: BlockNumber, boundary_number: BlockNumber) -> Vec { let mut sampled_numbers = Vec::new(); diff --git a/light-client-lib/src/tests/protocols/light_client/send_transactions_proof.rs b/light-client-lib/src/tests/protocols/light_client/send_transactions_proof.rs index 4b2b1934..2dd83d07 100644 --- a/light-client-lib/src/tests/protocols/light_client/send_transactions_proof.rs +++ b/light-client-lib/src/tests/protocols/light_client/send_transactions_proof.rs @@ -14,6 +14,7 @@ use crate::{ light_client::constant::{FETCH_HEADER_TX_TOKEN, REFRESH_PEERS_TOKEN}, FetchInfo, StatusCode, }, + storage::db::StorageGeneralOperations, tests::{ prelude::*, utils::{MockChain, MockNetworkContext}, diff --git a/light-client-lib/src/tests/protocols/synchronizer.rs b/light-client-lib/src/tests/protocols/synchronizer.rs index 35d55e57..660ba860 100644 --- a/light-client-lib/src/tests/protocols/synchronizer.rs +++ b/light-client-lib/src/tests/protocols/synchronizer.rs @@ -7,8 +7,9 @@ use ckb_types::{ prelude::*, }; +use crate::storage::db::StorageHighLevelOperations; use crate::{ - storage::{ScriptStatus, ScriptType}, + storage::{db::StorageGeneralOperations, ScriptStatus, ScriptType}, tests::{ prelude::*, utils::{MockChain, MockNetworkContext}, diff --git a/light-client-lib/src/tests/storage.rs b/light-client-lib/src/tests/storage.rs index c14875a7..240acbe1 100644 --- a/light-client-lib/src/tests/storage.rs +++ b/light-client-lib/src/tests/storage.rs @@ -1,11 +1,13 @@ use ckb_types::packed::Script; use crate::storage; +use crate::storage::db::{ + StorageGeneralOperations, StorageGetPinnedRelatedOperations, StorageHighLevelOperations, +}; use crate::tests::utils::new_storage; - #[test] fn test_forget_update_min_filtred_number() { - let storage = new_storage("forget_update_min_filtred_block"); + let (storage, _tmpdir) = new_storage("forget_update_min_filtred_block"); storage.update_min_filtered_block_number(66); storage.update_filter_scripts( vec![ diff --git 
a/light-client-lib/src/tests/utils/chain.rs b/light-client-lib/src/tests/utils/chain.rs index def7d831..fa03deb4 100644 --- a/light-client-lib/src/tests/utils/chain.rs +++ b/light-client-lib/src/tests/utils/chain.rs @@ -10,14 +10,16 @@ use ckb_shared::{Shared, SharedBuilder}; use ckb_types::{core, prelude::*}; use crate::{ - storage::Storage, + storage::{db::StorageBatchRelatedOperations, Storage}, tests::{prelude::*, ALWAYS_SUCCESS_SCRIPT}, }; +use tempfile::TempDir; /// Mock a chain without starting services. pub(crate) struct MockChain { storage: Storage, consensus: Consensus, + tempdir: TempDir, } /// Mock a chain and start its services. @@ -25,6 +27,8 @@ pub(crate) struct MockRunningChain { storage: Storage, chain_controller: ChainController, shared: Shared, + #[allow(dead_code)] + tempdir: TempDir, } impl ChainExt for MockChain { @@ -66,7 +70,11 @@ impl MockChain { .build_consensus() .expect("build consensus should be OK"); storage.init_genesis_block(consensus.genesis_block().data()); - MockChain { storage, consensus } + MockChain { + storage, + consensus, + tempdir: tmp_dir, + } } pub(crate) fn new_with_default_pow(prefix: &str) -> Self { @@ -81,7 +89,11 @@ impl MockChain { } pub(crate) fn start(self) -> MockRunningChain { - let Self { storage, consensus } = self; + let Self { + storage, + consensus, + tempdir, + } = self; let config = BlockAssemblerConfig { // always success @@ -114,6 +126,7 @@ impl MockChain { storage, chain_controller, shared, + tempdir, } } } diff --git a/light-client-lib/src/tests/utils/mod.rs b/light-client-lib/src/tests/utils/mod.rs index 27564d3a..d35c95a4 100644 --- a/light-client-lib/src/tests/utils/mod.rs +++ b/light-client-lib/src/tests/utils/mod.rs @@ -6,6 +6,7 @@ mod network_context; pub(crate) use chain::MockChain; pub(crate) use network_context::MockNetworkContext; +use tempfile::TempDir; use crate::storage::Storage; @@ -19,7 +20,7 @@ pub(crate) fn setup() { println!(); } -pub(crate) fn new_storage(prefix: &str) -> Storage { 
+pub(crate) fn new_storage(prefix: &str) -> (Storage, TempDir) { let tmp_dir = tempfile::Builder::new().prefix(prefix).tempdir().unwrap(); - Storage::new(tmp_dir.path().to_str().unwrap()) + (Storage::new(tmp_dir.path().to_str().unwrap()), tmp_dir) } diff --git a/light-client-lib/src/tests/verify.rs b/light-client-lib/src/tests/verify.rs index 1b6863d5..772653d1 100644 --- a/light-client-lib/src/tests/verify.rs +++ b/light-client-lib/src/tests/verify.rs @@ -6,8 +6,12 @@ use ckb_types::{ prelude::{IntoHeaderView, IntoTransactionView as _}, }; +use crate::storage::db::StorageHighLevelOperations; use crate::{ - storage::{ScriptStatus, ScriptType, StorageWithChainData}, + storage::{ + db::{StorageBatchRelatedOperations, StorageGetPinnedRelatedOperations}, + ScriptStatus, ScriptType, StorageWithChainData, + }, tests::{prelude::*, utils::MockChain}, verify::verify_tx, }; diff --git a/light-client-rpc/Cargo.toml b/light-client-rpc/Cargo.toml new file mode 100644 index 00000000..73df9288 --- /dev/null +++ b/light-client-rpc/Cargo.toml @@ -0,0 +1,30 @@ +[package] +edition = "2024" +name = "ckb-light-client-rpc" +version = "0.5.4" + +[lib] +crate-type = ["rlib"] + +[dependencies] +ckb-jsonrpc-types = "1" +ckb-types = "1" +ckb-traits = "1" +ckb-chain-spec = "1" +ckb-network = "1" +ckb-systemtime = "1" +log = "0.4" + +ckb-light-client-lib = {path = "../light-client-lib", default-features = false} +[target.'cfg(not(target_arch = "wasm32"))'.dependencies] +rocksdb = {package = "ckb-rocksdb", version = "=0.21.1", features = [ + "snappy", +], default-features = false, optional = true} +rusqlite = {version = "0.38.0", features = ["bundled"], optional = true} + +[features] +default = ["rocksdb"] +march-native = ["rocksdb/march-native"] +portable = ["rocksdb/portable"] +rocksdb = ["dep:rocksdb", "ckb-light-client-lib/rocksdb"] +rusqlite = ["ckb-light-client-lib/rusqlite", "dep:rusqlite"] diff --git a/light-client-bin/src/rpc.rs b/light-client-rpc/src/lib.rs similarity index 63% rename 
from light-client-bin/src/rpc.rs rename to light-client-rpc/src/lib.rs index 6522057f..6a9d2f71 100644 --- a/light-client-bin/src/rpc.rs +++ b/light-client-rpc/src/lib.rs @@ -1,55 +1,43 @@ -use std::{ - net::ToSocketAddrs, - sync::{Arc, RwLock}, -}; - -use jsonrpc_core::{Error, IoHandler, Result}; -use jsonrpc_derive::rpc; -use jsonrpc_http_server::{Server, ServerBuilder}; -use jsonrpc_server_utils::cors::AccessControlAllowOrigin; -use jsonrpc_server_utils::hosts::DomainsValidation; - -use ckb_light_client_lib::{ - protocols::{Peers, PendingTxs}, - service::{ - Cell, CellType, CellsCapacity, FetchStatus, LocalNode, LocalNodeProtocol, Order, - Pagination, PeerSyncState, RemoteNode, ScriptStatus, ScriptType, SearchKey, - SetScriptsCommand, Status, TransactionWithStatus, Tx, TxStatus, TxWithCell, TxWithCells, - }, - storage::{ - self, extract_raw_data, Key, KeyPrefix, Storage, StorageWithChainData, LAST_STATE_KEY, - }, - verify::verify_tx, -}; +use std::sync::Arc; use ckb_chain_spec::consensus::Consensus; use ckb_jsonrpc_types::{ BlockView, EstimateCycles, HeaderView, JsonBytes, NodeAddress, RemoteNodeProtocol, Transaction, Uint32, }; -use ckb_network::{extract_peer_id, NetworkController}; +use ckb_light_client_lib::error::{Error, Result}; +use ckb_light_client_lib::protocols::Peers; +use ckb_light_client_lib::service::{ + Cell, CellType, CellsCapacity, FetchStatus, LocalNode, LocalNodeProtocol, Order, Pagination, + PeerSyncState, RemoteNode, ScriptStatus, ScriptType, SearchKey, SetScriptsCommand, Status, + TransactionWithStatus, Tx, TxStatus, TxWithCell, TxWithCells, +}; +use ckb_light_client_lib::storage::db::{ + GeneralDirection, StorageGeneralOperations, StorageGetPinnedRelatedOperations, + StorageHighLevelOperations, +}; +use ckb_light_client_lib::storage::extract_raw_data; +use ckb_light_client_lib::storage::{self, Key, KeyPrefix, LAST_STATE_KEY, StorageWithChainData}; +use ckb_light_client_lib::verify::verify_tx; +use ckb_network::{NetworkController, 
extract_peer_id}; use ckb_systemtime::unix_time_as_millis; +use ckb_traits::CellDataProvider; use ckb_traits::HeaderProvider; -use ckb_types::{core, packed, prelude::*, H256}; -use rocksdb::{ - ops::{Get, Iterate}, - Direction, IteratorMode, +use ckb_types::prelude::IntoTransactionView; +use ckb_types::prelude::Reader; +use ckb_types::prelude::{ + Entity, FromSliceShouldBeOk, IntoBlockView, IntoHeaderView, Pack, Unpack, }; - -#[rpc(server)] -pub trait BlockFilterRpc { - /// curl http://localhost:9000/ -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0", "method":"set_scripts", "params": [{"script": {"code_hash": "0x9bd7e06f3ecf4be0f2fcd2188b23f1b9fcc88e5d4b65a8637b17723bbda3cce8", "hash_type": "type", "args": "0x50878ce52a68feb47237c29574d82288f58b5d21"}, "block_number": "0x59F74D"}], "id": 1}' - #[rpc(name = "set_scripts")] +use ckb_types::{H256, packed}; +pub trait BlockFilterRpcMethods { fn set_scripts( &self, scripts: Vec, command: Option, ) -> Result<()>; - #[rpc(name = "get_scripts")] fn get_scripts(&self) -> Result>; - #[rpc(name = "get_cells")] fn get_cells( &self, search_key: SearchKey, @@ -58,7 +46,6 @@ pub trait BlockFilterRpc { after: Option, ) -> Result>; - #[rpc(name = "get_transactions")] fn get_transactions( &self, search_key: SearchKey, @@ -67,69 +54,342 @@ pub trait BlockFilterRpc { after: Option, ) -> Result>; - #[rpc(name = "get_cells_capacity")] fn get_cells_capacity(&self, search_key: SearchKey) -> Result; } -#[rpc(server)] -pub trait TransactionRpc { - #[rpc(name = "send_transaction")] +pub trait TransactionRpcMethods { fn send_transaction(&self, tx: Transaction) -> Result; - - #[rpc(name = "get_transaction")] fn get_transaction(&self, tx_hash: H256) -> Result; - - #[rpc(name = "fetch_transaction")] fn fetch_transaction(&self, tx_hash: H256) -> Result>; } -#[rpc(server)] -pub trait ChainRpc { - #[rpc(name = "get_tip_header")] +pub trait ChainRpcMethods { fn get_tip_header(&self) -> Result; - - #[rpc(name = "get_genesis_block")] 
fn get_genesis_block(&self) -> Result; - - #[rpc(name = "get_header")] fn get_header(&self, block_hash: H256) -> Result>; - - #[rpc(name = "fetch_header")] - fn fetch_header(&self, block_hash: H256) -> Result>; - - #[rpc(name = "estimate_cycles")] + fn fetch_header( + &self, + block_hash: H256, + ) -> Result>; fn estimate_cycles(&self, tx: Transaction) -> Result; } -#[rpc(server)] -pub trait NetRpc { - #[rpc(name = "local_node_info")] +pub trait NetRpcMethods { fn local_node_info(&self) -> Result; - - #[rpc(name = "get_peers")] fn get_peers(&self) -> Result>; } -pub struct BlockFilterRpcImpl { - pub(crate) swc: StorageWithChainData, +pub struct BlockFilterRpcImpl { + pub swc: StorageWithChainData, } -pub struct TransactionRpcImpl { - pub(crate) swc: StorageWithChainData, - pub(crate) consensus: Arc, +pub struct TransactionRpcImpl< + S: StorageHighLevelOperations + + StorageGetPinnedRelatedOperations + + CellDataProvider + + Send + + Sync + + Clone + + 'static, +> { + pub swc: StorageWithChainData, + pub consensus: Arc, } -pub struct ChainRpcImpl { - pub(crate) swc: StorageWithChainData, - pub(crate) consensus: Arc, +pub struct ChainRpcImpl< + S: StorageHighLevelOperations + + StorageGeneralOperations + + StorageGetPinnedRelatedOperations + + CellDataProvider + + Send + + Sync + + Clone + + 'static, +> { + pub swc: StorageWithChainData, + pub consensus: Arc, } pub struct NetRpcImpl { - network_controller: NetworkController, - peers: Arc, + pub network_controller: NetworkController, + pub peers: Arc, +} + +const MAX_ADDRS: usize = 50; + +impl NetRpcMethods for NetRpcImpl { + fn local_node_info(&self) -> Result { + Ok(LocalNode { + version: self.network_controller.version().to_owned(), + node_id: self.network_controller.node_id(), + active: self.network_controller.is_active(), + addresses: self + .network_controller + .public_urls(MAX_ADDRS) + .into_iter() + .map(|(address, score)| NodeAddress { + address, + score: u64::from(score).into(), + }) + .collect(), + 
protocols: self + .network_controller + .protocols() + .into_iter() + .map(|(protocol_id, name, support_versions)| LocalNodeProtocol { + id: (protocol_id.value() as u64).into(), + name, + support_versions, + }) + .collect::>(), + connections: (self.network_controller.connected_peers().len() as u64).into(), + }) + } + + fn get_peers(&self) -> Result> { + let peers: Vec = self + .network_controller + .connected_peers() + .iter() + .map(|(peer_index, peer)| { + let mut addresses = vec![&peer.connected_addr]; + addresses.extend(peer.listened_addrs.iter()); + + let node_addresses = addresses + .iter() + .map(|addr| { + let score = self + .network_controller + .addr_info(addr) + .map(|addr_info| addr_info.score) + .unwrap_or(1); + let non_negative_score = if score > 0 { score as u64 } else { 0 }; + NodeAddress { + address: addr.to_string(), + score: non_negative_score.into(), + } + }) + .collect(); + + RemoteNode { + version: peer + .identify_info + .as_ref() + .map(|info| info.client_version.clone()) + .unwrap_or_else(|| "unknown".to_string()), + node_id: extract_peer_id(&peer.connected_addr) + .map(|peer_id| peer_id.to_base58()) + .unwrap_or_default(), + addresses: node_addresses, + connected_duration: (ckb_light_client_lib::types::Instant::now() + .saturating_duration_since(peer.connected_time) + .as_millis() as u64) + .into(), + sync_state: self.peers.get_state(peer_index).map(|state| PeerSyncState { + requested_best_known_header: state + .get_prove_request() + .map(|request| request.get_last_header().header().to_owned().into()), + proved_best_known_header: state + .get_prove_state() + .map(|request| request.get_last_header().header().to_owned().into()), + }), + protocols: peer + .protocols + .iter() + .map(|(protocol_id, protocol_version)| RemoteNodeProtocol { + id: (protocol_id.value() as u64).into(), + version: protocol_version.clone(), + }) + .collect(), + } + }) + .collect(); + Ok(peers) + } } -impl BlockFilterRpc for BlockFilterRpcImpl { +impl< + S: 
StorageHighLevelOperations + + StorageGetPinnedRelatedOperations + + CellDataProvider + + Send + + Sync + + Clone + + 'static, +> TransactionRpcMethods for TransactionRpcImpl +{ + fn send_transaction(&self, tx: Transaction) -> Result { + let tx: packed::Transaction = tx.into(); + let tx = tx.into_view(); + let cycles = verify_tx( + tx.clone(), + &self.swc, + Arc::clone(&self.consensus), + &self.swc.storage().get_last_state().1.into_view(), + ) + .map_err(|e| Error::runtime(format!("invalid transaction: {:?}", e)))?; + #[cfg(target_arch = "wasm32")] + self.swc + .pending_txs() + .blocking_write() + .push(tx.clone(), cycles); + + #[cfg(not(target_arch = "wasm32"))] + self.swc + .pending_txs() + .write() + .expect("pending_txs lock is poisoned") + .push(tx.clone(), cycles); + + Ok(tx.hash().unpack()) + } + + fn get_transaction(&self, tx_hash: H256) -> Result { + if let Some((transaction, header)) = self + .swc + .storage() + .get_transaction_with_header(&tx_hash.pack()) + { + return Ok(TransactionWithStatus { + transaction: Some(transaction.into_view().into()), + cycles: None, + tx_status: TxStatus { + block_hash: Some(header.into_view().hash().unpack()), + status: Status::Committed, + }, + }); + } + + #[cfg(not(target_arch = "wasm32"))] + let pending_tx = self + .swc + .pending_txs() + .read() + .expect("pending_txs lock is poisoned") + .get(&tx_hash.pack()); + #[cfg(target_arch = "wasm32")] + let pending_tx = self.swc.pending_txs().blocking_read().get(&tx_hash.pack()); + if let Some((transaction, cycles, _)) = pending_tx { + return Ok(TransactionWithStatus { + transaction: Some(transaction.into_view().into()), + cycles: Some(cycles.into()), + tx_status: TxStatus { + block_hash: None, + status: Status::Pending, + }, + }); + } + + Ok(TransactionWithStatus { + transaction: None, + cycles: None, + tx_status: TxStatus { + block_hash: None, + status: Status::Unknown, + }, + }) + } + + fn fetch_transaction(&self, tx_hash: H256) -> Result> { + let tws = 
self.get_transaction(tx_hash.clone())?; + if tws.transaction.is_some() { + return Ok(FetchStatus::Fetched { data: tws }); + } + + let now = unix_time_as_millis(); + if let Some((added_ts, first_sent, missing)) = self.swc.get_tx_fetch_info(&tx_hash) { + if missing { + // re-fetch the transaction + self.swc.add_fetch_tx(tx_hash, now); + return Ok(FetchStatus::NotFound); + } else if first_sent > 0 { + return Ok(FetchStatus::Fetching { + first_sent: first_sent.into(), + }); + } else { + return Ok(FetchStatus::Added { + timestamp: added_ts.into(), + }); + } + } else { + self.swc.add_fetch_tx(tx_hash, now); + } + Ok(FetchStatus::Added { + timestamp: now.into(), + }) + } +} + +impl< + S: StorageHighLevelOperations + + StorageGeneralOperations + + StorageGetPinnedRelatedOperations + + CellDataProvider + + Send + + Sync + + Clone + + 'static, +> ChainRpcMethods for ChainRpcImpl +{ + fn get_tip_header(&self) -> Result { + Ok(self.swc.storage().get_tip_header().into_view().into()) + } + + fn get_genesis_block(&self) -> Result { + Ok(self.swc.storage().get_genesis_block().into_view().into()) + } + + fn get_header(&self, block_hash: H256) -> Result> { + Ok(self.swc.get_header(&block_hash.pack()).map(Into::into)) + } + + fn fetch_header(&self, block_hash: H256) -> Result> { + if let Some(value) = self.swc.storage().get_header(&block_hash.pack()) { + return Ok(FetchStatus::Fetched { data: value.into() }); + } + let now = unix_time_as_millis(); + if let Some((added_ts, first_sent, missing)) = self.swc.get_header_fetch_info(&block_hash) { + if missing { + // re-fetch the header + self.swc.add_fetch_header(block_hash, now); + return Ok(FetchStatus::NotFound); + } else if first_sent > 0 { + return Ok(FetchStatus::Fetching { + first_sent: first_sent.into(), + }); + } else { + return Ok(FetchStatus::Added { + timestamp: added_ts.into(), + }); + } + } else { + self.swc.add_fetch_header(block_hash, now); + } + Ok(FetchStatus::Added { + timestamp: now.into(), + }) + } + + fn 
estimate_cycles(&self, tx: Transaction) -> Result { + let tx: packed::Transaction = tx.into(); + let tx = tx.into_view(); + let cycles = verify_tx( + tx.clone(), + &self.swc, + Arc::clone(&self.consensus), + &self.swc.storage().get_last_state().1.into_view(), + ) + .map_err(|e| Error::runtime(format!("invalid transaction: {:?}", e)))?; + Ok(EstimateCycles { + cycles: cycles.into(), + }) + } +} + +impl BlockFilterRpcMethods + for BlockFilterRpcImpl +{ + #[cfg(not(target_arch = "wasm32"))] fn set_scripts( &self, scripts: Vec, @@ -148,6 +408,20 @@ impl BlockFilterRpc for BlockFilterRpcImpl { rx.recv().unwrap(); Ok(()) } + #[cfg(target_arch = "wasm32")] + fn set_scripts( + &self, + scripts: Vec, + command: Option, + ) -> Result<()> { + let mut matched_blocks = self.swc.matched_blocks().blocking_write(); + self.swc.storage().update_filter_scripts( + scripts.into_iter().map(Into::into).collect(), + command.map(Into::into).unwrap_or_default(), + ); + matched_blocks.clear(); + Ok(()) + } fn get_scripts(&self) -> Result> { let scripts = self.swc.storage().get_filter_scripts(); @@ -168,15 +442,17 @@ impl BlockFilterRpc for BlockFilterRpcImpl { order, after_cursor, )?; + let limit = limit.value() as usize; if limit == 0 { - return Err(Error::invalid_params("limit should be greater than 0")); + return Err(Error::runtime("limit should be greater than 0")); } let with_data = search_key.with_data.unwrap_or(true); let filter_script_type = match search_key.script_type { ScriptType::Lock => ScriptType::Type, ScriptType::Type => ScriptType::Lock, }; + let ( filter_prefix, filter_script_len_range, @@ -184,38 +460,44 @@ impl BlockFilterRpc for BlockFilterRpcImpl { filter_output_capacity_range, filter_block_range, ) = build_filter_options(search_key)?; - let mode = IteratorMode::From(from_key.as_ref(), direction); - let snapshot = self.swc.storage().snapshot(); - let iter = snapshot.iterator(mode).skip(skip); - let mut last_key = Vec::new(); - let cells = iter - .take_while(|(key, 
_value)| key.starts_with(&prefix)) - .filter_map(|(key, value)| { - let tx_hash = packed::Byte32::from_slice(&value).expect("stored tx hash"); - let output_index = u32::from_be_bytes( - key[key.len() - 4..] - .try_into() - .expect("stored output_index"), - ); - let tx_index = u32::from_be_bytes( - key[key.len() - 8..key.len() - 4] - .try_into() - .expect("stored tx_index"), - ); - let block_number = u64::from_be_bytes( - key[key.len() - 16..key.len() - 8] - .try_into() - .expect("stored block_number"), - ); + fn extract_data_from_key(key: &[u8]) -> (u32, u32, u64) { + let output_index = u32::from_be_bytes( + key[key.len() - 4..] + .try_into() + .expect("stored output_index"), + ); + let tx_index = u32::from_be_bytes( + key[key.len() - 8..key.len() - 4] + .try_into() + .expect("stored tx_index"), + ); + let block_number = u64::from_be_bytes( + key[key.len() - 16..key.len() - 8] + .try_into() + .expect("stored block_number"), + ); + (output_index, tx_index, block_number) + } - let tx = packed::Transaction::from_slice( - &snapshot - .get(Key::TxHash(&tx_hash).into_vec()) - .expect("get tx should be OK") - .expect("stored tx")[12..], - ) - .expect("from stored tx slice should be OK"); + let storage = self.swc.storage(); + let storage_cloned = storage.clone(); + let kvs: Vec<_> = storage.collect_iterator( + from_key.clone(), + direction, + Box::new(move |key| key.starts_with(&prefix)), + Box::new(move |key| { + let value = storage_cloned.get(key).unwrap().unwrap(); + let tx_hash = packed::Byte32::from_slice(&value).expect("stored tx hash"); + trace!("get cells iterator at {:?} {:?}", key, value); + let (output_index, _tx_index, block_number) = extract_data_from_key(key); + let tx_data = &storage_cloned + .get(Key::TxHash(&tx_hash).into_vec()) + .unwrap() + .expect("stored tx")[12..]; + trace!("tx hash = {:?}, tx data = {:?}", tx_hash, tx_data); + let tx = packed::Transaction::from_slice(tx_data) + .expect("from stored tx slice should be OK"); let output = tx .raw() 
.outputs() @@ -234,6 +516,7 @@ impl BlockFilterRpc for BlockFilterRpcImpl { .as_slice() .starts_with(prefix) { + trace!("skipped at {}", line!()); return None; } } @@ -243,6 +526,7 @@ impl BlockFilterRpc for BlockFilterRpcImpl { .as_slice() .starts_with(prefix) { + trace!("skipped at {}", line!()); return None; } } @@ -254,6 +538,7 @@ impl BlockFilterRpc for BlockFilterRpcImpl { ScriptType::Lock => { let script_len = extract_raw_data(&output.lock()).len(); if script_len < r0 || script_len > r1 { + trace!("skipped at {}", line!()); return None; } } @@ -264,48 +549,82 @@ impl BlockFilterRpc for BlockFilterRpcImpl { .map(|script| extract_raw_data(&script).len()) .unwrap_or_default(); if script_len < r0 || script_len > r1 { + trace!("skipped at {}", line!()); return None; } } } } - if let Some([r0, r1]) = filter_output_data_len_range { - if output_data.len() < r0 || output_data.len() >= r1 { - return None; - } + if let Some([r0, r1]) = filter_output_data_len_range + && (output_data.len() < r0 || output_data.len() >= r1) + { + trace!("skipped at {}", line!()); + return None; } if let Some([r0, r1]) = filter_output_capacity_range { let capacity: core::Capacity = output.capacity().unpack(); if capacity < r0 || capacity >= r1 { + trace!("skipped at {}", line!()); return None; } } - if let Some([r0, r1]) = filter_block_range { - if block_number < r0 || block_number >= r1 { - return None; - } + if let Some([r0, r1]) = filter_block_range + && (block_number < r0 || block_number >= r1) + { + trace!("skipped at {}", line!()); + return None; } + Some(key.to_vec()) + }), + limit, + skip, + ); + trace!("get_cells: collect_iterator done"); + let mut cells = Vec::new(); + let mut last_key = Vec::new(); + for (key, value) in kvs.into_iter() { + let tx_hash = packed::Byte32::from_slice(&value).expect("stored tx hash"); + let (output_index, tx_index, block_number) = extract_data_from_key(&key); + let tx = packed::Transaction::from_slice( + &storage + 
.get(Key::TxHash(&tx_hash).into_vec()) + .unwrap() + .expect("stored tx")[12..], + ) + .expect("from stored tx slice should be OK"); + let output = tx + .raw() + .outputs() + .get(output_index as usize) + .expect("get output by index should be OK"); + let output_data = tx + .raw() + .outputs_data() + .get(output_index as usize) + .expect("get output data by index should be OK"); + + last_key = key.to_vec(); + let cell_to_push = Cell { + output: output.into(), + output_data: if with_data { + Some(output_data.into()) + } else { + None + }, + out_point: packed::OutPoint::new(tx_hash, output_index).into(), + block_number: block_number.into(), + tx_index: tx_index.into(), + }; + cells.push(cell_to_push); + if cells.len() >= limit { + break; + } + } - last_key = key.to_vec(); - - Some(Cell { - output: output.into(), - output_data: if with_data { - Some(output_data.into()) - } else { - None - }, - out_point: packed::OutPoint::new(tx_hash, output_index).into(), - block_number: block_number.into(), - tx_index: tx_index.into(), - }) - }) - .take(limit) - .collect::>(); - + trace!("get_cells last_key={:?}", last_key); Ok(Pagination { objects: cells, last_cursor: JsonBytes::from_vec(last_key), @@ -326,19 +645,25 @@ impl BlockFilterRpc for BlockFilterRpcImpl { order, after_cursor, )?; + let limit = limit.value() as usize; if limit == 0 { - return Err(Error::invalid_params("limit should be greater than 0")); + return Err(Error::runtime("limit should be greater than 0")); } + let filter_script_type = match search_key.script_type { + ScriptType::Lock => ScriptType::Type, + ScriptType::Type => ScriptType::Lock, + }; + let (filter_script, filter_block_range) = if let Some(filter) = search_key.filter.as_ref() { if filter.output_data_len_range.is_some() { - return Err(Error::invalid_params( + return Err(Error::runtime( "doesn't support search_key.filter.output_data_len_range parameter", )); } if filter.output_capacity_range.is_some() { - return Err(Error::invalid_params( + return 
Err(Error::runtime( "doesn't support search_key.filter.output_capacity_range parameter", )); } @@ -351,143 +676,148 @@ impl BlockFilterRpc for BlockFilterRpcImpl { (None, None) }; - let filter_script_type = match search_key.script_type { - ScriptType::Lock => ScriptType::Type, - ScriptType::Type => ScriptType::Lock, - }; - - let mode = IteratorMode::From(from_key.as_ref(), direction); - let snapshot = self.swc.storage().snapshot(); - let iter = snapshot.iterator(mode).skip(skip); + let storage = self.swc.storage(); if search_key.group_by_transaction.unwrap_or_default() { + let prefix_cloned = prefix.clone(); + let mut kvs: Vec<_> = storage.collect_iterator( + from_key, + direction, + Box::new(move |key| key.starts_with(&prefix_cloned)), + Box::new(move |key| Some(key.to_vec())), + 100, + skip, + ); let mut tx_with_cells: Vec = Vec::new(); let mut last_key = Vec::new(); - for (key, value) in iter.take_while(|(key, _value)| key.starts_with(&prefix)) { - let tx_hash = packed::Byte32::from_slice(&value).expect("stored tx hash"); - if tx_with_cells.len() == limit - && tx_with_cells.last_mut().unwrap().transaction.hash != tx_hash.unpack() - { - break; - } - last_key = key.to_vec(); - let tx = packed::Transaction::from_slice( - &snapshot - .get(Key::TxHash(&tx_hash).into_vec()) - .expect("get tx should be OK") - .expect("stored tx")[12..], - ) - .expect("from stored tx slice should be OK"); + 'outer: while !kvs.is_empty() { + for (key, value) in kvs.into_iter() { + let tx_hash = packed::Byte32::from_slice(&value).expect("stored tx hash"); + if tx_with_cells.len() == limit + && tx_with_cells.last_mut().unwrap().transaction.hash != tx_hash.unpack() + { + break 'outer; + } + last_key = key.to_vec(); + let tx = packed::Transaction::from_slice( + &storage + .get(Key::TxHash(&tx_hash).into_vec()) + .expect("get tx should be OK") + .expect("stored tx")[12..], + ) + .expect("from stored tx slice should be OK"); - let block_number = u64::from_be_bytes( - key[key.len() - 
17..key.len() - 9] - .try_into() - .expect("stored block_number"), - ); - let tx_index = u32::from_be_bytes( - key[key.len() - 9..key.len() - 5] - .try_into() - .expect("stored tx_index"), - ); - let io_index = u32::from_be_bytes( - key[key.len() - 5..key.len() - 1] - .try_into() - .expect("stored io_index"), - ); - let io_type = if *key.last().expect("stored io_type") == 0 { - CellType::Input - } else { - CellType::Output - }; + let block_number = u64::from_be_bytes( + key[key.len() - 17..key.len() - 9] + .try_into() + .expect("stored block_number"), + ); + let tx_index = u32::from_be_bytes( + key[key.len() - 9..key.len() - 5] + .try_into() + .expect("stored tx_index"), + ); + let io_index = u32::from_be_bytes( + key[key.len() - 5..key.len() - 1] + .try_into() + .expect("stored io_index"), + ); + let io_type = if *key.last().expect("stored io_type") == 0 { + CellType::Input + } else { + CellType::Output + }; - if let Some(filter_script) = filter_script.as_ref() { - let filter_script_matched = match filter_script_type { - ScriptType::Lock => snapshot - .get( - Key::TxLockScript( - filter_script, - block_number, - tx_index, - io_index, - match io_type { - CellType::Input => storage::CellType::Input, - CellType::Output => storage::CellType::Output, - }, + if let Some(filter_script) = filter_script.as_ref() { + let filter_script_matched = match filter_script_type { + ScriptType::Lock => storage + .get( + Key::TxLockScript( + filter_script, + block_number, + tx_index, + io_index, + match io_type { + CellType::Input => storage::CellType::Input, + CellType::Output => storage::CellType::Output, + }, + ) + .into_vec(), ) - .into_vec(), - ) - .expect("get TxLockScript should be OK") - .is_some(), - ScriptType::Type => snapshot - .get( - Key::TxTypeScript( - filter_script, - block_number, - tx_index, - io_index, - match io_type { - CellType::Input => storage::CellType::Input, - CellType::Output => storage::CellType::Output, - }, + .expect("get TxLockScript should be OK") + 
.is_some(), + ScriptType::Type => storage + .get( + Key::TxTypeScript( + filter_script, + block_number, + tx_index, + io_index, + match io_type { + CellType::Input => storage::CellType::Input, + CellType::Output => storage::CellType::Output, + }, + ) + .into_vec(), ) - .into_vec(), - ) - .expect("get TxTypeScript should be OK") - .is_some(), - }; + .expect("get TxTypeScript should be OK") + .is_some(), + }; - if !filter_script_matched { - continue; + if !filter_script_matched { + continue; + } } - } - if let Some([r0, r1]) = filter_block_range { - if block_number < r0 || block_number >= r1 { + if let Some([r0, r1]) = filter_block_range + && (block_number < r0 || block_number >= r1) + { continue; } - } - let last_tx_hash_is_same = tx_with_cells - .last_mut() - .map(|last| { - if last.transaction.hash == tx_hash.unpack() { - last.cells.push((io_type.clone(), io_index.into())); - true - } else { - false - } - }) - .unwrap_or_default(); - - if !last_tx_hash_is_same { - tx_with_cells.push(TxWithCells { - transaction: tx.into_view().into(), - block_number: block_number.into(), - tx_index: tx_index.into(), - cells: vec![(io_type, io_index.into())], - }); + let last_tx_hash_is_same = tx_with_cells + .last_mut() + .map(|last| { + if last.transaction.hash == tx_hash.unpack() { + last.cells.push((io_type.clone(), io_index.into())); + true + } else { + false + } + }) + .unwrap_or_default(); + + if !last_tx_hash_is_same { + tx_with_cells.push(TxWithCells { + transaction: tx.into_view().into(), + block_number: block_number.into(), + tx_index: tx_index.into(), + cells: vec![(io_type, io_index.into())], + }); + } } + let prefix_cloned = prefix.clone(); + kvs = storage.collect_iterator( + last_key.clone(), + direction, + Box::new(move |key| key.starts_with(&prefix_cloned)), + Box::new(|k| Some(k.to_vec())), + 100, + 1, + ); } - Ok(Pagination { objects: tx_with_cells.into_iter().map(Tx::Grouped).collect(), last_cursor: JsonBytes::from_vec(last_key), }) } else { - let mut last_key = 
Vec::new(); - let txs = iter - .take_while(|(key, _value)| key.starts_with(&prefix)) - .filter_map(|(key, value)| { - let tx_hash = packed::Byte32::from_slice(&value).expect("stored tx hash"); - let tx = packed::Transaction::from_slice( - &snapshot - .get(Key::TxHash(&tx_hash).into_vec()) - .expect("get tx should be OK") - .expect("stored tx")[12..], - ) - .expect("from stored tx slice should be OK"); - + let storage_cloned = storage.clone(); + let kvs: Vec<_> = storage.collect_iterator( + from_key.clone(), + direction, + Box::new(move |key| key.starts_with(&prefix)), + Box::new(move |key| { let block_number = u64::from_be_bytes( key[key.len() - 17..key.len() - 9] .try_into() @@ -512,7 +842,7 @@ impl BlockFilterRpc for BlockFilterRpcImpl { if let Some(filter_script) = filter_script.as_ref() { match filter_script_type { ScriptType::Lock => { - snapshot + storage_cloned .get( Key::TxLockScript( filter_script, @@ -529,7 +859,7 @@ impl BlockFilterRpc for BlockFilterRpcImpl { .expect("get TxLockScript should be OK")?; } ScriptType::Type => { - snapshot + storage_cloned .get( Key::TxTypeScript( filter_script, @@ -548,23 +878,65 @@ impl BlockFilterRpc for BlockFilterRpcImpl { } } - if let Some([r0, r1]) = filter_block_range { - if block_number < r0 || block_number >= r1 { - return None; - } + if let Some([r0, r1]) = filter_block_range + && (block_number < r0 || block_number >= r1) + { + return None; } - last_key = key.to_vec(); - Some(Tx::Ungrouped(TxWithCell { - transaction: tx.into_view().into(), - block_number: block_number.into(), - tx_index: tx_index.into(), - io_index: io_index.into(), - io_type, - })) - }) - .take(limit) - .collect::>(); + Some(key.to_vec()) + }), + limit, + skip, + ); + + let mut last_key = Vec::new(); + let mut txs = Vec::new(); + + for (key, value) in kvs.into_iter() { + let tx_hash = packed::Byte32::from_slice(&value).expect("stored tx hash"); + let tx = packed::Transaction::from_slice( + &storage + .get(Key::TxHash(&tx_hash).into_vec()) + 
.expect("get tx should be OK") + .expect("stored tx")[12..], + ) + .expect("from stored tx slice should be OK"); + + let block_number = u64::from_be_bytes( + key[key.len() - 17..key.len() - 9] + .try_into() + .expect("stored block_number"), + ); + let tx_index = u32::from_be_bytes( + key[key.len() - 9..key.len() - 5] + .try_into() + .expect("stored tx_index"), + ); + let io_index = u32::from_be_bytes( + key[key.len() - 5..key.len() - 1] + .try_into() + .expect("stored io_index"), + ); + let io_type = if *key.last().expect("stored io_type") == 0 { + CellType::Input + } else { + CellType::Output + }; + + last_key = key.to_vec(); + let tx_to_push = Tx::Ungrouped(TxWithCell { + transaction: tx.into_view().into(), + block_number: block_number.into(), + tx_index: tx_index.into(), + io_index: io_index.into(), + io_type, + }); + txs.push(tx_to_push); + if txs.len() >= limit { + break; + } + } Ok(Pagination { objects: txs, @@ -592,13 +964,18 @@ impl BlockFilterRpc for BlockFilterRpcImpl { filter_output_capacity_range, filter_block_range, ) = build_filter_options(search_key)?; - let mode = IteratorMode::From(from_key.as_ref(), direction); - let snapshot = self.swc.storage().snapshot(); - let iter = snapshot.iterator(mode).skip(skip); - let capacity: u64 = iter - .take_while(|(key, _value)| key.starts_with(&prefix)) - .filter_map(|(key, value)| { + let storage = self.swc.storage(); + let storage_cloned = storage.clone(); + + log::trace!("get_cells_capacity: before entering collect iterator"); + + let kvs: Vec<_> = storage.collect_iterator( + from_key, + direction, + Box::new(move |key| key.starts_with(&prefix)), + Box::new(move |key| { + let value = storage_cloned.get(key).unwrap().unwrap(); let tx_hash = packed::Byte32::from_slice(&value).expect("stored tx hash"); let output_index = u32::from_be_bytes( key[key.len() - 4..] 
@@ -612,7 +989,7 @@ impl BlockFilterRpc for BlockFilterRpcImpl { ); let tx = packed::Transaction::from_slice( - &snapshot + &storage_cloned .get(Key::TxHash(&tx_hash).into_vec()) .expect("get tx should be OK") .expect("stored tx")[12..], @@ -636,6 +1013,7 @@ impl BlockFilterRpc for BlockFilterRpcImpl { .as_slice() .starts_with(prefix) { + log::trace!("break at {}", line!()); return None; } } @@ -645,6 +1023,7 @@ impl BlockFilterRpc for BlockFilterRpcImpl { .as_slice() .starts_with(prefix) { + log::trace!("break at {}", line!()); return None; } } @@ -656,6 +1035,7 @@ impl BlockFilterRpc for BlockFilterRpcImpl { ScriptType::Lock => { let script_len = extract_raw_data(&output.lock()).len(); if script_len < r0 || script_len > r1 { + log::trace!("break at {}", line!()); return None; } } @@ -666,41 +1046,72 @@ impl BlockFilterRpc for BlockFilterRpcImpl { .map(|script| extract_raw_data(&script).len()) .unwrap_or_default(); if script_len < r0 || script_len > r1 { + log::trace!("break at {}", line!()); return None; } } } } - if let Some([r0, r1]) = filter_output_data_len_range { - if output_data.len() < r0 || output_data.len() >= r1 { - return None; - } + if let Some([r0, r1]) = filter_output_data_len_range + && (output_data.len() < r0 || output_data.len() >= r1) + { + log::trace!("break at {}", line!()); + return None; } if let Some([r0, r1]) = filter_output_capacity_range { let capacity: core::Capacity = output.capacity().unpack(); if capacity < r0 || capacity >= r1 { + log::trace!("break at {}", line!()); return None; } } - if let Some([r0, r1]) = filter_block_range { - if block_number < r0 || block_number >= r1 { - return None; - } + if let Some([r0, r1]) = filter_block_range + && (block_number < r0 || block_number >= r1) + { + log::trace!("break at {}", line!()); + return None; } - - Some(Unpack::::unpack(&output.capacity()).as_u64()) - }) - .sum(); + log::trace!("Returning normally at {:?}", key); + Some(key.to_vec()) + }), + usize::MAX, + skip, + ); + + let mut 
capacity = 0; + for (key, value) in kvs.into_iter() { + let tx_hash = packed::Byte32::from_slice(&value).expect("stored tx hash"); + let output_index = u32::from_be_bytes( + key[key.len() - 4..] + .try_into() + .expect("stored output_index"), + ); + + let tx = packed::Transaction::from_slice( + &storage + .get(Key::TxHash(&tx_hash).into_vec()) + .expect("get tx should be OK") + .expect("stored tx")[12..], + ) + .expect("from stored tx slice should be OK"); + let output = tx + .raw() + .outputs() + .get(output_index as usize) + .expect("get output by index should be OK"); + capacity += Unpack::::unpack(&output.capacity()).as_u64() + } let key = Key::Meta(LAST_STATE_KEY).into_vec(); - let tip_header = snapshot + let tip_header = storage .get(key) .expect("snapshot get last state should be ok") .map(|data| packed::HeaderReader::from_slice_should_be_ok(&data[32..]).to_entity()) .expect("tip header should be inited"); + log::trace!("Get cells capacity done"); Ok(CellsCapacity { capacity: capacity.into(), block_hash: tip_header.calc_header_hash().unpack(), @@ -709,303 +1120,6 @@ impl BlockFilterRpc for BlockFilterRpcImpl { } } -const MAX_ADDRS: usize = 50; - -impl NetRpc for NetRpcImpl { - fn local_node_info(&self) -> Result { - Ok(LocalNode { - version: self.network_controller.version().to_owned(), - node_id: self.network_controller.node_id(), - active: self.network_controller.is_active(), - addresses: self - .network_controller - .public_urls(MAX_ADDRS) - .into_iter() - .map(|(address, score)| NodeAddress { - address, - score: u64::from(score).into(), - }) - .collect(), - protocols: self - .network_controller - .protocols() - .into_iter() - .map(|(protocol_id, name, support_versions)| LocalNodeProtocol { - id: (protocol_id.value() as u64).into(), - name, - support_versions, - }) - .collect::>(), - connections: (self.network_controller.connected_peers().len() as u64).into(), - }) - } - - fn get_peers(&self) -> Result> { - let peers: Vec = self - .network_controller - 
.connected_peers() - .iter() - .map(|(peer_index, peer)| { - let mut addresses = vec![&peer.connected_addr]; - addresses.extend(peer.listened_addrs.iter()); - - let node_addresses = addresses - .iter() - .map(|addr| { - let score = self - .network_controller - .addr_info(addr) - .map(|addr_info| addr_info.score) - .unwrap_or(1); - let non_negative_score = if score > 0 { score as u64 } else { 0 }; - NodeAddress { - address: addr.to_string(), - score: non_negative_score.into(), - } - }) - .collect(); - - RemoteNode { - version: peer - .identify_info - .as_ref() - .map(|info| info.client_version.clone()) - .unwrap_or_else(|| "unknown".to_string()), - node_id: extract_peer_id(&peer.connected_addr) - .map(|peer_id| peer_id.to_base58()) - .unwrap_or_default(), - addresses: node_addresses, - connected_duration: (std::time::Instant::now() - .saturating_duration_since(peer.connected_time) - .as_millis() as u64) - .into(), - sync_state: self.peers.get_state(peer_index).map(|state| PeerSyncState { - requested_best_known_header: state - .get_prove_request() - .map(|request| request.get_last_header().header().to_owned().into()), - proved_best_known_header: state - .get_prove_state() - .map(|request| request.get_last_header().header().to_owned().into()), - }), - protocols: peer - .protocols - .iter() - .map(|(protocol_id, protocol_version)| RemoteNodeProtocol { - id: (protocol_id.value() as u64).into(), - version: protocol_version.clone(), - }) - .collect(), - } - }) - .collect(); - Ok(peers) - } -} - -impl TransactionRpc for TransactionRpcImpl { - fn send_transaction(&self, tx: Transaction) -> Result { - let tx: packed::Transaction = tx.into(); - let tx = tx.into_view(); - let cycles = verify_tx( - tx.clone(), - &self.swc, - Arc::clone(&self.consensus), - &self.swc.storage().get_last_state().1.into_view(), - ) - .map_err(|e| Error::invalid_params(format!("invalid transaction: {:?}", e)))?; - self.swc - .pending_txs() - .write() - .expect("pending_txs lock is poisoned") - 
.push(tx.clone(), cycles); - - Ok(tx.hash().unpack()) - } - - fn get_transaction(&self, tx_hash: H256) -> Result { - if let Some((transaction, header)) = self - .swc - .storage() - .get_transaction_with_header(&tx_hash.pack()) - { - return Ok(TransactionWithStatus { - transaction: Some(transaction.into_view().into()), - cycles: None, - tx_status: TxStatus { - block_hash: Some(header.into_view().hash().unpack()), - status: Status::Committed, - }, - }); - } - - if let Some((transaction, cycles, _)) = self - .swc - .pending_txs() - .read() - .expect("pending_txs lock is poisoned") - .get(&tx_hash.pack()) - { - return Ok(TransactionWithStatus { - transaction: Some(transaction.into_view().into()), - cycles: Some(cycles.into()), - tx_status: TxStatus { - block_hash: None, - status: Status::Pending, - }, - }); - } - - Ok(TransactionWithStatus { - transaction: None, - cycles: None, - tx_status: TxStatus { - block_hash: None, - status: Status::Unknown, - }, - }) - } - - fn fetch_transaction(&self, tx_hash: H256) -> Result> { - let tws = self.get_transaction(tx_hash.clone())?; - if tws.transaction.is_some() { - return Ok(FetchStatus::Fetched { data: tws }); - } - - let now = unix_time_as_millis(); - if let Some((added_ts, first_sent, missing)) = self.swc.get_tx_fetch_info(&tx_hash) { - if missing { - // re-fetch the transaction - self.swc.add_fetch_tx(tx_hash, now); - return Ok(FetchStatus::NotFound); - } else if first_sent > 0 { - return Ok(FetchStatus::Fetching { - first_sent: first_sent.into(), - }); - } else { - return Ok(FetchStatus::Added { - timestamp: added_ts.into(), - }); - } - } else { - self.swc.add_fetch_tx(tx_hash, now); - } - Ok(FetchStatus::Added { - timestamp: now.into(), - }) - } -} - -impl ChainRpc for ChainRpcImpl { - fn get_tip_header(&self) -> Result { - Ok(self.swc.storage().get_tip_header().into_view().into()) - } - - fn get_genesis_block(&self) -> Result { - Ok(self.swc.storage().get_genesis_block().into_view().into()) - } - - fn get_header(&self, 
block_hash: H256) -> Result> { - Ok(self.swc.get_header(&block_hash.pack()).map(Into::into)) - } - - fn fetch_header(&self, block_hash: H256) -> Result> { - if let Some(value) = self.swc.storage().get_header(&block_hash.pack()) { - return Ok(FetchStatus::Fetched { data: value.into() }); - } - let now = unix_time_as_millis(); - if let Some((added_ts, first_sent, missing)) = self.swc.get_header_fetch_info(&block_hash) { - if missing { - // re-fetch the header - self.swc.add_fetch_header(block_hash, now); - return Ok(FetchStatus::NotFound); - } else if first_sent > 0 { - return Ok(FetchStatus::Fetching { - first_sent: first_sent.into(), - }); - } else { - return Ok(FetchStatus::Added { - timestamp: added_ts.into(), - }); - } - } else { - self.swc.add_fetch_header(block_hash, now); - } - Ok(FetchStatus::Added { - timestamp: now.into(), - }) - } - - fn estimate_cycles(&self, tx: Transaction) -> Result { - let tx: packed::Transaction = tx.into(); - let tx = tx.into_view(); - let cycles = verify_tx( - tx.clone(), - &self.swc, - Arc::clone(&self.consensus), - &self.swc.storage().get_last_state().1.into_view(), - ) - .map_err(|e| Error::invalid_params(format!("invalid transaction: {:?}", e)))?; - Ok(EstimateCycles { - cycles: cycles.into(), - }) - } -} - -pub struct Service { - listen_address: String, -} - -impl Service { - pub fn new(listen_address: &str) -> Self { - Self { - listen_address: listen_address.to_string(), - } - } - - pub fn start( - &self, - network_controller: NetworkController, - storage: Storage, - peers: Arc, - pending_txs: Arc>, - consensus: Consensus, - ) -> Server { - let mut io_handler = IoHandler::new(); - let swc = StorageWithChainData::new(storage, Arc::clone(&peers), Arc::clone(&pending_txs)); - let consensus = Arc::new(consensus); - let block_filter_rpc_impl = BlockFilterRpcImpl { swc: swc.clone() }; - let chain_rpc_impl = ChainRpcImpl { - swc: swc.clone(), - consensus: Arc::clone(&consensus), - }; - let transaction_rpc_impl = TransactionRpcImpl 
{ swc, consensus }; - let net_rpc_impl = NetRpcImpl { - network_controller, - peers, - }; - io_handler.extend_with(block_filter_rpc_impl.to_delegate()); - io_handler.extend_with(chain_rpc_impl.to_delegate()); - io_handler.extend_with(transaction_rpc_impl.to_delegate()); - io_handler.extend_with(net_rpc_impl.to_delegate()); - - ServerBuilder::new(io_handler) - .cors(DomainsValidation::AllowOnly(vec![ - AccessControlAllowOrigin::Null, - AccessControlAllowOrigin::Any, - ])) - .health_api(("/ping", "ping")) - .start_http( - &self - .listen_address - .to_socket_addrs() - .expect("config listen_address parsed") - .next() - .expect("config listen_address parsed"), - ) - .expect("Start Jsonrpc HTTP service") - } -} - const MAX_PREFIX_SEARCH_SIZE: usize = u16::MAX as usize; // a helper fn to build query options from search paramters, returns prefix, from_key, direction and skip offset @@ -1015,7 +1129,7 @@ pub fn build_query_options( type_prefix: KeyPrefix, order: Order, after_cursor: Option, -) -> Result<(Vec, Vec, Direction, usize)> { +) -> Result<(Vec, Vec, GeneralDirection, usize)> { let mut prefix = match search_key.script_type { ScriptType::Lock => vec![lock_prefix as u8], ScriptType::Type => vec![type_prefix as u8], @@ -1023,7 +1137,7 @@ pub fn build_query_options( let script: packed::Script = search_key.script.clone().into(); let args_len = script.args().len(); if args_len > MAX_PREFIX_SEARCH_SIZE { - return Err(Error::invalid_params(format!( + return Err(Error::runtime(format!( "search_key.script.args len should be less than {}", MAX_PREFIX_SEARCH_SIZE ))); @@ -1032,8 +1146,8 @@ pub fn build_query_options( let (from_key, direction, skip) = match order { Order::Asc => after_cursor.map_or_else( - || (prefix.clone(), Direction::Forward, 0), - |json_bytes| (json_bytes.as_bytes().into(), Direction::Forward, 1), + || (prefix.clone(), GeneralDirection::Forward, 0), + |json_bytes| (json_bytes.as_bytes().into(), GeneralDirection::Forward, 1), ), Order::Desc => 
after_cursor.map_or_else( || { @@ -1043,18 +1157,19 @@ pub fn build_query_options( vec![0xff; MAX_PREFIX_SEARCH_SIZE - args_len], ] .concat(), - Direction::Reverse, + GeneralDirection::Reverse, 0, ) }, - |json_bytes| (json_bytes.as_bytes().into(), Direction::Reverse, 1), + |json_bytes| (json_bytes.as_bytes().into(), GeneralDirection::Reverse, 1), ), }; Ok((prefix, from_key, direction, skip)) } +use ckb_types::core; +use log::trace; -// a helper fn to build filter options from search paramters, returns prefix, output_data_len_range, output_capacity_range and block_range #[allow(clippy::type_complexity)] pub fn build_filter_options( search_key: SearchKey, @@ -1069,7 +1184,7 @@ pub fn build_filter_options( let filter_script_prefix = if let Some(script) = filter.script { let script: packed::Script = script.into(); if script.args().len() > MAX_PREFIX_SEARCH_SIZE { - return Err(Error::invalid_params(format!( + return Err(Error::runtime(format!( "search_key.filter.script.args len should be less than {}", MAX_PREFIX_SEARCH_SIZE ))); diff --git a/wasm/light-client-wasm/Cargo.toml b/wasm/light-client-wasm/Cargo.toml index 8ce967f5..71162238 100644 --- a/wasm/light-client-wasm/Cargo.toml +++ b/wasm/light-client-wasm/Cargo.toml @@ -9,7 +9,7 @@ crate-type = ["cdylib", "rlib"] [dependencies] wasm-bindgen = "0.2.63" ckb-light-client-lib = { path = "../../light-client-lib" } - +ckb-light-client-rpc = {path = "../../light-client-rpc"} # The `console_error_panic_hook` crate provides better debugging of panics by # logging them with `console.error`. 
This is great for development, but requires # all the `std::fmt` and `std::panicking` infrastructure, so isn't great for diff --git a/wasm/light-client-wasm/src/lib.rs b/wasm/light-client-wasm/src/lib.rs index 20141391..9f388249 100644 --- a/wasm/light-client-wasm/src/lib.rs +++ b/wasm/light-client-wasm/src/lib.rs @@ -8,56 +8,53 @@ use std::{ }, }; +use ckb_light_client_lib::storage::db::StorageGeneralOperations; use ckb_light_client_lib::{ error::Error, protocols::{ FilterProtocol, LightClientProtocol, Peers, PendingTxs, RelayProtocol, SyncProtocol, BAD_MESSAGE_ALLOWED_EACH_HOUR, CHECK_POINT_INTERVAL, }, - service::{ - Cell, CellType, CellsCapacity, FetchStatus, LocalNode, LocalNodeProtocol, Order, - Pagination, PeerSyncState, RemoteNode, ScriptStatus, ScriptType, SearchKey, - SetScriptsCommand, Status, TransactionWithStatus, Tx, TxStatus, TxWithCell, TxWithCells, - }, - storage::{ - self, extract_raw_data, CursorDirection, Key, KeyPrefix, Storage, StorageWithChainData, - LAST_STATE_KEY, - }, + service::{Order, ScriptStatus, SearchKey, SetScriptsCommand}, + storage::{Storage, StorageWithChainData}, types::RunEnv, - verify::verify_tx, +}; +use ckb_light_client_rpc::{ + BlockFilterRpcImpl, BlockFilterRpcMethods, ChainRpcImpl, ChainRpcMethods, NetRpcImpl, + NetRpcMethods, TransactionRpcImpl, TransactionRpcMethods, }; use log::debug; use serde::{Deserialize, Serialize}; use serde_wasm_bindgen::Serializer; use wasm_bindgen::prelude::*; -use ckb_chain_spec::{consensus::Consensus, ChainSpec}; +use ckb_chain_spec::ChainSpec; use ckb_jsonrpc_types::{JsonBytes, Transaction}; use ckb_network::{ - extract_peer_id, network::TransportType, CKBProtocol, CKBProtocolHandler, Flags, - NetworkController, NetworkService, NetworkState, SupportProtocols, + network::TransportType, CKBProtocol, CKBProtocolHandler, Flags, NetworkService, NetworkState, + SupportProtocols, }; use ckb_resource::Resource; use ckb_stop_handler::broadcast_exit_signals; -use ckb_systemtime::{unix_time_as_millis, 
Instant}; -use ckb_types::{core, packed, prelude::*, H256}; +use ckb_types::H256; +use ckb_light_client_lib::storage::db::StorageBatchRelatedOperations; use std::sync::OnceLock; - static MAINNET_CONFIG: &str = include_str!("../../../config/mainnet.toml"); static TESTNET_CONFIG: &str = include_str!("../../../config/testnet.toml"); -static STORAGE_WITH_DATA: OnceLock = OnceLock::new(); - -static NET_CONTROL: OnceLock = OnceLock::new(); - -static CONSENSUS: OnceLock> = OnceLock::new(); +static STORAGE_WITH_DATA: OnceLock> = OnceLock::new(); static SERIALIZER: Serializer = Serializer::new() .serialize_large_number_types_as_bigints(true) .serialize_maps_as_objects(true); +static BLOCK_FILTER_RPC: OnceLock> = OnceLock::new(); +static TRANSACTION_RPC: OnceLock> = OnceLock::new(); +static CHAIN_RPC: OnceLock> = OnceLock::new(); +static NET_RPC: OnceLock = OnceLock::new(); + /// 0b0 init /// 0b1 start /// 0b10 stop @@ -248,10 +245,26 @@ pub async fn light_client( .unwrap(); let storage_with_data = StorageWithChainData::new(storage, peers, pending_txs); + let consensus = Arc::new(consensus); - STORAGE_WITH_DATA.get_or_init(|| storage_with_data); - NET_CONTROL.get_or_init(|| network_controller); - CONSENSUS.get_or_init(|| Arc::new(consensus)); + STORAGE_WITH_DATA.get_or_init(|| storage_with_data.clone()); + // NET_CONTROL.get_or_init(|| network_controller); + // CONSENSUS.get_or_init(|| consensus.clone()); + BLOCK_FILTER_RPC.get_or_init(|| BlockFilterRpcImpl { + swc: storage_with_data.clone(), + }); + TRANSACTION_RPC.get_or_init(|| TransactionRpcImpl { + swc: storage_with_data.clone(), + consensus: consensus.clone(), + }); + CHAIN_RPC.get_or_init(|| ChainRpcImpl { + swc: storage_with_data.clone(), + consensus: consensus.clone(), + }); + NET_RPC.get_or_init(|| NetRpcImpl { + network_controller, + peers: storage_with_data.peers().clone(), + }); change_status(0b1); Ok(()) } @@ -263,22 +276,17 @@ pub fn stop() { change_status(0b10); } -use ckb_types::prelude::IntoHeaderView; - 
#[wasm_bindgen] pub fn get_tip_header() -> Result { if !status(0b1) { return Err(JsValue::from_str("light client not on start state")); } - Ok(Into::::into( - STORAGE_WITH_DATA - .get() - .unwrap() - .storage() - .get_tip_header() - .into_view(), - ) - .serialize(&SERIALIZER)?) + Ok(CHAIN_RPC + .get() + .unwrap() + .get_tip_header() + .map_err(|e| JsValue::from_str(&format!("Unable to get tip header: {:?}", e)))? + .serialize(&SERIALIZER)?) } #[wasm_bindgen] @@ -286,15 +294,12 @@ pub fn get_genesis_block() -> Result { if !status(0b1) { return Err(JsValue::from_str("light client not on start state")); } - Ok(Into::::into( - STORAGE_WITH_DATA - .get() - .unwrap() - .storage() - .get_genesis_block() - .into_view(), - ) - .serialize(&SERIALIZER)?) + Ok(CHAIN_RPC + .get() + .unwrap() + .get_genesis_block() + .map_err(|e| JsValue::from_str(&format!("Unable to get genesis block: {:?}", e)))? + .serialize(&SERIALIZER)?) } #[wasm_bindgen] @@ -303,9 +308,11 @@ pub fn get_header(hash: &str) -> Result { return Err(JsValue::from_str("light client not on start state")); } let block_hash = H256::from_str(&hash[2..]).map_err(|e| JsValue::from_str(&e.to_string()))?; - let swc = STORAGE_WITH_DATA.get().unwrap(); - let header_view: Option = - swc.storage().get_header(&block_hash.pack()).map(Into::into); + let header_view = CHAIN_RPC + .get() + .unwrap() + .get_header(block_hash) + .map_err(|e| JsValue::from_str(&format!("Unable to get header: {}", e)))?; Ok(header_view.serialize(&SERIALIZER)?) 
} @@ -317,41 +324,13 @@ pub fn fetch_header(hash: &str) -> Result { } let block_hash = H256::from_str(&hash[2..]).map_err(|e| JsValue::from_str(&e.to_string()))?; - let swc = STORAGE_WITH_DATA.get().unwrap(); - - if let Some(value) = swc.storage().get_header(&block_hash.pack()) { - return Ok( - FetchStatus::::Fetched { data: value.into() } - .serialize(&SERIALIZER)?, - ); - } - let now = unix_time_as_millis(); - if let Some((added_ts, first_sent, missing)) = swc.get_header_fetch_info(&block_hash) { - if missing { - // re-fetch the header - swc.add_fetch_header(block_hash, now); - return Ok( - FetchStatus::::NotFound.serialize(&SERIALIZER)? - ); - } else if first_sent > 0 { - return Ok(FetchStatus::::Fetching { - first_sent: first_sent.into(), - } - .serialize(&SERIALIZER)?); - } else { - return Ok(FetchStatus::::Added { - timestamp: added_ts.into(), - } - .serialize(&SERIALIZER)?); - } - } else { - swc.add_fetch_header(block_hash, now); - } - Ok(FetchStatus::::Added { - timestamp: now.into(), - } - .serialize(&SERIALIZER)?) + let result = CHAIN_RPC + .get() + .unwrap() + .fetch_header(block_hash) + .map_err(|e| JsValue::from_str(&e.to_string()))?; + Ok(result.serialize(&SERIALIZER)?) } #[wasm_bindgen] @@ -361,58 +340,26 @@ pub fn estimate_cycles(tx: JsValue) -> Result { } let tx: Transaction = serde_wasm_bindgen::from_value(tx)?; - let tx: packed::Transaction = tx.into(); - let tx = tx.into_view(); - - let swc = STORAGE_WITH_DATA.get().unwrap(); - let consensus = CONSENSUS.get().unwrap(); - let cycles = verify_tx( - tx.clone(), - swc, - Arc::clone(consensus), - &swc.storage().get_last_state().1.into_view(), - ) - .map_err(|e| JsValue::from_str(&format!("invalid transaction: {:?}", e)))?; - Ok(ckb_jsonrpc_types::EstimateCycles { - cycles: cycles.into(), - } - .serialize(&SERIALIZER)?) + let result = CHAIN_RPC + .get() + .unwrap() + .estimate_cycles(tx) + .map_err(|e| JsValue::from_str(&e.to_string()))?; + Ok(result.serialize(&SERIALIZER)?) 
} -const MAX_ADDRS: usize = 50; - #[wasm_bindgen] pub fn local_node_info() -> Result { if !status(0b1) { return Err(JsValue::from_str("light client not on start state")); } - - let network_controller = NET_CONTROL.get().unwrap(); - Ok(LocalNode { - version: network_controller.version().to_owned(), - node_id: network_controller.node_id(), - active: network_controller.is_active(), - addresses: network_controller - .public_urls(MAX_ADDRS) - .into_iter() - .map(|(address, score)| ckb_jsonrpc_types::NodeAddress { - address, - score: u64::from(score).into(), - }) - .collect(), - protocols: network_controller - .protocols() - .into_iter() - .map(|(protocol_id, name, support_versions)| LocalNodeProtocol { - id: (protocol_id.value() as u64).into(), - name, - support_versions, - }) - .collect::>(), - connections: (network_controller.connected_peers().len() as u64).into(), - } - .serialize(&SERIALIZER)?) + Ok(NET_RPC + .get() + .unwrap() + .local_node_info() + .map_err(|e| JsValue::from_str(&e.to_string()))? + .serialize(&SERIALIZER)?) 
} #[wasm_bindgen] @@ -420,70 +367,12 @@ pub fn get_peers() -> Result { if !status(0b1) { return Err(JsValue::from_str("light client not on start state")); } - - let network_controller = NET_CONTROL.get().unwrap(); - let swc = STORAGE_WITH_DATA.get().unwrap(); - let peers: Vec = network_controller - .connected_peers() - .iter() - .map(|(peer_index, peer)| { - let mut addresses = vec![&peer.connected_addr]; - addresses.extend(peer.listened_addrs.iter()); - - let node_addresses = addresses - .iter() - .map(|addr| { - let score = network_controller - .addr_info(addr) - .map(|addr_info| addr_info.score) - .unwrap_or(1); - let non_negative_score = if score > 0 { score as u64 } else { 0 }; - ckb_jsonrpc_types::NodeAddress { - address: addr.to_string(), - score: non_negative_score.into(), - } - }) - .collect(); - - RemoteNode { - version: peer - .identify_info - .as_ref() - .map(|info| info.client_version.clone()) - .unwrap_or_else(|| "unknown".to_string()), - node_id: extract_peer_id(&peer.connected_addr) - .map(|peer_id| peer_id.to_base58()) - .unwrap_or_default(), - addresses: node_addresses, - connected_duration: (Instant::now() - .saturating_duration_since(peer.connected_time) - .as_millis() as u64) - .into(), - sync_state: swc - .peers() - .get_state(peer_index) - .map(|state| PeerSyncState { - requested_best_known_header: state - .get_prove_request() - .map(|request| request.get_last_header().header().to_owned().into()), - proved_best_known_header: state - .get_prove_state() - .map(|request| request.get_last_header().header().to_owned().into()), - }), - protocols: peer - .protocols - .iter() - .map( - |(protocol_id, protocol_version)| ckb_jsonrpc_types::RemoteNodeProtocol { - id: (protocol_id.value() as u64).into(), - version: protocol_version.clone(), - }, - ) - .collect(), - } - }) - .collect(); - Ok(peers.serialize(&SERIALIZER)?) + Ok(NET_RPC + .get() + .unwrap() + .get_peers() + .map_err(|e| JsValue::from_str(&e.to_string()))? + .serialize(&SERIALIZER)?) 
} #[wasm_bindgen] @@ -494,26 +383,17 @@ pub fn set_scripts( if !status(0b1) { return Err(JsValue::from_str("light client not on start state")); } - let mut matched_blocks = STORAGE_WITH_DATA - .get() - .unwrap() - .matched_blocks() - .blocking_write(); let scripts: Vec = scripts .into_iter() .map(serde_wasm_bindgen::from_value::) .collect::, _>>()?; - debug!("Update scripts, {:?}, {:?}", scripts, command); - STORAGE_WITH_DATA + BLOCK_FILTER_RPC .get() .unwrap() - .storage() - .update_filter_scripts( - scripts.into_iter().map(Into::into).collect(), - command.map(Into::into).unwrap_or_default(), - ); - matched_blocks.clear(); + .set_scripts(scripts, command) + .map_err(|e| JsValue::from_str(&e.to_string()))?; + Ok(()) } @@ -522,15 +402,14 @@ pub fn get_scripts() -> Result, JsValue> { if !status(0b1) { return Err(JsValue::from_str("light client not on start state")); } - let scripts = STORAGE_WITH_DATA + let result = BLOCK_FILTER_RPC .get() .unwrap() - .storage() - .get_filter_scripts(); + .get_scripts() + .map_err(|e| JsValue::from_str(&e.to_string()))?; - Ok(scripts + Ok(result .into_iter() - .map(Into::into) .map(|v: ScriptStatus| v.serialize(&SERIALIZER)) .collect::, _>>()?) 
} @@ -550,203 +429,17 @@ pub fn get_cells( search_key, order, limit, after_cursor ); let search_key: SearchKey = serde_wasm_bindgen::from_value(search_key)?; - - let (prefix, from_key, direction, skip) = build_query_options( - &search_key, - KeyPrefix::CellLockScript, - KeyPrefix::CellTypeScript, - order, - after_cursor.map(JsonBytes::from_vec), - )?; - - let limit = limit as usize; - if limit == 0 { - return Err(JsValue::from_str("limit should be greater than 0")); - } - let with_data = search_key.with_data.unwrap_or(true); - let filter_script_type = match search_key.script_type { - ScriptType::Lock => ScriptType::Type, - ScriptType::Type => ScriptType::Lock, - }; - - let ( - filter_prefix, - filter_script_len_range, - filter_output_data_len_range, - filter_output_capacity_range, - filter_block_range, - ) = build_filter_options(search_key)?; - - fn extract_data_from_key(key: &[u8]) -> (u32, u32, u64) { - let output_index = u32::from_be_bytes( - key[key.len() - 4..] - .try_into() - .expect("stored output_index"), - ); - let tx_index = u32::from_be_bytes( - key[key.len() - 8..key.len() - 4] - .try_into() - .expect("stored tx_index"), - ); - let block_number = u64::from_be_bytes( - key[key.len() - 16..key.len() - 8] - .try_into() - .expect("stored block_number"), - ); - (output_index, tx_index, block_number) - } - - let storage = STORAGE_WITH_DATA.get().unwrap().storage(); - let kvs: Vec<_> = storage.collect_iterator( - from_key.clone(), - direction, - Box::new(move |key| key.starts_with(&prefix)), - Box::new(move |key| { - let value = storage.get(key).unwrap().unwrap(); - - debug!("get cells iterator at {:?} {:?}", key, value); - let tx_hash = packed::Byte32::from_slice(&value).expect("stored tx hash"); - let (output_index, _tx_index, block_number) = extract_data_from_key(key); - let tx_data = &storage - .get(Key::TxHash(&tx_hash).into_vec()) - .unwrap() - .expect("stored tx")[12..]; - debug!("tx hash = {:?}, tx data = {:?}", tx_hash, tx_data); - let tx = 
packed::Transaction::from_slice(tx_data) - .expect("from stored tx slice should be OK"); - let output = tx - .raw() - .outputs() - .get(output_index as usize) - .expect("get output by index should be OK"); - let output_data = tx - .raw() - .outputs_data() - .get(output_index as usize) - .expect("get output data by index should be OK"); - - if let Some(prefix) = filter_prefix.as_ref() { - match filter_script_type { - ScriptType::Lock => { - if !extract_raw_data(&output.lock()) - .as_slice() - .starts_with(prefix) - { - debug!("skipped at {}", line!()); - return None; - } - } - ScriptType::Type => { - if output.type_().is_none() - || !extract_raw_data(&output.type_().to_opt().unwrap()) - .as_slice() - .starts_with(prefix) - { - debug!("skipped at {}", line!()); - return None; - } - } - } - } - - if let Some([r0, r1]) = filter_script_len_range { - match filter_script_type { - ScriptType::Lock => { - let script_len = extract_raw_data(&output.lock()).len(); - if script_len < r0 || script_len > r1 { - debug!("skipped at {}", line!()); - return None; - } - } - ScriptType::Type => { - let script_len = output - .type_() - .to_opt() - .map(|script| extract_raw_data(&script).len()) - .unwrap_or_default(); - if script_len < r0 || script_len > r1 { - debug!("skipped at {}", line!()); - return None; - } - } - } - } - - if let Some([r0, r1]) = filter_output_data_len_range { - if output_data.len() < r0 || output_data.len() >= r1 { - debug!("skipped at {}", line!()); - return None; - } - } - - if let Some([r0, r1]) = filter_output_capacity_range { - let capacity: core::Capacity = output.capacity().unpack(); - if capacity < r0 || capacity >= r1 { - debug!("skipped at {}", line!()); - return None; - } - } - - if let Some([r0, r1]) = filter_block_range { - if block_number < r0 || block_number >= r1 { - debug!("skipped at {}", line!()); - return None; - } - } - Some(key.to_vec()) - }), - limit, - skip, - ); - debug!("get_cells: collect_iterator done"); - let mut cells = Vec::new(); - 
let mut last_key = Vec::new(); - for (key, value) in kvs.into_iter().map(|kv| (kv.key, kv.value)) { - let tx_hash = packed::Byte32::from_slice(&value).expect("stored tx hash"); - let (output_index, tx_index, block_number) = extract_data_from_key(&key); - let tx = packed::Transaction::from_slice( - &storage - .get(Key::TxHash(&tx_hash).into_vec()) - .unwrap() - .expect("stored tx")[12..], + let result = BLOCK_FILTER_RPC + .get() + .unwrap() + .get_cells( + search_key, + order, + limit.into(), + after_cursor.map(JsonBytes::from_vec), ) - .expect("from stored tx slice should be OK"); - let output = tx - .raw() - .outputs() - .get(output_index as usize) - .expect("get output by index should be OK"); - let output_data = tx - .raw() - .outputs_data() - .get(output_index as usize) - .expect("get output data by index should be OK"); - - last_key = key.to_vec(); - let cell_to_push = Cell { - output: output.into(), - output_data: if with_data { - Some(output_data.into()) - } else { - None - }, - out_point: packed::OutPoint::new(tx_hash, output_index).into(), - block_number: block_number.into(), - tx_index: tx_index.into(), - }; - debug!("pushed cell {:#?}", cell_to_push); - cells.push(cell_to_push); - if cells.len() >= limit { - break; - } - } - debug!("get_cells last_key={:?}", last_key); - - Ok((Pagination { - objects: cells, - last_cursor: JsonBytes::from_vec(last_key), - }) - .serialize(&SERIALIZER)?) + .map_err(|e| JsValue::from_str(&e.to_string()))?; + Ok(result.serialize(&SERIALIZER)?) 
} #[wasm_bindgen] pub fn get_transactions( @@ -764,327 +457,18 @@ pub fn get_transactions( ); let search_key: SearchKey = serde_wasm_bindgen::from_value(search_key)?; - let (prefix, from_key, direction, skip) = build_query_options( - &search_key, - KeyPrefix::TxLockScript, - KeyPrefix::TxTypeScript, - order, - after_cursor.map(JsonBytes::from_vec), - )?; - - let limit = limit as usize; - if limit == 0 { - return Err(JsValue::from_str("limit should be greater than 0")); - } - - let filter_script_type = match search_key.script_type { - ScriptType::Lock => ScriptType::Type, - ScriptType::Type => ScriptType::Lock, - }; - - let (filter_script, filter_block_range) = if let Some(filter) = search_key.filter.as_ref() { - if filter.output_data_len_range.is_some() { - return Err(JsValue::from_str( - "doesn't support search_key.filter.output_data_len_range parameter", - )); - } - if filter.output_capacity_range.is_some() { - return Err(JsValue::from_str( - "doesn't support search_key.filter.output_capacity_range parameter", - )); - } - let filter_script: Option = - filter.script.as_ref().map(|script| script.clone().into()); - let filter_block_range: Option<[core::BlockNumber; 2]> = - filter.block_range.map(|r| [r[0].into(), r[1].into()]); - (filter_script, filter_block_range) - } else { - (None, None) - }; - - let storage = STORAGE_WITH_DATA.get().unwrap().storage(); - - if search_key.group_by_transaction.unwrap_or_default() { - let prefix_cloned = prefix.clone(); - let mut kvs: Vec<_> = storage.collect_iterator( - from_key, - direction, - Box::new(move |key| key.starts_with(&prefix_cloned)), - Box::new(|key| Some(key.to_vec())), - 100, - skip, - ); - let mut tx_with_cells: Vec = Vec::new(); - let mut last_key = Vec::new(); - - 'outer: while !kvs.is_empty() { - for (key, value) in kvs.into_iter().map(|kv| (kv.key, kv.value)) { - let tx_hash = packed::Byte32::from_slice(&value).expect("stored tx hash"); - if tx_with_cells.len() == limit - && 
tx_with_cells.last_mut().unwrap().transaction.hash != tx_hash.unpack() - { - break 'outer; - } - last_key = key.to_vec(); - let tx = packed::Transaction::from_slice( - &storage - .get(Key::TxHash(&tx_hash).into_vec()) - .expect("get tx should be OK") - .expect("stored tx")[12..], - ) - .expect("from stored tx slice should be OK"); - - let block_number = u64::from_be_bytes( - key[key.len() - 17..key.len() - 9] - .try_into() - .expect("stored block_number"), - ); - let tx_index = u32::from_be_bytes( - key[key.len() - 9..key.len() - 5] - .try_into() - .expect("stored tx_index"), - ); - let io_index = u32::from_be_bytes( - key[key.len() - 5..key.len() - 1] - .try_into() - .expect("stored io_index"), - ); - let io_type = if *key.last().expect("stored io_type") == 0 { - CellType::Input - } else { - CellType::Output - }; - if let Some(filter_script) = filter_script.as_ref() { - let filter_script_matched = match filter_script_type { - ScriptType::Lock => storage - .get( - Key::TxLockScript( - filter_script, - block_number, - tx_index, - io_index, - match io_type { - CellType::Input => storage::CellType::Input, - CellType::Output => storage::CellType::Output, - }, - ) - .into_vec(), - ) - .expect("get TxLockScript should be OK") - .is_some(), - ScriptType::Type => storage - .get( - Key::TxTypeScript( - filter_script, - block_number, - tx_index, - io_index, - match io_type { - CellType::Input => storage::CellType::Input, - CellType::Output => storage::CellType::Output, - }, - ) - .into_vec(), - ) - .expect("get TxTypeScript should be OK") - .is_some(), - }; - - if !filter_script_matched { - continue; - } - } - - if let Some([r0, r1]) = filter_block_range { - if block_number < r0 || block_number >= r1 { - continue; - } - } - - let last_tx_hash_is_same = tx_with_cells - .last_mut() - .map(|last| { - if last.transaction.hash == tx_hash.unpack() { - last.cells.push((io_type.clone(), io_index.into())); - true - } else { - false - } - }) - .unwrap_or_default(); - - if 
!last_tx_hash_is_same { - tx_with_cells.push(TxWithCells { - transaction: tx.into_view().into(), - block_number: block_number.into(), - tx_index: tx_index.into(), - cells: vec![(io_type, io_index.into())], - }); - } - } - let prefix_cloned = prefix.clone(); - kvs = storage.collect_iterator( - last_key.clone(), - direction, - Box::new(move |key| key.starts_with(&prefix_cloned)), - Box::new(|key| Some(key.to_vec())), - 100, - 1, - ); - } - Ok((Pagination { - objects: tx_with_cells.into_iter().map(Tx::Grouped).collect(), - last_cursor: JsonBytes::from_vec(last_key), - }) - .serialize(&SERIALIZER)?) - } else { - let kvs: Vec<_> = storage.collect_iterator( - from_key.clone(), - direction, - Box::new(move |key| key.starts_with(&prefix)), - Box::new(move |key| { - let value = storage.get(key).unwrap().unwrap(); - debug!("get transactions iterator at {:?} {:?}", key, value); - - let block_number = u64::from_be_bytes( - key[key.len() - 17..key.len() - 9] - .try_into() - .expect("stored block_number"), - ); - let tx_index = u32::from_be_bytes( - key[key.len() - 9..key.len() - 5] - .try_into() - .expect("stored tx_index"), - ); - let io_index = u32::from_be_bytes( - key[key.len() - 5..key.len() - 1] - .try_into() - .expect("stored io_index"), - ); - let io_type = if *key.last().expect("stored io_type") == 0 { - CellType::Input - } else { - CellType::Output - }; - - if let Some(filter_script) = filter_script.as_ref() { - match filter_script_type { - ScriptType::Lock => { - if storage - .get( - Key::TxLockScript( - filter_script, - block_number, - tx_index, - io_index, - match io_type { - CellType::Input => storage::CellType::Input, - CellType::Output => storage::CellType::Output, - }, - ) - .into_vec(), - ) - .expect("get TxLockScript should be OK") - .is_none() - { - debug!("skipped at {}", line!()); - return None; - }; - } - ScriptType::Type => { - if storage - .get( - Key::TxTypeScript( - filter_script, - block_number, - tx_index, - io_index, - match io_type { - 
CellType::Input => storage::CellType::Input, - CellType::Output => storage::CellType::Output, - }, - ) - .into_vec(), - ) - .expect("get TxTypeScript should be OK") - .is_none() - { - debug!("skipped at {}", line!()); - return None; - }; - } - } - } - - if let Some([r0, r1]) = filter_block_range { - if block_number < r0 || block_number >= r1 { - debug!("skipped at {}", line!()); - return None; - } - } - - Some(key.to_vec()) - }), - limit, - skip, - ); - - let mut last_key = Vec::new(); - let mut txs = Vec::new(); - - for (key, value) in kvs.into_iter().map(|kv| (kv.key, kv.value)) { - let tx_hash = packed::Byte32::from_slice(&value).expect("stored tx hash"); - let tx = packed::Transaction::from_slice( - &storage - .get(Key::TxHash(&tx_hash).into_vec()) - .expect("get tx should be OK") - .expect("stored tx")[12..], - ) - .expect("from stored tx slice should be OK"); - - let block_number = u64::from_be_bytes( - key[key.len() - 17..key.len() - 9] - .try_into() - .expect("stored block_number"), - ); - let tx_index = u32::from_be_bytes( - key[key.len() - 9..key.len() - 5] - .try_into() - .expect("stored tx_index"), - ); - let io_index = u32::from_be_bytes( - key[key.len() - 5..key.len() - 1] - .try_into() - .expect("stored io_index"), - ); - let io_type = if *key.last().expect("stored io_type") == 0 { - CellType::Input - } else { - CellType::Output - }; - - last_key = key.to_vec(); - let tx_to_push = Tx::Ungrouped(TxWithCell { - transaction: tx.into_view().into(), - block_number: block_number.into(), - tx_index: tx_index.into(), - io_index: io_index.into(), - io_type, - }); - txs.push(tx_to_push); - if txs.len() >= limit { - break; - } - } - debug!("get_transactions last_key={:?}", last_key); - - Ok((Pagination { - objects: txs, - last_cursor: JsonBytes::from_vec(last_key), - }) - .serialize(&SERIALIZER)?) 
- } + let result = BLOCK_FILTER_RPC + .get() + .unwrap() + .get_transactions( + search_key, + order, + limit.into(), + after_cursor.map(JsonBytes::from_vec), + ) + .map_err(|e| JsValue::from_str(&e.to_string()))?; + Ok(result.serialize(&SERIALIZER)?) } #[wasm_bindgen] pub fn get_cells_capacity(search_key: JsValue) -> Result { @@ -1093,179 +477,13 @@ pub fn get_cells_capacity(search_key: JsValue) -> Result { } let search_key: SearchKey = serde_wasm_bindgen::from_value(search_key)?; - debug!("Call get_cells_capacity: {:?}", search_key); - let (prefix, from_key, direction, skip) = build_query_options( - &search_key, - KeyPrefix::CellLockScript, - KeyPrefix::CellTypeScript, - Order::Asc, - None, - )?; - let filter_script_type = match search_key.script_type { - ScriptType::Lock => ScriptType::Type, - ScriptType::Type => ScriptType::Lock, - }; - let ( - filter_prefix, - filter_script_len_range, - filter_output_data_len_range, - filter_output_capacity_range, - filter_block_range, - ) = build_filter_options(search_key)?; - - let storage = STORAGE_WITH_DATA.get().unwrap().storage(); - log::trace!("get_cells_capacity: before entering collect iterator"); - - let kvs: Vec<_> = storage.collect_iterator( - from_key, - direction, - Box::new(move |key| key.starts_with(&prefix)), - Box::new(move |key| { - log::trace!("At key {:?}", key); - let value = storage.get(key).unwrap().unwrap(); - - let tx_hash = packed::Byte32::from_slice(&value).expect("stored tx hash"); - let output_index = u32::from_be_bytes( - key[key.len() - 4..] 
- .try_into() - .expect("stored output_index"), - ); - let block_number = u64::from_be_bytes( - key[key.len() - 16..key.len() - 8] - .try_into() - .expect("stored block_number"), - ); - - let tx = packed::Transaction::from_slice( - &storage - .get(Key::TxHash(&tx_hash).into_vec()) - .expect("get tx should be OK") - .expect("stored tx")[12..], - ) - .expect("from stored tx slice should be OK"); - let output = tx - .raw() - .outputs() - .get(output_index as usize) - .expect("get output by index should be OK"); - let output_data = tx - .raw() - .outputs_data() - .get(output_index as usize) - .expect("get output data by index should be OK"); - - if let Some(prefix) = filter_prefix.as_ref() { - match filter_script_type { - ScriptType::Lock => { - if !extract_raw_data(&output.lock()) - .as_slice() - .starts_with(prefix) - { - log::trace!("break at {}", line!()); - return None; - } - } - ScriptType::Type => { - if output.type_().is_none() - || !extract_raw_data(&output.type_().to_opt().unwrap()) - .as_slice() - .starts_with(prefix) - { - log::trace!("break at {}", line!()); - return None; - } - } - } - } - - if let Some([r0, r1]) = filter_script_len_range { - match filter_script_type { - ScriptType::Lock => { - let script_len = extract_raw_data(&output.lock()).len(); - if script_len < r0 || script_len > r1 { - log::trace!("break at {}", line!()); - return None; - } - } - ScriptType::Type => { - let script_len = output - .type_() - .to_opt() - .map(|script| extract_raw_data(&script).len()) - .unwrap_or_default(); - if script_len < r0 || script_len > r1 { - log::trace!("break at {}", line!()); - return None; - } - } - } - } - - if let Some([r0, r1]) = filter_output_data_len_range { - if output_data.len() < r0 || output_data.len() >= r1 { - log::trace!("break at {}", line!()); - return None; - } - } - - if let Some([r0, r1]) = filter_output_capacity_range { - let capacity: core::Capacity = output.capacity().unpack(); - if capacity < r0 || capacity >= r1 { - 
log::trace!("break at {}", line!()); - return None; - } - } - - if let Some([r0, r1]) = filter_block_range { - if block_number < r0 || block_number >= r1 { - log::trace!("break at {}", line!()); - return None; - } - } - log::trace!("Returning normally at {:?}", key); - Some(key.to_vec()) - }), - usize::MAX, - skip, - ); - - let mut capacity = 0; - for (key, value) in kvs.into_iter().map(|kv| (kv.key, kv.value)) { - let tx_hash = packed::Byte32::from_slice(&value).expect("stored tx hash"); - let output_index = u32::from_be_bytes( - key[key.len() - 4..] - .try_into() - .expect("stored output_index"), - ); - - let tx = packed::Transaction::from_slice( - &storage - .get(Key::TxHash(&tx_hash).into_vec()) - .expect("get tx should be OK") - .expect("stored tx")[12..], - ) - .expect("from stored tx slice should be OK"); - let output = tx - .raw() - .outputs() - .get(output_index as usize) - .expect("get output by index should be OK"); - capacity += Unpack::::unpack(&output.capacity()).as_u64() - } - let key = Key::Meta(LAST_STATE_KEY).into_vec(); - let tip_header = storage - .get(key) - .expect("snapshot get last state should be ok") - .map(|data| packed::HeaderReader::from_slice_should_be_ok(&data[32..]).to_entity()) - .expect("tip header should be inited"); - log::trace!("Get cells capacity done"); - Ok((CellsCapacity { - capacity: capacity.into(), - block_hash: tip_header.calc_header_hash().unpack(), - block_number: tip_header.raw().number().unpack(), - }) - .serialize(&SERIALIZER)?) + let result = BLOCK_FILTER_RPC + .get() + .unwrap() + .get_cells_capacity(search_key) + .map_err(|e| JsValue::from_str(&e.to_string()))?; + Ok(result.serialize(&SERIALIZER)?) 
} #[wasm_bindgen] @@ -1274,22 +492,14 @@ pub fn send_transaction(tx: JsValue) -> Result, JsValue> { return Err(JsValue::from_str("light client not on start state")); } let tx: Transaction = serde_wasm_bindgen::from_value(tx)?; - let tx: packed::Transaction = tx.into(); - let tx = tx.into_view(); - let swc = STORAGE_WITH_DATA.get().unwrap(); - let consensus = CONSENSUS.get().unwrap(); - - let cycles = verify_tx( - tx.clone(), - swc, - Arc::clone(consensus), - &swc.storage().get_last_state().1.into_view(), - ) - .map_err(|e| JsValue::from_str(&format!("invalid transaction: {:?}", e)))?; - swc.pending_txs().blocking_write().push(tx.clone(), cycles); + let result = TRANSACTION_RPC + .get() + .unwrap() + .send_transaction(tx) + .map_err(|e| JsValue::from_str(&e.to_string()))?; - Ok(Unpack::::unpack(&tx.hash()).0.to_vec()) + Ok(result.0.to_vec()) } #[wasm_bindgen] @@ -1298,42 +508,13 @@ pub fn get_transaction(tx_hash: &str) -> Result { return Err(JsValue::from_str("light client not on start state")); } let tx_hash = H256::from_str(&tx_hash[2..]).map_err(|e| JsValue::from_str(&e.to_string()))?; - let swc = STORAGE_WITH_DATA.get().unwrap(); - - if let Some((transaction, header)) = swc.storage().get_transaction_with_header(&tx_hash.pack()) - { - return Ok((TransactionWithStatus { - transaction: Some(transaction.into_view().into()), - cycles: None, - tx_status: TxStatus { - block_hash: Some(header.into_view().hash().unpack()), - status: Status::Committed, - }, - }) - .serialize(&SERIALIZER)?); - } - if let Some((transaction, cycles, _)) = swc.pending_txs().blocking_read().get(&tx_hash.pack()) { - return Ok((TransactionWithStatus { - transaction: Some(transaction.into_view().into()), - cycles: Some(cycles.into()), - tx_status: TxStatus { - block_hash: None, - status: Status::Pending, - }, - }) - .serialize(&SERIALIZER)?); - } - - Ok((TransactionWithStatus { - transaction: None, - cycles: None, - tx_status: TxStatus { - block_hash: None, - status: Status::Unknown, - }, - }) - 
.serialize(&SERIALIZER)?) + let result = TRANSACTION_RPC + .get() + .unwrap() + .get_transaction(tx_hash) + .map_err(|e| JsValue::from_str(&e.to_string()))?; + Ok(result.serialize(&SERIALIZER)?) } #[wasm_bindgen] @@ -1341,144 +522,11 @@ pub fn fetch_transaction(tx_hash: &str) -> Result { if !status(0b1) { return Err(JsValue::from_str("light client not on start state")); } - - let tws = get_transaction(tx_hash)?; - let tws: TransactionWithStatus = serde_wasm_bindgen::from_value(tws)?; - if tws.transaction.is_some() { - return Ok((FetchStatus::Fetched { data: tws }).serialize(&SERIALIZER)?); - } let tx_hash = H256::from_str(&tx_hash[2..]).map_err(|e| JsValue::from_str(&e.to_string()))?; - let swc = STORAGE_WITH_DATA.get().unwrap(); - - let now = unix_time_as_millis(); - if let Some((added_ts, first_sent, missing)) = swc.get_tx_fetch_info(&tx_hash) { - if missing { - // re-fetch the transaction - swc.add_fetch_tx(tx_hash, now); - return Ok((FetchStatus::::NotFound).serialize(&SERIALIZER)?); - } else if first_sent > 0 { - return Ok((FetchStatus::::Fetching { - first_sent: first_sent.into(), - }) - .serialize(&SERIALIZER)?); - } else { - return Ok((FetchStatus::::Added { - timestamp: added_ts.into(), - }) - .serialize(&SERIALIZER)?); - } - } else { - swc.add_fetch_tx(tx_hash, now); - } - Ok((FetchStatus::::Added { - timestamp: now.into(), - }) - .serialize(&SERIALIZER)?) 
-} - -const MAX_PREFIX_SEARCH_SIZE: usize = u16::MAX as usize; - -// a helper fn to build query options from search paramters, returns prefix, from_key, direction and skip offset -pub fn build_query_options( - search_key: &SearchKey, - lock_prefix: KeyPrefix, - type_prefix: KeyPrefix, - order: Order, - after_cursor: Option, -) -> Result<(Vec, Vec, CursorDirection, usize), JsValue> { - let mut prefix = match search_key.script_type { - ScriptType::Lock => vec![lock_prefix as u8], - ScriptType::Type => vec![type_prefix as u8], - }; - let script: packed::Script = search_key.script.clone().into(); - let args_len = script.args().len(); - if args_len > MAX_PREFIX_SEARCH_SIZE { - return Err(JsValue::from_str(&format!( - "search_key.script.args len should be less than {}", - MAX_PREFIX_SEARCH_SIZE - ))); - } - prefix.extend_from_slice(extract_raw_data(&script).as_slice()); - - let (from_key, direction, skip) = match order { - Order::Asc => after_cursor.map_or_else( - || (prefix.clone(), CursorDirection::NextUnique, 0), - |json_bytes| (json_bytes.as_bytes().into(), CursorDirection::NextUnique, 1), - ), - Order::Desc => after_cursor.map_or_else( - || { - ( - [ - prefix.clone(), - vec![0xff; MAX_PREFIX_SEARCH_SIZE - args_len], - ] - .concat(), - CursorDirection::PrevUnique, - 0, - ) - }, - |json_bytes| (json_bytes.as_bytes().into(), CursorDirection::PrevUnique, 1), - ), - }; - - Ok((prefix, from_key, direction, skip)) -} - -#[allow(clippy::type_complexity)] -pub fn build_filter_options( - search_key: SearchKey, -) -> Result< - ( - Option>, - Option<[usize; 2]>, - Option<[usize; 2]>, - Option<[core::Capacity; 2]>, - Option<[core::BlockNumber; 2]>, - ), - JsValue, -> { - let filter = search_key.filter.unwrap_or_default(); - let filter_script_prefix = if let Some(script) = filter.script { - let script: packed::Script = script.into(); - if script.args().len() > MAX_PREFIX_SEARCH_SIZE { - return Err(JsValue::from_str(&format!( - "search_key.filter.script.args len should be less 
than {}", - MAX_PREFIX_SEARCH_SIZE - ))); - } - let mut prefix = Vec::new(); - prefix.extend_from_slice(extract_raw_data(&script).as_slice()); - Some(prefix) - } else { - None - }; - - let filter_script_len_range = filter.script_len_range.map(|[r0, r1]| { - [ - Into::::into(r0) as usize, - Into::::into(r1) as usize, - ] - }); - - let filter_output_data_len_range = filter.output_data_len_range.map(|[r0, r1]| { - [ - Into::::into(r0) as usize, - Into::::into(r1) as usize, - ] - }); - let filter_output_capacity_range = filter.output_capacity_range.map(|[r0, r1]| { - [ - core::Capacity::shannons(r0.into()), - core::Capacity::shannons(r1.into()), - ] - }); - let filter_block_range = filter.block_range.map(|r| [r[0].into(), r[1].into()]); - - Ok(( - filter_script_prefix, - filter_script_len_range, - filter_output_data_len_range, - filter_output_capacity_range, - filter_block_range, - )) + let result = TRANSACTION_RPC + .get() + .unwrap() + .fetch_transaction(tx_hash) + .map_err(|e| JsValue::from_str(&e.to_string()))?; + Ok(result.serialize(&SERIALIZER)?) }