Skip to content
Merged
445 changes: 29 additions & 416 deletions Cargo.lock

Large diffs are not rendered by default.

4 changes: 0 additions & 4 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -23,25 +23,21 @@ categories = ["data-structures", "database"]
anyhow = "1.0"
assert_matches = "1.5"
bincode = "2"
byteorder = "1.5"
chrono = "0.4.42"
criterion = "0.8"
crossbeam = "0.8.0"
modifier = "0.1"
proc-macro2 = "1.0"
proptest = "1.9"
quote = "1.0"
rand = "0.9"
rand_xorshift = "0.4.0"
rocksdb = "0.24.0"
rust_decimal = "1.0"
serde = "1.0"
smallvec = "1.15"
syn = "2.0"
tempfile = "3.2"
thiserror = "2.0"
url = "2.0"
uuid = "1.19"
version-sync = "0.9.5"

# Workspace dependencies
Expand Down
20 changes: 0 additions & 20 deletions matterdb/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -11,17 +11,12 @@ description = "Persistent storage implementation based on RocksDB"
readme = "README.md"

[dependencies]
# FIXME: make deps optional where appropriate
anyhow.workspace = true
byteorder.workspace = true
chrono.workspace = true
crossbeam.workspace = true
rocksdb.workspace = true
rust_decimal.workspace = true
serde = { workspace = true, features = ["derive"] }
smallvec.workspace = true
thiserror.workspace = true
uuid = { workspace = true, features = ["v4"] }

[dev-dependencies]
matterdb-derive.workspace = true
Expand All @@ -42,20 +37,5 @@ name = "criterion"
path = "benches/lib.rs"
harness = false

[features]
# FIXME: revise features
default = ["rocksdb_snappy"]
with-serde = []

# Compression options passed to RocksDB backend.
rocksdb_snappy = ["rocksdb/snappy"]
rocksdb_lz4 = ["rocksdb/lz4"]
rocksdb_zlib = ["rocksdb/zlib"]
rocksdb_zstd = ["rocksdb/zstd"]
rocksdb_bzip2 = ["rocksdb/bzip2"]

# Enables long benchmarks; does not influence main crate code.
long_benchmarks = []

[lints]
workspace = true
48 changes: 6 additions & 42 deletions matterdb/benches/benchmarks/encoding.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,5 @@
use std::{borrow::Cow, fmt, hint::black_box};

use byteorder::{ByteOrder, LittleEndian, ReadBytesExt, WriteBytesExt};
use criterion::{Bencher, Criterion};
use matterdb::BinaryValue;
use rand::{RngCore, SeedableRng, rngs::StdRng};
Expand All @@ -18,43 +17,17 @@ struct SimpleData {
impl BinaryValue for SimpleData {
fn to_bytes(&self) -> Vec<u8> {
let mut buffer = vec![0; 8];
LittleEndian::write_u16(&mut buffer[0..2], self.id);
LittleEndian::write_i16(&mut buffer[2..4], self.class);
LittleEndian::write_i32(&mut buffer[4..8], self.value);
buffer[0..2].copy_from_slice(&self.id.to_le_bytes());
buffer[2..4].copy_from_slice(&self.class.to_le_bytes());
buffer[4..8].copy_from_slice(&self.value.to_le_bytes());
buffer
}

fn from_bytes(bytes: Cow<'_, [u8]>) -> anyhow::Result<Self> {
let bytes = bytes.as_ref();
let id = LittleEndian::read_u16(&bytes[0..2]);
let class = LittleEndian::read_i16(&bytes[2..4]);
let value = LittleEndian::read_i32(&bytes[4..8]);
Ok(Self { id, class, value })
}
}

#[derive(Debug, Clone, Copy, PartialEq)]
struct CursorData {
id: u16,
class: i16,
value: i32,
}

impl BinaryValue for CursorData {
fn to_bytes(&self) -> Vec<u8> {
let mut buf = vec![0; 8];
let mut cursor = buf.as_mut_slice();
cursor.write_u16::<LittleEndian>(self.id).unwrap();
cursor.write_i16::<LittleEndian>(self.class).unwrap();
cursor.write_i32::<LittleEndian>(self.value).unwrap();
buf
}

fn from_bytes(bytes: Cow<'_, [u8]>) -> anyhow::Result<Self> {
let mut cursor = bytes.as_ref();
let id = cursor.read_u16::<LittleEndian>()?;
let class = cursor.read_i16::<LittleEndian>()?;
let value = cursor.read_i32::<LittleEndian>()?;
let id = u16::from_le_bytes(bytes[0..2].try_into().unwrap());
let class = i16::from_le_bytes(bytes[2..4].try_into().unwrap());
let value = i32::from_le_bytes(bytes[4..8].try_into().unwrap());
Ok(Self { id, class, value })
}
}
Expand Down Expand Up @@ -83,14 +56,6 @@ fn gen_sample_data() -> SimpleData {
})
}

fn gen_cursor_data() -> CursorData {
check_binary_value(CursorData {
id: 1,
class: -5,
value: 2127,
})
}

fn bench_binary_value<F, V>(c: &mut Criterion, name: &str, f: F)
where
F: Fn() -> V + 'static + Clone + Copy,
Expand Down Expand Up @@ -131,5 +96,4 @@ where
pub(crate) fn bench_encoding(c: &mut Criterion) {
bench_binary_value(c, "bytes", gen_bytes_data);
bench_binary_value(c, "simple", gen_sample_data);
bench_binary_value(c, "cursor", gen_cursor_data);
}
10 changes: 5 additions & 5 deletions matterdb/benches/benchmarks/schema_patterns.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@ use std::hint::black_box;

use criterion::{Bencher, Criterion, Throughput};
use matterdb::{
Group, KeySetIndex, Lazy, ListIndex, MapIndex,
Group, KeySetIndex, ListIndex, MapIndex,
access::{Access, AccessExt, FromAccess, Prefixed, RawAccessMut},
};
use matterdb_derive::{BinaryValue, FromAccess};
Expand Down Expand Up @@ -102,10 +102,10 @@ struct LazySchema<T: Access> {
transactions: MapIndex<T::Base, u32, Transaction>,
hot_index: MapIndex<T::Base, u64, u32>,
hot_group: Group<T, u64, ListIndex<T::Base, u64>>,
cold_index: Lazy<T, MapIndex<T::Base, u64, u32>>,
cold_index: MapIndex<T::Base, u64, u32>,
// groups are already lazy
cold_group: Group<T, u64, ListIndex<T::Base, u64>>,
other_cold_index: Lazy<T, KeySetIndex<T::Base, u64>>,
other_cold_index: KeySetIndex<T::Base, u64>,
}

impl<T: Access> LazySchema<T> {
Expand Down Expand Up @@ -134,12 +134,12 @@ where
let cold_group_id = transaction.value % COLD_DIVISOR;
let mut list_in_group = self.cold_group.get(&cold_group_id);
list_in_group.push(transaction.value);
self.cold_index.get().put(&cold_group_id, divisor);
self.cold_index.put(&cold_group_id, divisor);
}
}

if transaction.value % COLD_CHANCE == 0 {
self.other_cold_index.get().insert(&transaction.value);
self.other_cold_index.insert(&transaction.value);
}
}
}
Expand Down
6 changes: 1 addition & 5 deletions matterdb/benches/benchmarks/storage.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ use std::hint::black_box;
use criterion::{
AxisScale, BatchSize, Bencher, BenchmarkId, Criterion, PlotConfiguration, Throughput,
};
use matterdb::{Fork, ListIndex, MapIndex, access::CopyAccessExt};
use matterdb::{Fork, ListIndex, MapIndex, access::AccessExt};
use rand::{Rng, SeedableRng, rngs::StdRng};

use super::BenchDB;
Expand All @@ -14,12 +14,8 @@ const SAMPLE_SIZE: usize = 10;
const CHUNK_SIZE: usize = 64;
const SEED: [u8; 32] = [100; 32];

#[cfg(all(test, not(feature = "long_benchmarks")))]
const ITEM_COUNTS: [usize; 3] = [1_000, 10_000, 100_000];

#[cfg(all(test, feature = "long_benchmarks"))]
const ITEM_COUNTS: [usize; 4] = [1_000, 10_000, 100_000, 1_000_000];

fn generate_random_kv(len: usize) -> Vec<(u32, Vec<u8>)> {
let mut key = 0;
let kv_generator = |_| {
Expand Down
8 changes: 4 additions & 4 deletions matterdb/examples/migration/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ use std::sync::Arc;

use matterdb::{
Database, Entry, Group, ListIndex, MapIndex, Snapshot, TemporaryDB,
access::{Access, CopyAccessExt, FromAccess, Prefixed},
access::{Access, AccessExt, FromAccess, Prefixed},
migration::{Migration, flush_migration},
};
use matterdb_derive::{BinaryValue, FromAccess};
Expand Down Expand Up @@ -179,7 +179,7 @@ where

// Check that DB contains old and new data.
let snapshot = db.snapshot();
check_data_before_flush(&snapshot);
check_data_before_flush(snapshot.as_ref());
// Finalize the migration by calling `flush_migration`.
let mut fork = db.fork();
flush_migration(&mut fork, "test");
Expand All @@ -194,10 +194,10 @@ where
db.merge(patch).unwrap();
// Check that data was updated after merge.
let snapshot = db.snapshot();
check_data_after_flush(&snapshot);
check_data_after_flush(snapshot.as_ref());

// Print DB state after migration is completed.
let schema = v2::Schema::new(Prefixed::new("test", &snapshot));
let schema = v2::Schema::new(Prefixed::new("test", snapshot.as_ref()));
println!("After migration:");
schema.print_wallets();
}
Loading
Loading