Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
46 commits
Select commit Hold shift + click to select a range
fecab80
refactor(pmt): rewrite Node as tagged union with parallel ref_count
GrapeBaBa May 1, 2026
940373f
feat(pmt): add Slab module with K=1024 storage and computeRoot
GrapeBaBa May 1, 2026
5484624
refactor(pmt): tighten Slab — drop stack copy, document fields, split…
GrapeBaBa May 1, 2026
832cade
refactor(pmt): strip len/dirty from Slab.Storage
GrapeBaBa May 2, 2026
d24ce6c
feat(pmt): add slab variant to Node union with inline len/dirty/root
GrapeBaBa May 2, 2026
c718da1
feat(pmt): Pool.createSlab/getSlab*/unref slab cleanup
GrapeBaBa May 2, 2026
d994958
refactor(pmt): move getSlabChunks/Len to Id, return Error on wrong va…
GrapeBaBa May 2, 2026
978014c
feat(pmt): Id.setSlabChunk + Id.setSlabChunks (CoW slab mutation)
GrapeBaBa May 2, 2026
d31ab20
test(pmt): mixed slab+branch tree root matches per-leaf reference
GrapeBaBa May 2, 2026
b4484bb
feat(ssz): FixedListType.tree.fromValue builds slab leaves above 2*K …
GrapeBaBa May 2, 2026
cbd9f94
Revert "feat(ssz): FixedListType.tree.fromValue builds slab leaves ab…
GrapeBaBa May 2, 2026
8d5b9ff
feat(pmt): FillWithContentsIterator gains leaf_offset for slab subtrees
GrapeBaBa May 2, 2026
f8f4a8c
feat(ssz): BasicPackedChunks gains comptime use_slab branch
GrapeBaBa May 2, 2026
da6bf10
feat(ssz): FixedListType/FixedVectorType opts.slab opt-in
GrapeBaBa May 2, 2026
a93af9b
bench(ssz): slab vs leaf for FixedListType(Uint64, 2^20)
GrapeBaBa May 2, 2026
304386c
feat(consensus_types): opt phase0.Balances into opts.slab=true
GrapeBaBa May 2, 2026
8be15c5
feat(consensus_types): opt InactivityScores + EpochParticipation into…
GrapeBaBa May 2, 2026
f83a4b8
fix(ssz): slab paths tolerate zero-sentinel subtrees at slab boundary
GrapeBaBa May 2, 2026
5329ef3
perf(ssz): in-place mutation for transient slab chunks (rc==0)
GrapeBaBa May 2, 2026
4bb6cfc
refactor(pmt): slab variant carries *Slab.Storage (Lighthouse-aligned)
GrapeBaBa May 2, 2026
ca5ac32
fix(pmt): init unfinalized_parents_buf to null in setNodes/setNodesAt…
GrapeBaBa May 3, 2026
17ded4a
refactor(pmt): replace ?[32]u8 root with lazy_sentinel
GrapeBaBa May 3, 2026
c592892
perf(pmt): decompose Node union into flat SoA columns
GrapeBaBa May 3, 2026
3b99c2e
perf(ssz): add StructContainerType backed by branch_struct nodes
GrapeBaBa May 3, 2026
2a75c42
fix(pmt): release heap payloads on pool deinit + StructContainer fiel…
GrapeBaBa May 3, 2026
016d07d
feat(pmt): support proofs through branch_struct and slab opaque nodes
GrapeBaBa May 3, 2026
e012894
refactor(pmt): hoist Node fields to file-as-struct
GrapeBaBa May 4, 2026
1f6d2ff
perf(pmt): pack kind+ref_count into single State u32 column
GrapeBaBa May 4, 2026
d239bc9
perf(pmt): merge left+right+cache into single u64 payload column
GrapeBaBa May 4, 2026
983bd4e
refactor(pmt): drop StateView, expose State value directly
GrapeBaBa May 4, 2026
dd60d5a
refactor(pmt): rename Slab to ChunkedLeaf
GrapeBaBa May 4, 2026
64297b9
feat(ssz): enable iteratorReadonly + sliceTo for chunked_leaf lists
GrapeBaBa May 5, 2026
adbd217
refactor(pmt): rename branch_struct to container_struct + clarify docs
GrapeBaBa May 5, 2026
a48fa6e
refactor(pmt): move Pool struct above Id, match main's State→Pool→Id …
GrapeBaBa May 5, 2026
9d5e563
refactor(pmt): drop dead defensive slice-refresh in setNodes/setNodes…
GrapeBaBa May 5, 2026
7a3b8d4
refactor(pmt): drop Storage suffix, rename Mut->Ptr to match Zig stdlib
GrapeBaBa May 5, 2026
59fdbe7
fix(pmt): address gemini-flagged stack growth + refcount ordering
GrapeBaBa May 5, 2026
c120b5f
perf(pmt): split Pool allocator into page+small lanes, opts struct API
GrapeBaBa May 6, 2026
6ed5c41
fix(pmt): convert remaining Pool.init call sites missed by sed
GrapeBaBa May 6, 2026
58c22fa
feat(ssz): zero-copy nextValuePtr for StructContainerType list iter
GrapeBaBa May 6, 2026
2c07154
perf(state-transition): migrate getEffectiveBalanceIncrementsZeroInac…
GrapeBaBa May 6, 2026
0a0412f
feat(fork-types): validatorsPtrSlice returns []*const Validator (no c…
GrapeBaBa May 7, 2026
9aeab18
perf(state-transition): migrate slashings_cache build to ptr slice
GrapeBaBa May 7, 2026
25d1871
fix(bench): switch process_{block,epoch} bench setup to validatorsPtr…
GrapeBaBa May 7, 2026
b68be09
fix(ssz): getAllInto chunked_leaf branch reads staged updates
GrapeBaBa May 7, 2026
5a8d794
fix(ssz): assert no pending writes before iteratorReadonly
GrapeBaBa May 7, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
198 changes: 198 additions & 0 deletions bench/ssz/list_chunked_leaf.zig
Original file line number Diff line number Diff line change
@@ -0,0 +1,198 @@
//! Bench comparing FixedListType(Uint64, 2^20) leaf-default vs opts.chunked_leaf=true
//! on representative balances-scale workloads.
//!
//! Run with:
//! zig build run:bench_list_chunked_leaf -Doptimize=ReleaseFast
//!
//! Workloads (1M u64 items unless noted):
//! - fromValue: build tree from a populated value
//! - getRoot: compute root hash from a freshly built tree
//! - toValue: decode all items back from the tree
//! - sparseSet: single-item set + getRoot (CoW path), 100 iterations
//! - bulkSetAndRoot: set every item then getRoot (epoch-rewards-shaped)
const std = @import("std");
const zbench = @import("zbench");

const Node = @import("persistent_merkle_tree").Node;
const ChunkedLeaf = @import("persistent_merkle_tree").ChunkedLeaf;

const ssz = @import("ssz");
const FixedListType = ssz.FixedListType;
const UintType = ssz.UintType;

const Limit: comptime_int = 1 << 20;
const ItemCount: usize = 1 << 20;

const ListLeaf = FixedListType(UintType(64), Limit, .{});
const ListChunkedLeaf = FixedListType(UintType(64), Limit, .{ .chunked_leaf = true });

const global_allocator = std.heap.page_allocator;

// Shared input value used by all build-side benches.
var input_value: ListLeaf.Type = ListLeaf.Type.empty;

/// Fill the shared `input_value` list with `ItemCount` deterministic u64
/// entries. The affine pattern `i*31 + 1` keeps neighboring chunks distinct
/// so hashing work is representative. Caller owns `input_value`'s memory.
fn populateInput(allocator: std.mem.Allocator) !void {
    try input_value.ensureTotalCapacity(allocator, ItemCount);
    var idx: usize = 0;
    while (idx < ItemCount) : (idx += 1) {
        const entry: u64 = @intCast(idx * 31 + 1);
        try input_value.append(allocator, entry);
    }
}

// ──────── fromValue ────────

/// Bench scenario: build a leaf-default tree from the shared input value,
/// then drop the reference. Measures pure tree-construction cost.
const FromValueLeaf = struct {
    pool: *Node.Pool,
    pub fn run(self: *FromValueLeaf, allocator: std.mem.Allocator) void {
        _ = allocator; // pool carries its own allocators; zbench's is unused here
        const built = ListLeaf.tree.fromValue(self.pool, &input_value) catch unreachable;
        defer self.pool.unref(built);
    }
};

/// Bench scenario: same construction workload as `FromValueLeaf`, but with
/// the chunked_leaf layout opted in. Measures pure tree-construction cost.
const FromValueChunkedLeaf = struct {
    pool: *Node.Pool,
    pub fn run(self: *FromValueChunkedLeaf, allocator: std.mem.Allocator) void {
        _ = allocator; // pool carries its own allocators; zbench's is unused here
        const built = ListChunkedLeaf.tree.fromValue(self.pool, &input_value) catch unreachable;
        defer self.pool.unref(built);
    }
};

// ──────── getRoot on a freshly built tree (cold lazy hashes) ────────

/// Bench scenario: build a fresh leaf-default tree (so lazy hashes are cold)
/// and compute its root. doNotOptimizeAway keeps the root live in ReleaseFast.
const GetRootLeaf = struct {
    pool: *Node.Pool,
    pub fn run(self: *GetRootLeaf, allocator: std.mem.Allocator) void {
        _ = allocator; // pool carries its own allocators; zbench's is unused here
        const built = ListLeaf.tree.fromValue(self.pool, &input_value) catch unreachable;
        defer self.pool.unref(built); // released after the root is consumed below
        std.mem.doNotOptimizeAway(built.getRoot(self.pool));
    }
};

/// Bench scenario: build a fresh chunked_leaf tree (cold lazy hashes) and
/// compute its root. doNotOptimizeAway keeps the root live in ReleaseFast.
const GetRootChunkedLeaf = struct {
    pool: *Node.Pool,
    pub fn run(self: *GetRootChunkedLeaf, allocator: std.mem.Allocator) void {
        _ = allocator; // pool carries its own allocators; zbench's is unused here
        const built = ListChunkedLeaf.tree.fromValue(self.pool, &input_value) catch unreachable;
        defer self.pool.unref(built); // released after the root is consumed below
        std.mem.doNotOptimizeAway(built.getRoot(self.pool));
    }
};

// ──────── toValue (read all items back) ────────

/// Bench scenario: decode every item of a prebuilt leaf-default tree back
/// into a value. `tree_id` is built once in main and shared across iterations.
const ToValueLeaf = struct {
    pool: *Node.Pool,
    tree_id: Node.Id,
    pub fn run(self: *ToValueLeaf, allocator: std.mem.Allocator) void {
        var decoded = ListLeaf.Type.empty;
        defer decoded.deinit(allocator);
        ListLeaf.tree.toValue(allocator, self.tree_id, self.pool, &decoded) catch unreachable;
        // Touch one element so the decode is not optimized out.
        std.mem.doNotOptimizeAway(decoded.items[0]);
    }
};

/// Bench scenario: decode every item of a prebuilt chunked_leaf tree back
/// into a value. `tree_id` is built once in main and shared across iterations.
const ToValueChunkedLeaf = struct {
    pool: *Node.Pool,
    tree_id: Node.Id,
    pub fn run(self: *ToValueChunkedLeaf, allocator: std.mem.Allocator) void {
        var decoded = ListChunkedLeaf.Type.empty;
        defer decoded.deinit(allocator);
        ListChunkedLeaf.tree.toValue(allocator, self.tree_id, self.pool, &decoded) catch unreachable;
        // Touch one element so the decode is not optimized out.
        std.mem.doNotOptimizeAway(decoded.items[0]);
    }
};

// ──────── bulkSetAndRoot: epoch-rewards-shaped — write every item via tree.fromValue
// of a slightly mutated input, then getRoot ────────

/// Bench scenario (epoch-rewards-shaped): rebuild a leaf-default tree from a
/// fully mutated value, then compute its root — i.e. "rewrite every balance
/// and re-hash".
const BulkSetAndRootLeaf = struct {
    pool: *Node.Pool,
    mutated: *ListLeaf.Type,
    pub fn run(self: *BulkSetAndRootLeaf, allocator: std.mem.Allocator) void {
        _ = allocator; // pool carries its own allocators; zbench's is unused here
        const rebuilt = ListLeaf.tree.fromValue(self.pool, self.mutated) catch unreachable;
        defer self.pool.unref(rebuilt); // released after the root is consumed below
        std.mem.doNotOptimizeAway(rebuilt.getRoot(self.pool));
    }
};

/// Bench scenario (epoch-rewards-shaped): rebuild a chunked_leaf tree from a
/// fully mutated value, then compute its root — i.e. "rewrite every balance
/// and re-hash".
const BulkSetAndRootChunkedLeaf = struct {
    pool: *Node.Pool,
    mutated: *ListChunkedLeaf.Type,
    pub fn run(self: *BulkSetAndRootChunkedLeaf, allocator: std.mem.Allocator) void {
        _ = allocator; // pool carries its own allocators; zbench's is unused here
        const rebuilt = ListChunkedLeaf.tree.fromValue(self.pool, self.mutated) catch unreachable;
        defer self.pool.unref(rebuilt); // released after the root is consumed below
        std.mem.doNotOptimizeAway(rebuilt.getRoot(self.pool));
    }
};

/// Entry point: builds shared fixtures (one pool, populated input, two
/// reference trees, two mutated values), registers all eight scenarios with
/// zbench, and runs them against stdout.
///
/// NOTE(review): `bench.addParam` is given addresses of stack locals
/// (`&fv_leaf`, `&tv_leaf`, ...). Those pointers stay valid only because
/// `bench.run` executes before `main` returns — do not move the addParam
/// calls or the locals into an inner scope.
/// NOTE(review): the scenario locals are `const` while each scenario's
/// `run` takes `*Self`; presumably zbench copies the param internally —
/// confirm against the zbench addParam signature.
pub fn main(init: std.process.Init) !void {
    const io = init.io;
    const allocator = global_allocator;
    var bench = zbench.Benchmark.init(allocator, .{});
    defer bench.deinit();

    // Single shared pool. Sized for ~1M chunks worth of node IDs across both
    // leaf and chunked_leaf scenarios; preheats keep allocator overhead off the hot path.
    var pool = try Node.Pool.init(.{ .page_allocator = allocator, .allocator = allocator, .pool_size = 8_000_000 });
    defer pool.deinit();

    // Shared 1M-item input used by all build-side benches.
    try populateInput(allocator);
    defer input_value.deinit(allocator);

    // Build per-layout reference trees once for the read-side benches.
    const tree_leaf = try ListLeaf.tree.fromValue(&pool, &input_value);
    defer pool.unref(tree_leaf);
    _ = tree_leaf.getRoot(&pool); // warm

    const tree_chunked_leaf = try ListChunkedLeaf.tree.fromValue(&pool, &input_value);
    defer pool.unref(tree_chunked_leaf);
    _ = tree_chunked_leaf.getRoot(&pool); // warm

    // bulkSet input: each iteration rebuilds tree.fromValue on this value;
    // matches the shape of "epoch rewards rewrite all balances + recompute root".
    var mutated_leaf: ListLeaf.Type = ListLeaf.Type.empty;
    defer mutated_leaf.deinit(allocator);
    try mutated_leaf.ensureTotalCapacity(allocator, ItemCount);
    for (0..ItemCount) |i| {
        try mutated_leaf.append(allocator, @as(u64, @intCast(i * 17 + 3)));
    }

    // Same mutated contents for the chunked_leaf layout so both benches hash
    // identical data.
    var mutated_chunked_leaf: ListChunkedLeaf.Type = ListChunkedLeaf.Type.empty;
    defer mutated_chunked_leaf.deinit(allocator);
    try mutated_chunked_leaf.ensureTotalCapacity(allocator, ItemCount);
    for (0..ItemCount) |i| {
        try mutated_chunked_leaf.append(allocator, @as(u64, @intCast(i * 17 + 3)));
    }

    // Register scenarios pairwise (leaf vs chunked_leaf) so report rows align.
    const fv_leaf = FromValueLeaf{ .pool = &pool };
    const fv_chunked_leaf = FromValueChunkedLeaf{ .pool = &pool };
    try bench.addParam("fromValue 1M leaf", &fv_leaf, .{});
    try bench.addParam("fromValue 1M chunked_leaf", &fv_chunked_leaf, .{});

    const gr_leaf = GetRootLeaf{ .pool = &pool };
    const gr_chunked_leaf = GetRootChunkedLeaf{ .pool = &pool };
    try bench.addParam("fromValue+getRoot 1M leaf", &gr_leaf, .{});
    try bench.addParam("fromValue+getRoot 1M chunked_leaf", &gr_chunked_leaf, .{});

    const tv_leaf = ToValueLeaf{ .pool = &pool, .tree_id = tree_leaf };
    const tv_chunked_leaf = ToValueChunkedLeaf{ .pool = &pool, .tree_id = tree_chunked_leaf };
    try bench.addParam("toValue 1M leaf", &tv_leaf, .{});
    try bench.addParam("toValue 1M chunked_leaf", &tv_chunked_leaf, .{});

    const bs_leaf = BulkSetAndRootLeaf{ .pool = &pool, .mutated = &mutated_leaf };
    const bs_chunked_leaf = BulkSetAndRootChunkedLeaf{ .pool = &pool, .mutated = &mutated_chunked_leaf };
    try bench.addParam("bulkSet+getRoot 1M leaf", &bs_leaf, .{});
    try bench.addParam("bulkSet+getRoot 1M chunked_leaf", &bs_chunked_leaf, .{});

    try bench.run(io, std.Io.File.stdout());

    _ = ChunkedLeaf; // silence unused if chunked_leaf code path proves unreachable in some build mode
}
15 changes: 10 additions & 5 deletions bench/state_transition/process_block.zig
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
//! Run with: zig build run:bench_process_block -Doptimize=ReleaseFast [-- /path/to/state.ssz /path/to/block.ssz]

const std = @import("std");
const builtin = @import("builtin");
const zbench = @import("zbench");
const Node = @import("persistent_merkle_tree").Node;
const state_transition = @import("state_transition");
Expand Down Expand Up @@ -436,16 +437,20 @@ fn ProcessBlockSegmentedBench(comptime fork: ForkSeq) type {
};
}

var gpa: std.heap.DebugAllocator(.{}) = .init;

pub fn main(init: std.process.Init) !void {
var gpa: std.heap.DebugAllocator(.{}) = .init;
defer std.debug.assert(gpa.deinit() == .ok);
defer if (builtin.mode == .Debug) std.debug.assert(gpa.deinit() == .ok);

const allocator = gpa.allocator();
const allocator = if (builtin.mode == .Debug)
gpa.allocator()
else
std.heap.c_allocator;
const io = init.io;
var stdout_buf: [4096]u8 = undefined;
var stdout_file_writer = std.Io.File.stdout().writer(io, &stdout_buf);
var stdout = &stdout_file_writer.interface;
var pool = try Node.Pool.init(allocator, 10_000_000);
var pool = try Node.Pool.init(.{ .page_allocator = allocator, .allocator = allocator });
defer pool.deinit();

// Use download_era_options.era_files[0] for state
Expand Down Expand Up @@ -526,7 +531,7 @@ fn runBenchmark(
allocator.destroy(index_pubkey_cache);
}

const validators = try beacon_state.?.validatorsSlice(allocator);
const validators = try beacon_state.?.validatorsPtrSlice(allocator);
defer allocator.free(validators);

try state_transition.syncPubkeys(allocator, validators, &pubkey_index_map, index_pubkey_cache);
Expand Down
15 changes: 10 additions & 5 deletions bench/state_transition/process_epoch.zig
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
//! Run with: zig build run:bench_process_epoch -Doptimize=ReleaseFast

const std = @import("std");
const builtin = @import("builtin");
const zbench = @import("zbench");
const Node = @import("persistent_merkle_tree").Node;
const state_transition = @import("state_transition");
Expand Down Expand Up @@ -698,16 +699,20 @@ fn loadStateBytesFromConfiguredEraFiles(allocator: std.mem.Allocator, io: std.Io
return error.NoUsableEraStateFound;
}

var gpa: std.heap.DebugAllocator(.{}) = .init;

pub fn main(init: std.process.Init) !void {
var gpa: std.heap.DebugAllocator(.{}) = .init;
defer std.debug.assert(gpa.deinit() == .ok);
defer if (builtin.mode == .Debug) std.debug.assert(gpa.deinit() == .ok);

const allocator = gpa.allocator();
const allocator = if (builtin.mode == .Debug)
gpa.allocator()
else
std.heap.c_allocator;
const io = init.io;
var stdout_buf: [4096]u8 = undefined;
var stdout_file_writer = std.Io.File.stdout().writer(io, &stdout_buf);
var stdout = &stdout_file_writer.interface;
var pool = try Node.Pool.init(allocator, 10_000_000);
var pool = try Node.Pool.init(.{ .page_allocator = allocator, .allocator = allocator });
defer pool.deinit();

const state_bytes = try loadStateBytesFromConfiguredEraFiles(allocator, io, stdout);
Expand Down Expand Up @@ -762,7 +767,7 @@ fn runBenchmark(
allocator.destroy(index_pubkey_cache);
}

const validators = try beacon_state.?.validatorsSlice(allocator);
const validators = try beacon_state.?.validatorsPtrSlice(allocator);
defer allocator.free(validators);

try state_transition.syncPubkeys(allocator, validators, &pubkey_index_map, index_pubkey_cache);
Expand Down
8 changes: 1 addition & 7 deletions bindings/napi/pool.zig
Original file line number Diff line number Diff line change
Expand Up @@ -2,19 +2,13 @@ const std = @import("std");
const js = @import("zapi:zapi").js;
const Node = @import("persistent_merkle_tree").Node;

/// Pool uses page allocator for internal allocations.
/// It's recommended to never reallocate the pool after initialization.
const allocator = std.heap.page_allocator;

const default_pool_size: u32 = 0;

pub const State = struct {
pool: Node.Pool = undefined,
initialized: bool = false,

pub fn init(self: *State) !void {
if (self.initialized) return;
self.pool = try Node.Pool.init(allocator, default_pool_size);
self.pool = try Node.Pool.init(.{});
self.initialized = true;
}

Expand Down
6 changes: 6 additions & 0 deletions build.zig.zon
Original file line number Diff line number Diff line change
Expand Up @@ -258,6 +258,12 @@
.imports = .{ .config, .consensus_types, .download_era_options, .era, .ssz, .zbench },
},
},
.bench_list_chunked_leaf = .{
.root_module = .{
.root_source_file = "bench/ssz/list_chunked_leaf.zig",
.imports = .{ .persistent_merkle_tree, .ssz, .zbench },
},
},
.bench_merkle_gindex = .{
.root_module = .{
.root_source_file = "bench/ssz/gindex.zig",
Expand Down
14 changes: 7 additions & 7 deletions src/consensus_types/altair.zig
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,7 @@ pub const SyncAggregate = ssz.FixedContainerType(struct {
});

pub const SyncCommittee = ssz.FixedContainerType(struct {
pubkeys: ssz.FixedVectorType(p.BLSPubkey, preset.SYNC_COMMITTEE_SIZE),
pubkeys: ssz.FixedVectorType(p.BLSPubkey, preset.SYNC_COMMITTEE_SIZE, .{}),
aggregate_pubkey: p.BLSPubkey,
});

Expand All @@ -71,8 +71,8 @@ pub const BeaconBlock = ssz.VariableContainerType(struct {
body: BeaconBlockBody,
});

pub const InactivityScores = ssz.FixedListType(p.Uint64, preset.VALIDATOR_REGISTRY_LIMIT);
pub const EpochParticipation = ssz.FixedListType(p.Uint8, preset.VALIDATOR_REGISTRY_LIMIT);
pub const InactivityScores = ssz.FixedListType(p.Uint64, preset.VALIDATOR_REGISTRY_LIMIT, .{ .chunked_leaf = true });
pub const EpochParticipation = ssz.FixedListType(p.Uint8, preset.VALIDATOR_REGISTRY_LIMIT, .{ .chunked_leaf = true });

pub const BeaconState = ssz.VariableContainerType(struct {
genesis_time: p.Uint64,
Expand Down Expand Up @@ -144,23 +144,23 @@ pub const LightClientHeader = ssz.FixedContainerType(struct {
pub const LightClientBootstrap = ssz.FixedContainerType(struct {
header: LightClientHeader,
current_sync_committee: SyncCommittee,
current_sync_committee_branch: ssz.FixedVectorType(p.Bytes32, std.math.log2(c.CURRENT_SYNC_COMMITTEE_GINDEX)),
current_sync_committee_branch: ssz.FixedVectorType(p.Bytes32, std.math.log2(c.CURRENT_SYNC_COMMITTEE_GINDEX), .{}),
});

pub const LightClientUpdate = ssz.FixedContainerType(struct {
attested_header: LightClientHeader,
next_sync_committee: SyncCommittee,
next_sync_committee_branch: ssz.FixedVectorType(p.Bytes32, std.math.log2(c.NEXT_SYNC_COMMITTEE_GINDEX)),
next_sync_committee_branch: ssz.FixedVectorType(p.Bytes32, std.math.log2(c.NEXT_SYNC_COMMITTEE_GINDEX), .{}),
finalized_header: LightClientHeader,
finality_branch: ssz.FixedVectorType(p.Bytes32, std.math.log2(c.FINALIZED_ROOT_GINDEX)),
finality_branch: ssz.FixedVectorType(p.Bytes32, std.math.log2(c.FINALIZED_ROOT_GINDEX), .{}),
sync_aggregate: SyncAggregate,
signature_slot: p.Slot,
});

pub const LightClientFinalityUpdate = ssz.FixedContainerType(struct {
attested_header: LightClientHeader,
finalized_header: LightClientHeader,
finality_branch: ssz.FixedVectorType(p.Bytes32, std.math.log2(c.FINALIZED_ROOT_GINDEX)),
finality_branch: ssz.FixedVectorType(p.Bytes32, std.math.log2(c.FINALIZED_ROOT_GINDEX), .{}),
sync_aggregate: SyncAggregate,
signature_slot: p.Slot,
});
Expand Down
6 changes: 3 additions & 3 deletions src/consensus_types/bellatrix.zig
Original file line number Diff line number Diff line change
Expand Up @@ -122,11 +122,11 @@ pub const BlindedBeaconBlockBody = ssz.VariableContainerType(struct {
randao_reveal: p.BLSSignature,
eth1_data: Eth1Data,
graffiti: p.Bytes32,
proposer_slashings: ssz.FixedListType(ProposerSlashing, preset.MAX_PROPOSER_SLASHINGS),
proposer_slashings: ssz.FixedListType(ProposerSlashing, preset.MAX_PROPOSER_SLASHINGS, .{}),
attester_slashings: ssz.VariableListType(AttesterSlashing, preset.MAX_ATTESTER_SLASHINGS),
attestations: ssz.VariableListType(Attestation, preset.MAX_ATTESTATIONS),
deposits: ssz.FixedListType(Deposit, preset.MAX_DEPOSITS),
voluntary_exits: ssz.FixedListType(SignedVoluntaryExit, preset.MAX_VOLUNTARY_EXITS),
deposits: ssz.FixedListType(Deposit, preset.MAX_DEPOSITS, .{}),
voluntary_exits: ssz.FixedListType(SignedVoluntaryExit, preset.MAX_VOLUNTARY_EXITS, .{}),
sync_aggregate: SyncAggregate,
execution_payload_header: ExecutionPayloadHeader,
});
Expand Down
Loading
Loading