|
| 1 | +const std = @import("std"); |
| 2 | +const Sha256 = std.crypto.hash.sha2.Sha256; |
| 3 | +const builtin = @import("builtin"); |
| 4 | +const native_endian = builtin.target.cpu.arch.endian(); |
| 5 | +const Allocator = std.mem.Allocator; |
| 6 | + |
/// Required byte length of the shuffle seed.
pub const SEED_SIZE = 32;
// Caches a u32 value per u32 key (e.g. pivot per shuffle round).
const U32U32HashMap = std.AutoHashMap(u32, u32);
// Maps a u32 key to a byte slice (e.g. hash "source" per position group).
const U8SliceByU32 = std.AutoHashMap(u32, []const u8);
// Note that AutoHashMap always copies the value in its put() API,
// so the value is stored as a pointer (*U8SliceByU32) rather than by value
// so the inner map can still be deinit()'d through that pointer later.
const U8SliceByU8ByU32 = std.AutoHashMap(u32, *U8SliceByU32);
| 13 | + |
/// A Zig implementation of https://github.com/ChainSafe/swap-or-not-shuffle/pull/5
/// ("swap-or-not" shuffle): get(index) returns the shuffled position of a single
/// index without materializing the whole permutation.
pub const ComputeShuffledIndex = struct {
    // this ComputeShuffledIndex is always init()'ed and deinit()'ed inside the
    // consumer's function, so an arena allocator is used for the per-get()
    // allocations to improve performance and simplify deinit()
    arena: std.heap.ArenaAllocator,
    // cache: pivot per round, so each round's pivot hash is computed at most once
    // across all get() calls
    pivot_by_index: U32U32HashMap,
    // cache: per round, the 32-byte hash "source" keyed by position / 256
    source_by_position_by_index: U8SliceByU8ByU32,
    // 32 bytes seed + 1 byte round index
    pivot_buffer: [33]u8,
    // 32 bytes seed + 1 byte round index + 4 bytes positionDiv
    source_buffer: [37]u8,
    index_count: u32,
    rounds: u32,

    /// seed must be exactly SEED_SIZE (32) bytes; index_count and rounds must be > 0.
    pub fn init(parent_allocator: Allocator, seed: []const u8, index_count: u32, rounds: u32) !ComputeShuffledIndex {
        if (seed.len != SEED_SIZE) {
            return error.InvalidSeedLen;
        }

        if (index_count == 0) {
            return error.InvalidIndexCount;
        }

        if (rounds == 0) {
            return error.InvalidRounds;
        }

        const arena = std.heap.ArenaAllocator.init(parent_allocator);

        // The cache maps themselves live on the parent allocator; only the values
        // they point at (inner maps and 32-byte sources) come from the arena.
        const pivot_by_index = U32U32HashMap.init(parent_allocator);
        const source_by_position_by_index = U8SliceByU8ByU32.init(parent_allocator);

        var pivot_buffer: [33]u8 = [_]u8{0} ** 33;
        var source_buffer: [37]u8 = [_]u8{0} ** 37;
        @memcpy(pivot_buffer[0..SEED_SIZE], seed);
        @memcpy(source_buffer[0..SEED_SIZE], seed);

        return ComputeShuffledIndex{
            .arena = arena,
            .pivot_by_index = pivot_by_index,
            .source_by_position_by_index = source_by_position_by_index,
            .pivot_buffer = pivot_buffer,
            .source_buffer = source_buffer,
            .index_count = index_count,
            .rounds = rounds,
        };
    }

    pub fn deinit(self: *ComputeShuffledIndex) void {
        self.pivot_by_index.deinit();

        var it = self.source_by_position_by_index.iterator();
        while (it.next()) |entry| {
            var source_by_position = entry.value_ptr.*;
            // no need to loop through values and free the sources inside
            // source_by_position thanks to the arena
            source_by_position.deinit();
            // we create() source_by_position in the get() api below
            // but no need to destroy() it thanks to the arena
        }

        self.source_by_position_by_index.deinit();

        // this needs to be the last step
        self.arena.deinit();
    }

    /// Returns the shuffled position of `index` after `rounds` rounds of
    /// swap-or-not. Deterministic for a given seed/index_count/rounds.
    pub fn get(self: *ComputeShuffledIndex, index: u32) !u32 {
        var permuted = index;
        const allocator = self.arena.allocator();

        for (0..self.rounds) |i| {
            var pivot = self.pivot_by_index.get(@intCast(i));
            if (pivot == null) {
                self.pivot_buffer[SEED_SIZE] = @intCast(i % 256);
                var digest = [_]u8{0} ** 32;
                Sha256.hash(self.pivot_buffer[0..], digest[0..], .{});
                // first 8 bytes of the digest interpreted as a little-endian u64
                const u64_slice = std.mem.bytesAsSlice(u64, digest[0..8]);
                const u64_value = u64_slice[0];
                const le_value = if (native_endian == .big) @byteSwap(u64_value) else u64_value;
                const computed_pivot: u32 = @intCast(le_value % self.index_count);
                // bug fix: store the computed pivot in the cache — previously it
                // was never put(), so pivot_by_index stayed empty forever and
                // every get() call re-hashed the pivot for every round
                try self.pivot_by_index.put(@intCast(i), computed_pivot);
                pivot = computed_pivot;
            }

            const flip = (pivot.? + self.index_count - permuted) % self.index_count;
            const position = @max(permuted, flip);
            const position_div: u32 = position / 256;

            var source_by_position = self.source_by_position_by_index.get(@intCast(i));
            if (source_by_position == null) {
                const _source_by_position = try allocator.create(U8SliceByU32);
                _source_by_position.* = U8SliceByU32.init(allocator);
                try self.source_by_position_by_index.put(@intCast(i), _source_by_position);
                source_by_position = _source_by_position;
            }

            var source = source_by_position.?.get(position_div);
            if (source == null) {
                self.source_buffer[SEED_SIZE] = @intCast(i % 256);
                // write position_div little-endian into the trailing 4 bytes
                const u32_slice = std.mem.bytesAsSlice(u32, self.source_buffer[SEED_SIZE + 1 ..]);
                u32_slice[0] = if (native_endian == .big) @byteSwap(position_div) else position_div;

                const _source = try allocator.alloc(u8, 32);
                var hash = [_]u8{0} ** 32;
                Sha256.hash(self.source_buffer[0..], &hash, .{});
                @memcpy(_source, hash[0..]);
                try source_by_position.?.put(position_div, _source);
                source = _source;
            }

            // select one bit of the source hash; if set, swap to the mirrored position
            const byte = source.?[@intCast(position % 256 / 8)];
            const bit = (byte >> @intCast(position % 8)) & 1;
            permuted = if (bit == 1) flip else permuted;
        }

        return permuted;
    }
};
| 130 | + |
/// Convenience wrapper around getCommitteeIndices that returns a single
/// proposer index using 2-byte random sampling (passes ByteCount.Two).
pub fn computeProposerIndexElectra(allocator: Allocator, seed: []const u8, active_indices: []u32, effective_balance_increments: []u16, max_effective_balance_electra: u64, effective_balance_increment: u32, rounds: u32) !u32 {
    var result: [1]u32 = .{0};
    try getCommitteeIndices(allocator, seed, active_indices, effective_balance_increments, ByteCount.Two, max_effective_balance_electra, effective_balance_increment, rounds, &result);
    return result[0];
}
| 136 | + |
/// Convenience wrapper around getCommitteeIndices that returns a single
/// proposer index; the random byte width is chosen by the caller.
pub fn computeProposerIndex(allocator: Allocator, seed: []const u8, active_indices: []u32, effective_balance_increments: []u16, rand_byte_count: ByteCount, max_effective_balance: u64, effective_balance_increment: u32, rounds: u32) !u32 {
    var result: [1]u32 = .{0};
    try getCommitteeIndices(allocator, seed, active_indices, effective_balance_increments, rand_byte_count, max_effective_balance, effective_balance_increment, rounds, &result);
    return result[0];
}
| 142 | + |
/// Fills `out` with committee indices using 2-byte random sampling
/// (forwards to getCommitteeIndices with ByteCount.Two).
pub fn computeSyncCommitteeIndicesElectra(allocator: Allocator, seed: []const u8, active_indices: []u32, effective_balance_increments: []u16, max_effective_balance_electra: u64, effective_balance_increment: u32, rounds: u32, out: []u32) !void {
    try getCommitteeIndices(allocator, seed, active_indices, effective_balance_increments, ByteCount.Two, max_effective_balance_electra, effective_balance_increment, rounds, out);
}
| 156 | + |
/// Fills `out` with committee indices; the random byte width is chosen by the
/// caller via `rand_byte_count`.
/// Consistency fix: the max-balance parameter was named
/// `max_effective_balance_electra` although this is the generic (non-Electra)
/// variant — renamed to `max_effective_balance` to match computeProposerIndex.
/// Zig arguments are positional, so callers are unaffected.
pub fn computeSyncCommitteeIndices(allocator: Allocator, seed: []const u8, active_indices: []u32, effective_balance_increments: []u16, rand_byte_count: ByteCount, max_effective_balance: u64, effective_balance_increment: u32, rounds: u32, out: []u32) !void {
    try getCommitteeIndices(
        allocator,
        seed,
        active_indices,
        effective_balance_increments,
        rand_byte_count,
        max_effective_balance,
        effective_balance_increment,
        rounds,
        out,
    );
}
| 170 | + |
/// Number of random bytes consumed per candidate when sampling:
/// One (used by the phase0 paths in the tests below) or
/// Two (16-bit random values, used by the Electra wrappers).
pub const ByteCount = enum(u8) {
    One = 1,
    Two = 2,
};
| 175 | + |
/// The same as the Rust implementation, with an "out" param to simplify memory
/// allocation. Walks candidates from `active_indices` (in swap-or-not shuffled
/// order) and accepts each one whose effective balance passes a weighted random
/// filter, until `out` is full.
/// NOTE(review): termination is probabilistic — if candidates keep failing the
/// balance filter the loop just continues; assumed to match the reference impl.
fn getCommitteeIndices(allocator: Allocator, seed: []const u8, active_indices: []const u32, effective_balance_increments: []const u16, rand_byte_count: ByteCount, max_effective_balance: u64, effective_balance_increment: u32, rounds: u32, out: []u32) !void {
    // maximum value a 1-byte (0xff) or 2-byte (0xffff) random sample can take
    const max_random_value: usize = if (rand_byte_count == .One) 0xff else 0xffff;
    const max_effective_balance_increment: usize = max_effective_balance / effective_balance_increment;

    var compute_shuffled_index = try ComputeShuffledIndex.init(allocator, seed, @intCast(active_indices.len), rounds);
    defer compute_shuffled_index.deinit();
    // memoize shuffled indices: i wraps around active_indices.len, so the same
    // index can be requested again on a later pass
    var shuffled_result = U32U32HashMap.init(allocator);
    defer shuffled_result.deinit();

    var i: u32 = 0;
    // 32-byte seed followed by an 8-byte counter; only the low 4 counter bytes
    // are ever written, the high 4 stay zero
    var cached_hash_input = [_]u8{0} ** (32 + 8);
    // seed should have 32 bytes as checked in ComputeShuffledIndex.init
    @memcpy(cached_hash_input[0..32], seed);
    var cached_hash = [_]u8{0} ** 32;
    var next_committee_index: usize = 0;

    while (next_committee_index < out.len) {
        const index: u32 = @intCast(i % active_indices.len);
        var shuffled_index = shuffled_result.get(index);
        if (shuffled_index == null) {
            const _shuffled_index = try compute_shuffled_index.get(index);
            try shuffled_result.put(index, _shuffled_index);
            shuffled_index = _shuffled_index;
        }
        const candidate_index = active_indices[@intCast(shuffled_index.?)];

        // one 32-byte hash yields 32 one-byte or 16 two-byte random values, so a
        // fresh hash is computed every 32 (or 16) iterations, including i == 0
        const hash_increment: u32 = if (rand_byte_count == .One) 32 else 16;
        if (i % hash_increment == 0) {
            const num_hash_increment = @divFloor(i, hash_increment);
            // suppose number of hash_increment always fit u32, the last 4 bytes of cached_hash_input is always 0
            // this is the same to below Rust implementation
            // cached_hash_input[32..36].copy_from_slice(&(i / hash_increment).to_le_bytes());
            const u32_slice = std.mem.bytesAsSlice(u32, cached_hash_input[32..36]);
            u32_slice[0] = if (native_endian == .big) @byteSwap(num_hash_increment) else num_hash_increment;
            Sha256.hash(cached_hash_input[0..], cached_hash[0..], .{});
        }

        const random_bytes = cached_hash;
        // pull this iteration's random sample (little-endian for the 2-byte case)
        const random_value: usize = switch (rand_byte_count) {
            .One => blk: {
                const offset: usize = @intCast(i % 32);
                break :blk @intCast(random_bytes[offset]);
            },
            .Two => blk: {
                const offset: usize = @intCast((i % 16) * 2);
                const u16_slice = std.mem.bytesAsSlice(u16, random_bytes[offset..(offset + 2)]);
                const value = u16_slice[0];
                const le_value = if (native_endian == .big) @byteSwap(value) else value;
                break :blk @intCast(le_value);
            },
        };

        // accept when balance/max_balance >= random/max_random, cross-multiplied
        // to stay in integer arithmetic
        const candidate_effective_balance_increment = effective_balance_increments[@intCast(candidate_index)];
        if (candidate_effective_balance_increment * max_random_value >= max_effective_balance_increment * random_value) {
            out[next_committee_index] = candidate_index;
            next_committee_index += 1;
        }

        i += 1;
    }
}
| 238 | + |
test "ComputeShuffledIndex" {
    const allocator = std.testing.allocator;
    const seed = [_]u8{1} ** SEED_SIZE;
    const index_count = 1000;
    // SHUFFLE_ROUND_COUNT is 90 in ethereum mainnet
    const rounds = 90;

    var instance = try ComputeShuffledIndex.init(allocator, seed[0..], index_count, rounds);
    defer instance.deinit();

    // expected shuffled positions for indices 0..31
    const expected = [_]u32{ 789, 161, 541, 509, 498, 445, 270, 2, 505, 621, 947, 550, 338, 814, 285, 597, 169, 819, 644, 638, 751, 514, 750, 523, 303, 231, 391, 982, 409, 396, 641, 837 };

    // iterate the expected values directly; the previous loop walked all 1000
    // indices but its body only ran for the first 32 (968 dead iterations)
    for (expected, 0..) |expected_index, i| {
        const shuffled_index = try instance.get(@intCast(i));
        try std.testing.expectEqual(expected_index, shuffled_index);
    }
}
| 258 | + |
test "compute_proposer_index" {
    const allocator = std.testing.allocator;
    // SHUFFLE_ROUND_COUNT is 90 in ethereum mainnet
    const rounds = 90;
    const index_count = 1000;
    const seed = [_]u8{1} ** SEED_SIZE;

    // validator idx is active with an effective balance of 32 + 32 * (idx % 64) increments
    var active_indices: [index_count]u32 = undefined;
    var effective_balance_increments: [index_count]u16 = undefined;
    for (&active_indices, &effective_balance_increments, 0..) |*active, *increment, idx| {
        active.* = @intCast(idx);
        increment.* = @intCast(32 + 32 * (idx % 64));
    }

    // phase0: 1-byte random sampling
    const MAX_EFFECTIVE_BALANCE: u64 = 32000000000;
    const EFFECTIVE_BALANCE_INCREMENT: u32 = 1000000000;
    const phase0_index = try computeProposerIndex(allocator, seed[0..], active_indices[0..], effective_balance_increments[0..], ByteCount.One, MAX_EFFECTIVE_BALANCE, EFFECTIVE_BALANCE_INCREMENT, rounds);
    try std.testing.expectEqual(@as(u32, 789), phase0_index);

    // electra: 2-byte random sampling with the larger max effective balance
    const MAX_EFFECTIVE_BALANCE_ELECTRA: u64 = 2048000000000;
    const electra_index = try computeProposerIndex(allocator, seed[0..], active_indices[0..], effective_balance_increments[0..], ByteCount.Two, MAX_EFFECTIVE_BALANCE_ELECTRA, EFFECTIVE_BALANCE_INCREMENT, rounds);
    try std.testing.expectEqual(@as(u32, 161), electra_index);
}
| 284 | + |
test "compute_sync_committee_indices" {
    const allocator = std.testing.allocator;
    const seed = [_]u8{ 74, 7, 102, 54, 84, 136, 68, 56, 19, 191, 186, 58, 72, 53, 151, 49, 220, 123, 42, 116, 59, 7, 73, 162, 110, 145, 93, 199, 163, 66, 85, 34 };
    // SHUFFLE_ROUND_COUNT is 90 in ethereum mainnet
    const rounds = 90;
    const vc = 1000;

    // validator idx is active with an effective balance of 32 + 32 * (idx % 64) increments
    var active_indices: [vc]u32 = undefined;
    var effective_balance_increments: [vc]u16 = undefined;
    for (&active_indices, &effective_balance_increments, 0..) |*active, *increment, idx| {
        active.* = @intCast(idx);
        increment.* = @intCast(32 + 32 * (idx % 64));
    }

    // only get first 32 indices to make it easier to test
    var out = [_]u32{0} ** 32;

    // phase0: 1-byte random sampling
    const MAX_EFFECTIVE_BALANCE: u64 = 32000000000;
    const EFFECTIVE_BALANCE_INCREMENT: u32 = 1000000000;
    try computeSyncCommitteeIndices(allocator, seed[0..], active_indices[0..], effective_balance_increments[0..], ByteCount.One, MAX_EFFECTIVE_BALANCE, EFFECTIVE_BALANCE_INCREMENT, rounds, out[0..]);
    const expected_phase0 = [_]u32{ 293, 726, 771, 677, 530, 475, 322, 66, 521, 106, 774, 23, 508, 410, 526, 44, 213, 948, 248, 903, 85, 853, 171, 679, 309, 791, 851, 817, 609, 119, 128, 983 };
    try std.testing.expectEqualSlices(u32, expected_phase0[0..], out[0..]);

    // electra: 2-byte random sampling with the larger max effective balance
    const MAX_EFFECTIVE_BALANCE_ELECTRA: u64 = 2048000000000;
    try computeSyncCommitteeIndices(allocator, seed[0..], active_indices[0..], effective_balance_increments[0..], ByteCount.Two, MAX_EFFECTIVE_BALANCE_ELECTRA, EFFECTIVE_BALANCE_INCREMENT, rounds, out[0..]);
    const expected_electra = [_]u32{ 726, 475, 521, 23, 508, 410, 213, 948, 248, 85, 171, 309, 791, 817, 119, 126, 651, 416, 273, 471, 739, 290, 588, 840, 665, 945, 496, 158, 757, 616, 226, 766 };
    try std.testing.expectEqualSlices(u32, expected_electra[0..], out[0..]);
}
0 commit comments