Commit 4e7d868

x3c41ac and claude committed
Consolidate store_big_data tests into single script

- Add --skip-authorize flag for live networks with pre-authorized accounts
- Add --skip-ipfs-verify flag to skip IPFS download verification
- Add --fast flag for TX_MODE_IN_POOL (broadcast only)
- Add --num-signers=N flag to configure parallel workers
- Remove redundant store_big_data_live.js
- Update justfile to use unified script with appropriate flags

Co-Authored-By: Claude Opus 4.5 <[email protected]>
1 parent 068b599 · commit 4e7d868

3 files changed: +102 -410 lines

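For context, the unified script keeps its four positional arguments ([ws_url] [seed] [ipfs_gateway_url] [image_size], as the diff below shows) and layers the new flags on top. A sketch of two invocations; the seed placeholder and the flag values chosen here are illustrative:

    # Local dev run: authorize the signer accounts, then verify downloads via IPFS
    node store_big_data.js ws://localhost:10000 //Alice http://127.0.0.1:8283 big64

    # Live-network run: accounts pre-authorized, broadcast-only, no IPFS verification
    node store_big_data.js wss://westend-bulletin-rpc.polkadot.io "<pre-authorized seed>" http://127.0.0.1:8283 big64 \
      --skip-authorize --skip-ipfs-verify --fast --num-signers=8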

examples/justfile (6 additions, 4 deletions)
@@ -648,7 +648,7 @@ run-authorize-preimage-and-store-papi runtime ws_url="ws://localhost:10000" seed
 WESTEND_RPC := "wss://westend-bulletin-rpc.polkadot.io"
 PASEO_RPC := "wss://paseo-bulletin-rpc.polkadot.io"
 
-# Run store-big-data-live test against a live endpoint
+# Run store-big-data test against a live endpoint
 # Parameters:
 #   ws_url - WebSocket URL of the Bulletin chain node
 #   seed - Account seed phrase (must be pre-authorized on the network)
@@ -663,17 +663,19 @@ _run-live-tests ws_url seed ipfs_gateway_url="http://127.0.0.1:8283" image_size=
 
     just papi-generate "{{ ws_url }}"
 
+    # Live networks use pre-authorized accounts, so skip authorization
+    EXTRA_FLAGS="--skip-authorize"
+
     # Check if IPFS peers are configured for live network
-    EXTRA_FLAGS=""
     if [ -z "$WESTEND_PEER1_ID" ] || [ -z "$WESTEND_PEER1_ADDR" ]; then
       echo ""
      echo "⚠️  IPFS peer info not configured - running storage-only test."
       echo "   To enable IPFS verification, set WESTEND_PEER1_ID and WESTEND_PEER1_ADDR."
       echo ""
-      EXTRA_FLAGS="--skip-ipfs-verify"
+      EXTRA_FLAGS="$EXTRA_FLAGS --skip-ipfs-verify"
     fi
 
-    node store_big_data_live.js "{{ ws_url }}" "{{ seed }}" "{{ ipfs_gateway_url }}" "{{ image_size }}" $EXTRA_FLAGS
+    node store_big_data.js "{{ ws_url }}" "{{ seed }}" "{{ ipfs_gateway_url }}" "{{ image_size }}" $EXTRA_FLAGS
 
# Run live tests against Westend Bulletin
# Parameters:
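In effect, the recipe now always passes --skip-authorize and only appends --skip-ipfs-verify when the Westend peer variables are missing. A rough expansion of the two paths (variable names as in the recipe, quoting illustrative):

    # WESTEND_PEER1_ID / WESTEND_PEER1_ADDR set: storage plus IPFS verification
    node store_big_data.js "$ws_url" "$seed" "$ipfs_gateway_url" "$image_size" --skip-authorize

    # Peer variables unset: storage-only test
    node store_big_data.js "$ws_url" "$seed" "$ipfs_gateway_url" "$image_size" --skip-authorize --skip-ipfs-verify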

examples/store_big_data.js (96 additions, 59 deletions)
@@ -4,7 +4,7 @@ import fs from 'fs'
 import os from "os";
 import path from "path";
 import assert from "assert";
-import {authorizeAccount, store, fetchCid, TX_MODE_FINALIZED_BLOCK, TX_MODE_IN_BLOCK} from "./api.js";
+import {authorizeAccount, store, fetchCid, TX_MODE_FINALIZED_BLOCK, TX_MODE_IN_BLOCK, TX_MODE_IN_POOL} from "./api.js";
 import { buildUnixFSDagPB, cidFromBytes } from "./cid_dag_metadata.js";
 import {
   setupKeyringAndSigners,
@@ -30,6 +30,10 @@ import { bulletin } from './.papi/descriptors/dist/index.mjs';
 
 // Command line arguments: [ws_url] [seed] [ipfs_gateway_url] [image_size]
 // Note: --signer-disc=XX flag is also supported for parallel runs
+// Note: --num-signers=N flag controls number of parallel workers (default: 16)
+// Note: --skip-authorize flag skips account authorization (for live networks with pre-authorized accounts)
+// Note: --skip-ipfs-verify flag skips IPFS download verification
+// Note: --fast flag uses TX_MODE_IN_POOL for faster uploads (broadcast only)
 const args = process.argv.slice(2).filter(arg => !arg.startsWith('--'));
 const NODE_WS = args[0] || 'ws://localhost:10000';
 const SEED = args[1] || '//Alice';
@@ -38,7 +42,14 @@ const IPFS_GATEWAY_URL = args[2] || DEFAULT_IPFS_GATEWAY_URL;
 const IPFS_API_URL = IPFS_GATEWAY_URL.replace(':8283', ':5011');
 // Image size preset: small, big32, big64, big96
 const IMAGE_SIZE = args[3] || 'big64';
-const NUM_SIGNERS = 16;
+
+// Optional flags
+const signerDiscriminator = process.argv.find(arg => arg.startsWith("--signer-disc="))?.split("=")[1] ?? null;
+const numSignersArg = process.argv.find(arg => arg.startsWith("--num-signers="))?.split("=")[1];
+const NUM_SIGNERS = numSignersArg ? parseInt(numSignersArg, 10) : 16;
+const SKIP_AUTHORIZE = process.argv.includes("--skip-authorize");
+const SKIP_IPFS_VERIFY = process.argv.includes("--skip-ipfs-verify");
+const FAST_MODE = process.argv.includes("--fast");
 
 // -------------------- queue --------------------
 const queue = [];
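Since --signer-disc and --num-signers parse independently of the positional arguments, two script instances can share a node without colliding on nonces. A hypothetical pairing, following the --signer-disc=BB example from the old inline comment (discriminators and worker counts illustrative):

    # Disjoint derivation paths: //SignerAA1..8 vs //SignerBB1..8
    node store_big_data.js ws://localhost:10000 //Alice --signer-disc=AA --num-signers=8 &
    node store_big_data.js ws://localhost:10000 //Alice --signer-disc=BB --num-signers=8 &
    wait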
@@ -59,7 +70,7 @@ const stats = {
   blockHashes: {}, // Map block number -> block hash for timestamp lookups
 };
 
-function waitForQueueLength(targetLength, timeoutMs = 300000) {
+function waitForQueueLength(targetLength, timeoutMs = 600000) {
   return new Promise((resolve, reject) => {
     const start = Date.now();
 
@@ -101,15 +112,20 @@ async function processJob(typedApi, workerId, signer, chunk) {
     `Worker ${workerId} submitting tx for chunk ${chunk.cid} of size ${chunk.len} bytes`
   );
 
-  let { cid, blockHash, blockNumber } = await store(typedApi, signer.signer, chunk.bytes);
+  const txMode = FAST_MODE ? TX_MODE_IN_POOL : TX_MODE_IN_BLOCK;
+  let { cid, blockHash, blockNumber } = await store(typedApi, signer.signer, chunk.bytes, null, null, txMode);
   pushToResultQueue({ cid, blockNumber });
   if (blockNumber !== undefined) {
     stats.blockNumbers.push(blockNumber);
     if (blockHash && !stats.blockHashes[blockNumber]) {
       stats.blockHashes[blockNumber] = blockHash;
     }
   }
-  console.log(`Worker ${workerId} tx included in block #${blockNumber} with CID: ${cid}`);
+  if (FAST_MODE) {
+    console.log(`Worker ${workerId} tx broadcasted with CID: ${cid}`);
+  } else {
+    console.log(`Worker ${workerId} tx included in block #${blockNumber} with CID: ${cid}`);
+  }
 }
 
 // -------------------- helpers --------------------
@@ -181,6 +197,7 @@ async function printStatistics(dataSize, typedApi) {
   console.log(`│ File size          │ ${formatBytes(dataSize).padEnd(25)} │`);
   console.log(`│ Chunk/TX size      │ ${formatBytes(CHUNK_SIZE).padEnd(25)} │`);
   console.log(`│ Number of chunks   │ ${numTxs.toString().padEnd(25)} │`);
+  console.log(`│ Num signers        │ ${NUM_SIGNERS.toString().padEnd(25)} │`);
   console.log(`│ Avg txs per block  │ ${`${avgTxsPerBlock} (${numTxs}/${totalBlocksInRange})`.padEnd(25)} │`);
   console.log(`│ Time elapsed       │ ${formatDuration(elapsed).padEnd(25)} │`);
   console.log(`│ Blocks elapsed     │ ${`${blocksElapsed} (#${startBlock} → #${endBlock})`.padEnd(25)} │`);
@@ -230,19 +247,22 @@ export async function storeChunkedFile(api, filePath) {
 }
 
 // Connect to IPFS API (for ipfs-http-client operations like block.get)
-const ipfs = create({
-  url: IPFS_API_URL,
-});
-
-// Optional signer discriminator, when we want to run the script in parallel and don't take care of nonces.
-// E.g.: node store_big_data.js --signer-disc=BB
-const signerDiscriminator = process.argv.find(arg => arg.startsWith("--signer-disc="))?.split("=")[1] ?? null;
+let ipfs = null;
+if (!SKIP_IPFS_VERIFY) {
+  ipfs = create({
+    url: IPFS_API_URL,
+  });
+}
 
 async function main() {
   await cryptoWaitReady()
 
   logHeader('STORE BIG DATA TEST');
-  logConnection(NODE_WS, SEED, IPFS_GATEWAY_URL);
+  logConnection(NODE_WS, SEED.length > 20 ? SEED.substring(0, 10) + '...' : SEED, IPFS_GATEWAY_URL);
+  console.log(`Image size: ${IMAGE_SIZE}, Num signers: ${NUM_SIGNERS}`);
+  if (SKIP_AUTHORIZE) console.log('Skipping authorization (--skip-authorize)');
+  if (SKIP_IPFS_VERIFY) console.log('Skipping IPFS verification (--skip-ipfs-verify)');
+  if (FAST_MODE) console.log('Fast mode enabled (--fast)');
 
   let client, resultCode;
   try {
@@ -255,29 +275,36 @@ async function main() {
     // Init WS PAPI client and typed api.
     client = createClient(getWsProvider(NODE_WS));
     const bulletinAPI = client.getTypedApi(bulletin);
-    const { sudoSigner, _ } = setupKeyringAndSigners(SEED, '//Bigdatasigner');
 
-    // Let's do parallelism with multiple accounts
+    // Create signers - use discriminator if provided for parallel runs
     const signers = Array.from({ length: NUM_SIGNERS }, (_, i) => {
-      if (!signerDiscriminator) {
-        return newSigner(`//Signer${i + 1}`)
-      } else {
+      if (signerDiscriminator) {
         console.log(`Using signerDiscriminator: "//Signer${signerDiscriminator}${i + 1}"`);
-        return newSigner(`//Signer${signerDiscriminator}${i + 1}`)
+        return newSigner(`//Signer${signerDiscriminator}${i + 1}`);
+      } else {
+        return newSigner(`//Signer${i + 1}`);
       }
     });
 
-    // Authorize accounts.
-    await authorizeAccount(
-      bulletinAPI,
-      sudoSigner,
-      signers.map(a => a.address),
-      100,
-      BigInt(100 * 1024 * 1024), // 100 MiB
-      TX_MODE_FINALIZED_BLOCK,
-    );
+    console.log(`Using ${signers.length} signer(s) with addresses:`);
+    signers.forEach((s, i) => console.log(`  Signer ${i}: ${s.address}`));
+
+    // Authorize accounts (skip for live networks with pre-authorized accounts)
+    if (!SKIP_AUTHORIZE) {
+      const { sudoSigner, _ } = setupKeyringAndSigners(SEED, '//Bigdatasigner');
+      await authorizeAccount(
+        bulletinAPI,
+        sudoSigner,
+        signers.map(a => a.address),
+        100,
+        BigInt(100 * 1024 * 1024), // 100 MiB
+        TX_MODE_FINALIZED_BLOCK,
+      );
+    } else {
+      console.log('Skipping authorization (accounts must be pre-authorized on live networks)');
+    }
 
-    // Start 8 workers
+    // Start workers
     signers.forEach((signer, i) => {
       startWorker(bulletinAPI, i, signer);
     });
@@ -300,49 +327,59 @@ async function main() {
 
     console.log(`Storing DAG...`);
     let { rootCid, dagBytes } = await buildUnixFSDagPB(chunks, 0xb220);
+    const dagTxMode = FAST_MODE ? TX_MODE_IN_POOL : TX_MODE_IN_BLOCK;
     // Store with dag-pb codec (0x70) to match rootCid from buildUnixFSDagPB
     let { cid } = await store(
       bulletinAPI,
       signers[0].signer,
       dagBytes,
       0x70, // dag-pb codec
       0xb220, // blake2b-256
-      TX_MODE_IN_BLOCK
+      dagTxMode
     );
-    console.log(`Downloading...${cid} / ${rootCid}`);
+    console.log(`DAG stored with CID: ${cid}`);
     assert.deepStrictEqual(cid, rootCid, '❌ CID mismatch between stored and computed DAG root');
-    let downloadedContent = await fetchCid(IPFS_GATEWAY_URL, rootCid);
-    console.log(`✅ Reconstructed file size: ${downloadedContent.length} bytes`);
-    await fileToDisk(downloadedFileByDagPath, downloadedContent);
-    filesAreEqual(filePath, downloadedFileByDagPath);
-    assert.strictEqual(
-      dataSize,
-      downloadedContent.length,
-      '❌ Failed to download all the data!'
-    );
-
-    // Check all chunks are there.
-    console.log(`Downloading by chunks...`);
-    let downloadedChunks = [];
-    for (const chunk of chunks) {
-      // Download the chunk from IPFS.
-      let block = await ipfs.block.get(chunk.cid, {timeout: 15000});
-      downloadedChunks.push(block);
-    }
-    let fullBuffer = Buffer.concat(downloadedChunks);
-    console.log(`✅ Reconstructed file size: ${fullBuffer.length} bytes`);
-    await fileToDisk(downloadedFilePath, fullBuffer);
-    filesAreEqual(filePath, downloadedFilePath);
-    assert.strictEqual(
-      dataSize,
-      fullBuffer.length,
-      '❌ Failed to download all the data!'
-    );
 
     // Print storage statistics
     await printStatistics(dataSize, bulletinAPI);
 
-    logTestResult(true, 'Store Big Data Test');
+    if (SKIP_IPFS_VERIFY) {
+      console.log('\n--skip-ipfs-verify flag set, skipping IPFS download verification');
+      console.log(`Root CID for manual verification: ${rootCid}`);
+      console.log(`IPFS Gateway URL: ${IPFS_GATEWAY_URL}/ipfs/${rootCid}`);
+      logTestResult(true, 'Store Big Data Test (Storage Only)');
+    } else {
+      console.log(`Downloading...${cid} / ${rootCid}`);
+      let downloadedContent = await fetchCid(IPFS_GATEWAY_URL, rootCid);
+      console.log(`✅ Reconstructed file size: ${downloadedContent.length} bytes`);
+      await fileToDisk(downloadedFileByDagPath, downloadedContent);
+      filesAreEqual(filePath, downloadedFileByDagPath);
+      assert.strictEqual(
+        dataSize,
+        downloadedContent.length,
+        '❌ Failed to download all the data!'
+      );
+
+      // Check all chunks are there.
+      console.log(`Downloading by chunks...`);
+      let downloadedChunks = [];
+      for (const chunk of chunks) {
+        // Download the chunk from IPFS.
+        let block = await ipfs.block.get(chunk.cid, {timeout: 15000});
+        downloadedChunks.push(block);
+      }
+      let fullBuffer = Buffer.concat(downloadedChunks);
+      console.log(`✅ Reconstructed file size: ${fullBuffer.length} bytes`);
+      await fileToDisk(downloadedFilePath, fullBuffer);
+      filesAreEqual(filePath, downloadedFilePath);
+      assert.strictEqual(
+        dataSize,
+        fullBuffer.length,
+        '❌ Failed to download all the data!'
+      );
+
+      logTestResult(true, 'Store Big Data Test');
+    }
     resultCode = 0;
   } catch (error) {
     logError(`Error: ${error.message}`);
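When --skip-ipfs-verify is set, the script now prints the root CID and gateway URL instead of downloading. A manual spot-check could then fetch the reconstructed file directly from the gateway (CID is a placeholder; the /ipfs/ path format matches what the script prints):

    curl -o downloaded.bin "http://127.0.0.1:8283/ipfs/<rootCid>"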
