@@ -4,7 +4,7 @@ import fs from 'fs'
 import os from "os";
 import path from "path";
 import assert from "assert";
-import { authorizeAccount, store, fetchCid, TX_MODE_FINALIZED_BLOCK, TX_MODE_IN_BLOCK } from "./api.js";
+import { authorizeAccount, store, fetchCid, TX_MODE_FINALIZED_BLOCK, TX_MODE_IN_BLOCK, TX_MODE_IN_POOL } from "./api.js";
 import { buildUnixFSDagPB, cidFromBytes } from "./cid_dag_metadata.js";
 import {
   setupKeyringAndSigners,
@@ -30,6 +30,10 @@ import { bulletin } from './.papi/descriptors/dist/index.mjs';
 
 // Command line arguments: [ws_url] [seed] [ipfs_gateway_url] [image_size]
 // Note: --signer-disc=XX flag is also supported for parallel runs
+// Note: --num-signers=N flag controls the number of parallel workers (default: 16)
+// Note: --skip-authorize flag skips account authorization (for live networks with pre-authorized accounts)
+// Note: --skip-ipfs-verify flag skips IPFS download verification
+// Note: --fast flag uses TX_MODE_IN_POOL for faster uploads (broadcast only)
 const args = process.argv.slice(2).filter(arg => !arg.startsWith('--'));
 const NODE_WS = args[0] || 'ws://localhost:10000';
 const SEED = args[1] || '//Alice';
@@ -38,7 +42,14 @@ const IPFS_GATEWAY_URL = args[2] || DEFAULT_IPFS_GATEWAY_URL;
 const IPFS_API_URL = IPFS_GATEWAY_URL.replace(':8283', ':5011');
 // Image size preset: small, big32, big64, big96
 const IMAGE_SIZE = args[3] || 'big64';
-const NUM_SIGNERS = 16;
+
+// Optional flags
+const signerDiscriminator = process.argv.find(arg => arg.startsWith("--signer-disc="))?.split("=")[1] ?? null;
+const numSignersArg = process.argv.find(arg => arg.startsWith("--num-signers="))?.split("=")[1];
+const NUM_SIGNERS = numSignersArg ? parseInt(numSignersArg, 10) : 16;
+const SKIP_AUTHORIZE = process.argv.includes("--skip-authorize");
+const SKIP_IPFS_VERIFY = process.argv.includes("--skip-ipfs-verify");
+const FAST_MODE = process.argv.includes("--fast");
 
 // -------------------- queue --------------------
 const queue = [];
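For reference, here is how the positional arguments and the new flags resolve for a sample run. The invocation below is illustrative, and the gateway URL is an assumption based on the ':8283' port seen in the replace() call above:

//   node store_big_data.js ws://localhost:10000 //Alice http://localhost:8283 big32 --num-signers=4 --fast
// The filter on `--`-prefixed args keeps flags out of the positionals, so:
//   NODE_WS = 'ws://localhost:10000', SEED = '//Alice',
//   IPFS_GATEWAY_URL = 'http://localhost:8283', IMAGE_SIZE = 'big32',
//   NUM_SIGNERS = 4, FAST_MODE = true, SKIP_AUTHORIZE = false, SKIP_IPFS_VERIFY = false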
@@ -59,7 +70,7 @@ const stats = {
   blockHashes: {}, // Map block number -> block hash for timestamp lookups
 };
 
-function waitForQueueLength(targetLength, timeoutMs = 300000) {
+function waitForQueueLength(targetLength, timeoutMs = 600000) {
   return new Promise((resolve, reject) => {
     const start = Date.now();
 
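The promise body is cut off by the hunk; a minimal polling sketch consistent with the visible signature and the new 10-minute default might look as follows. The `queue.length >= targetLength` condition and the 100 ms poll interval are assumptions, not the actual implementation:

function waitForQueueLength(targetLength, timeoutMs = 600000) {
  return new Promise((resolve, reject) => {
    const start = Date.now();
    // Poll until the queue reaches the target length or the timeout elapses.
    const timer = setInterval(() => {
      if (queue.length >= targetLength) {
        clearInterval(timer);
        resolve();
      } else if (Date.now() - start > timeoutMs) {
        clearInterval(timer);
        reject(new Error(`Queue did not reach length ${targetLength} within ${timeoutMs} ms`));
      }
    }, 100);
  });
}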
@@ -101,15 +112,20 @@ async function processJob(typedApi, workerId, signer, chunk) {
     `Worker ${workerId} submitting tx for chunk ${chunk.cid} of size ${chunk.len} bytes`
   );
 
-  let { cid, blockHash, blockNumber } = await store(typedApi, signer.signer, chunk.bytes);
+  const txMode = FAST_MODE ? TX_MODE_IN_POOL : TX_MODE_IN_BLOCK;
+  let { cid, blockHash, blockNumber } = await store(typedApi, signer.signer, chunk.bytes, null, null, txMode);
   pushToResultQueue({ cid, blockNumber });
   if (blockNumber !== undefined) {
     stats.blockNumbers.push(blockNumber);
     if (blockHash && !stats.blockHashes[blockNumber]) {
       stats.blockHashes[blockNumber] = blockHash;
     }
   }
-  console.log(`Worker ${workerId} tx included in block #${blockNumber} with CID: ${cid}`);
+  if (FAST_MODE) {
+    console.log(`Worker ${workerId} tx broadcasted with CID: ${cid}`);
+  } else {
+    console.log(`Worker ${workerId} tx included in block #${blockNumber} with CID: ${cid}`);
+  }
 }
 
 // -------------------- helpers --------------------
@@ -181,6 +197,7 @@ async function printStatistics(dataSize, typedApi) {
   console.log(`│ File size          │ ${formatBytes(dataSize).padEnd(25)} │`);
   console.log(`│ Chunk/TX size      │ ${formatBytes(CHUNK_SIZE).padEnd(25)} │`);
   console.log(`│ Number of chunks   │ ${numTxs.toString().padEnd(25)} │`);
+  console.log(`│ Num signers        │ ${NUM_SIGNERS.toString().padEnd(25)} │`);
   console.log(`│ Avg txs per block  │ ${`${avgTxsPerBlock} (${numTxs}/${totalBlocksInRange})`.padEnd(25)} │`);
   console.log(`│ Time elapsed       │ ${formatDuration(elapsed).padEnd(25)} │`);
   console.log(`│ Blocks elapsed     │ ${`${blocksElapsed} (#${startBlock} → #${endBlock})`.padEnd(25)} │`);
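With the value column padded via padEnd(25), the new row renders roughly as:

// │ Num signers        │ 16                        │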
@@ -230,19 +247,22 @@ export async function storeChunkedFile(api, filePath) {
 }
 
 // Connect to IPFS API (for ipfs-http-client operations like block.get)
-const ipfs = create({
-  url: IPFS_API_URL,
-});
-
-// Optional signer discriminator, when we want to run the script in parallel and don't take care of nonces.
-// E.g.: node store_big_data.js --signer-disc=BB
-const signerDiscriminator = process.argv.find(arg => arg.startsWith("--signer-disc="))?.split("=")[1] ?? null;
+let ipfs = null;
+if (!SKIP_IPFS_VERIFY) {
+  ipfs = create({
+    url: IPFS_API_URL,
+  });
+}
 
 async function main() {
   await cryptoWaitReady()
 
   logHeader('STORE BIG DATA TEST');
-  logConnection(NODE_WS, SEED, IPFS_GATEWAY_URL);
+  logConnection(NODE_WS, SEED.length > 20 ? SEED.substring(0, 10) + '...' : SEED, IPFS_GATEWAY_URL);
+  console.log(`Image size: ${IMAGE_SIZE}, Num signers: ${NUM_SIGNERS}`);
+  if (SKIP_AUTHORIZE) console.log('Skipping authorization (--skip-authorize)');
+  if (SKIP_IPFS_VERIFY) console.log('Skipping IPFS verification (--skip-ipfs-verify)');
+  if (FAST_MODE) console.log('Fast mode enabled (--fast)');
 
   let client, resultCode;
   try {
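The reworked logConnection call redacts long seeds so a real secret never lands in logs. Illustratively (the mnemonic below is the well-known Substrate dev phrase, used purely as an example):

//   '//Alice'                           -> logged unchanged (under 20 chars)
//   'bottom drive obey lake curtain...' -> logged as 'bottom dri...'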
@@ -255,29 +275,36 @@ async function main() {
     // Init WS PAPI client and typed api.
     client = createClient(getWsProvider(NODE_WS));
     const bulletinAPI = client.getTypedApi(bulletin);
-    const { sudoSigner, _ } = setupKeyringAndSigners(SEED, '//Bigdatasigner');
 
-    // Let's do parallelism with multiple accounts
+    // Create signers - use discriminator if provided for parallel runs
     const signers = Array.from({ length: NUM_SIGNERS }, (_, i) => {
-      if (!signerDiscriminator) {
-        return newSigner(`//Signer${i + 1}`)
-      } else {
+      if (signerDiscriminator) {
         console.log(`Using signerDiscriminator: "//Signer${signerDiscriminator}${i + 1}"`);
-        return newSigner(`//Signer${signerDiscriminator}${i + 1}`)
+        return newSigner(`//Signer${signerDiscriminator}${i + 1}`);
+      } else {
+        return newSigner(`//Signer${i + 1}`);
       }
     });
 
-    // Authorize accounts.
-    await authorizeAccount(
-      bulletinAPI,
-      sudoSigner,
-      signers.map(a => a.address),
-      100,
-      BigInt(100 * 1024 * 1024), // 100 MiB
-      TX_MODE_FINALIZED_BLOCK,
-    );
+    console.log(`Using ${signers.length} signer(s) with addresses:`);
+    signers.forEach((s, i) => console.log(`  Signer ${i}: ${s.address}`));
+
+    // Authorize accounts (skip for live networks with pre-authorized accounts)
+    if (!SKIP_AUTHORIZE) {
+      const { sudoSigner, _ } = setupKeyringAndSigners(SEED, '//Bigdatasigner');
+      await authorizeAccount(
+        bulletinAPI,
+        sudoSigner,
+        signers.map(a => a.address),
+        100,
+        BigInt(100 * 1024 * 1024), // 100 MiB
+        TX_MODE_FINALIZED_BLOCK,
+      );
+    } else {
+      console.log('Skipping authorization (accounts must be pre-authorized on live networks)');
+    }
 
-    // Start 8 workers
+    // Start workers
     signers.forEach((signer, i) => {
       startWorker(bulletinAPI, i, signer);
     });
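One side effect of this refactor worth noting, based only on what is visible in the diff: setupKeyringAndSigners(SEED, '//Bigdatasigner') now runs only inside the authorization branch.

// With --skip-authorize, SEED is no longer required to be the sudo key; the
// worker accounts are derived from //Signer paths and must already be
// authorized on the target network.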
@@ -300,49 +327,59 @@ async function main() {
 
     console.log(`Storing DAG...`);
     let { rootCid, dagBytes } = await buildUnixFSDagPB(chunks, 0xb220);
+    const dagTxMode = FAST_MODE ? TX_MODE_IN_POOL : TX_MODE_IN_BLOCK;
     // Store with dag-pb codec (0x70) to match rootCid from buildUnixFSDagPB
     let { cid } = await store(
       bulletinAPI,
       signers[0].signer,
       dagBytes,
       0x70, // dag-pb codec
       0xb220, // blake2b-256
-      TX_MODE_IN_BLOCK
+      dagTxMode
     );
-    console.log(`Downloading... ${cid} / ${rootCid}`);
+    console.log(`DAG stored with CID: ${cid}`);
     assert.deepStrictEqual(cid, rootCid, '❌ CID mismatch between stored and computed DAG root');
-    let downloadedContent = await fetchCid(IPFS_GATEWAY_URL, rootCid);
-    console.log(`✅ Reconstructed file size: ${downloadedContent.length} bytes`);
-    await fileToDisk(downloadedFileByDagPath, downloadedContent);
-    filesAreEqual(filePath, downloadedFileByDagPath);
-    assert.strictEqual(
-      dataSize,
-      downloadedContent.length,
-      '❌ Failed to download all the data!'
-    );
-
-    // Check all chunks are there.
-    console.log(`Downloading by chunks...`);
-    let downloadedChunks = [];
-    for (const chunk of chunks) {
-      // Download the chunk from IPFS.
-      let block = await ipfs.block.get(chunk.cid, { timeout: 15000 });
-      downloadedChunks.push(block);
-    }
-    let fullBuffer = Buffer.concat(downloadedChunks);
-    console.log(`✅ Reconstructed file size: ${fullBuffer.length} bytes`);
-    await fileToDisk(downloadedFilePath, fullBuffer);
-    filesAreEqual(filePath, downloadedFilePath);
-    assert.strictEqual(
-      dataSize,
-      fullBuffer.length,
-      '❌ Failed to download all the data!'
-    );
 
     // Print storage statistics
     await printStatistics(dataSize, bulletinAPI);
 
-    logTestResult(true, 'Store Big Data Test');
+    if (SKIP_IPFS_VERIFY) {
+      console.log('\n--skip-ipfs-verify flag set, skipping IPFS download verification');
+      console.log(`Root CID for manual verification: ${rootCid}`);
+      console.log(`IPFS Gateway URL: ${IPFS_GATEWAY_URL}/ipfs/${rootCid}`);
+      logTestResult(true, 'Store Big Data Test (Storage Only)');
+    } else {
+      console.log(`Downloading... ${cid} / ${rootCid}`);
+      let downloadedContent = await fetchCid(IPFS_GATEWAY_URL, rootCid);
+      console.log(`✅ Reconstructed file size: ${downloadedContent.length} bytes`);
+      await fileToDisk(downloadedFileByDagPath, downloadedContent);
+      filesAreEqual(filePath, downloadedFileByDagPath);
+      assert.strictEqual(
+        dataSize,
+        downloadedContent.length,
+        '❌ Failed to download all the data!'
+      );
+
+      // Check all chunks are there.
+      console.log(`Downloading by chunks...`);
+      let downloadedChunks = [];
+      for (const chunk of chunks) {
+        // Download the chunk from IPFS.
+        let block = await ipfs.block.get(chunk.cid, { timeout: 15000 });
+        downloadedChunks.push(block);
+      }
+      let fullBuffer = Buffer.concat(downloadedChunks);
+      console.log(`✅ Reconstructed file size: ${fullBuffer.length} bytes`);
+      await fileToDisk(downloadedFilePath, fullBuffer);
+      filesAreEqual(filePath, downloadedFilePath);
+      assert.strictEqual(
+        dataSize,
+        fullBuffer.length,
+        '❌ Failed to download all the data!'
+      );
+
+      logTestResult(true, 'Store Big Data Test');
+    }
     resultCode = 0;
   } catch (error) {
     logError(`Error: ${error.message}`);
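One caveat that follows from processJob above: in --fast mode, store() resolves at broadcast time, so there is usually no block number to record.

// Inferred from the `blockNumber !== undefined` guard in processJob: with
// TX_MODE_IN_POOL, stats.blockNumbers stays empty, so the per-block figures
// printed by printStatistics will be incomplete for --fast runs.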