diff --git a/.github/workflows/integration-test.yml b/.github/workflows/integration-test.yml index 23ca0069..a9386c57 100644 --- a/.github/workflows/integration-test.yml +++ b/.github/workflows/integration-test.yml @@ -162,7 +162,7 @@ jobs: - name: Test store-big-data (Westend parachain) working-directory: examples - run: just run-test-store-big-data "${{ env.TEST_DIR }}" + run: just run-test-store-big-data "${{ env.TEST_DIR }}" "big32" - name: Test authorize-preimage-and-store (Westend parachain) working-directory: examples @@ -197,7 +197,7 @@ jobs: - name: Test store-big-data (Polkadot solochain) working-directory: examples - run: just run-test-store-big-data "${{ env.TEST_DIR }}" + run: just run-test-store-big-data "${{ env.TEST_DIR }}" "big32" - name: Test authorize-preimage-and-store (Polkadot solochain) working-directory: examples diff --git a/examples/README.md b/examples/README.md index 39ba035a..bb63cbe6 100644 --- a/examples/README.md +++ b/examples/README.md @@ -79,7 +79,7 @@ tar -xvzf kubo_v0.38.1_darwin-arm64.tar.gz ```shell docker pull ipfs/kubo:latest -docker run -d --name ipfs-node -v ipfs-data:/data/ipfs -p 4001:4001 -p 8080:8080 -p 5001:5001 ipfs/kubo:latest +docker run -d --name ipfs-node -v ipfs-data:/data/ipfs -p 4011:4011 -p 8283:8283 -p 5011:5011 ipfs/kubo:latest docker logs -f ipfs-node ``` diff --git a/examples/authorize_and_store_papi.js b/examples/authorize_and_store_papi.js index ea2a1d2c..33f93c13 100644 --- a/examples/authorize_and_store_papi.js +++ b/examples/authorize_and_store_papi.js @@ -3,7 +3,7 @@ import { createClient } from 'polkadot-api'; import { getWsProvider } from 'polkadot-api/ws-provider'; import { cryptoWaitReady } from '@polkadot/util-crypto'; import { authorizeAccount, fetchCid, store, TX_MODE_FINALIZED_BLOCK } from './api.js'; -import { setupKeyringAndSigners } from './common.js'; +import { setupKeyringAndSigners, DEFAULT_IPFS_GATEWAY_URL } from './common.js'; import { logHeader, logConnection, logSuccess, logError, 
logTestResult } from './logger.js'; import { cidFromBytes } from "./cid_dag_metadata.js"; import { bulletin } from './.papi/descriptors/dist/index.mjs'; @@ -12,7 +12,7 @@ import { bulletin } from './.papi/descriptors/dist/index.mjs'; const args = process.argv.slice(2); const NODE_WS = args[0] || 'ws://localhost:10000'; const SEED = args[1] || '//Alice'; -const HTTP_IPFS_API = args[2] || 'http://127.0.0.1:8080'; +const HTTP_IPFS_API = args[2] || DEFAULT_IPFS_GATEWAY_URL; async function main() { await cryptoWaitReady(); diff --git a/examples/authorize_and_store_papi_smoldot.js b/examples/authorize_and_store_papi_smoldot.js index 730d27e6..a7be11ec 100644 --- a/examples/authorize_and_store_papi_smoldot.js +++ b/examples/authorize_and_store_papi_smoldot.js @@ -5,7 +5,7 @@ import { createClient } from 'polkadot-api'; import { getSmProvider } from 'polkadot-api/sm-provider'; import { cryptoWaitReady } from '@polkadot/util-crypto'; import { authorizeAccount, fetchCid, store } from './api.js'; -import { setupKeyringAndSigners, waitForChainReady } from './common.js'; +import { setupKeyringAndSigners, waitForChainReady, DEFAULT_IPFS_GATEWAY_URL } from './common.js'; import { logHeader, logConfig, logSuccess, logError, logTestResult } from './logger.js'; import { cidFromBytes } from "./cid_dag_metadata.js"; import { bulletin } from './.papi/descriptors/dist/index.mjs'; @@ -111,7 +111,7 @@ async function main() { // Optional parachain chainspec path (only needed for parachains) const parachainSpecPath = process.argv[3] || null; // Optional IPFS API URL - const HTTP_IPFS_API = process.argv[4] || 'http://127.0.0.1:8080'; + const HTTP_IPFS_API = process.argv[4] || DEFAULT_IPFS_GATEWAY_URL; logConfig({ 'Mode': 'Smoldot Light Client', diff --git a/examples/authorize_preimage_and_store_papi.js b/examples/authorize_preimage_and_store_papi.js index 9d60d2e5..6864f10f 100644 --- a/examples/authorize_preimage_and_store_papi.js +++ b/examples/authorize_preimage_and_store_papi.js @@ -3,7 
+3,7 @@ import { createClient } from 'polkadot-api'; import { getWsProvider } from 'polkadot-api/ws-provider'; import { cryptoWaitReady } from '@polkadot/util-crypto'; import { authorizeAccount, authorizePreimage, fetchCid, store, TX_MODE_IN_BLOCK, TX_MODE_FINALIZED_BLOCK } from './api.js'; -import { setupKeyringAndSigners, getContentHash } from './common.js'; +import { setupKeyringAndSigners, getContentHash, DEFAULT_IPFS_GATEWAY_URL } from './common.js'; import { logHeader, logConnection, logSection, logSuccess, logError, logInfo, logTestResult } from './logger.js'; import { cidFromBytes } from "./cid_dag_metadata.js"; import { bulletin } from './.papi/descriptors/dist/index.mjs'; @@ -12,7 +12,7 @@ import { bulletin } from './.papi/descriptors/dist/index.mjs'; const args = process.argv.slice(2); const NODE_WS = args[0] || 'ws://localhost:10000'; const SEED = args[1] || '//Alice'; -const HTTP_IPFS_API = args[2] || 'http://127.0.0.1:8080'; +const HTTP_IPFS_API = args[2] || DEFAULT_IPFS_GATEWAY_URL; /** * Run a preimage authorization + store test. 
diff --git a/examples/common.js b/examples/common.js index 3dc46250..36db0e1e 100644 --- a/examples/common.js +++ b/examples/common.js @@ -6,7 +6,8 @@ import fs from "fs"; import assert from "assert"; // ---- CONFIG ---- -export const HTTP_IPFS_API = 'http://127.0.0.1:8080'; // Local IPFS HTTP gateway +export const DEFAULT_IPFS_API_URL = 'http://127.0.0.1:5011'; // IPFS HTTP API (for ipfs-http-client) +export const DEFAULT_IPFS_GATEWAY_URL = 'http://127.0.0.1:8283'; // IPFS HTTP Gateway (for /ipfs/CID requests) export const CHUNK_SIZE = 1 * 1024 * 1024; // 1 MiB // ----------------- @@ -47,7 +48,7 @@ export function newSigner(seed) { * * @param {string} file * @param {string} text - * @param {"small" | "big32" | "big64"} size + * @param {"small" | "big32" | "big64" | "big96"} size */ export function generateTextImage(file, text, size = "small") { console.log(`Generating ${size} image with text: ${text} to the file: ${file}...`); @@ -77,6 +78,15 @@ export function generateTextImage(file, text, size = "small") { noise: 50, targetBytes: 65 * 1024 * 1024, }, + // ~96 MiB + big96: { + width: 9000, + height: 6500, + quality: 0.95, + shapes: 1000, + noise: 50, + targetBytes: 98 * 1024 * 1024, + }, }; const cfg = presets[size]; @@ -117,7 +127,7 @@ export function generateTextImage(file, text, size = "small") { // 🔧 Big images: tune quality to hit target size let imageBytes; - if ((size === "big32" || size === "big64") && cfg.targetBytes) { + if ((size === "big32" || size === "big64" || size === "big96") && cfg.targetBytes) { let quality = cfg.quality; do { @@ -271,7 +281,7 @@ export function toHex(bytes) { // console.error("Unknown command:", command); // console.error("Usage:"); // console.error( -// ' node common.js generateTextImage "TEXT" [small|big32|big64]' +// ' node common.js generateTextImage "TEXT" [small|big32|big64|big96]' // ); // process.exit(1); // } diff --git a/examples/justfile b/examples/justfile index 02517b57..b449dfdd 100644 --- a/examples/justfile 
+++ b/examples/justfile @@ -216,24 +216,52 @@ ipfs-start test_dir: _check-docker #!/usr/bin/env bash set -e echo "🐳 Starting IPFS Docker container..." - + + # Use non-standard ports to avoid conflicts with other IPFS instances + IPFS_SWARM_PORT=4011 + IPFS_API_PORT=5011 + IPFS_GATEWAY_PORT=8283 + # Pull latest kubo image if not present docker pull ipfs/kubo:latest - + + # Remove old volume to start fresh (no cached peers) + docker volume rm ipfs-data 2>/dev/null || true + # Determine network mode based on OS if [[ "$OSTYPE" == "darwin"* ]]; then - # macOS - use port mapping - NETWORK_ARGS="-p 4001:4001 -p 8080:8080 -p 5001:5001" + # macOS - use bridge with port mapping + NETWORK_ARGS="-p ${IPFS_SWARM_PORT}:${IPFS_SWARM_PORT} -p ${IPFS_GATEWAY_PORT}:${IPFS_GATEWAY_PORT} -p ${IPFS_API_PORT}:${IPFS_API_PORT}" else - # Linux (including CI) - use host networking for direct access - NETWORK_ARGS="--network host -p 4001:4001 -p 8080:8080 -p 5001:5001" + # Linux (including CI) - use host networking so container can reach localhost services + NETWORK_ARGS="--network host" fi - - # Start Docker container + + # Start container - daemon will init and start docker run -d --name ipfs-node -v ipfs-data:/data/ipfs $NETWORK_ARGS ipfs/kubo:latest - echo " Container: ipfs-node" - echo " Waiting for container to start..." + echo " Container: ipfs-node (network: $NETWORK_ARGS)" + echo " Waiting for IPFS to initialize..." + sleep 5 + + # Configure IPFS for isolated mode while daemon is running (config changes take effect on restart) + echo " Configuring isolated mode..." 
+ docker exec ipfs-node ipfs bootstrap rm --all + docker exec ipfs-node ipfs config --json Routing.Type '"none"' + docker exec ipfs-node ipfs config --json Discovery.MDNS.Enabled false + docker exec ipfs-node ipfs config --json Swarm.RelayClient.Enabled false + docker exec ipfs-node ipfs config --json Swarm.RelayService.Enabled false + + # Configure custom ports to avoid conflicts with other IPFS instances + echo " Configuring custom ports (swarm: ${IPFS_SWARM_PORT}, api: ${IPFS_API_PORT}, gateway: ${IPFS_GATEWAY_PORT})..." + docker exec ipfs-node ipfs config --json Addresses.Swarm "[\"/ip4/0.0.0.0/tcp/${IPFS_SWARM_PORT}\", \"/ip6/::/tcp/${IPFS_SWARM_PORT}\"]" + docker exec ipfs-node ipfs config Addresses.API "/ip4/0.0.0.0/tcp/${IPFS_API_PORT}" + docker exec ipfs-node ipfs config Addresses.Gateway "/ip4/0.0.0.0/tcp/${IPFS_GATEWAY_PORT}" + + # Restart container to apply config changes + echo " Restarting daemon with isolated config..." + docker restart ipfs-node sleep 5 + docker exec ipfs-node ipfs --version # Bitswap logging @@ -248,32 +276,31 @@ ipfs-start test_dir: _check-docker ipfs-connect runtime: #!/usr/bin/env bash set -e - + echo "🔗 Connecting IPFS nodes (Docker, runtime: {{ runtime }})..." 
- + # Detect the correct host and protocol for Docker if [[ "$OSTYPE" == "darwin"* ]]; then - # macOS - use dns4/host.docker.internal + # macOS - use dns4/host.docker.internal (bridge network) PROTOCOL="dns4" DOCKER_HOST="host.docker.internal" else - # Linux (with host networking) - use ip4/127.0.0.1 + # Linux - use ip4/127.0.0.1 (host network mode) PROTOCOL="ip4" DOCKER_HOST="127.0.0.1" fi - + echo " Using Docker host: /$PROTOCOL/$DOCKER_HOST" - # TODO: improve this for multiple runtimes - # Different peer IDs for different runtimes + # Different peer IDs and ports for different runtimes if [ "{{ runtime }}" = "bulletin-westend-runtime" ]; then - # Westend parachain peer IDs - docker exec ipfs-node ipfs swarm connect /$PROTOCOL/$DOCKER_HOST/tcp/10001/ws/p2p/12D3KooWJKVVNYByvML4Pgx1GWAYryYo6exA68jQX9Mw3AJ6G5gQ || true - docker exec ipfs-node ipfs swarm connect /$PROTOCOL/$DOCKER_HOST/tcp/12347/ws/p2p/12D3KooWJ8sqAYtMBX3z3jy2iM98XGLFVzVfUPtmgDzxXSPkVpZZ || true + # Westend parachain peer IDs (WebSocket ports: 10002, 12348) + docker exec ipfs-node ipfs swarm connect /$PROTOCOL/$DOCKER_HOST/tcp/10002/ws/p2p/12D3KooWJKVVNYByvML4Pgx1GWAYryYo6exA68jQX9Mw3AJ6G5gQ || true + docker exec ipfs-node ipfs swarm connect /$PROTOCOL/$DOCKER_HOST/tcp/12348/ws/p2p/12D3KooWJ8sqAYtMBX3z3jy2iM98XGLFVzVfUPtmgDzxXSPkVpZZ || true elif [ "{{ runtime }}" = "bulletin-polkadot-runtime" ]; then - # Polkadot solo chain peer IDs - docker exec ipfs-node ipfs swarm connect /$PROTOCOL/$DOCKER_HOST/tcp/10001/ws/p2p/12D3KooWQCkBm1BYtkHpocxCwMgR8yjitEeHGx8spzcDLGt2gkBm || true - docker exec ipfs-node ipfs swarm connect /$PROTOCOL/$DOCKER_HOST/tcp/12347/ws/p2p/12D3KooWRkZhiRhsqmrQ28rt73K7V3aCBpqKrLGSXmZ99PTcTZby || true + # Polkadot solo chain peer IDs (WebSocket ports: 10002, 12348) + docker exec ipfs-node ipfs swarm connect /$PROTOCOL/$DOCKER_HOST/tcp/10002/ws/p2p/12D3KooWQCkBm1BYtkHpocxCwMgR8yjitEeHGx8spzcDLGt2gkBm || true + docker exec ipfs-node ipfs swarm connect 
/$PROTOCOL/$DOCKER_HOST/tcp/12348/ws/p2p/12D3KooWRkZhiRhsqmrQ28rt73K7V3aCBpqKrLGSXmZ99PTcTZby || true else echo "🐳 Unhandled runtime: {{ runtime }} specified!" exit 1 @@ -302,9 +329,9 @@ ipfs-reconnect-start test_dir runtime: cd "$ROOT_DIR" # TODO: improve this for multiple runtimes if [ "{{ runtime }}" = "bulletin-westend-runtime" ]; then - ./scripts/ipfs-reconnect-westend.sh docker > {{ test_dir }}/ipfs-reconnect.log 2>&1 & + ./scripts/ipfs-reconnect-westend.sh docker 10 > {{ test_dir }}/ipfs-reconnect.log 2>&1 & elif [ "{{ runtime }}" = "bulletin-polkadot-runtime" ]; then - ./scripts/ipfs-reconnect-solo.sh docker > {{ test_dir }}/ipfs-reconnect.log 2>&1 & + ./scripts/ipfs-reconnect-solo.sh docker 10 > {{ test_dir }}/ipfs-reconnect.log 2>&1 & else echo "🐳 Unhandled runtime: {{ runtime }} specified!" exit 1 @@ -393,8 +420,8 @@ stop-services test_dir: # mode - Connection mode: "ws" (WebSocket RPC node) or "smoldot" (light client) # ws_url - WebSocket URL (default: ws://localhost:10000, only used in ws mode) # seed - Account seed phrase or dev seed (default: //Alice, only used in ws mode) -# http_ipfs_api - IPFS API URL (default: http://127.0.0.1:8080) -run-test-authorize-and-store test_dir runtime mode="ws" ws_url="ws://localhost:10000" seed="//Alice" http_ipfs_api="http://127.0.0.1:8080": +# http_ipfs_api - IPFS API URL (default: http://127.0.0.1:8283) +run-test-authorize-and-store test_dir runtime mode="ws" ws_url="ws://localhost:10000" seed="//Alice" http_ipfs_api="http://127.0.0.1:8283": #!/usr/bin/env bash set -e @@ -426,8 +453,8 @@ run-test-authorize-and-store test_dir runtime mode="ws" ws_url="ws://localhost:1 # test_dir - Test directory where services are running # ws_url - WebSocket URL of the Bulletin chain node (default: ws://localhost:10000) # seed - Account seed phrase or dev seed (default: //Alice) -# http_ipfs_api - IPFS API URL (default: http://127.0.0.1:8080) -run-test-store-chunked-data test_dir ws_url="ws://localhost:10000" seed="//Alice" 
http_ipfs_api="http://127.0.0.1:8080": +# http_ipfs_api - IPFS API URL (default: http://127.0.0.1:8283) +run-test-store-chunked-data test_dir ws_url="ws://localhost:10000" seed="//Alice" http_ipfs_api="http://127.0.0.1:8283": #!/usr/bin/env bash set -e node store_chunked_data.js "{{ ws_url }}" "{{ seed }}" "{{ http_ipfs_api }}" @@ -435,21 +462,22 @@ run-test-store-chunked-data test_dir ws_url="ws://localhost:10000" seed="//Alice # Run store-big-data test only (services must already be running via start-services) # Parameters: # test_dir - Test directory where services are running +# image_size - Image size preset: small, big32, big64, big96 (default: big64) # ws_url - WebSocket URL of the Bulletin chain node (default: ws://localhost:10000) # seed - Account seed phrase or dev seed (default: //Alice) -# http_ipfs_api - IPFS API URL (default: http://127.0.0.1:5001) -run-test-store-big-data test_dir ws_url="ws://localhost:10000" seed="//Alice" http_ipfs_api="http://127.0.0.1:5001": +# ipfs_gateway_url - IPFS Gateway URL (default: http://127.0.0.1:8283) +run-test-store-big-data test_dir image_size="big64" ws_url="ws://localhost:10000" seed="//Alice" ipfs_gateway_url="http://127.0.0.1:8283": #!/usr/bin/env bash set -e - node store_big_data.js "{{ ws_url }}" "{{ seed }}" "{{ http_ipfs_api }}" + node store_big_data.js "{{ ws_url }}" "{{ seed }}" "{{ ipfs_gateway_url }}" "{{ image_size }}" # Run authorize-preimage-and-store test only (services must already be running via start-services) # Parameters: # test_dir - Test directory where services are running # ws_url - WebSocket URL of the Bulletin chain node (default: ws://localhost:10000) # seed - Account seed phrase or dev seed (default: //Alice) -# http_ipfs_api - IPFS API URL (default: http://127.0.0.1:8080) -run-test-authorize-preimage-and-store test_dir ws_url="ws://localhost:10000" seed="//Alice" http_ipfs_api="http://127.0.0.1:8080": +# http_ipfs_api - IPFS API URL (default: http://127.0.0.1:8283) 
+run-test-authorize-preimage-and-store test_dir ws_url="ws://localhost:10000" seed="//Alice" http_ipfs_api="http://127.0.0.1:8283": #!/usr/bin/env bash set -e node authorize_preimage_and_store_papi.js "{{ ws_url }}" "{{ seed }}" "{{ http_ipfs_api }}" @@ -488,13 +516,13 @@ run-authorize-and-store runtime mode="ws": npm-install # Parachain: relay chain (required) + parachain spec (optional) RELAY_CHAINSPEC_PATH="$TEST_DIR/bob/cfg/westend-local.json" PARACHAIN_CHAINSPEC_PATH="$TEST_DIR/bulletin-westend-collator-2/cfg/westend-local-2487.json" - node $SCRIPT_NAME "$RELAY_CHAINSPEC_PATH" "$PARACHAIN_CHAINSPEC_PATH" "http://127.0.0.1:8080" + node $SCRIPT_NAME "$RELAY_CHAINSPEC_PATH" "$PARACHAIN_CHAINSPEC_PATH" "http://127.0.0.1:8283" else # bulletin-polkadot-runtime (solochain) CHAINSPEC_PATH="$TEST_DIR/bob/cfg/bulletin-polkadot-local.json" - node $SCRIPT_NAME "$CHAINSPEC_PATH" "" "http://127.0.0.1:8080" + node $SCRIPT_NAME "$CHAINSPEC_PATH" "" "http://127.0.0.1:8283" fi else - node $SCRIPT_NAME "ws://localhost:10000" "//Alice" "http://127.0.0.1:8080" + node $SCRIPT_NAME "ws://localhost:10000" "//Alice" "http://127.0.0.1:8283" fi EXAMPLE_EXIT=$? 
@@ -514,8 +542,8 @@ run-authorize-and-store runtime mode="ws": npm-install # runtime - Runtime name (e.g., "bulletin-polkadot-runtime", "bulletin-westend-runtime") # ws_url - WebSocket URL (default: ws://localhost:10000) # seed - Account seed phrase or dev seed (default: //Alice) -# http_ipfs_api - IPFS API URL (default: http://127.0.0.1:8080) -run-store-chunked-data runtime ws_url="ws://localhost:10000" seed="//Alice" http_ipfs_api="http://127.0.0.1:8080": npm-install +# http_ipfs_api - IPFS API URL (default: http://127.0.0.1:8283) +run-store-chunked-data runtime ws_url="ws://localhost:10000" seed="//Alice" http_ipfs_api="http://127.0.0.1:8283": npm-install #!/usr/bin/env bash set -e @@ -544,8 +572,8 @@ run-store-chunked-data runtime ws_url="ws://localhost:10000" seed="//Alice" http # runtime - Runtime name (e.g., "bulletin-polkadot-runtime", "bulletin-westend-runtime") # ws_url - WebSocket URL (default: ws://localhost:10000) # seed - Account seed phrase or dev seed (default: //Alice) -# http_ipfs_api - IPFS API URL (default: http://127.0.0.1:5001) -run-store-big-data runtime ws_url="ws://localhost:10000" seed="//Alice" http_ipfs_api="http://127.0.0.1:5001": npm-install +# http_ipfs_api - IPFS API URL (default: http://127.0.0.1:5011) +run-store-big-data runtime ws_url="ws://localhost:10000" seed="//Alice" http_ipfs_api="http://127.0.0.1:5011": npm-install #!/usr/bin/env bash set -e @@ -574,8 +602,8 @@ run-store-big-data runtime ws_url="ws://localhost:10000" seed="//Alice" http_ipf # runtime - Runtime name (e.g., "bulletin-polkadot-runtime", "bulletin-westend-runtime") # ws_url - WebSocket URL (default: ws://localhost:10000) # seed - Account seed phrase or dev seed (default: //Alice) -# http_ipfs_api - IPFS API URL (default: http://127.0.0.1:8080) -run-authorize-preimage-and-store-papi runtime ws_url="ws://localhost:10000" seed="//Alice" http_ipfs_api="http://127.0.0.1:8080": npm-install +# http_ipfs_api - IPFS API URL (default: http://127.0.0.1:8283) 
+run-authorize-preimage-and-store-papi runtime ws_url="ws://localhost:10000" seed="//Alice" http_ipfs_api="http://127.0.0.1:8283": npm-install #!/usr/bin/env bash set -e @@ -618,23 +646,26 @@ PASEO_RPC := "wss://paseo-bulletin-rpc.polkadot.io" # Parameters: # ws_url - WebSocket URL of the Bulletin chain node # seed - Account seed phrase (must be pre-authorized on the network) -# http_ipfs_api - IPFS API URL (default: http://127.0.0.1:5001 for local Docker Kubo) -_run-live-tests ws_url seed http_ipfs_api="http://127.0.0.1:5001": npm-install +# http_ipfs_api - IPFS API URL (default: http://127.0.0.1:8283 for local Docker Kubo) +_run-live-tests ws_url seed http_ipfs_api="http://127.0.0.1:8283": npm-install #!/usr/bin/env bash set -e echo "🌐 Running live tests against: {{ ws_url }}" just papi-generate "{{ ws_url }}" - # Prerequisites for live testing: - # 1. Start local IPFS: docker run -d --name ipfs-node -p 4001:4001 -p 5001:5001 -p 8080:8080 ipfs/kubo:latest - # 2. Connect to Bulletin collators: docker exec ipfs-node ipfs swarm connect + # To use local Docker Kubo with external RPC: + # 1. Start local IPFS: docker run -d --name ipfs-node -p 4011:4011 -p 5011:5011 -p 8283:8283 ipfs/kubo:latest + # 2. 
Connect to Bulletin IPFS nodes: docker exec ipfs-node ipfs swarm connect + #     (Get multiaddrs from the Bulletin chain's IPFS bootstrap list) node store_big_data.js "{{ ws_url }}" "{{ seed }}" "{{ http_ipfs_api }}" # Run live tests against Westend Bulletin -run-live-tests-westend seed http_ipfs_api="http://127.0.0.1:5001": +run-live-tests-westend seed http_ipfs_api="http://127.0.0.1:8283": just _run-live-tests "{{ WESTEND_RPC }}" "{{ seed }}" "{{ http_ipfs_api }}" # Run live tests against Paseo Bulletin -run-live-tests-paseo seed http_ipfs_api="http://127.0.0.1:5001": +run-live-tests-paseo seed http_ipfs_api="http://127.0.0.1:8283": just _run-live-tests "{{ PASEO_RPC }}" "{{ seed }}" "{{ http_ipfs_api }}" + +# TODO: missing PoP diff --git a/examples/native_ipfs_dag_pb_chunked_data.js b/examples/native_ipfs_dag_pb_chunked_data.js index 6d7fcee1..32fc5b32 100644 --- a/examples/native_ipfs_dag_pb_chunked_data.js +++ b/examples/native_ipfs_dag_pb_chunked_data.js @@ -2,7 +2,7 @@ import { createClient } from 'polkadot-api'; import { getWsProvider } from 'polkadot-api/ws-provider'; import { cryptoWaitReady } from '@polkadot/util-crypto'; import { cidFromBytes, buildUnixFSDagPB, convertCid } from './cid_dag_metadata.js'; -import { generateTextImage, fileToDisk, filesAreEqual, newSigner, HTTP_IPFS_API } from './common.js'; +import { generateTextImage, fileToDisk, filesAreEqual, newSigner, DEFAULT_IPFS_GATEWAY_URL as HTTP_IPFS_API } from './common.js'; import { authorizeAccount, store, storeChunkedFile, fetchCid } from './api.js'; import { bulletin } from './.papi/descriptors/dist/index.mjs'; import { withPolkadotSdkCompat } from "polkadot-api/polkadot-sdk-compat" @@ -62,10 +62,10 @@ async function main() { // (Other words Bulletin is compatible) console.log('🧱 DAG stored on Bulletin with CID:', rootCid.toString()) console.log('\n🌐 Try opening in browser:') - console.log(` http://127.0.0.1:8080/ipfs/${rootCid.toString()}`) + console.log(` 
${HTTP_IPFS_API}/ipfs/${rootCid.toString()}`) console.log(" (You'll see binary content since this is an image)") console.log('') - console.log(` http://127.0.0.1:8080/ipfs/${convertCid(rootCid, 0x55)}`) + console.log(` ${HTTP_IPFS_API}/ipfs/${convertCid(rootCid, 0x55)}`) console.log(" (You'll see the DAG file itself)") // Download the content from IPFS HTTP gateway. diff --git a/examples/store_big_data.js b/examples/store_big_data.js index ed929b7b..7e3d7c33 100644 --- a/examples/store_big_data.js +++ b/examples/store_big_data.js @@ -4,7 +4,7 @@ import fs from 'fs' import os from "os"; import path from "path"; import assert from "assert"; -import { authorizeAccount, store, fetchCid, TX_MODE_FINALIZED_BLOCK } from "./api.js"; +import {authorizeAccount, store, fetchCid, TX_MODE_FINALIZED_BLOCK, TX_MODE_IN_BLOCK} from "./api.js"; import { buildUnixFSDagPB, cidFromBytes } from "./cid_dag_metadata.js"; import { setupKeyringAndSigners, @@ -13,6 +13,8 @@ import { fileToDisk, filesAreEqual, generateTextImage, + DEFAULT_IPFS_API_URL, + DEFAULT_IPFS_GATEWAY_URL, } from "./common.js"; import { logHeader, @@ -26,12 +28,16 @@ import { createClient } from 'polkadot-api'; import { getWsProvider } from "polkadot-api/ws-provider"; import { bulletin } from './.papi/descriptors/dist/index.mjs'; -// Command line arguments: [ws_url] [seed] [ipfs_api_url] +// Command line arguments: [ws_url] [seed] [ipfs_gateway_url] [image_size] // Note: --signer-disc=XX flag is also supported for parallel runs const args = process.argv.slice(2).filter(arg => !arg.startsWith('--')); const NODE_WS = args[0] || 'ws://localhost:10000'; const SEED = args[1] || '//Alice'; -const HTTP_IPFS_API = args[2] || 'http://127.0.0.1:5001'; +const IPFS_GATEWAY_URL = args[2] || DEFAULT_IPFS_GATEWAY_URL; +// Derive API URL from gateway URL (port 8283 -> 5011) +const IPFS_API_URL = IPFS_GATEWAY_URL.replace(':8283', ':5011'); +// Image size preset: small, big32, big64, big96 +const IMAGE_SIZE = args[3] || 'big64'; const 
NUM_SIGNERS = 16; // -------------------- queue -------------------- @@ -50,6 +56,7 @@ const stats = { startTime: null, endTime: null, blockNumbers: [], // Track all block numbers where txs were included + blockHashes: {}, // Map block number -> block hash for timestamp lookups }; function waitForQueueLength(targetLength, timeoutMs = 300000) { @@ -94,11 +101,13 @@ async function processJob(typedApi, workerId, signer, chunk) { `Worker ${workerId} submitting tx for chunk ${chunk.cid} of size ${chunk.len} bytes` ); - // Use longer timeout (120s) for parallel workers to avoid timeouts under heavy load let { cid, blockHash, blockNumber } = await store(typedApi, signer.signer, chunk.bytes); pushToResultQueue({ cid, blockNumber }); if (blockNumber !== undefined) { stats.blockNumbers.push(blockNumber); + if (blockHash && !stats.blockHashes[blockNumber]) { + stats.blockHashes[blockNumber] = blockHash; + } } console.log(`Worker ${workerId} tx included in block #${blockNumber} with CID: ${cid}`); } @@ -120,7 +129,7 @@ function formatDuration(ms) { return ms + ' ms'; } -function printStatistics(dataSize) { +async function printStatistics(dataSize, typedApi) { const numTxs = stats.blockNumbers.length; const elapsed = stats.endTime - stats.startTime; @@ -135,29 +144,61 @@ function printStatistics(dataSize) { txsPerBlock[blockNum] = (txsPerBlock[blockNum] || 0) + 1; } const numBlocksWithTxs = Object.keys(txsPerBlock).length; - const avgTxsPerBlock = numBlocksWithTxs > 0 ? (numTxs / numBlocksWithTxs).toFixed(2) : 'N/A'; + const totalBlocksInRange = blocksElapsed + 1; + const avgTxsPerBlock = totalBlocksInRange > 0 ? 
(numTxs / totalBlocksInRange).toFixed(2) : 'N/A'; + + // Fetch block timestamps for all blocks in range + const blockTimestamps = {}; + for (let blockNum = startBlock; blockNum <= endBlock; blockNum++) { + try { + // Get block hash - either from our stored hashes or query the chain + let blockHash = stats.blockHashes[blockNum]; + if (!blockHash) { + const queriedHash = await typedApi.query.System.BlockHash.getValue(blockNum); + // Handle different hash formats (string, Binary, Uint8Array) + // PAPI Binary objects have asHex() method, fall back to toString() + const hashStr = typeof queriedHash === 'string' + ? queriedHash + : (queriedHash?.asHex?.() || queriedHash?.toHex?.() || queriedHash?.toString?.() || ''); + // Check if hash is not empty (all zeros means pruned/unavailable) + if (hashStr && !hashStr.match(/^(0x)?0+$/)) { + blockHash = queriedHash; + } + } + if (blockHash) { + const timestamp = await typedApi.query.Timestamp.Now.getValue({ at: blockHash }); + blockTimestamps[blockNum] = timestamp; + } + } catch (e) { + console.error(`Failed to fetch timestamp for block #${blockNum}:`, e.message); + } + } console.log('\n'); - console.log('═══════════════════════════════════════════════════════════════════════════════'); - console.log(' 📊 STORAGE STATISTICS '); - console.log('═══════════════════════════════════════════════════════════════════════════════'); - console.log(`| File size | ${formatBytes(dataSize).padEnd(20)} |`); - console.log(`| Chunk/TX size | ${formatBytes(CHUNK_SIZE).padEnd(20)} |`); - console.log(`| Number of chunks | ${numTxs.toString().padEnd(20)} |`); - console.log(`| Avg txs per block | ${avgTxsPerBlock.toString().padEnd(20)} |`); - console.log(`| Time elapsed | ${formatDuration(elapsed).padEnd(20)} |`); - console.log(`| Blocks elapsed | ${`${blocksElapsed} (#${startBlock} → #${endBlock})`.padEnd(20)} |`); - console.log(`| Throughput | ${formatBytes(dataSize / (elapsed / 1000)).padEnd(20)} /s |`); - 
console.log('═══════════════════════════════════════════════════════════════════════════════'); - console.log(' 📦 TRANSACTIONS PER BLOCK '); - console.log('═══════════════════════════════════════════════════════════════════════════════'); + console.log('════════════════════════════════════════════════════════════════════════════════════════════════════════'); + console.log(' 📊 STORAGE STATISTICS '); + console.log('════════════════════════════════════════════════════════════════════════════════════════════════════════'); + console.log(`│ File size │ ${formatBytes(dataSize).padEnd(25)} │`); + console.log(`│ Chunk/TX size │ ${formatBytes(CHUNK_SIZE).padEnd(25)} │`); + console.log(`│ Number of chunks │ ${numTxs.toString().padEnd(25)} │`); + console.log(`│ Avg txs per block │ ${`${avgTxsPerBlock} (${numTxs}/${totalBlocksInRange})`.padEnd(25)} │`); + console.log(`│ Time elapsed │ ${formatDuration(elapsed).padEnd(25)} │`); + console.log(`│ Blocks elapsed │ ${`${blocksElapsed} (#${startBlock} → #${endBlock})`.padEnd(25)} │`); + console.log(`│ Throughput │ ${formatBytes(dataSize / (elapsed / 1000)).padEnd(22)} /s │`); + console.log('════════════════════════════════════════════════════════════════════════════════════════════════════════'); + console.log(' 📦 TRANSACTIONS PER BLOCK '); + console.log('════════════════════════════════════════════════════════════════════════════════════════════════════════'); + console.log('│ Block │ Time │ TXs │ Size │ Bar │'); + console.log('├─────────────┼─────────────────────────┼─────┼──────────────┼──────────────────────┤'); for (let blockNum = startBlock; blockNum <= endBlock; blockNum++) { const count = txsPerBlock[blockNum] || 0; const size = count > 0 ? formatBytes(count * CHUNK_SIZE) : '-'; - const bar = count > 0 ? '█'.repeat(count) : ''; - console.log(`| Block #${blockNum.toString().padEnd(10)} | ${count.toString().padStart(3)} txs | ${size.padEnd(12)} | ${bar}`); + const bar = count > 0 ? 
'█'.repeat(Math.min(count, 20)) : ''; + const timestamp = blockTimestamps[blockNum]; + const timeStr = timestamp ? new Date(Number(timestamp)).toISOString().replace('T', ' ').replace('Z', '') : '-'; + console.log(`│ #${blockNum.toString().padEnd(10)} │ ${timeStr.padEnd(23)} │ ${count.toString().padStart(3)} │ ${size.padEnd(12)} │ ${bar.padEnd(20)} │`); } - console.log('═══════════════════════════════════════════════════════════════════════════════'); + console.log('════════════════════════════════════════════════════════════════════════════════════════════════════════'); console.log('\n'); } @@ -188,9 +229,9 @@ export async function storeChunkedFile(api, filePath) { return { chunks, dataSize: fileData.length }; } -// Connect to a local IPFS gateway (e.g. Kubo) +// Connect to IPFS API (for ipfs-http-client operations like block.get) const ipfs = create({ - url: HTTP_IPFS_API, + url: IPFS_API_URL, }); // Optional signer discriminator, when we want to run the script in parallel and don't take care of nonces. @@ -201,7 +242,7 @@ async function main() { await cryptoWaitReady() logHeader('STORE BIG DATA TEST'); - logConnection(NODE_WS, SEED, HTTP_IPFS_API); + logConnection(NODE_WS, SEED, IPFS_GATEWAY_URL); let client, resultCode; try { @@ -209,7 +250,7 @@ async function main() { const filePath = path.join(tmpDir, "image.jpeg"); const downloadedFilePath = path.join(tmpDir, "downloaded.jpeg"); const downloadedFileByDagPath = path.join(tmpDir, "downloadedByDag.jpeg"); - generateTextImage(filePath, "Hello, Bulletin big64 - " + new Date().toString(), "big64"); + generateTextImage(filePath, `Hello, Bulletin ${IMAGE_SIZE} - ` + new Date().toString(), IMAGE_SIZE); // Init WS PAPI client and typed api. 
client = createClient(getWsProvider(NODE_WS)); @@ -259,9 +300,18 @@ async function main() { console.log(`Storing DAG...`); let { rootCid, dagBytes } = await buildUnixFSDagPB(chunks, 0xb220); - let { cid } = await store(bulletinAPI, signers[0].signer, dagBytes); + // Store with dag-pb codec (0x70) to match rootCid from buildUnixFSDagPB + let { cid } = await store( + bulletinAPI, + signers[0].signer, + dagBytes, + 0x70, // dag-pb codec + 0xb220, // blake2b-256 + TX_MODE_IN_BLOCK + ); console.log(`Downloading...${cid} / ${rootCid}`); - let downloadedContent = await fetchCid(HTTP_IPFS_API, rootCid); + assert.deepStrictEqual(cid, rootCid, '❌ CID mismatch between stored and computed DAG root'); + let downloadedContent = await fetchCid(IPFS_GATEWAY_URL, rootCid); console.log(`✅ Reconstructed file size: ${downloadedContent.length} bytes`); await fileToDisk(downloadedFileByDagPath, downloadedContent); filesAreEqual(filePath, downloadedFileByDagPath); @@ -290,7 +340,7 @@ async function main() { ); // Print storage statistics - printStatistics(dataSize); + await printStatistics(dataSize, bulletinAPI); logTestResult(true, 'Store Big Data Test'); resultCode = 0; diff --git a/examples/store_chunked_data.js b/examples/store_chunked_data.js index e3c0df36..38b155df 100644 --- a/examples/store_chunked_data.js +++ b/examples/store_chunked_data.js @@ -6,7 +6,7 @@ import { CID } from 'multiformats/cid' import * as dagPB from '@ipld/dag-pb' import { TextDecoder } from 'util' import assert from "assert"; -import { generateTextImage, filesAreEqual, fileToDisk, setupKeyringAndSigners } from './common.js' +import { generateTextImage, filesAreEqual, fileToDisk, setupKeyringAndSigners, DEFAULT_IPFS_GATEWAY_URL } from './common.js' import { logHeader, logConnection, logSuccess, logError, logTestResult } from './logger.js' import { authorizeAccount, fetchCid, store, storeChunkedFile, TX_MODE_FINALIZED_BLOCK } from "./api.js"; import { buildUnixFSDagPB, cidFromBytes, convertCid } from 
"./cid_dag_metadata.js"; @@ -19,7 +19,7 @@ import { bulletin } from './.papi/descriptors/dist/index.mjs'; const args = process.argv.slice(2); const NODE_WS = args[0] || 'ws://localhost:10000'; const SEED = args[1] || '//Alice'; -const HTTP_IPFS_API = args[2] || 'http://127.0.0.1:8080'; +const HTTP_IPFS_API = args[2] || DEFAULT_IPFS_GATEWAY_URL; const CHUNK_SIZE = 6 * 1024 // 6 KB /** @@ -215,10 +215,10 @@ async function main() { ); console.log('🧱 DAG stored on IPFS with CID:', rawDagCid.toString()) console.log('\n🌐 Try opening in browser:') - console.log(` http://127.0.0.1:8080/ipfs/${rootCid.toString()}`) - console.log(' (You’ll see binary content since this is an image)') - console.log(` http://127.0.0.1:8080/ipfs/${rawDagCid.toString()}`) - console.log(' (You’ll see the encoded DAG descriptor content)') + console.log(` ${HTTP_IPFS_API}/ipfs/${rootCid.toString()}`) + console.log(" (You'll see binary content since this is an image)") + console.log(` ${HTTP_IPFS_API}/ipfs/${rawDagCid.toString()}`) + console.log(" (You'll see the encoded DAG descriptor content)") // Download the content from IPFS HTTP gateway const fullBuffer = await fetchCid(HTTP_IPFS_API, rootCid); diff --git a/scripts/ipfs-reconnect-solo.sh b/scripts/ipfs-reconnect-solo.sh index 2abd43d6..661bde90 100755 --- a/scripts/ipfs-reconnect-solo.sh +++ b/scripts/ipfs-reconnect-solo.sh @@ -2,35 +2,51 @@ THIS_DIR=$(cd $(dirname $0); pwd) -# Choose mode based on argument +# Arguments: mode [sleep_interval] mode="${1:-local}" +sleep_interval="${2:-2}" if [ "$mode" = "docker" ]; then check_cmd="docker exec ipfs-node ipfs" - check_host="172.17.0.1" + + if [[ "$OSTYPE" == "darwin"* ]]; then + # macOS - use dns4/host.docker.internal (bridge network) + check_protocol="dns4" + check_host="host.docker.internal" + else + # Linux - use ip4/127.0.0.1 (host network mode) + check_protocol="ip4" + check_host="127.0.0.1" + fi else check_cmd="${THIS_DIR}/../kubo/ipfs" + check_protocol="ip4" check_host="127.0.0.1" fi -# 
Peers to monitor -PEERS_TO_CHECK=( - "/ip4/${check_host}/tcp/10001/ws/p2p/12D3KooWQCkBm1BYtkHpocxCwMgR8yjitEeHGx8spzcDLGt2gkBm" - "/ip4/${check_host}/tcp/12347/ws/p2p/12D3KooWRkZhiRhsqmrQ28rt73K7V3aCBpqKrLGSXmZ99PTcTZby" +# Peer IDs to monitor +PEER_IDS=( + "12D3KooWQCkBm1BYtkHpocxCwMgR8yjitEeHGx8spzcDLGt2gkBm" + "12D3KooWRkZhiRhsqmrQ28rt73K7V3aCBpqKrLGSXmZ99PTcTZby" ) +# Full addresses for connecting (WebSocket ports: 10002, 12348) +declare -A PEER_ADDRS +PEER_ADDRS["12D3KooWQCkBm1BYtkHpocxCwMgR8yjitEeHGx8spzcDLGt2gkBm"]="/${check_protocol}/${check_host}/tcp/10002/ws/p2p/12D3KooWQCkBm1BYtkHpocxCwMgR8yjitEeHGx8spzcDLGt2gkBm" +PEER_ADDRS["12D3KooWRkZhiRhsqmrQ28rt73K7V3aCBpqKrLGSXmZ99PTcTZby"]="/${check_protocol}/${check_host}/tcp/12348/ws/p2p/12D3KooWRkZhiRhsqmrQ28rt73K7V3aCBpqKrLGSXmZ99PTcTZby" + while true; do # Read all current connections once PEERS="$(${check_cmd} swarm peers)" + echo "Connected peers: $PEERS" - for PEER in "${PEERS_TO_CHECK[@]}"; do - echo "$PEERS" | grep -q "$PEER" - if [ $? -ne 0 ]; then - echo "$(date) - $PEER disconnected. Reconnecting..." - ${check_cmd} swarm connect "$PEER" + for PEER_ID in "${PEER_IDS[@]}"; do + if echo "$PEERS" | grep -q "$PEER_ID"; then + echo "$(date) - $PEER_ID connected." else - echo "$(date) - $PEER connected." + echo "$(date) - $PEER_ID disconnected. Reconnecting..." 
+ ${check_cmd} swarm connect "${PEER_ADDRS[$PEER_ID]}" fi done - sleep 2 + sleep "$sleep_interval" done diff --git a/scripts/ipfs-reconnect-westend.sh b/scripts/ipfs-reconnect-westend.sh index 937642a5..83f2904d 100755 --- a/scripts/ipfs-reconnect-westend.sh +++ b/scripts/ipfs-reconnect-westend.sh @@ -2,35 +2,51 @@ THIS_DIR=$(cd $(dirname $0); pwd) -# Choose mode based on argument +# Arguments: mode [sleep_interval] mode="${1:-local}" +sleep_interval="${2:-2}" if [ "$mode" = "docker" ]; then check_cmd="docker exec ipfs-node ipfs" - check_host="172.17.0.1" + + if [[ "$OSTYPE" == "darwin"* ]]; then + # macOS - use dns4/host.docker.internal (bridge network) + check_protocol="dns4" + check_host="host.docker.internal" + else + # Linux - use ip4/127.0.0.1 (host network mode) + check_protocol="ip4" + check_host="127.0.0.1" + fi else check_cmd="${THIS_DIR}/../kubo/ipfs" + check_protocol="ip4" check_host="127.0.0.1" fi -# Peers to monitor -PEERS_TO_CHECK=( - "/ip4/${check_host}/tcp/10001/ws/p2p/12D3KooWJKVVNYByvML4Pgx1GWAYryYo6exA68jQX9Mw3AJ6G5gQ" - "/ip4/${check_host}/tcp/12347/ws/p2p/12D3KooWJ8sqAYtMBX3z3jy2iM98XGLFVzVfUPtmgDzxXSPkVpZZ" +# Peer IDs to monitor +PEER_IDS=( + "12D3KooWJKVVNYByvML4Pgx1GWAYryYo6exA68jQX9Mw3AJ6G5gQ" + "12D3KooWJ8sqAYtMBX3z3jy2iM98XGLFVzVfUPtmgDzxXSPkVpZZ" ) +# Full addresses for connecting (WebSocket ports: 10002, 12348) +declare -A PEER_ADDRS +PEER_ADDRS["12D3KooWJKVVNYByvML4Pgx1GWAYryYo6exA68jQX9Mw3AJ6G5gQ"]="/${check_protocol}/${check_host}/tcp/10002/ws/p2p/12D3KooWJKVVNYByvML4Pgx1GWAYryYo6exA68jQX9Mw3AJ6G5gQ" +PEER_ADDRS["12D3KooWJ8sqAYtMBX3z3jy2iM98XGLFVzVfUPtmgDzxXSPkVpZZ"]="/${check_protocol}/${check_host}/tcp/12348/ws/p2p/12D3KooWJ8sqAYtMBX3z3jy2iM98XGLFVzVfUPtmgDzxXSPkVpZZ" + while true; do # Read all current connections once PEERS="$(${check_cmd} swarm peers)" + echo "Connected peers: $PEERS" - for PEER in "${PEERS_TO_CHECK[@]}"; do - echo "$PEERS" | grep -q "$PEER" - if [ $? -ne 0 ]; then - echo "$(date) - $PEER disconnected. 
Reconnecting..." - ${check_cmd} swarm connect "$PEER" + for PEER_ID in "${PEER_IDS[@]}"; do + if echo "$PEERS" | grep -q "$PEER_ID"; then + echo "$(date) - $PEER_ID connected." else - echo "$(date) - $PEER connected." + echo "$(date) - $PEER_ID disconnected. Reconnecting..." + ${check_cmd} swarm connect "${PEER_ADDRS[$PEER_ID]}" fi done - sleep 2 + sleep "$sleep_interval" done diff --git a/zombienet/bulletin-polkadot-local.toml b/zombienet/bulletin-polkadot-local.toml index b05f223c..861005f7 100644 --- a/zombienet/bulletin-polkadot-local.toml +++ b/zombienet/bulletin-polkadot-local.toml @@ -11,11 +11,25 @@ name = "alice" p2p_port = 10001 rpc_port = 10000 validator = true -args = ["--ipfs-server", "-lruntime=debug,sub-libp2p::bitswap=trace,runtime::transaction-storage=trace,litep2p::ipfs::bitswap=debug"] +args = [ + "--ipfs-server", + # Increase tx pool from 20->40 MiB + # "--pool-kbytes 40960", + "--listen-addr=/ip4/0.0.0.0/tcp/10001", + "--listen-addr=/ip4/0.0.0.0/tcp/10002/ws", + "-lruntime=debug,sub-libp2p::bitswap=trace,runtime::transaction-storage=trace,litep2p::ipfs::bitswap=debug", +] [[relaychain.nodes]] name = "bob" p2p_port = 12347 rpc_port = 12346 validator = true -args = ["-lruntime=debug,bitswap=trace,sub-libp2p::bitswap=trace,runtime::transaction-storage=trace,litep2p::ipfs::bitswap=debug"] +args = [ + "--ipfs-server", + # Increase tx pool from 20->40 MiB + # "--pool-kbytes 40960", + "--listen-addr=/ip4/0.0.0.0/tcp/12347", + "--listen-addr=/ip4/0.0.0.0/tcp/12348/ws", + "-lruntime=debug,bitswap=trace,sub-libp2p::bitswap=trace,runtime::transaction-storage=trace,litep2p::ipfs::bitswap=debug", +] diff --git a/zombienet/bulletin-westend-local.toml b/zombienet/bulletin-westend-local.toml index f9e7be45..644497e3 100644 --- a/zombienet/bulletin-westend-local.toml +++ b/zombienet/bulletin-westend-local.toml @@ -43,10 +43,12 @@ p2p_port = 10001 rpc_port = 10000 args = [ "--ipfs-server", - 
"-lparachain=debug,runtime=trace,xcm=trace,sub-libp2p::bitswap=trace,runtime::transaction-storage=trace", + # Increase tx pool from 20->64 MiB + # "--pool-kbytes 65536", + "-lparachain=info,runtime=debug,xcm=trace,sub-libp2p::bitswap=trace,runtime::transaction-storage=trace", # WebSocket P2P on p2p_port + 1 for smoldot light client support - "--listen-addr", - "/ip4/0.0.0.0/tcp/10002/ws", + "--listen-addr=/ip4/0.0.0.0/tcp/10001", + "--listen-addr=/ip4/0.0.0.0/tcp/10002/ws", ] [[parachains.collators]] @@ -57,8 +59,10 @@ p2p_port = 12347 rpc_port = 12346 args = [ "--ipfs-server", - "-lparachain=debug,runtime=trace,xcm=trace,bitswap=trace,sub-libp2p::bitswap=trace,runtime::transaction-storage=trace", + # Increase tx pool from 20->64 MiB + # "--pool-kbytes 65536", + "-lparachain=info,runtime=debug,xcm=trace,bitswap=trace,sub-libp2p::bitswap=trace,runtime::transaction-storage=trace", # WebSocket P2P on p2p_port + 1 for smoldot light client support - "--listen-addr", - "/ip4/0.0.0.0/tcp/12348/ws", + "--listen-addr=/ip4/0.0.0.0/tcp/12347", + "--listen-addr=/ip4/0.0.0.0/tcp/12348/ws", ]