diff --git a/.github/workflows/check-bittensor-e2e-tests.yml.yml b/.github/workflows/check-bittensor-e2e-tests.yml.yml
index 1a574eb1d8..f204f870a2 100644
--- a/.github/workflows/check-bittensor-e2e-tests.yml.yml
+++ b/.github/workflows/check-bittensor-e2e-tests.yml.yml
@@ -213,11 +213,34 @@ jobs:
       - name: Load Docker Image
         run: docker load -i subtensor-localnet.tar

-      - name: Run tests
+#      - name: Run tests
+#        working-directory: ${{ github.workspace }}/btcli
+#        run: |
+#          source ${{ github.workspace }}/venv/bin/activate
+#          uv run pytest ${{ matrix.test-file }} -s
+
+      - name: Run with retry
         working-directory: ${{ github.workspace }}/btcli
         run: |
           source ${{ github.workspace }}/venv/bin/activate
-          uv run pytest ${{ matrix.test-file }} -s
+          set +e
+          for i in 1 2; do
+            echo "🔁 Attempt $i: Running tests"
+            uv run pytest ${{ matrix.test-file }} -s
+            status=$?
+            if [ $status -eq 0 ]; then
+              echo "✅ Tests passed on attempt $i"
+              break
+            else
+              echo "❌ Tests failed on attempt $i"
+              if [ $i -eq 2 ]; then
+                echo "🔥 Tests failed after 2 attempts"
+                exit 1
+              fi
+              echo "🕒 Retrying..."
+              sleep 5
+            fi
+          done

   # main sdk job
   run-sdk-e2e-tests:
@@ -285,8 +308,31 @@ jobs:
       - name: Load Docker Image
         run: docker load -i subtensor-localnet.tar

-      - name: Run tests
+#      - name: Run tests
+#        working-directory: ${{ github.workspace }}/bittensor
+#        run: |
+#          source ${{ github.workspace }}/venv/bin/activate
+#          uv run pytest ${{ matrix.test-file }} -s
+
+      - name: Run with retry
         working-directory: ${{ github.workspace }}/bittensor
         run: |
           source ${{ github.workspace }}/venv/bin/activate
-          uv run pytest ${{ matrix.test-file }} -s
\ No newline at end of file
+          set +e
+          for i in 1 2; do
+            echo "🔁 Attempt $i: Running tests"
+            uv run pytest ${{ matrix.test-file }} -s
+            status=$?
+            if [ $status -eq 0 ]; then
+              echo "✅ Tests passed on attempt $i"
+              break
+            else
+              echo "❌ Tests failed on attempt $i"
+              if [ $i -eq 2 ]; then
+                echo "🔥 Tests failed after 2 attempts"
+                exit 1
+              fi
+              echo "🕒 Retrying..."
+              sleep 5
+            fi
+          done
\ No newline at end of file
diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml
index c2afccae66..01de6eece8 100644
--- a/.github/workflows/docker-localnet.yml
+++ b/.github/workflows/docker-localnet.yml
@@ -57,6 +57,11 @@ jobs:
           username: ${{ github.actor }}
           password: ${{ secrets.GITHUB_TOKEN }}

+      - name: Patch non-fast-block node
+        run: |
+          chmod +x ./scripts/localnet_patch.sh
+          ./scripts/localnet_patch.sh
+
       - name: Build and push Docker image
         uses: docker/build-push-action@v6
         with:
diff --git a/.github/workflows/evm-tests.yml b/.github/workflows/evm-tests.yml
new file mode 100644
index 0000000000..355e2b873f
--- /dev/null
+++ b/.github/workflows/evm-tests.yml
@@ -0,0 +1,44 @@
+name: EVM E2E Tests
+
+on:
+  pull_request:
+
+  ## Allow running workflow manually from the Actions tab
+  workflow_dispatch:
+    inputs:
+      verbose:
+        description: "Output more information when triggered manually"
+        required: false
+        default: ""
+
+env:
+  CARGO_TERM_COLOR: always
+  VERBOSE: ${{ github.event.inputs.verbose }}
+
+jobs:
+  run:
+    runs-on: SubtensorCI
+    env:
+      RUST_BACKTRACE: full
+    steps:
+      - name: Check-out repository under $GITHUB_WORKSPACE
+        uses: actions/checkout@v4
+
+      - name: Utilize Shared Rust Cache
+        uses: Swatinem/rust-cache@v2
+
+      - name: Set up Node.js
+        uses: actions/setup-node@v2
+        with:
+          node-version: "22"
+
+      - name: Install dependencies
+        run: |
+          sudo apt-get update &&
+          sudo apt-get install -y clang curl libssl-dev llvm libudev-dev protobuf-compiler nodejs
+
+      - name: Run tests
+        working-directory: ${{ github.workspace }}
+        run: |
+          npm install --global yarn
+          ./evm-tests/run-ci.sh
diff --git a/.github/workflows/label-triggers.yml b/.github/workflows/label-triggers.yml
index f3c330f85c..bcf43e4c23 100644
--- a/.github/workflows/label-triggers.yml
+++ b/.github/workflows/label-triggers.yml
@@ -25,4 +25,4 @@ jobs:
             owner: context.repo.owner,
             repo: context.repo.repo,
             body: '@opentensor/cerebrum / @opentensor/gyrus / @opentensor/cortex breaking change detected! Please prepare accordingly!'
-          })
+          })
\ No newline at end of file
diff --git a/.github/workflows/run-benchmarks.yml b/.github/workflows/run-benchmarks.yml
new file mode 100644
index 0000000000..6040485eca
--- /dev/null
+++ b/.github/workflows/run-benchmarks.yml
@@ -0,0 +1,118 @@
+# .github/workflows/benchmarks.yml
+name: Validate-Benchmarks
+
+on:
+  pull_request:
+    types:
+      - opened
+      - synchronize
+  workflow_dispatch:
+
+concurrency:
+  group: run-benchmarks-${{ github.ref }}
+  cancel-in-progress: true
+
+jobs:
+  validate-benchmarks:
+    runs-on: Benchmarking
+
+    env:
+      SKIP_BENCHMARKS: '0'
+
+    steps:
+      - name: Check out PR branch
+        if: ${{ env.SKIP_BENCHMARKS != '1' }}
+        uses: actions/checkout@v4
+        with:
+          ref: ${{ github.head_ref }}
+          fetch-depth: 0
+
+      - name: Install GitHub CLI
+        if: ${{ env.SKIP_BENCHMARKS != '1' }}
+        run: |
+          sudo apt-get update
+          sudo apt-get install -y gh
+          echo "${{ secrets.GITHUB_TOKEN }}" | gh auth login --with-token
+
+      - name: Check skip label
+        if: ${{ env.SKIP_BENCHMARKS != '1' }}
+        run: |
+          labels=$(gh pr view ${{ github.event.pull_request.number }} --json labels --jq '.labels[].name')
+          if echo "$labels" | grep -q "skip-validate-benchmarks"; then
+            echo "skip-validate-benchmarks label found — skipping benchmarks."
+ echo "SKIP_BENCHMARKS=1" >> "$GITHUB_ENV" + fi + + - name: Install system dependencies + if: ${{ env.SKIP_BENCHMARKS != '1' }} + run: | + sudo apt-get update + sudo apt-get install -y clang curl libssl-dev llvm libudev-dev protobuf-compiler + + - name: Check skip label + if: ${{ env.SKIP_BENCHMARKS != '1' }} + run: | + labels=$(gh pr view ${{ github.event.pull_request.number }} --json labels --jq '.labels[].name') + if echo "$labels" | grep -q "skip-validate-benchmarks"; then + echo "skip-validate-benchmarks label found — skipping benchmarks." + echo "SKIP_BENCHMARKS=1" >> "$GITHUB_ENV" + fi + + - name: Install Rust toolchain + if: ${{ env.SKIP_BENCHMARKS != '1' }} + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + + - name: Check skip label + if: ${{ env.SKIP_BENCHMARKS != '1' }} + run: | + labels=$(gh pr view ${{ github.event.pull_request.number }} --json labels --jq '.labels[].name') + if echo "$labels" | grep -q "skip-validate-benchmarks"; then + echo "skip-validate-benchmarks label found — skipping benchmarks." + echo "SKIP_BENCHMARKS=1" >> "$GITHUB_ENV" + fi + + - name: Cache Rust build + if: ${{ env.SKIP_BENCHMARKS != '1' }} + uses: Swatinem/rust-cache@v2 + with: + key: bench-${{ hashFiles('**/Cargo.lock') }} + + - name: Check skip label + if: ${{ env.SKIP_BENCHMARKS != '1' }} + run: | + labels=$(gh pr view ${{ github.event.pull_request.number }} --json labels --jq '.labels[].name') + if echo "$labels" | grep -q "skip-validate-benchmarks"; then + echo "skip-validate-benchmarks label found — skipping benchmarks." + echo "SKIP_BENCHMARKS=1" >> "$GITHUB_ENV" + fi + + - name: Build node with benchmarks + if: ${{ env.SKIP_BENCHMARKS != '1' }} + run: | + cargo build --profile production -p node-subtensor --features runtime-benchmarks + + - name: Check skip label + if: ${{ env.SKIP_BENCHMARKS != '1' }} + run: | + labels=$(gh pr view ${{ github.event.pull_request.number }} --json labels --jq '.labels[].name') + if echo "$labels" | grep -q "skip-validate-benchmarks"; then + echo "skip-validate-benchmarks label found — skipping benchmarks." + echo "SKIP_BENCHMARKS=1" >> "$GITHUB_ENV" + fi + + - name: Run & validate benchmarks + if: ${{ env.SKIP_BENCHMARKS != '1' }} + run: | + chmod +x scripts/benchmark_action.sh + ./scripts/benchmark_action.sh + + - name: Check skip label after run + if: ${{ env.SKIP_BENCHMARKS != '1' }} + run: | + labels=$(gh pr view ${{ github.event.pull_request.number }} --json labels --jq '.labels[].name') + if echo "$labels" | grep -q "skip-validate-benchmarks"; then + echo "skip-validate-benchmarks label was found — but benchmarks already ran." 
+          fi
diff --git a/.github/workflows/rustdocs.yml b/.github/workflows/rustdocs.yml
new file mode 100644
index 0000000000..a58a3a8c2a
--- /dev/null
+++ b/.github/workflows/rustdocs.yml
@@ -0,0 +1,60 @@
+name: Publish rustdocs
+
+on:
+  push:
+    branches:
+      - main
+  workflow_dispatch:
+
+env:
+  CARGO_TERM_COLOR: always
+
+jobs:
+  build:
+    runs-on: SubtensorCI
+
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v3
+
+      - name: Install rustup
+        uses: actions-rs/toolchain@v1
+        with:
+          toolchain: stable
+
+      - name: Generate documentation
+        uses: actions-rs/cargo@v1
+        with:
+          command: doc
+          args: --document-private-items
+
+      - name: Fix file permissions
+        shell: sh
+        run: |
+          chmod -c -R +rX "target/doc" |
+          while read line; do
+            echo "::warning title=Invalid file permissions automatically fixed::$line"
+          done
+      - name: Generate index.html file
+        run: |
+          echo "" > target/doc/index.html
+      - name: Upload documentation
+        uses: actions/upload-pages-artifact@v1
+        with:
+          path: ./target/doc
+
+  deploy:
+    needs: build
+    runs-on: SubtensorCI
+
+    permissions:
+      pages: write
+      id-token: write
+    environment:
+      name: github-pages
+      url: ${{ steps.pages.outputs.page_url }}
+
+    steps:
+      - name: Deploy documentation
+        id: pages
+        uses: actions/deploy-pages@v2
diff --git a/Cargo.lock b/Cargo.lock
index ce6924d7b4..b0c56ffb2e 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2364,7 +2364,7 @@ checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6"
 [[package]]
 name = "fc-api"
 version = "1.0.0-dev"
-source = "git+https://github.com/opentensor/frontier?rev=635bdac882#635bdac882333afed827053f31ef56ab739f7a2e"
+source = "git+https://github.com/opentensor/frontier?rev=cd6bca14a3#cd6bca14a366cc7cb1c3b1b1d7bc8213667e4126"
 dependencies = [
  "async-trait",
  "fp-storage",
@@ -2373,10 +2373,26 @@ dependencies = [
  "sp-runtime",
 ]

+[[package]]
+name = "fc-aura"
+version = "1.0.0-dev"
+source = "git+https://github.com/opentensor/frontier?rev=cd6bca14a3#cd6bca14a366cc7cb1c3b1b1d7bc8213667e4126"
+dependencies = [
+ "fc-rpc",
+ "fp-storage",
+ "sc-client-api",
+ "sc-consensus-aura",
+ "sp-api",
+ "sp-consensus-aura",
+ "sp-inherents",
+ "sp-runtime",
+ "sp-timestamp",
+]
+
 [[package]]
 name = "fc-consensus"
 version = "2.0.0-dev"
-source = "git+https://github.com/opentensor/frontier?rev=635bdac882#635bdac882333afed827053f31ef56ab739f7a2e"
+source = "git+https://github.com/opentensor/frontier?rev=cd6bca14a3#cd6bca14a366cc7cb1c3b1b1d7bc8213667e4126"
 dependencies = [
  "async-trait",
  "fp-consensus",
@@ -2392,7 +2408,7 @@ dependencies = [
 [[package]]
 name = "fc-db"
 version = "2.0.0-dev"
-source = "git+https://github.com/opentensor/frontier?rev=635bdac882#635bdac882333afed827053f31ef56ab739f7a2e"
+source = "git+https://github.com/opentensor/frontier?rev=cd6bca14a3#cd6bca14a366cc7cb1c3b1b1d7bc8213667e4126"
 dependencies = [
  "async-trait",
  "ethereum",
@@ -2422,7 +2438,7 @@ dependencies = [
 [[package]]
 name = "fc-mapping-sync"
 version = "2.0.0-dev"
-source = "git+https://github.com/opentensor/frontier?rev=635bdac882#635bdac882333afed827053f31ef56ab739f7a2e"
+source = "git+https://github.com/opentensor/frontier?rev=cd6bca14a3#cd6bca14a366cc7cb1c3b1b1d7bc8213667e4126"
 dependencies = [
  "fc-db",
  "fc-storage",
@@ -2445,7 +2461,7 @@ dependencies = [
 [[package]]
 name = "fc-rpc"
 version = "2.0.0-dev"
-source = "git+https://github.com/opentensor/frontier?rev=635bdac882#635bdac882333afed827053f31ef56ab739f7a2e"
+source = "git+https://github.com/opentensor/frontier?rev=cd6bca14a3#cd6bca14a366cc7cb1c3b1b1d7bc8213667e4126"
 dependencies
= [ "ethereum", "ethereum-types", @@ -2468,7 +2484,6 @@ dependencies = [ "rand", "rlp", "sc-client-api", - "sc-consensus-aura", "sc-network", "sc-network-sync", "sc-rpc", @@ -2482,7 +2497,6 @@ dependencies = [ "sp-block-builder", "sp-blockchain", "sp-consensus", - "sp-consensus-aura", "sp-core", "sp-externalities 0.29.0", "sp-inherents", @@ -2490,7 +2504,6 @@ dependencies = [ "sp-runtime", "sp-state-machine", "sp-storage 21.0.0", - "sp-timestamp", "substrate-prometheus-endpoint", "thiserror", "tokio", @@ -2499,7 +2512,7 @@ dependencies = [ [[package]] name = "fc-rpc-core" version = "1.1.0-dev" -source = "git+https://github.com/opentensor/frontier?rev=635bdac882#635bdac882333afed827053f31ef56ab739f7a2e" +source = "git+https://github.com/opentensor/frontier?rev=cd6bca14a3#cd6bca14a366cc7cb1c3b1b1d7bc8213667e4126" dependencies = [ "ethereum", "ethereum-types", @@ -2508,13 +2521,13 @@ dependencies = [ "rustc-hex", "serde", "serde_json", - "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", ] [[package]] name = "fc-storage" version = "1.0.0-dev" -source = "git+https://github.com/opentensor/frontier?rev=635bdac882#635bdac882333afed827053f31ef56ab739f7a2e" +source = "git+https://github.com/opentensor/frontier?rev=cd6bca14a3#cd6bca14a366cc7cb1c3b1b1d7bc8213667e4126" dependencies = [ "ethereum", "ethereum-types", @@ -2670,7 +2683,7 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "fork-tree" version = "13.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "parity-scale-codec", ] @@ -2697,7 +2710,7 @@ dependencies = [ [[package]] name = "fp-account" version = "1.0.0-dev" -source = "git+https://github.com/opentensor/frontier?rev=635bdac882#635bdac882333afed827053f31ef56ab739f7a2e" +source = "git+https://github.com/opentensor/frontier?rev=cd6bca14a3#cd6bca14a366cc7cb1c3b1b1d7bc8213667e4126" dependencies = [ "hex", "impl-serde", @@ -2716,7 +2729,7 @@ dependencies = [ [[package]] name = "fp-consensus" version = "2.0.0-dev" -source = "git+https://github.com/opentensor/frontier?rev=635bdac882#635bdac882333afed827053f31ef56ab739f7a2e" +source = "git+https://github.com/opentensor/frontier?rev=cd6bca14a3#cd6bca14a366cc7cb1c3b1b1d7bc8213667e4126" dependencies = [ "ethereum", "parity-scale-codec", @@ -2727,7 +2740,7 @@ dependencies = [ [[package]] name = "fp-ethereum" version = "1.0.0-dev" -source = "git+https://github.com/opentensor/frontier?rev=635bdac882#635bdac882333afed827053f31ef56ab739f7a2e" +source = "git+https://github.com/opentensor/frontier?rev=cd6bca14a3#cd6bca14a366cc7cb1c3b1b1d7bc8213667e4126" dependencies = [ "ethereum", "ethereum-types", @@ -2739,7 +2752,7 @@ dependencies = [ [[package]] name = "fp-evm" version = "3.0.0-dev" -source = "git+https://github.com/opentensor/frontier?rev=635bdac882#635bdac882333afed827053f31ef56ab739f7a2e" +source = "git+https://github.com/opentensor/frontier?rev=cd6bca14a3#cd6bca14a366cc7cb1c3b1b1d7bc8213667e4126" dependencies = [ "evm", "frame-support", @@ -2754,7 +2767,7 @@ dependencies = [ [[package]] name = "fp-rpc" version = "3.0.0-dev" -source = 
"git+https://github.com/opentensor/frontier?rev=635bdac882#635bdac882333afed827053f31ef56ab739f7a2e" +source = "git+https://github.com/opentensor/frontier?rev=cd6bca14a3#cd6bca14a366cc7cb1c3b1b1d7bc8213667e4126" dependencies = [ "ethereum", "ethereum-types", @@ -2770,7 +2783,7 @@ dependencies = [ [[package]] name = "fp-self-contained" version = "1.0.0-dev" -source = "git+https://github.com/opentensor/frontier?rev=635bdac882#635bdac882333afed827053f31ef56ab739f7a2e" +source = "git+https://github.com/opentensor/frontier?rev=cd6bca14a3#cd6bca14a366cc7cb1c3b1b1d7bc8213667e4126" dependencies = [ "frame-support", "parity-scale-codec", @@ -2782,7 +2795,7 @@ dependencies = [ [[package]] name = "fp-storage" version = "2.0.0" -source = "git+https://github.com/opentensor/frontier?rev=635bdac882#635bdac882333afed827053f31ef56ab739f7a2e" +source = "git+https://github.com/opentensor/frontier?rev=cd6bca14a3#cd6bca14a366cc7cb1c3b1b1d7bc8213667e4126" dependencies = [ "parity-scale-codec", "serde", @@ -2797,7 +2810,7 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" [[package]] name = "frame-benchmarking" version = "38.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "frame-support", "frame-support-procedural", @@ -2821,7 +2834,7 @@ dependencies = [ [[package]] name = "frame-benchmarking-cli" version = "43.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "Inflector", "array-bytes", @@ -2871,7 +2884,7 @@ dependencies = [ [[package]] name = "frame-executive" version = "38.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "aquamarine", "frame-support", @@ -2901,7 +2914,7 @@ dependencies = [ [[package]] name = "frame-metadata-hash-extension" version = "0.6.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "array-bytes", "docify", @@ -2915,8 +2928,8 @@ dependencies = [ [[package]] name = "frame-support" -version = "38.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +version = "38.2.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "aquamarine", "array-bytes", @@ -2939,7 +2952,7 @@ dependencies = [ "sp-arithmetic", "sp-core", "sp-crypto-hashing-proc-macro", - "sp-debug-derive 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-debug-derive 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "sp-genesis-builder", "sp-inherents", "sp-io", @@ -2947,7 +2960,7 @@ dependencies = [ "sp-runtime", "sp-staking", "sp-state-machine", - 
"sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "sp-tracing 17.0.1", "sp-weights", "static_assertions", @@ -2956,8 +2969,8 @@ dependencies = [ [[package]] name = "frame-support-procedural" -version = "30.0.3" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +version = "30.0.6" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "Inflector", "cfg-expr", @@ -2970,7 +2983,7 @@ dependencies = [ "proc-macro-warning 1.0.2", "proc-macro2", "quote", - "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "syn 2.0.90", ] @@ -2990,7 +3003,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools" version = "13.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "frame-support-procedural-tools-derive 12.0.0", "proc-macro-crate 3.2.0", @@ -3013,7 +3026,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools-derive" version = "12.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "proc-macro2", "quote", @@ -3023,7 +3036,7 @@ dependencies = [ [[package]] name = "frame-system" version = "38.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "cfg-if", "docify", @@ -3035,7 +3048,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "sp-version", "sp-weights", ] @@ -3043,7 +3056,7 @@ dependencies = [ [[package]] name = "frame-system-benchmarking" version = "38.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "frame-benchmarking", "frame-support", @@ -3057,7 +3070,7 @@ dependencies = [ [[package]] name = "frame-system-rpc-runtime-api" version = "34.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "docify", "parity-scale-codec", @@ -3067,7 +3080,7 @@ dependencies = [ [[package]] name = "frame-try-runtime" version = "0.44.0" -source = 
"git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "frame-support", "parity-scale-codec", @@ -4236,9 +4249,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.159" +version = "0.2.171" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "561d97a539a36e26a9a5fad1ea11a3039a67714694aaa379433e580854bc3dc5" +checksum = "c19937216e9d3aa9956d9bb8dfc0b0c8beb6058fc4f7a4dc4d850edf86a237d6" [[package]] name = "libloading" @@ -5526,6 +5539,7 @@ dependencies = [ "async-trait", "clap", "fc-api", + "fc-aura", "fc-consensus", "fc-db", "fc-mapping-sync", @@ -5628,6 +5642,7 @@ dependencies = [ "pallet-base-fee", "pallet-collective", "pallet-commitments", + "pallet-crowdloan", "pallet-drand", "pallet-ethereum", "pallet-evm", @@ -5668,7 +5683,7 @@ dependencies = [ "sp-offchain", "sp-runtime", "sp-session", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "sp-storage 21.0.0", "sp-tracing 17.0.1", "sp-transaction-pool", @@ -6003,7 +6018,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "sp-tracing 17.0.1", "sp-weights", "substrate-fixed", @@ -6013,7 +6028,7 @@ dependencies = [ [[package]] name = "pallet-aura" version = "37.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "frame-support", "frame-system", @@ -6029,7 +6044,7 @@ dependencies = [ [[package]] name = "pallet-authorship" version = "38.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "frame-support", "frame-system", @@ -6041,8 +6056,8 @@ dependencies = [ [[package]] name = "pallet-balances" -version = "39.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +version = "39.0.1" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "docify", "frame-benchmarking", @@ -6057,7 +6072,7 @@ dependencies = [ [[package]] name = "pallet-base-fee" version = "1.0.0" -source = "git+https://github.com/opentensor/frontier?rev=635bdac882#635bdac882333afed827053f31ef56ab739f7a2e" +source = "git+https://github.com/opentensor/frontier?rev=cd6bca14a3#cd6bca14a366cc7cb1c3b1b1d7bc8213667e4126" dependencies = [ "fp-evm", "frame-support", @@ -6081,7 +6096,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", 
"subtensor-macros", ] @@ -6106,12 +6121,31 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "subtensor-macros", "tle", "w3f-bls", ] +[[package]] +name = "pallet-crowdloan" +version = "0.1.0" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-system", + "log", + "pallet-balances", + "pallet-preimage", + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", + "subtensor-macros", +] + [[package]] name = "pallet-drand" version = "0.0.1" @@ -6148,7 +6182,7 @@ dependencies = [ [[package]] name = "pallet-ethereum" version = "4.0.0-dev" -source = "git+https://github.com/opentensor/frontier?rev=635bdac882#635bdac882333afed827053f31ef56ab739f7a2e" +source = "git+https://github.com/opentensor/frontier?rev=cd6bca14a3#cd6bca14a366cc7cb1c3b1b1d7bc8213667e4126" dependencies = [ "ethereum", "ethereum-types", @@ -6170,7 +6204,7 @@ dependencies = [ [[package]] name = "pallet-evm" version = "6.0.0-dev" -source = "git+https://github.com/opentensor/frontier?rev=635bdac882#635bdac882333afed827053f31ef56ab739f7a2e" +source = "git+https://github.com/opentensor/frontier?rev=cd6bca14a3#cd6bca14a366cc7cb1c3b1b1d7bc8213667e4126" dependencies = [ "environmental", "evm", @@ -6193,7 +6227,7 @@ dependencies = [ [[package]] name = "pallet-evm-chain-id" version = "1.0.0-dev" -source = "git+https://github.com/opentensor/frontier?rev=635bdac882#635bdac882333afed827053f31ef56ab739f7a2e" +source = "git+https://github.com/opentensor/frontier?rev=cd6bca14a3#cd6bca14a366cc7cb1c3b1b1d7bc8213667e4126" dependencies = [ "frame-support", "frame-system", @@ -6204,7 +6238,7 @@ dependencies = [ [[package]] name = "pallet-evm-precompile-modexp" version = "2.0.0-dev" -source = "git+https://github.com/opentensor/frontier?rev=635bdac882#635bdac882333afed827053f31ef56ab739f7a2e" +source = "git+https://github.com/opentensor/frontier?rev=cd6bca14a3#cd6bca14a366cc7cb1c3b1b1d7bc8213667e4126" dependencies = [ "fp-evm", "num", @@ -6213,7 +6247,7 @@ dependencies = [ [[package]] name = "pallet-evm-precompile-sha3fips" version = "2.0.0-dev" -source = "git+https://github.com/opentensor/frontier?rev=635bdac882#635bdac882333afed827053f31ef56ab739f7a2e" +source = "git+https://github.com/opentensor/frontier?rev=cd6bca14a3#cd6bca14a366cc7cb1c3b1b1d7bc8213667e4126" dependencies = [ "fp-evm", "tiny-keccak", @@ -6222,7 +6256,7 @@ dependencies = [ [[package]] name = "pallet-evm-precompile-simple" version = "2.0.0-dev" -source = "git+https://github.com/opentensor/frontier?rev=635bdac882#635bdac882333afed827053f31ef56ab739f7a2e" +source = "git+https://github.com/opentensor/frontier?rev=cd6bca14a3#cd6bca14a366cc7cb1c3b1b1d7bc8213667e4126" dependencies = [ "fp-evm", "ripemd", @@ -6232,7 +6266,7 @@ dependencies = [ [[package]] name = "pallet-grandpa" version = "38.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "frame-benchmarking", "frame-support", @@ -6254,7 +6288,7 @@ dependencies = [ [[package]] name = "pallet-hotfix-sufficients" version = "1.0.0" -source = 
"git+https://github.com/opentensor/frontier?rev=635bdac882#635bdac882333afed827053f31ef56ab739f7a2e" +source = "git+https://github.com/opentensor/frontier?rev=cd6bca14a3#cd6bca14a366cc7cb1c3b1b1d7bc8213667e4126" dependencies = [ "frame-benchmarking", "frame-support", @@ -6269,7 +6303,7 @@ dependencies = [ [[package]] name = "pallet-insecure-randomness-collective-flip" version = "26.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "frame-support", "frame-system", @@ -6282,7 +6316,7 @@ dependencies = [ [[package]] name = "pallet-membership" version = "38.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "frame-benchmarking", "frame-support", @@ -6298,7 +6332,7 @@ dependencies = [ [[package]] name = "pallet-multisig" version = "38.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "frame-benchmarking", "frame-support", @@ -6313,7 +6347,7 @@ dependencies = [ [[package]] name = "pallet-preimage" version = "38.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "frame-benchmarking", "frame-support", @@ -6346,7 +6380,7 @@ dependencies = [ [[package]] name = "pallet-proxy" version = "38.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "frame-benchmarking", "frame-support", @@ -6370,14 +6404,14 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "subtensor-macros", ] [[package]] name = "pallet-root-testing" version = "14.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "frame-support", "frame-system", @@ -6391,15 +6425,15 @@ dependencies = [ [[package]] name = "pallet-safe-mode" version = "19.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "docify", "frame-benchmarking", "frame-support", "frame-system", "pallet-balances", - "pallet-proxy 38.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", - 
"pallet-utility 38.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "pallet-proxy 38.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", + "pallet-utility 38.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "parity-scale-codec", "scale-info", "sp-arithmetic", @@ -6409,7 +6443,7 @@ dependencies = [ [[package]] name = "pallet-scheduler" version = "39.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "docify", "frame-benchmarking", @@ -6426,7 +6460,7 @@ dependencies = [ [[package]] name = "pallet-session" version = "38.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "frame-support", "frame-system", @@ -6484,7 +6518,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "sp-tracing 17.0.1", "sp-version", "substrate-fixed", @@ -6496,7 +6530,7 @@ dependencies = [ [[package]] name = "pallet-sudo" version = "38.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "docify", "frame-benchmarking", @@ -6511,7 +6545,7 @@ dependencies = [ [[package]] name = "pallet-timestamp" version = "37.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "docify", "frame-benchmarking", @@ -6529,8 +6563,8 @@ dependencies = [ [[package]] name = "pallet-transaction-payment" -version = "38.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +version = "38.0.2" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "frame-support", "frame-system", @@ -6545,7 +6579,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc" version = "41.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "jsonrpsee", "pallet-transaction-payment-rpc-runtime-api", @@ -6561,7 +6595,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc-runtime-api" version = "38.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" 
dependencies = [ "pallet-transaction-payment", "parity-scale-codec", @@ -6592,7 +6626,7 @@ dependencies = [ [[package]] name = "pallet-utility" version = "38.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "frame-benchmarking", "frame-support", @@ -7056,7 +7090,7 @@ dependencies = [ [[package]] name = "precompile-utils" version = "0.1.0" -source = "git+https://github.com/opentensor/frontier?rev=635bdac882#635bdac882333afed827053f31ef56ab739f7a2e" +source = "git+https://github.com/opentensor/frontier?rev=cd6bca14a3#cd6bca14a366cc7cb1c3b1b1d7bc8213667e4126" dependencies = [ "environmental", "evm", @@ -7080,14 +7114,14 @@ dependencies = [ [[package]] name = "precompile-utils-macro" version = "0.1.0" -source = "git+https://github.com/opentensor/frontier?rev=635bdac882#635bdac882333afed827053f31ef56ab739f7a2e" +source = "git+https://github.com/opentensor/frontier?rev=cd6bca14a3#cd6bca14a366cc7cb1c3b1b1d7bc8213667e4126" dependencies = [ "case", "num_enum", "prettyplease 0.2.22", "proc-macro2", "quote", - "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "syn 1.0.109", ] @@ -8072,7 +8106,7 @@ name = "safe-math" version = "0.1.0" dependencies = [ "num-traits", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "substrate-fixed", ] @@ -8106,7 +8140,7 @@ dependencies = [ [[package]] name = "sc-allocator" version = "29.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "log", "sp-core", @@ -8117,7 +8151,7 @@ dependencies = [ [[package]] name = "sc-basic-authorship" version = "0.45.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "futures", "futures-timer", @@ -8139,7 +8173,7 @@ dependencies = [ [[package]] name = "sc-block-builder" version = "0.42.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "parity-scale-codec", "sp-api", @@ -8154,7 +8188,7 @@ dependencies = [ [[package]] name = "sc-chain-spec" version = "38.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "array-bytes", "docify", @@ -8170,7 +8204,7 @@ dependencies = [ "serde_json", "sp-blockchain", "sp-core", - "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + 
"sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "sp-genesis-builder", "sp-io", "sp-runtime", @@ -8181,7 +8215,7 @@ dependencies = [ [[package]] name = "sc-chain-spec-derive" version = "12.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "proc-macro-crate 3.2.0", "proc-macro2", @@ -8192,7 +8226,7 @@ dependencies = [ [[package]] name = "sc-cli" version = "0.47.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "array-bytes", "chrono", @@ -8233,7 +8267,7 @@ dependencies = [ [[package]] name = "sc-client-api" version = "37.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "fnv", "futures", @@ -8259,8 +8293,8 @@ dependencies = [ [[package]] name = "sc-client-db" -version = "0.44.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +version = "0.44.1" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "hash-db", "kvdb", @@ -8286,7 +8320,7 @@ dependencies = [ [[package]] name = "sc-consensus" version = "0.44.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "async-trait", "futures", @@ -8310,7 +8344,7 @@ dependencies = [ [[package]] name = "sc-consensus-aura" version = "0.45.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "async-trait", "futures", @@ -8339,7 +8373,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe" version = "0.45.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "async-trait", "fork-tree", @@ -8364,7 +8398,7 @@ dependencies = [ "sp-consensus-babe", "sp-consensus-slots", "sp-core", - "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "sp-inherents", "sp-keystore", "sp-runtime", @@ -8375,7 +8409,7 @@ dependencies = [ [[package]] name = "sc-consensus-epochs" version = "0.44.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = 
"git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "fork-tree", "parity-scale-codec", @@ -8388,7 +8422,7 @@ dependencies = [ [[package]] name = "sc-consensus-grandpa" version = "0.30.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "ahash 0.8.11", "array-bytes", @@ -8422,7 +8456,7 @@ dependencies = [ "sp-consensus", "sp-consensus-grandpa", "sp-core", - "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "sp-keystore", "sp-runtime", "substrate-prometheus-endpoint", @@ -8432,7 +8466,7 @@ dependencies = [ [[package]] name = "sc-consensus-grandpa-rpc" version = "0.30.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "finality-grandpa", "futures", @@ -8452,7 +8486,7 @@ dependencies = [ [[package]] name = "sc-consensus-manual-seal" version = "0.46.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "assert_matches", "async-trait", @@ -8487,7 +8521,7 @@ dependencies = [ [[package]] name = "sc-consensus-slots" version = "0.44.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "async-trait", "futures", @@ -8510,7 +8544,7 @@ dependencies = [ [[package]] name = "sc-executor" version = "0.40.1" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "parity-scale-codec", "parking_lot 0.12.3", @@ -8533,7 +8567,7 @@ dependencies = [ [[package]] name = "sc-executor-common" version = "0.35.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "polkavm", "sc-allocator", @@ -8546,7 +8580,7 @@ dependencies = [ [[package]] name = "sc-executor-polkavm" version = "0.32.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "log", "polkavm", @@ -8557,7 +8591,7 @@ dependencies = [ [[package]] name = "sc-executor-wasmtime" version = "0.35.0" -source = 
"git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "anyhow", "cfg-if", @@ -8575,7 +8609,7 @@ dependencies = [ [[package]] name = "sc-informant" version = "0.44.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "console", "futures", @@ -8592,7 +8626,7 @@ dependencies = [ [[package]] name = "sc-keystore" version = "33.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "array-bytes", "parking_lot 0.12.3", @@ -8606,7 +8640,7 @@ dependencies = [ [[package]] name = "sc-mixnet" version = "0.15.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "array-bytes", "arrayvec", @@ -8634,8 +8668,8 @@ dependencies = [ [[package]] name = "sc-network" -version = "0.45.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +version = "0.45.6" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "array-bytes", "async-channel", @@ -8686,7 +8720,7 @@ dependencies = [ [[package]] name = "sc-network-common" version = "0.44.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "async-trait", "bitflags 1.3.2", @@ -8704,7 +8738,7 @@ dependencies = [ [[package]] name = "sc-network-gossip" version = "0.45.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "ahash 0.8.11", "futures", @@ -8722,8 +8756,8 @@ dependencies = [ [[package]] name = "sc-network-light" -version = "0.44.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +version = "0.44.1" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "array-bytes", "async-channel", @@ -8743,8 +8777,8 @@ dependencies = [ [[package]] name = "sc-network-sync" -version = "0.44.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +version = "0.44.1" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "array-bytes", "async-channel", @@ -8780,8 
+8814,8 @@ dependencies = [ [[package]] name = "sc-network-transactions" -version = "0.44.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +version = "0.44.1" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "array-bytes", "futures", @@ -8800,7 +8834,7 @@ dependencies = [ [[package]] name = "sc-network-types" version = "0.12.1" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "bs58 0.5.1", "ed25519-dalek", @@ -8817,7 +8851,7 @@ dependencies = [ [[package]] name = "sc-offchain" version = "40.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "array-bytes", "bytes", @@ -8851,7 +8885,7 @@ dependencies = [ [[package]] name = "sc-proposer-metrics" version = "0.18.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "log", "substrate-prometheus-endpoint", @@ -8860,7 +8894,7 @@ dependencies = [ [[package]] name = "sc-rpc" version = "40.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "futures", "jsonrpsee", @@ -8892,7 +8926,7 @@ dependencies = [ [[package]] name = "sc-rpc-api" version = "0.44.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -8911,8 +8945,8 @@ dependencies = [ [[package]] name = "sc-rpc-server" -version = "17.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +version = "17.1.2" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "dyn-clone", "forwarded-header-value", @@ -8936,7 +8970,7 @@ dependencies = [ [[package]] name = "sc-rpc-spec-v2" version = "0.45.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "array-bytes", "futures", @@ -8968,7 +9002,7 @@ dependencies = [ [[package]] name = "sc-service" version = "0.46.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = 
"git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "async-trait", "directories", @@ -9032,7 +9066,7 @@ dependencies = [ [[package]] name = "sc-state-db" version = "0.36.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "log", "parity-scale-codec", @@ -9043,7 +9077,7 @@ dependencies = [ [[package]] name = "sc-sysinfo" version = "38.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "derive_more", "futures", @@ -9056,15 +9090,15 @@ dependencies = [ "serde", "serde_json", "sp-core", - "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "sp-io", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", ] [[package]] name = "sc-telemetry" version = "25.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "chrono", "futures", @@ -9084,7 +9118,7 @@ dependencies = [ [[package]] name = "sc-tracing" version = "37.0.1" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "chrono", "console", @@ -9113,7 +9147,7 @@ dependencies = [ [[package]] name = "sc-tracing-proc-macro" version = "11.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "proc-macro-crate 3.2.0", "proc-macro2", @@ -9124,7 +9158,7 @@ dependencies = [ [[package]] name = "sc-transaction-pool" version = "37.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "async-trait", "futures", @@ -9140,7 +9174,7 @@ dependencies = [ "sp-api", "sp-blockchain", "sp-core", - "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "sp-runtime", "sp-tracing 17.0.1", "sp-transaction-pool", @@ -9151,7 +9185,7 @@ dependencies = [ [[package]] name = "sc-transaction-pool-api" version = "37.0.0" -source = 
"git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "async-trait", "futures", @@ -9167,7 +9201,7 @@ dependencies = [ [[package]] name = "sc-utils" version = "17.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "async-channel", "futures", @@ -9613,7 +9647,7 @@ name = "share-pool" version = "0.1.0" dependencies = [ "safe-math", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "substrate-fixed", ] @@ -9759,7 +9793,7 @@ dependencies = [ [[package]] name = "sp-api" version = "34.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "docify", "hash-db", @@ -9781,7 +9815,7 @@ dependencies = [ [[package]] name = "sp-api-proc-macro" version = "20.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "Inflector", "blake2 0.10.6", @@ -9795,7 +9829,7 @@ dependencies = [ [[package]] name = "sp-application-crypto" version = "38.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "parity-scale-codec", "scale-info", @@ -9807,7 +9841,7 @@ dependencies = [ [[package]] name = "sp-arithmetic" version = "26.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "docify", "integer-sqrt", @@ -9830,7 +9864,7 @@ dependencies = [ [[package]] name = "sp-block-builder" version = "34.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "sp-api", "sp-inherents", @@ -9840,7 +9874,7 @@ dependencies = [ [[package]] name = "sp-blockchain" version = "37.0.1" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "futures", "parity-scale-codec", @@ -9859,7 +9893,7 @@ dependencies = [ [[package]] name = "sp-consensus" version = "0.40.0" -source = 
"git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "async-trait", "futures", @@ -9874,7 +9908,7 @@ dependencies = [ [[package]] name = "sp-consensus-aura" version = "0.40.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "async-trait", "parity-scale-codec", @@ -9890,7 +9924,7 @@ dependencies = [ [[package]] name = "sp-consensus-babe" version = "0.40.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "async-trait", "parity-scale-codec", @@ -9908,7 +9942,7 @@ dependencies = [ [[package]] name = "sp-consensus-grandpa" version = "21.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "finality-grandpa", "log", @@ -9925,7 +9959,7 @@ dependencies = [ [[package]] name = "sp-consensus-slots" version = "0.40.1" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "parity-scale-codec", "scale-info", @@ -9936,7 +9970,7 @@ dependencies = [ [[package]] name = "sp-core" version = "34.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "array-bytes", "bitflags 1.3.2", @@ -9965,11 +9999,11 @@ dependencies = [ "secp256k1", "secrecy", "serde", - "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", - "sp-debug-derive 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", + "sp-debug-derive 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "sp-externalities 0.29.0", "sp-runtime-interface 28.0.0", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "sp-storage 21.0.0", "ss58-registry", "substrate-bip39", @@ -10002,7 +10036,7 @@ dependencies = [ [[package]] name = "sp-crypto-ec-utils" version = "0.14.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "ark-bls12-377", "ark-bls12-377-ext", @@ -10036,7 
+10070,7 @@ dependencies = [ [[package]] name = "sp-crypto-hashing" version = "0.1.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "blake2b_simd", "byteorder", @@ -10049,17 +10083,17 @@ dependencies = [ [[package]] name = "sp-crypto-hashing-proc-macro" version = "0.1.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "quote", - "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "syn 2.0.90", ] [[package]] name = "sp-database" version = "10.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "kvdb", "parking_lot 0.12.3", @@ -10068,7 +10102,7 @@ dependencies = [ [[package]] name = "sp-debug-derive" version = "14.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "proc-macro2", "quote", @@ -10098,7 +10132,7 @@ dependencies = [ [[package]] name = "sp-externalities" version = "0.29.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "environmental", "parity-scale-codec", @@ -10108,7 +10142,7 @@ dependencies = [ [[package]] name = "sp-genesis-builder" version = "0.15.1" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "parity-scale-codec", "scale-info", @@ -10120,7 +10154,7 @@ dependencies = [ [[package]] name = "sp-inherents" version = "34.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "async-trait", "impl-trait-for-tuples", @@ -10132,8 +10166,8 @@ dependencies = [ [[package]] name = "sp-io" -version = "38.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +version = "38.0.2" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "bytes", "docify", @@ -10145,7 +10179,7 @@ dependencies = [ "rustversion", "secp256k1", "sp-core", - "sp-crypto-hashing 0.1.0 
(git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "sp-externalities 0.29.0", "sp-keystore", "sp-runtime-interface 28.0.0", @@ -10159,7 +10193,7 @@ dependencies = [ [[package]] name = "sp-keyring" version = "39.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "sp-core", "sp-runtime", @@ -10169,7 +10203,7 @@ dependencies = [ [[package]] name = "sp-keystore" version = "0.40.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "parity-scale-codec", "parking_lot 0.12.3", @@ -10180,7 +10214,7 @@ dependencies = [ [[package]] name = "sp-maybe-compressed-blob" version = "11.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "thiserror", "zstd 0.12.4", @@ -10189,7 +10223,7 @@ dependencies = [ [[package]] name = "sp-metadata-ir" version = "0.7.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "frame-metadata", "parity-scale-codec", @@ -10199,7 +10233,7 @@ dependencies = [ [[package]] name = "sp-mixnet" version = "0.12.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "parity-scale-codec", "scale-info", @@ -10210,7 +10244,7 @@ dependencies = [ [[package]] name = "sp-offchain" version = "34.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "sp-api", "sp-core", @@ -10220,7 +10254,7 @@ dependencies = [ [[package]] name = "sp-panic-handler" version = "13.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "backtrace", "lazy_static", @@ -10230,7 +10264,7 @@ dependencies = [ [[package]] name = "sp-rpc" version = "32.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "rustc-hash 1.1.0", "serde", @@ -10239,8 +10273,8 @@ dependencies = [ [[package]] name = "sp-runtime" -version = 
"39.0.1" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +version = "39.0.5" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "docify", "either", @@ -10258,7 +10292,7 @@ dependencies = [ "sp-arithmetic", "sp-core", "sp-io", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "sp-weights", "tracing", ] @@ -10285,7 +10319,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface" version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "bytes", "impl-trait-for-tuples", @@ -10294,7 +10328,7 @@ dependencies = [ "primitive-types", "sp-externalities 0.29.0", "sp-runtime-interface-proc-macro 18.0.0", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "sp-storage 21.0.0", "sp-tracing 17.0.1", "sp-wasm-interface 21.0.1", @@ -10317,7 +10351,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" version = "18.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "Inflector", "expander", @@ -10330,7 +10364,7 @@ dependencies = [ [[package]] name = "sp-session" version = "36.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "parity-scale-codec", "scale-info", @@ -10344,7 +10378,7 @@ dependencies = [ [[package]] name = "sp-staking" version = "36.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "impl-trait-for-tuples", "parity-scale-codec", @@ -10357,7 +10391,7 @@ dependencies = [ [[package]] name = "sp-state-machine" version = "0.43.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "hash-db", "log", @@ -10377,7 +10411,7 @@ dependencies = [ [[package]] name = "sp-statement-store" version = "18.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "aes-gcm", "curve25519-dalek", @@ -10390,7 +10424,7 @@ dependencies = [ "sp-api", "sp-application-crypto", "sp-core", - 
"sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "sp-externalities 0.29.0", "sp-runtime", "sp-runtime-interface 28.0.0", @@ -10401,7 +10435,7 @@ dependencies = [ [[package]] name = "sp-std" version = "14.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" [[package]] name = "sp-std" @@ -10423,19 +10457,19 @@ dependencies = [ [[package]] name = "sp-storage" version = "21.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "impl-serde", "parity-scale-codec", "ref-cast", "serde", - "sp-debug-derive 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-debug-derive 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", ] [[package]] name = "sp-timestamp" version = "34.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "async-trait", "parity-scale-codec", @@ -10458,7 +10492,7 @@ dependencies = [ [[package]] name = "sp-tracing" version = "17.0.1" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "parity-scale-codec", "tracing", @@ -10469,7 +10503,7 @@ dependencies = [ [[package]] name = "sp-transaction-pool" version = "34.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "sp-api", "sp-runtime", @@ -10478,7 +10512,7 @@ dependencies = [ [[package]] name = "sp-transaction-storage-proof" version = "34.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "async-trait", "parity-scale-codec", @@ -10492,7 +10526,7 @@ dependencies = [ [[package]] name = "sp-trie" version = "37.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "ahash 0.8.11", "hash-db", @@ -10515,7 +10549,7 @@ dependencies = [ [[package]] name = "sp-version" version = "37.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = 
"git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "impl-serde", "parity-scale-codec", @@ -10524,7 +10558,7 @@ dependencies = [ "serde", "sp-crypto-hashing-proc-macro", "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "sp-version-proc-macro", "thiserror", ] @@ -10532,7 +10566,7 @@ dependencies = [ [[package]] name = "sp-version-proc-macro" version = "14.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "parity-scale-codec", "proc-macro2", @@ -10554,7 +10588,7 @@ dependencies = [ [[package]] name = "sp-wasm-interface" version = "21.0.1" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "anyhow", "impl-trait-for-tuples", @@ -10566,7 +10600,7 @@ dependencies = [ [[package]] name = "sp-weights" version = "31.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "bounded-collections", "parity-scale-codec", @@ -10574,7 +10608,7 @@ dependencies = [ "serde", "smallvec", "sp-arithmetic", - "sp-debug-derive 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-debug-derive 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", ] [[package]] @@ -10754,8 +10788,8 @@ checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" [[package]] name = "staging-xcm" -version = "14.2.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +version = "14.2.2" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "array-bytes", "bounded-collections", @@ -10875,7 +10909,7 @@ dependencies = [ [[package]] name = "substrate-bip39" version = "0.6.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "hmac 0.12.1", "pbkdf2", @@ -10887,7 +10921,7 @@ dependencies = [ [[package]] name = "substrate-build-script-utils" version = "11.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" [[package]] name = "substrate-fixed" @@ -10903,7 +10937,7 @@ dependencies = [ [[package]] name = "substrate-frame-rpc-system" version = "39.0.0" -source = 
"git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "docify", "frame-system-rpc-runtime-api", @@ -10923,7 +10957,7 @@ dependencies = [ [[package]] name = "substrate-prometheus-endpoint" version = "0.17.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "http-body-util", "hyper 1.5.0", @@ -10936,8 +10970,8 @@ dependencies = [ [[package]] name = "substrate-wasm-builder" -version = "24.0.1" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +version = "24.0.2" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "array-bytes", "build-helper", @@ -11044,7 +11078,7 @@ dependencies = [ "precompile-utils", "sp-core", "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "subtensor-runtime-common", ] @@ -11337,9 +11371,9 @@ dependencies = [ [[package]] name = "tokio" -version = "1.40.0" +version = "1.44.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2b070231665d27ad9ec9b8df639893f46727666c6767db40317fbe920a5d998" +checksum = "e6b88822cbe49de4185e3a4cbf8321dd487cf5fe0c5c65695fef6346371e9c48" dependencies = [ "backtrace", "bytes", @@ -11355,9 +11389,9 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" +checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", @@ -12688,7 +12722,7 @@ dependencies = [ [[package]] name = "xcm-procedural" version = "10.1.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "Inflector", "proc-macro2", diff --git a/Cargo.toml b/Cargo.toml index b1456c6eff..548bc5af63 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -46,6 +46,7 @@ pallet-admin-utils = { default-features = false, path = "pallets/admin-utils" } pallet-collective = { default-features = false, path = "pallets/collective" } pallet-commitments = { default-features = false, path = "pallets/commitments" } pallet-registry = { default-features = false, path = "pallets/registry" } +pallet-crowdloan = { default-features = false, path = "pallets/crowdloan" } pallet-subtensor = { default-features = false, path = "pallets/subtensor" } subtensor-custom-rpc = { default-features = false, path = "pallets/subtensor/rpc" } subtensor-custom-rpc-runtime-api = { default-features = false, path = "pallets/subtensor/runtime-api" } @@ -99,127 +100,128 @@ approx = "0.5" subtensor-macros = { path = "support/macros" } -frame-benchmarking = { git = 
"https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -frame-benchmarking-cli = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } -frame-executive = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -frame-metadata-hash-extension = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -frame-support = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -frame-system = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -frame-system-benchmarking = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -frame-try-runtime = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +frame-benchmarking = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +frame-benchmarking-cli = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7" } +frame-executive = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +frame-metadata-hash-extension = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +frame-support = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +frame-system = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +frame-system-benchmarking = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +frame-try-runtime = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } -pallet-aura = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -pallet-balances = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -pallet-grandpa = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -pallet-insecure-randomness-collective-flip = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -pallet-membership = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -pallet-multisig = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -pallet-preimage = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +pallet-aura = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +pallet-balances = { git = 
"https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +pallet-grandpa = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +pallet-insecure-randomness-collective-flip = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +pallet-membership = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +pallet-multisig = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +pallet-preimage = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } pallet-proxy = { path = "pallets/proxy", default-features = false } -pallet-safe-mode = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -pallet-scheduler = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -pallet-sudo = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -pallet-timestamp = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -pallet-transaction-payment = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -pallet-transaction-payment-rpc = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } -pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +pallet-safe-mode = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +pallet-scheduler = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +pallet-sudo = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +pallet-timestamp = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +pallet-transaction-payment = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +pallet-transaction-payment-rpc = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7" } +pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } pallet-utility = { path = "pallets/utility", default-features = false } -pallet-root-testing = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +pallet-root-testing = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } -sc-basic-authorship = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } -sc-cli = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } -sc-client-api = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } -sc-consensus = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = 
"polkadot-stable2409" } -sc-consensus-aura = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } -sc-consensus-grandpa = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } -sc-consensus-grandpa-rpc = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } -sc-chain-spec-derive = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } -sc-chain-spec = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } -sc-consensus-slots = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } -sc-executor = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } -sc-keystore = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } -sc-network = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } -sc-offchain = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } -sc-rpc = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } -sc-rpc-api = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } -sc-service = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } -sc-telemetry = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } -sc-transaction-pool = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } -sc-transaction-pool-api = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } +sc-basic-authorship = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7" } +sc-cli = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7" } +sc-client-api = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7" } +sc-consensus = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7" } +sc-consensus-aura = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7" } +sc-consensus-grandpa = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7" } +sc-consensus-grandpa-rpc = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7" } +sc-chain-spec-derive = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7" } +sc-chain-spec = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7" } +sc-consensus-slots = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7" } +sc-executor = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7" } +sc-keystore = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7" } +sc-network = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7" } +sc-offchain = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7" } +sc-rpc = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7" } +sc-rpc-api = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7" } +sc-service = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = 
"polkadot-stable2409-7" } +sc-telemetry = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7" } +sc-transaction-pool = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7" } +sc-transaction-pool-api = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7" } -sp-api = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -sp-block-builder = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -sp-blockchain = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -sp-consensus = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } -sp-consensus-aura = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -sp-consensus-grandpa = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -sp-genesis-builder = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -sp-core = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -sp-inherents = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -sp-io = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -sp-keyring = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -sp-offchain = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -sp-rpc = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -sp-session = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -sp-std = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -sp-storage = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -sp-timestamp = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } -sp-tracing = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -sp-transaction-pool = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -sp-version = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -sp-weights = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +sp-api = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +sp-block-builder = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +sp-blockchain = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } 
+sp-consensus = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7" } +sp-consensus-aura = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +sp-consensus-grandpa = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +sp-genesis-builder = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +sp-core = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +sp-inherents = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +sp-io = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +sp-keyring = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +sp-offchain = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +sp-rpc = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +sp-session = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +sp-std = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +sp-storage = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +sp-timestamp = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7" } +sp-tracing = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +sp-transaction-pool = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +sp-version = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +sp-weights = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } -substrate-build-script-utils = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } +substrate-build-script-utils = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7" } substrate-fixed = { git = "https://github.com/opentensor/substrate-fixed.git", tag = "v0.5.9" } -substrate-frame-rpc-system = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } -substrate-wasm-builder = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } +substrate-frame-rpc-system = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7" } +substrate-wasm-builder = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7" } -sc-consensus-manual-seal = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -sc-network-sync = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -substrate-prometheus-endpoint 
= { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +sc-consensus-manual-seal = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +sc-network-sync = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +substrate-prometheus-endpoint = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } # Frontier -fp-evm = { git = "https://github.com/opentensor/frontier", rev = "635bdac882", default-features = false } -fp-rpc = { git = "https://github.com/opentensor/frontier", rev = "635bdac882", default-features = false } -fp-self-contained = { git = "https://github.com/opentensor/frontier", rev = "635bdac882", default-features = false, features = [ +fp-evm = { git = "https://github.com/opentensor/frontier", rev = "cd6bca14a3", default-features = false } +fp-rpc = { git = "https://github.com/opentensor/frontier", rev = "cd6bca14a3", default-features = false } +fp-self-contained = { git = "https://github.com/opentensor/frontier", rev = "cd6bca14a3", default-features = false, features = [ "serde", ] } -fp-account = { git = "https://github.com/opentensor/frontier", rev = "635bdac882", default-features = false } -fc-storage = { git = "https://github.com/opentensor/frontier", rev = "635bdac882", default-features = false } -fc-db = { git = "https://github.com/opentensor/frontier", rev = "635bdac882", default-features = false } -fc-consensus = { git = "https://github.com/opentensor/frontier", rev = "635bdac882", default-features = false } -fp-consensus = { git = "https://github.com/opentensor/frontier", rev = "635bdac882", default-features = false } -fp-dynamic-fee = { git = "https://github.com/opentensor/frontier", rev = "635bdac882", default-features = false } -fc-api = { git = "https://github.com/opentensor/frontier", rev = "635bdac882", default-features = false } -fc-rpc = { git = "https://github.com/opentensor/frontier", rev = "635bdac882", default-features = false, features = [ +fp-account = { git = "https://github.com/opentensor/frontier", rev = "cd6bca14a3", default-features = false } +fc-storage = { git = "https://github.com/opentensor/frontier", rev = "cd6bca14a3", default-features = false } +fc-db = { git = "https://github.com/opentensor/frontier", rev = "cd6bca14a3", default-features = false } +fc-consensus = { git = "https://github.com/opentensor/frontier", rev = "cd6bca14a3", default-features = false } +fp-consensus = { git = "https://github.com/opentensor/frontier", rev = "cd6bca14a3", default-features = false } +fp-dynamic-fee = { git = "https://github.com/opentensor/frontier", rev = "cd6bca14a3", default-features = false } +fc-api = { git = "https://github.com/opentensor/frontier", rev = "cd6bca14a3", default-features = false } +fc-rpc = { git = "https://github.com/opentensor/frontier", rev = "cd6bca14a3", default-features = false, features = [ "rpc-binary-search-estimate", ] } -fc-rpc-core = { git = "https://github.com/opentensor/frontier", rev = "635bdac882", default-features = false } -fc-mapping-sync = { git = "https://github.com/opentensor/frontier", rev = "635bdac882", default-features = false } -precompile-utils = { git = "https://github.com/opentensor/frontier", rev = "635bdac882", default-features = false } +fc-rpc-core = { git = "https://github.com/opentensor/frontier", rev = "cd6bca14a3", default-features = false } +fc-aura = { git 
= "https://github.com/opentensor/frontier", rev = "cd6bca14a3", default-features = false } +fc-mapping-sync = { git = "https://github.com/opentensor/frontier", rev = "cd6bca14a3", default-features = false } +precompile-utils = { git = "https://github.com/opentensor/frontier", rev = "cd6bca14a3", default-features = false } # Frontier FRAME -pallet-base-fee = { git = "https://github.com/opentensor/frontier", rev = "635bdac882", default-features = false } -pallet-dynamic-fee = { git = "https://github.com/opentensor/frontier", rev = "635bdac882", default-features = false } -pallet-ethereum = { git = "https://github.com/opentensor/frontier", rev = "635bdac882", default-features = false } -pallet-evm = { git = "https://github.com/opentensor/frontier", rev = "635bdac882", default-features = false } -pallet-evm-chain-id = { git = "https://github.com/opentensor/frontier", rev = "635bdac882", default-features = false } -pallet-evm-precompile-modexp = { git = "https://github.com/opentensor/frontier", rev = "635bdac882", default-features = false } -pallet-evm-precompile-sha3fips = { git = "https://github.com/opentensor/frontier", rev = "635bdac882", default-features = false } -pallet-evm-precompile-simple = { git = "https://github.com/opentensor/frontier", rev = "635bdac882", default-features = false } -pallet-hotfix-sufficients = { git = "https://github.com/opentensor/frontier", rev = "635bdac882", default-features = false } +pallet-base-fee = { git = "https://github.com/opentensor/frontier", rev = "cd6bca14a3", default-features = false } +pallet-dynamic-fee = { git = "https://github.com/opentensor/frontier", rev = "cd6bca14a3", default-features = false } +pallet-ethereum = { git = "https://github.com/opentensor/frontier", rev = "cd6bca14a3", default-features = false } +pallet-evm = { git = "https://github.com/opentensor/frontier", rev = "cd6bca14a3", default-features = false } +pallet-evm-chain-id = { git = "https://github.com/opentensor/frontier", rev = "cd6bca14a3", default-features = false } +pallet-evm-precompile-modexp = { git = "https://github.com/opentensor/frontier", rev = "cd6bca14a3", default-features = false } +pallet-evm-precompile-sha3fips = { git = "https://github.com/opentensor/frontier", rev = "cd6bca14a3", default-features = false } +pallet-evm-precompile-simple = { git = "https://github.com/opentensor/frontier", rev = "cd6bca14a3", default-features = false } +pallet-hotfix-sufficients = { git = "https://github.com/opentensor/frontier", rev = "cd6bca14a3", default-features = false } #DRAND pallet-drand = { path = "pallets/drand", default-features = false } -sp-crypto-ec-utils = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", features = [ +sp-crypto-ec-utils = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", features = [ "bls12-381", ] } getrandom = { version = "0.2.15", features = [ "custom", ], default-features = false } -sp-keystore = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +sp-keystore = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } w3f-bls = { version = "=0.1.3", default-features = false } ark-crypto-primitives = { version = "0.4.0", default-features = false, features = [ "r1cs", @@ -267,3 +269,4 @@ runtime-benchmarks = [ "node-subtensor-runtime/runtime-benchmarks", ] metadata-hash = ["node-subtensor-runtime/metadata-hash"] +pow-faucet = [] diff --git 
a/Dockerfile b/Dockerfile index 35e1e20b56..447ed98b5e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,14 @@ -ARG BASE_IMAGE=rust:1.83 -FROM $BASE_IMAGE AS base_builder +# ------------------------------------------------------------------------------ +# Subtensor Dockerfile (hardened) +# – Builds production and local binaries +# – Final runtime images run as non-root `subtensor` user (UID/GID 10001) +# ------------------------------------------------------------------------------ + +############################################################################### +# ---------- 1. Common build environment ------------------------------------- +############################################################################### +ARG BASE_IMAGE=rust:latest +FROM ${BASE_IMAGE} AS base_builder LABEL ai.opentensor.image.authors="operations@opentensor.ai" \ ai.opentensor.image.vendor="Opentensor Foundation" \ @@ -7,56 +16,88 @@ LABEL ai.opentensor.image.authors="operations@opentensor.ai" \ ai.opentensor.image.description="Opentensor Subtensor Blockchain" \ ai.opentensor.image.documentation="https://docs.bittensor.com" -RUN rustup update stable -RUN rustup target add wasm32-unknown-unknown --toolchain stable - +# Rust targets +RUN rustup update stable && \ + rustup target add wasm32-unknown-unknown --toolchain stable -# Set up Rust environment +# Build prerequisites ENV RUST_BACKTRACE=1 -RUN apt-get update && apt-get install -y curl build-essential protobuf-compiler clang git pkg-config libssl-dev -RUN rm -rf /var/lib/apt/lists/* +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + curl build-essential protobuf-compiler clang git pkg-config libssl-dev && \ + rm -rf /var/lib/apt/lists/* -# Copy entire repository +# Copy entire repository once for all build stages (maximises cache hits) COPY . /build WORKDIR /build -# -# Image for building prod -# +############################################################################### +# ---------- 2. Production build stage --------------------------------------- +############################################################################### FROM base_builder AS prod_builder -# Build the project -RUN cargo build -p node-subtensor --profile production --features="metadata-hash" --locked -# Verify the binary was produced -RUN test -e /build/target/production/node-subtensor -EXPOSE 30333 9933 9944 -# -# Final prod image -# -FROM $BASE_IMAGE AS subtensor -# Copy all chainspec files -COPY --from=prod_builder /build/*.json / -# Copy final binary -COPY --from=prod_builder /build/target/production/node-subtensor /usr/local/bin +# Build the production binary (profile defined in Cargo.toml) +RUN cargo build -p node-subtensor --profile production --features "metadata-hash" --locked \ + && test -e /build/target/production/node-subtensor # sanity-check +############################################################################### +# ---------- 3. 
Final production image (hardened) ---------------------------- +############################################################################### +FROM ${BASE_IMAGE} AS subtensor + +# ---- security hardening: create least-privilege user ---- +RUN addgroup --system --gid 10001 subtensor && \ + adduser --system --uid 10001 --gid 10001 --home /home/subtensor --disabled-password subtensor + +# Writable data directory to be used as --base-path +RUN mkdir -p /data && chown -R subtensor:subtensor /data + +# Workdir for the non-root user +WORKDIR /home/subtensor + +# Copy chainspecs and binary with correct ownership +COPY --chown=subtensor:subtensor --from=prod_builder /build/*.json ./ +COPY --chown=subtensor:subtensor --from=prod_builder /build/chainspecs/*.json ./chainspecs/ +COPY --from=prod_builder /build/target/production/node-subtensor /usr/local/bin/ +RUN chown subtensor:subtensor /usr/local/bin/node-subtensor -# -# Image for building local -# -FROM base_builder AS local_builder -# Build the project -RUN cargo build --workspace --profile release --features="pow-faucet" -# Verify the binary was produced -RUN test -e /build/target/release/node-subtensor EXPOSE 30333 9933 9944 +USER subtensor +ENTRYPOINT ["node-subtensor"] +CMD ["--base-path","/data"] +############################################################################### +# ---------- 4. Local build stage -------------------------------------------- +############################################################################### +FROM base_builder AS local_builder + +# Build the workspace in release mode with the pow-faucet feature +RUN cargo build --workspace --profile release --features "pow-faucet" \ + && test -e /build/target/release/node-subtensor # sanity-check + +############################################################################### +# ---------- 5. 
Final local image (hardened) ---------------------------------- +############################################################################### +FROM ${BASE_IMAGE} AS subtensor-local + +# Least-privilege user +RUN addgroup --system --gid 10001 subtensor && \ + adduser --system --uid 10001 --gid 10001 --home /home/subtensor --disabled-password subtensor -# -# Final local image -# -FROM $BASE_IMAGE AS subtensor-local -# Copy all chainspec files -COPY --from=local_builder /build/*.json / -# Copy final binary -COPY --from=local_builder /build/target/release/node-subtensor /usr/local/bin -RUN "node-subtensor" build-spec --disable-default-bootnode --raw --chain local > /localnet.json +RUN mkdir -p /data && chown -R subtensor:subtensor /data +WORKDIR /home/subtensor + +# Copy artifacts +COPY --chown=subtensor:subtensor --from=local_builder /build/*.json ./ +COPY --chown=subtensor:subtensor --from=local_builder /build/chainspecs/*.json ./chainspecs/ +COPY --from=local_builder /build/target/release/node-subtensor /usr/local/bin/ +RUN chown subtensor:subtensor /usr/local/bin/node-subtensor + +# Generate a local chainspec for convenience (run as root before user switch) +RUN node-subtensor build-spec --disable-default-bootnode --raw --chain local > /localnet.json \ + && chown subtensor:subtensor /localnet.json + +EXPOSE 30333 9933 9944 +USER subtensor +ENTRYPOINT ["node-subtensor"] +CMD ["--base-path","/data","--chain","/localnet.json"] diff --git a/README.md b/README.md index 4d1cdf645a..c5c53b5d65 100644 --- a/README.md +++ b/README.md @@ -9,6 +9,7 @@ ``` # **Subtensor** +[![CodeQL](https://github.com/opentensor/subtensor/actions/workflows/github-code-scanning/codeql/badge.svg)](https://github.com/opentensor/subtensor/actions) [![Discord Chat](https://img.shields.io/discord/308323056592486420.svg)](https://discord.gg/bittensor) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) @@ -272,7 +273,7 @@ blocks and executing the state changes they define. The Substrate project in thi [FRAME](https://docs.substrate.io/main-docs/fundamentals/runtime-intro/#frame) to construct a blockchain runtime. FRAME allows runtime developers to declare domain-specific logic in modules called "pallets". At the heart of FRAME is a helpful -[macro language](https://docs.substrate.io/reference/frame-macros/) that makes it easy to +[macro language](https://docs.polkadot.com/develop/parachains/customize-parachain/overview/#pallet-structure) that makes it easy to create pallets and flexibly compose them to create blockchains that can address [a variety of needs](https://substrate.io/ecosystem/projects/). diff --git a/docs/consensus.md b/docs/consensus.md index 881b465b48..c3a04c380f 100644 --- a/docs/consensus.md +++ b/docs/consensus.md @@ -17,6 +17,8 @@ Community oversight (as in Steemit) must identify wrongful downvoting, but only High-volume, on-demand generative content (as in Bittensor) demands automated evaluation and divide-and-conquer validation, but introduces subjectivity both in the automated value measures and mutually exclusive task subsets across subnet validators. A coalition of validators can collude to skew scoring of subnet servers in their favour, which is harder to detect because of the inherent subjectivity. Existing consensus mechanisms will fail to deter reward manipulation for such high-volume subjective utility networks, so the need for a more sophisticated consensus arises. 
+--- + ### Consensus Mechanism Yuma Consensus guarantees long-term network honesty despite persistent adversarial presence in high-volume subjective utility networks. It directly penalizes selfish scoring by down-correction to the majority consensus and slashing of cabal voting stake, and also penalizes low-scoring of honest servers via forfeited validator rewards when cabals don’t score at consensus. @@ -31,6 +33,8 @@ Yuma Consensus is adversarially-resilient when majority stake is honest, via sta **Cabal sets high self-weight**: Cabal servers with poor utility will receive low weights from majority stake, and high self-weight from minority cabals will then get reduced to the low consensus. This means that minority cabals lose voting power as penalty for unfair voting while still receiving low consensus weight despite high self-weight. This consensus mechanism thus protects against selfish weighting if the majority stake is honest. +--- + ### Game-theoretic framework #### Preliminaries @@ -112,6 +116,64 @@ let mut ema_bonds: Vec<Vec<I32F32>> = mat_ema( &bonds_delta, &bonds, alpha ); / let mut dividends: Vec<I32F32> = inplace_normalize(matmul_transpose( &ema_bonds, &incentive )); // Validator reward ``` +--- + +### Monte Carlo simulations + +We consider a two-team game between (protagonist) honest stake ($0.5< S_H\le 1$) and (adversarial) cabal stake ($1 - S_H$), with $|H|$ honest and $|C|$ cabal players, which hold $S_H = \sum_{i\in H}S_i$ honest stake and $1-S_H = \sum_{i\in C}S_i$ cabal stake. + +#### Network sizing + +A network size of $N=|H|+|C|=(|H_V|+|H_S|)+(|C_V|+|C_S|)=512$ and validator count of $|H_V|+|C_V|=64$ is considered for consensus guarantee experiments, and the honest/cabal ratio $|H|/N=S_H$ reflects the honest stake ratio $S_H$, with extremes adjusted to ensure that each subset has at least one validator and at least one server. + +#### Stake sampling + +For the Monte Carlo simulations we use Gaussian distributions for stake and weight assignments, and ensure that the honest/cabal ratios are met. Note that stake is only assigned to validator nodes $H_V$ and $C_V$ and not servers. + +Firstly, we sample initial validator ($i\in H_V\cup C_V$) stake values $S'_i \sim \mathcal{N}(1,\sigma_S^{2})$ with a typical $\sigma_S=0.3$ standard deviation, followed by clamping to avoid negative stake: + +$$S'_i = \begin{cases} +x & \text{if } x \sim \mathcal{N}(1, \sigma_S^2), x \ge 0 \\ +0 & \text{if } x \sim \mathcal{N}(1, \sigma_S^2), x < 0 +\end{cases}$$ + +Then we normalize each honest/cabal subset and multiply by its stake proportion, which thus gives an overall normalized stake and the correct stake ratio for each subset: + +$$S_{i\in H_V} = S_H \cdot S'\_i \left/ \sum_{k\in H_V} S'\_k\right.\qquad\qquad S_{i\in C_V} = (1-S_H)\cdot S'\_i \left/ \sum_{k\in C_V}S'\_k\right.$$ + +#### Weight sampling + +Similarly, we randomize the weights that validators $H_V,C_V$ set on servers $H_S,C_S$. +Specifically, honest players $i\in H$ set $W_H = \sum_{j\in H}W_{ij}$ self-weight and $1-W_H = \sum_{j\in C}W_{ij}$ weight on cabal players, while cabal players $i\in C$ set $W_C = \sum_{j\in C}W_{ij}$ self-weight and $1-W_C = \sum_{j\in H}W_{ij}$ weight on honest players.
+ +We firstly sample initial weights $W'_{ij} \sim \mathcal{N}(1,\sigma_W^{2})$ with various standard deviations ranging in $0\le\sigma_W\le0.4$, followed by clamping to avoid negative weights: + +$$W'_{ij} = \begin{cases} +x & \text{if } x \sim \mathcal{N}(1, \sigma_W^2), x \geq 0 \\ +0 & \text{if } x \sim \mathcal{N}(1, \sigma_W^2), x < 0 +\end{cases}$$ + +Weight setting between the two subsets forms quadrants $H_V\rightarrow H_S$, $H_V\rightarrow C_S$, $C_V\rightarrow H_S$, and $C_V\rightarrow C_S$, so we ensure those weight ratios are met by normalizing each weight subset and multiplying by the corresponding quadrant ratio: + +$$W_{i\in H_V, j\in H_S} = W_H\cdot W'\_{ij} \left/ \sum_{k\in H_S}W'\_{ik}\right.\qquad\qquad W_{i\in H_V, j\in C_S} = (1-W_H)\cdot W'\_{ij} \left/ \sum_{k\in C_S}W'\_{ik}\right.$$ + +$$W_{i\in C_V, j\in H_S} = (1-W_C)\cdot W'\_{ij} \left/ \sum_{k\in H_S}W'\_{ik}\right.\qquad\qquad W_{i\in C_V, j\in C_S} = W_C\cdot W'\_{ij} \left/ \sum_{k\in C_S}W'\_{ik}\right.$$ + +#### Emission calculation + +Given the simulation parameters of the network size, validator count, a defined major/honest stake $S_H$, a defined major/honest utility $W_H$, and a defined minor/cabal self-weight $W_C$, we have now instantiated the network with randomly sampled stake and weights and can proceed with an emission calculation. + +We calculate the consensus $\overline{W_j} = \arg \max_w \left( \sum_i S_i \cdot \left\lbrace W_{ij} \ge w \right\rbrace \ge \kappa \right)$ for each server $j$, and calculate consensus-clipped weights $\overline{W_{ij}} = \min( W_{ij}, \overline{W_j} )$. This then gives us the adjusted weights that offer a measure of protection against reward manipulation. + +To calculate emissions for this epoch, we firstly calculate server rank $R_j = \sum_i S_i \cdot \overline{W_{ij}}$ then incentive $I_j = R_j / \sum_k R_k$, as well as validator bonds $\Delta B_{ij} = S_i \cdot \widetilde{W_{ij}} \left/ \left( \sum_k S_k \cdot \widetilde{W_{kj}} \right) \right.$ and rewards $D_i = \sum_j B_{ij} \cdot I_j$. + +Then we add up server incentive and validator bonds over honest nodes to obtain honest emission $E_H = \xi \cdot D_{i\in H} + (1-\xi) \cdot I_{i\in H}$ with a typical validator reward ratio of $\xi=0.5$. +The objective is to prove major stake retention $S_H\le E_H$ for a single epoch, which by extension proves retention over many epochs due to the additive nature of EMA bonds, so we do not bother with validator EMA bonds in these experiments. + +The honest objective $S_H\le E_H$ at least retains scoring power $S_H$ over all action transitions in the game, otherwise when $E_H\lt S_H$ honest emission will erode to 0 over time, despite a starting condition of $0.5\lt S_H$. + +--- + ### Consensus guarantees Yuma Consensus guarantees honest majority stake retention $S_H\le E_H$ even under worst-case adversarial attacks, given sufficiently large honest utility $W_H$. The specific honest stake and utility pairs that delineate the guarantees are complicated by natural variances inside large realistic networks. Therefore, we use extensive random sampling simulations (Monte Carlo studies) of large realistic networks and subject them to varying degrees of adversarial attacks, and calculate comprehensive consensus guarantees under representative conditions.
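The following minimal Python sketch (illustrative only, not the pallet's Rust implementation; network sizes and parameters are toy values, and the bonds penalty $\beta$ and EMA smoothing are omitted) mirrors the sampling and emission steps above and checks whether the honest side retains its stake, i.e. whether $S_H \le E_H$:

```python
# Monte Carlo sketch of the stake/weight sampling and emission calculation above.
# Assumes numpy; all sizes and hyperparameters are illustrative, not the N=512 setup.
import numpy as np

rng = np.random.default_rng(0)

def sample_normalized(n, total, sigma):
    """Sample n values ~ N(1, sigma^2), clamp negatives to 0, rescale to sum to `total`."""
    x = np.maximum(rng.normal(1.0, sigma, n), 0.0)
    return total * x / x.sum()

S_H, W_H, W_C, kappa, xi = 0.6, 0.8, 1.0, 0.5, 0.5   # honest stake/utility, cabal self-weight
n_hv, n_cv, n_hs, n_cs = 8, 4, 40, 26                # honest/cabal validators and servers

# Stake is sampled for validators only, per subset, then scaled to S_H and 1 - S_H.
stake = np.concatenate([sample_normalized(n_hv, S_H, 0.3),
                        sample_normalized(n_cv, 1.0 - S_H, 0.3)])

def quadrant(rows, cols, ratio, sigma=0.2):
    """One weight quadrant: each validator row is normalized and scaled to `ratio`."""
    w = np.maximum(rng.normal(1.0, sigma, (rows, cols)), 0.0)
    return ratio * w / w.sum(axis=1, keepdims=True)

W = np.block([[quadrant(n_hv, n_hs, W_H),       quadrant(n_hv, n_cs, 1.0 - W_H)],
              [quadrant(n_cv, n_hs, 1.0 - W_C), quadrant(n_cv, n_cs, W_C)]])

def consensus_weight(w_col, s, kappa):
    """Largest w such that validators with W_ij >= w hold at least kappa of total stake."""
    order = np.argsort(-w_col)
    cum = np.cumsum(s[order])
    return w_col[order][np.searchsorted(cum, kappa * s.sum())]

W_bar = np.array([consensus_weight(W[:, j], stake, kappa) for j in range(W.shape[1])])
W_clip = np.minimum(W, W_bar)                        # consensus-clipped weights

R = stake @ W_clip                                   # server rank
I = R / R.sum()                                      # server incentive
B = stake[:, None] * W_clip                          # bonds (no penalty, no EMA here)
B = B / np.maximum(B.sum(axis=0, keepdims=True), 1e-12)
D = B @ I                                            # validator dividends

E_H = xi * D[:n_hv].sum() + (1.0 - xi) * I[:n_hs].sum()
print(f"S_H = {S_H:.2f}, E_H = {E_H:.3f}, honest stake retained: {E_H >= S_H}")
```

Setting $W_C=1$ above models the worst-case attack in which the cabal gives all weight to itself; sweeping $S_H$ and $W_H$ over a grid and recording $E_H$ is what produces the retention contours discussed next.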
@@ -124,9 +186,9 @@ The x-axis is major self-weight and the y-axis is minor self-weight, and each co Major/honest self-weight $W_H$ is the true honest utility, while minor/cabal self-weight $W_C$ is an arbitrary value a self-serving coalition may self-report.

[figure: retention-profile contour plots across honest stake settings (image links updated in this hunk)]

To understand how we construct these plots, let us first consider a contour plot for a single major/honest stake setting $S_H=0.6$. Here each contour value is the honest emission $E_H$, and we highlight at (1) the specific contour $E_H=0.6$ that matches the honest stake. This means that any weight setting on contour $E_H=S_H=0.6$ will retain honest stake, while any setting to the right of it will grow honest stake. @@ -138,18 +200,20 @@ A compound plot then combines all the highlighted $S_H=E_H$ contours from indivi Retention graphs like these comprehensively capture consensus guarantees across all primary conditions, and we utilize these to analyze the effect of consensus hyperparameters. Subtensor integration tests run Monte Carlo simulations of large realistic networks under adversarial conditions, and construct retention profiles to confirm consensus guarantees of the actual blockchain implementation. -Retention profiles are reproducible by running [`_map_consensus_guarantees`](../pallets/subtensor/tests/epoch.rs) (decorate with `#[test]`). +Retention profiles are reproducible by running test [`map_consensus_guarantees()`](../pallets/subtensor/src/tests/consensus.rs) and plotting with [`map_consensus.py`](../scripts/map_consensus.py). ```bash -RUST_BACKTRACE=1 SKIP_WASM_BUILD=1 cargo test -- _map_consensus_guarantees --exact --nocapture > consensus.txt +RUST_BACKTRACE=1 SKIP_WASM_BUILD=1 RUSTFLAGS="-C opt-level=3" cargo test --manifest-path=pallets/subtensor/Cargo.toml -- tests::consensus::map_consensus_guarantees --exact --nocapture > consensus.txt + +python scripts/map_consensus.py consensus.txt ``` #### Subjectivity variance Yuma Consensus corrects reward manipulation in subjective utility networks, but the extent of subjectivity influences the exact consensus guarantees. In particular, we expect lower subjectivity to offer improved guarantees since there is stronger consensus. However, for higher variance in assigned weights it is easier to hide reward manipulation, so we expect poorer guarantees.

[figure: retention profiles under varying weight deviation σ(W) (image links updated in this hunk)]

We assume normally distributed weights originating from a particular side, either honest or cabal, and then modify the weight deviation magnitude $\sigma(W)$ in terms of the mean weight $\mu(W)$. @@ -167,9 +231,9 @@ Increasing $\kappa$ demands greater honest stake, e.g. when $\kappa=0.6$ there i Hence $\kappa=0.5$ is typically the most sensible setting.
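As a hypothetical worked example of the $\kappa$ trade-off (the stakes and weights below are made-up numbers, not simulation output): suppose four validators hold stakes $0.30, 0.25, 0.25, 0.20$ and set weights $0.9, 0.8, 0.1, 0.05$ on some server $j$. With $\kappa=0.5$ the consensus weight is $\overline{W_j}=0.8$, since the $0.30+0.25=0.55$ stake supporting weights $\ge 0.8$ already clears the threshold. Raising it to $\kappa=0.6$ drops the consensus weight to $\overline{W_j}=0.1$, because $0.6$ of stake is only reached once the third validator's $0.1$ weight is included, and all higher weights are then clipped down to it. Sustaining a high consensus weight under a larger $\kappa$ therefore requires correspondingly more supporting (honest) stake.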

[figure: retention profiles for different consensus majority ratios κ (image links updated in this hunk)]

#### Bonds penalty (β) @@ -179,9 +243,9 @@ Lower-stake validators may experience lower service priority, which can result i Full bonds penalty $\beta=1$ may not be desired, due to the presence of non-adversarial cases like these.

[figure: retention profiles for different bonds penalties β (image links updated in this hunk)]

We expect that a greater bonds penalty will penalize out-of-consensus validators more, which means less emission going to cabals. Comprehensive simulations with $\beta = 0$, $0.5$, and $1$ respectively show 78%, 76%, and 73% honest utility requirement. This confirms the expectation that a greater bonds penalty means greater inflation going to the honest majority. @@ -191,10 +255,110 @@ Subnet servers need incentive to deliver high utility, and subnet validators nee We expect that more emission going to validators will improve security guarantees, since self-serving validation can then be economically disincentivized.

[figure: retention profiles for different validator reward ratios ξ (image links updated in this hunk)]

We set the validation reward ratio at $\xi=0$, $0.25$, and $0.5$ and respectively observe 82%, 78%, 73% honest utility requirement for 60% honest stake preservation. -This means that network security improves as the validation reward ratio is increased, although a significant server incentive ratio still needs to be maintained to ensure overall high utility. \ No newline at end of file +This means that network security improves as the validation reward ratio is increased, although a significant server incentive ratio still needs to be maintained to ensure overall high utility. + +--- + +### Reproduce Consensus Plots (Runpod) + +This guide demonstrates how to reproduce consensus retention profile plots on a minimal Runpod CPU instance. + +#### 1. Deploy Runpod Instance + +Navigate to https://www.runpod.io/console/deploy and select the following: + +* **Pod Type:** CPU Pod, CPU5 (5.7 GHz • DDR5 RAM • NVMe) or equivalent. +* **Instance Configuration:** Compute-Optimized ($0.07/hr, 2 vCPUs, 4GB RAM). + +**Important:** Edit the template and set "Container Disk (Temporary)" to 20GB. This ensures sufficient disk space for the build. + +Retrieve the connection details, including the SSH command and port, under "Connect" -> "SSH over exposed TCP". You can optionally enable Jupyter access (`8888:localhost:8888`) if desired. Connect to your instance via SSH: + +```bash +ssh -L 8888:localhost:8888 root@<POD_IP> -p <SSH_PORT> -i ~/.ssh/id_ed25519 # Replace the <...> placeholders with your pod's details +``` + +#### 2. Set up the Environment + +1. **Start a `tmux` session for persistence:** + + ```bash + tmux + ``` + +2. **Update system packages and install prerequisites (Python, Rust, and dependencies):** + + ```bash + sudo apt-get update && sudo apt-get install -y build-essential clang curl git make libssl-dev llvm libudev-dev protobuf-compiler python3 python3-pip \ + && curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y \ + && source ~/.cargo/env && rustup default stable && rustup update \ + && rustup target add wasm32-unknown-unknown \ + && rustup toolchain install nightly \ + && rustup target add --toolchain nightly wasm32-unknown-unknown + + ``` + +3. **Clone the Subtensor repository and check out the relevant branch:** + + ```bash + git clone https://github.com/opentensor/subtensor.git + cd subtensor + git checkout main + + ``` + + +#### 3. Simulate Networks and Generate Data + +The Subtensor integration tests simulate large, realistic networks under adversarial conditions to generate retention profiles that validate the blockchain's consensus guarantees. Building takes about 10 minutes, and the test itself roughly another 15 minutes. + + +```bash +RUST_BACKTRACE=1 SKIP_WASM_BUILD=1 RUSTFLAGS="-C opt-level=3" cargo test --manifest-path=pallets/subtensor/Cargo.toml -- tests::consensus::map_consensus_guarantees --exact --nocapture > consensus.txt +``` +This command runs the `map_consensus_guarantees` test and saves the output to `consensus.txt`. Replace `` with a float, e.g. `1.0` (100% bonds penalty). + +#### 4. Generate Contour Plots + +1. **Create a Python virtual environment and install necessary libraries:** + + ```bash + python3 -m venv .venv + source .venv/bin/activate + pip install numpy matplotlib jupyterlab + + ``` + +2. **Run the plotting script:** + + ```bash + python3 scripts/map_consensus.py consensus.txt + ``` + This generates an SVG file named `consensus_plot.svg` in the current directory. + + +#### 5.
Explore and Modify (Optional) + +You can use Jupyter-lab to interactively explore and modify the generated plots: + +1. **Start Jupyter-lab (on VPS):** + ```bash + jupyter-lab --allow-root --port=8888 + ``` + +2. **Connect to Jupyter:** Open the provided URL (e.g., `http://localhost:8888/tree?token=...`) in your local workstation web browser. + +3. **Modify the plotting script:** Edit `scripts/map_consensus.py` to customize the plots, otherwise download the SVG file. + + +#### Disclaimer + +> This reproduction procedure is provided as a guide and may require adjustments depending on your specific VPS environment and configuration. While every effort has been made to ensure accuracy and completeness, variations in system setup, software versions, or network conditions could affect the results. +> +> Please exercise caution when executing commands with root privileges and ensure you understand the potential implications before proceeding. The author assumes no responsibility for any issues arising from the use of this procedure. If you encounter problems or have suggestions for improvement, please open an issue on this repository. diff --git a/docs/rust-setup.md b/docs/rust-setup.md index 346b424a08..fedff7b381 100644 --- a/docs/rust-setup.md +++ b/docs/rust-setup.md @@ -2,7 +2,7 @@ title: Installation --- This guide is for reference only, please check the latest information on getting starting with Substrate -[here](https://docs.substrate.io/main-docs/install/). +[here](https://docs.polkadot.com/main-docs/install/). This page will guide you through the **2 steps** needed to prepare a computer for **Substrate** development. Since Substrate is built with [the Rust programming language](https://www.rust-lang.org/), the first @@ -14,7 +14,7 @@ Unix-based operating systems. ## Build dependencies Substrate development is easiest on Unix-based operating systems like macOS or Linux. The examples -in the [Substrate Docs](https://docs.substrate.io) use Unix-style terminals to demonstrate how to +in the [Substrate Docs](https://docs.polkadot.com) use Unix-style terminals to demonstrate how to interact with Substrate from the command line. ### Ubuntu/Debian @@ -76,7 +76,7 @@ brew install openssl recommend to use [Windows Subsystem Linux](https://docs.microsoft.com/en-us/windows/wsl/install-win10) (WSL) and follow the instructions for [Ubuntu/Debian](#ubuntudebian). Please refer to the separate -[guide for native Windows development](https://docs.substrate.io/main-docs/install/windows/). +[guide for native Windows development](https://docs.polkadot.com/main-docs/install/windows/). ## Rust developer environment @@ -102,7 +102,7 @@ rustup target add wasm32-unknown-unknown --toolchain nightly ## Test your set-up Now the best way to ensure that you have successfully prepared a computer for Substrate -development is to follow the steps in [our first Substrate tutorial](https://docs.substrate.io/tutorials/v3/create-your-first-substrate-chain/). +development is to follow the steps in [our first Substrate tutorial](https://docs.polkadot.com/tutorials/v3/create-your-first-substrate-chain/). ## Troubleshooting Substrate builds diff --git a/evm-tests/README.md b/evm-tests/README.md index 7d01034bd8..83dc8f326f 100644 --- a/evm-tests/README.md +++ b/evm-tests/README.md @@ -1,15 +1,30 @@ # type-test -test with ts +The evm-tests folder includes all typescript code to test the basic EVM function +like token transfer, and all precompile contracts in Subtensor. 
It is +implemented in TypeScript and uses both the ethers and viem libraries to interact with +contracts. The Polkadot API is used to call extrinsics and read storage in Subtensor. +Developers can use it to verify code changes in the precompile contracts. + +It is also included in the CI process: all test cases are executed for every new +commit, so the CI flow catches any failing test case. Because the Polkadot API fetches the +latest metadata from the runtime, the tests also surface any incompatibility +between the runtime and the precompile contracts. ## polkadot api +To get the metadata, first start the localnet by running +`./scripts/localnet.sh`, then run the following command. A folder +named .papi will be created, containing the metadata and type definitions. + ```bash -npx papi add devnet -w ws://10.0.0.11:9944 +npx papi add devnet -w ws://localhost:9944 ``` ## get the new metadata +If the runtime is upgraded, the metadata needs to be fetched again. + ```bash sh get-metadata.sh ``` diff --git a/evm-tests/local.test.ts b/evm-tests/local.test.ts deleted file mode 100644 index 9eb24d4327..0000000000 --- a/evm-tests/local.test.ts +++ /dev/null @@ -1,53 +0,0 @@ -import * as assert from "assert"; -import { getAliceSigner, getClient, getDevnetApi, getRandomSubstrateKeypair } from "../src/substrate" -import { SUB_LOCAL_URL, } from "../src/config"; -import { devnet } from "@polkadot-api/descriptors" -import { PolkadotSigner, TypedApi } from "polkadot-api"; -import { convertPublicKeyToSs58, convertH160ToSS58 } from "../src/address-utils" -import { ethers } from "ethers" -import { INEURON_ADDRESS, INeuronABI } from "../src/contracts/neuron" -import { generateRandomEthersWallet } from "../src/utils" -import { forceSetBalanceToEthAddress, forceSetBalanceToSs58Address, addNewSubnetwork, burnedRegister } from "../src/subtensor" - -describe("Test neuron precompile Serve Axon Prometheus", () => { - // init eth part - // const wallet1 = generateRandomEthersWallet(); - // const wallet2 = generateRandomEthersWallet(); - // const wallet3 = generateRandomEthersWallet(); - - // init substrate part - - // const coldkey = getRandomSubstrateKeypair(); - - let api: TypedApi - - // sudo account alice as signer - let alice: PolkadotSigner; - before(async () => { - // init variables got from await and async - const subClient = await getClient(SUB_LOCAL_URL) - api = await getDevnetApi() - // alice = await getAliceSigner(); - - // await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(coldkey.publicKey)) - // await forceSetBalanceToEthAddress(api, wallet1.address) - // await forceSetBalanceToEthAddress(api, wallet2.address) - // await forceSetBalanceToEthAddress(api, wallet3.address) - - - let index = 0; - while (index < 30) { - const hotkey = getRandomSubstrateKeypair(); - const coldkey = getRandomSubstrateKeypair(); - await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(hotkey.publicKey)) - await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(coldkey.publicKey)) - let netuid = await addNewSubnetwork(api, hotkey, coldkey) - } - - - }) - - it("Serve Axon", async () => { - - }); -}); \ No newline at end of file diff --git a/evm-tests/package-lock.json b/evm-tests/package-lock.json new file mode 100644 index 0000000000..ce2766fb4e --- /dev/null +++ b/evm-tests/package-lock.json @@ -0,0 +1,5815 @@ +{ + "name": "evm-tests", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "license": "ISC", + "dependencies": { + "@polkadot-labs/hdkd": "^0.0.10", + "@polkadot-labs/hdkd-helpers": "^0.0.11", + 
"@polkadot/api": "15.1.1", + "@types/mocha": "^10.0.10", + "crypto": "^1.0.1", + "dotenv": "16.4.7", + "ethers": "^6.13.5", + "mocha": "^11.1.0", + "polkadot-api": "^1.9.5", + "scale-ts": "^1.6.1", + "viem": "2.23.4" + }, + "devDependencies": { + "@types/bun": "^1.1.13", + "@types/chai": "^5.0.1", + "assert": "^2.1.0", + "chai": "^5.2.0", + "prettier": "^3.3.3", + "ts-node": "^10.9.2", + "typescript": "^5.7.2", + "vite": "^5.4.8" + } + }, + ".papi/descriptors": { + "name": "@polkadot-api/descriptors", + "version": "0.1.0-autogenerated.7914363913476982777", + "extraneous": true, + "peerDependencies": { + "polkadot-api": "*" + } + }, + "node_modules/@adraffy/ens-normalize": { + "version": "1.10.1", + "resolved": "https://registry.npmjs.org/@adraffy/ens-normalize/-/ens-normalize-1.10.1.tgz", + "integrity": "sha512-96Z2IP3mYmF1Xg2cDm8f1gWGf/HUVedQ3FMifV4kG/PQ4yEP51xDtRAEfhVNt5f/uzpNkZHwWQuUcu6D6K+Ekw==", + "license": "MIT" + }, + "node_modules/@babel/code-frame": { + "version": "7.26.2", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.26.2.tgz", + "integrity": "sha512-RJlIHRueQgwWitWgF8OdFYGZX328Ax5BCemNGlqHfplnRT9ESi8JkFlvaVYbS+UubVY6dpv87Fs2u5M29iNFVQ==", + "license": "MIT", + "dependencies": { + "@babel/helper-validator-identifier": "^7.25.9", + "js-tokens": "^4.0.0", + "picocolors": "^1.0.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.25.9", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.25.9.tgz", + "integrity": "sha512-Ed61U6XJc3CVRfkERJWDz4dJwKe7iLmmJsbOGu9wSloNSFttHV0I8g6UAgb7qnK5ly5bGLPd4oXZlxCdANBOWQ==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@commander-js/extra-typings": { + "version": "13.1.0", + "resolved": "https://registry.npmjs.org/@commander-js/extra-typings/-/extra-typings-13.1.0.tgz", + "integrity": "sha512-q5P52BYb1hwVWE6dtID7VvuJWrlfbCv4klj7BjUUOqMz4jbSZD4C9fJ9lRjL2jnBGTg+gDDlaXN51rkWcLk4fg==", + "license": "MIT", + "peerDependencies": { + "commander": "~13.1.0" + } + }, + "node_modules/@cspotcode/source-map-support": { + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/@cspotcode/source-map-support/-/source-map-support-0.8.1.tgz", + "integrity": "sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/trace-mapping": "0.3.9" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/aix-ppc64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.25.2.tgz", + "integrity": "sha512-wCIboOL2yXZym2cgm6mlA742s9QeJ8DjGVaL39dLN4rRwrOgOyYSnOaFPhKZGLb2ngj4EyfAFjsNJwPXZvseag==", + "cpu": [ + "ppc64" + ], + "license": "MIT", + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.25.2.tgz", + "integrity": "sha512-NQhH7jFstVY5x8CKbcfa166GoV0EFkaPkCKBQkdPJFvo5u+nGXLEH/ooniLb3QI8Fk58YAx7nsPLozUWfCBOJA==", + "cpu": [ + "arm" + ], + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.25.2.tgz", + "integrity": 
"sha512-5ZAX5xOmTligeBaeNEPnPaeEuah53Id2tX4c2CVP3JaROTH+j4fnfHCkr1PjXMd78hMst+TlkfKcW/DlTq0i4w==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-x64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.25.2.tgz", + "integrity": "sha512-Ffcx+nnma8Sge4jzddPHCZVRvIfQ0kMsUsCMcJRHkGJ1cDmhe4SsrYIjLUKn1xpHZybmOqCWwB0zQvsjdEHtkg==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.25.2.tgz", + "integrity": "sha512-MpM6LUVTXAzOvN4KbjzU/q5smzryuoNjlriAIx+06RpecwCkL9JpenNzpKd2YMzLJFOdPqBpuub6eVRP5IgiSA==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-x64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.25.2.tgz", + "integrity": "sha512-5eRPrTX7wFyuWe8FqEFPG2cU0+butQQVNcT4sVipqjLYQjjh8a8+vUTfgBKM88ObB85ahsnTwF7PSIt6PG+QkA==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-arm64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.25.2.tgz", + "integrity": "sha512-mLwm4vXKiQ2UTSX4+ImyiPdiHjiZhIaE9QvC7sw0tZ6HoNMjYAqQpGyui5VRIi5sGd+uWq940gdCbY3VLvsO1w==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.25.2.tgz", + "integrity": "sha512-6qyyn6TjayJSwGpm8J9QYYGQcRgc90nmfdUb0O7pp1s4lTY+9D0H9O02v5JqGApUyiHOtkz6+1hZNvNtEhbwRQ==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.25.2.tgz", + "integrity": "sha512-UHBRgJcmjJv5oeQF8EpTRZs/1knq6loLxTsjc3nxO9eXAPDLcWW55flrMVc97qFPbmZP31ta1AZVUKQzKTzb0g==", + "cpu": [ + "arm" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.25.2.tgz", + "integrity": "sha512-gq/sjLsOyMT19I8obBISvhoYiZIAaGF8JpeXu1u8yPv8BE5HlWYobmlsfijFIZ9hIVGYkbdFhEqC0NvM4kNO0g==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.25.2.tgz", + "integrity": "sha512-bBYCv9obgW2cBP+2ZWfjYTU+f5cxRoGGQ5SeDbYdFCAZpYWrfjjfYwvUpP8MlKbP0nwZ5gyOU/0aUzZ5HWPuvQ==", + "cpu": [ + "ia32" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-loong64": { + "version": "0.25.2", + "resolved": 
"https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.25.2.tgz", + "integrity": "sha512-SHNGiKtvnU2dBlM5D8CXRFdd+6etgZ9dXfaPCeJtz+37PIUlixvlIhI23L5khKXs3DIzAn9V8v+qb1TRKrgT5w==", + "cpu": [ + "loong64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-mips64el": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.25.2.tgz", + "integrity": "sha512-hDDRlzE6rPeoj+5fsADqdUZl1OzqDYow4TB4Y/3PlKBD0ph1e6uPHzIQcv2Z65u2K0kpeByIyAjCmjn1hJgG0Q==", + "cpu": [ + "mips64el" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ppc64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.25.2.tgz", + "integrity": "sha512-tsHu2RRSWzipmUi9UBDEzc0nLc4HtpZEI5Ba+Omms5456x5WaNuiG3u7xh5AO6sipnJ9r4cRWQB2tUjPyIkc6g==", + "cpu": [ + "ppc64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-riscv64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.25.2.tgz", + "integrity": "sha512-k4LtpgV7NJQOml/10uPU0s4SAXGnowi5qBSjaLWMojNCUICNu7TshqHLAEbkBdAszL5TabfvQ48kK84hyFzjnw==", + "cpu": [ + "riscv64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-s390x": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.25.2.tgz", + "integrity": "sha512-GRa4IshOdvKY7M/rDpRR3gkiTNp34M0eLTaC1a08gNrh4u488aPhuZOCpkF6+2wl3zAN7L7XIpOFBhnaE3/Q8Q==", + "cpu": [ + "s390x" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-x64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.25.2.tgz", + "integrity": "sha512-QInHERlqpTTZ4FRB0fROQWXcYRD64lAoiegezDunLpalZMjcUcld3YzZmVJ2H/Cp0wJRZ8Xtjtj0cEHhYc/uUg==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-arm64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.25.2.tgz", + "integrity": "sha512-talAIBoY5M8vHc6EeI2WW9d/CkiO9MQJ0IOWX8hrLhxGbro/vBXJvaQXefW2cP0z0nQVTdQ/eNyGFV1GSKrxfw==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-x64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.25.2.tgz", + "integrity": "sha512-voZT9Z+tpOxrvfKFyfDYPc4DO4rk06qamv1a/fkuzHpiVBMOhpjK+vBmWM8J1eiB3OLSMFYNaOaBNLXGChf5tg==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-arm64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.25.2.tgz", + "integrity": "sha512-dcXYOC6NXOqcykeDlwId9kB6OkPUxOEqU+rkrYVqJbK2hagWOMrsTGsMr8+rW02M+d5Op5NNlgMmjzecaRf7Tg==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + 
"node_modules/@esbuild/openbsd-x64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.25.2.tgz", + "integrity": "sha512-t/TkWwahkH0Tsgoq1Ju7QfgGhArkGLkF1uYz8nQS/PPFlXbP5YgRpqQR3ARRiC2iXoLTWFxc6DJMSK10dVXluw==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/sunos-x64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.25.2.tgz", + "integrity": "sha512-cfZH1co2+imVdWCjd+D1gf9NjkchVhhdpgb1q5y6Hcv9TP6Zi9ZG/beI3ig8TvwT9lH9dlxLq5MQBBgwuj4xvA==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-arm64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.25.2.tgz", + "integrity": "sha512-7Loyjh+D/Nx/sOTzV8vfbB3GJuHdOQyrOryFdZvPHLf42Tk9ivBU5Aedi7iyX+x6rbn2Mh68T4qq1SDqJBQO5Q==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-ia32": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.25.2.tgz", + "integrity": "sha512-WRJgsz9un0nqZJ4MfhabxaD9Ft8KioqU3JMinOTvobbX6MOSUigSBlogP8QB3uxpJDsFS6yN+3FDBdqE5lg9kg==", + "cpu": [ + "ia32" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-x64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.25.2.tgz", + "integrity": "sha512-kM3HKb16VIXZyIeVrM1ygYmZBKybX8N4p754bw390wGO3Tf2j4L2/WYL+4suWujpgf6GBYs3jv7TyUivdd05JA==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@isaacs/cliui": { + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz", + "integrity": "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==", + "license": "ISC", + "dependencies": { + "string-width": "^5.1.2", + "string-width-cjs": "npm:string-width@^4.2.0", + "strip-ansi": "^7.0.1", + "strip-ansi-cjs": "npm:strip-ansi@^6.0.1", + "wrap-ansi": "^8.1.0", + "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@isaacs/cliui/node_modules/ansi-regex": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.1.0.tgz", + "integrity": "sha512-7HSX4QQb4CspciLpVFwyRe79O3xsIZDDLER21kERQ71oaPodF8jL725AgJMFAYbooIqolJoRLuM81SpeUkpkvA==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/@isaacs/cliui/node_modules/ansi-styles": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", + "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/@isaacs/cliui/node_modules/emoji-regex": { + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", + "integrity": 
"sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", + "license": "MIT" + }, + "node_modules/@isaacs/cliui/node_modules/string-width": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", + "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", + "license": "MIT", + "dependencies": { + "eastasianwidth": "^0.2.0", + "emoji-regex": "^9.2.2", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@isaacs/cliui/node_modules/strip-ansi": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", + "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", + "license": "MIT", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/@isaacs/cliui/node_modules/wrap-ansi": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", + "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", + "license": "MIT", + "dependencies": { + "ansi-styles": "^6.1.0", + "string-width": "^5.0.1", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.8", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.8.tgz", + "integrity": "sha512-imAbBGkb+ebQyxKgzv5Hu2nmROxoDOXHh80evxdoXNOrvAnVx7zimzc1Oo5h9RlfV4vPXaE2iM5pOFbvOCClWA==", + "license": "MIT", + "dependencies": { + "@jridgewell/set-array": "^1.2.1", + "@jridgewell/sourcemap-codec": "^1.4.10", + "@jridgewell/trace-mapping": "^0.3.24" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/gen-mapping/node_modules/@jridgewell/trace-mapping": { + "version": "0.3.25", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz", + "integrity": "sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==", + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/set-array": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.2.1.tgz", + "integrity": "sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==", + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.0.tgz", + "integrity": "sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ==", + "license": "MIT" + }, + 
"node_modules/@jridgewell/trace-mapping": { + "version": "0.3.9", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.9.tgz", + "integrity": "sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.0.3", + "@jridgewell/sourcemap-codec": "^1.4.10" + } + }, + "node_modules/@noble/curves": { + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/@noble/curves/-/curves-1.8.1.tgz", + "integrity": "sha512-warwspo+UYUPep0Q+vtdVB4Ugn8GGQj8iyB3gnRWsztmUHTI3S1nhdiWNsPUGL0vud7JlRRk1XEu7Lq1KGTnMQ==", + "license": "MIT", + "dependencies": { + "@noble/hashes": "1.7.1" + }, + "engines": { + "node": "^14.21.3 || >=16" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/@noble/hashes": { + "version": "1.7.1", + "resolved": "https://registry.npmjs.org/@noble/hashes/-/hashes-1.7.1.tgz", + "integrity": "sha512-B8XBPsn4vT/KJAGqDzbwztd+6Yte3P4V7iafm24bxgDe/mlRuK6xmWPuCNrKt2vDafZ8MfJLlchDG/vYafQEjQ==", + "license": "MIT", + "engines": { + "node": "^14.21.3 || >=16" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/@pkgjs/parseargs": { + "version": "0.11.0", + "resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz", + "integrity": "sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==", + "license": "MIT", + "optional": true, + "engines": { + "node": ">=14" + } + }, + "node_modules/@polkadot-api/cli": { + "version": "0.11.9", + "resolved": "https://registry.npmjs.org/@polkadot-api/cli/-/cli-0.11.9.tgz", + "integrity": "sha512-5Qt+YRf/kOCZGiFoWzgyxoZYA9OpN28AFE4jQ4nZI33lty8oH4FR62IF2iLF+KdafhgF9k9l1Kj24zuBFH3Vrw==", + "license": "MIT", + "dependencies": { + "@commander-js/extra-typings": "^13.1.0", + "@polkadot-api/codegen": "0.13.3", + "@polkadot-api/ink-contracts": "0.2.6", + "@polkadot-api/json-rpc-provider": "0.0.4", + "@polkadot-api/known-chains": "0.7.3", + "@polkadot-api/metadata-compatibility": "0.2.0", + "@polkadot-api/observable-client": "0.8.6", + "@polkadot-api/polkadot-sdk-compat": "2.3.2", + "@polkadot-api/sm-provider": "0.1.7", + "@polkadot-api/smoldot": "0.3.8", + "@polkadot-api/substrate-bindings": "0.11.1", + "@polkadot-api/substrate-client": "0.3.0", + "@polkadot-api/utils": "0.1.2", + "@polkadot-api/wasm-executor": "^0.1.2", + "@polkadot-api/ws-provider": "0.4.0", + "@types/node": "^22.14.0", + "commander": "^13.1.0", + "execa": "^9.5.2", + "fs.promises.exists": "^1.1.4", + "ora": "^8.2.0", + "read-pkg": "^9.0.1", + "rxjs": "^7.8.2", + "tsc-prog": "^2.3.0", + "tsup": "^8.4.0", + "typescript": "^5.8.3", + "write-package": "^7.1.0" + }, + "bin": { + "papi": "dist/main.js", + "polkadot-api": "dist/main.js" + } + }, + "node_modules/@polkadot-api/codegen": { + "version": "0.13.3", + "resolved": "https://registry.npmjs.org/@polkadot-api/codegen/-/codegen-0.13.3.tgz", + "integrity": "sha512-+8mp9k5L9myFSLv6Ad5r63JSIeq80/tKbk67rczDq6Co0PlJHqxult+wZHohHuyJSdtu8dHW9JQktTtM2RZT1w==", + "license": "MIT", + "dependencies": { + "@polkadot-api/ink-contracts": "0.2.6", + "@polkadot-api/metadata-builders": "0.10.2", + "@polkadot-api/metadata-compatibility": "0.2.0", + "@polkadot-api/substrate-bindings": "0.11.1", + "@polkadot-api/utils": "0.1.2" + } + }, + "node_modules/@polkadot-api/ink-contracts": { + "version": "0.2.6", + "resolved": 
"https://registry.npmjs.org/@polkadot-api/ink-contracts/-/ink-contracts-0.2.6.tgz", + "integrity": "sha512-76oHO/rKRa48w1i4DEmB/9e/FmxKuhMJq7l1OhdnX6mbVO+bAif7FkRUHLfIgsWqCdhCdfLe5J474HRudKhU/A==", + "license": "MIT", + "dependencies": { + "@polkadot-api/metadata-builders": "0.10.2", + "@polkadot-api/substrate-bindings": "0.11.1", + "@polkadot-api/utils": "0.1.2" + } + }, + "node_modules/@polkadot-api/json-rpc-provider": { + "version": "0.0.4", + "resolved": "https://registry.npmjs.org/@polkadot-api/json-rpc-provider/-/json-rpc-provider-0.0.4.tgz", + "integrity": "sha512-9cDijLIxzHOBuq6yHqpqjJ9jBmXrctjc1OFqU+tQrS96adQze3mTIH6DTgfb/0LMrqxzxffz1HQGrIlEH00WrA==", + "license": "MIT" + }, + "node_modules/@polkadot-api/json-rpc-provider-proxy": { + "version": "0.2.4", + "resolved": "https://registry.npmjs.org/@polkadot-api/json-rpc-provider-proxy/-/json-rpc-provider-proxy-0.2.4.tgz", + "integrity": "sha512-nuGoY9QpBAiRU7xmXN3nugFvPcnSu3IxTLm1OWcNTGlZ1LW5bvdQHz3JLk56+Jlyb3GJ971hqdg2DJsMXkKCOg==", + "license": "MIT" + }, + "node_modules/@polkadot-api/known-chains": { + "version": "0.7.3", + "resolved": "https://registry.npmjs.org/@polkadot-api/known-chains/-/known-chains-0.7.3.tgz", + "integrity": "sha512-yBRVbOLn0e36+EGWE2/hX8mhTKvfdZtbk2VCgTM9djkz28eDFfiDjEl6biQA8Q0Kd7t3iRzoNbBzpzyBwTMXUg==", + "license": "MIT" + }, + "node_modules/@polkadot-api/logs-provider": { + "version": "0.0.6", + "resolved": "https://registry.npmjs.org/@polkadot-api/logs-provider/-/logs-provider-0.0.6.tgz", + "integrity": "sha512-4WgHlvy+xee1ADaaVf6+MlK/+jGMtsMgAzvbQOJZnP4PfQuagoTqaeayk8HYKxXGphogLlPbD06tANxcb+nvAg==", + "license": "MIT", + "dependencies": { + "@polkadot-api/json-rpc-provider": "0.0.4" + } + }, + "node_modules/@polkadot-api/metadata-builders": { + "version": "0.10.2", + "resolved": "https://registry.npmjs.org/@polkadot-api/metadata-builders/-/metadata-builders-0.10.2.tgz", + "integrity": "sha512-rtdihBFd25oT9/71Q+EOR9q6E6mCl1pPe/2He/LtlY0TyHiYqO2KpMZNXkoGcw1RHvrV+CAtDFMvK1j3n8aW8w==", + "license": "MIT", + "dependencies": { + "@polkadot-api/substrate-bindings": "0.11.1", + "@polkadot-api/utils": "0.1.2" + } + }, + "node_modules/@polkadot-api/metadata-compatibility": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/@polkadot-api/metadata-compatibility/-/metadata-compatibility-0.2.0.tgz", + "integrity": "sha512-ZvHj4KDQy/JFqV51UN6Gk5xnG0qt/BUS4kjYosLWT9y6p5bHg/4ge7QF5lMloInQqV3Rul9NQo4cKUz3SlSQMQ==", + "license": "MIT", + "dependencies": { + "@polkadot-api/metadata-builders": "0.10.2", + "@polkadot-api/substrate-bindings": "0.11.1" + } + }, + "node_modules/@polkadot-api/observable-client": { + "version": "0.8.6", + "resolved": "https://registry.npmjs.org/@polkadot-api/observable-client/-/observable-client-0.8.6.tgz", + "integrity": "sha512-ci5HC8TYjGxoTG/QM+LLuGrfIsn+dtR7BBQz483c/ML8K/Hxl9v+evgZzPi9xNMwZ25mytn9lhA5dovYSEauSA==", + "license": "MIT", + "dependencies": { + "@polkadot-api/metadata-builders": "0.10.2", + "@polkadot-api/substrate-bindings": "0.11.1", + "@polkadot-api/utils": "0.1.2" + }, + "peerDependencies": { + "@polkadot-api/substrate-client": "0.3.0", + "rxjs": ">=7.8.0" + } + }, + "node_modules/@polkadot-api/pjs-signer": { + "version": "0.6.5", + "resolved": "https://registry.npmjs.org/@polkadot-api/pjs-signer/-/pjs-signer-0.6.5.tgz", + "integrity": "sha512-RQJtvuX8jNR77h9PFTNQPjC4ii0g0uGrfyu5cbTujojg2QboU/6ny26Ty45rzkSOL0GaBLsS7Uf+/7Vf9hCxig==", + "license": "MIT", + "dependencies": { + "@polkadot-api/metadata-builders": "0.10.2", + "@polkadot-api/polkadot-signer": "0.1.6", 
+ "@polkadot-api/signers-common": "0.1.6", + "@polkadot-api/substrate-bindings": "0.11.1", + "@polkadot-api/utils": "0.1.2" + } + }, + "node_modules/@polkadot-api/polkadot-sdk-compat": { + "version": "2.3.2", + "resolved": "https://registry.npmjs.org/@polkadot-api/polkadot-sdk-compat/-/polkadot-sdk-compat-2.3.2.tgz", + "integrity": "sha512-rLCveP3a6Xd0r218yRqVY34lJ8bXVmE12cArbU4JFp9p8e8Jbb6xdqOdu7bQtjlZUsahhcmfIHYQSXKziST7PA==", + "license": "MIT", + "dependencies": { + "@polkadot-api/json-rpc-provider": "0.0.4" + } + }, + "node_modules/@polkadot-api/polkadot-signer": { + "version": "0.1.6", + "resolved": "https://registry.npmjs.org/@polkadot-api/polkadot-signer/-/polkadot-signer-0.1.6.tgz", + "integrity": "sha512-X7ghAa4r7doETtjAPTb50IpfGtrBmy3BJM5WCfNKa1saK04VFY9w+vDn+hwEcM4p0PcDHt66Ts74hzvHq54d9A==", + "license": "MIT" + }, + "node_modules/@polkadot-api/signer": { + "version": "0.1.15", + "resolved": "https://registry.npmjs.org/@polkadot-api/signer/-/signer-0.1.15.tgz", + "integrity": "sha512-FUFlHrICB4dGlFa6FeFju/ySr8kTAkhTE/aSmfSxW0rl/cTeDO2fbUS9WmIl8wLB0jsI14I2r5J/p13FvIe1BA==", + "license": "MIT", + "dependencies": { + "@noble/hashes": "^1.7.1", + "@polkadot-api/polkadot-signer": "0.1.6", + "@polkadot-api/signers-common": "0.1.6", + "@polkadot-api/substrate-bindings": "0.11.1", + "@polkadot-api/utils": "0.1.2" + } + }, + "node_modules/@polkadot-api/signers-common": { + "version": "0.1.6", + "resolved": "https://registry.npmjs.org/@polkadot-api/signers-common/-/signers-common-0.1.6.tgz", + "integrity": "sha512-OEzqpu/AlZIHbvpvwQJ7dhoRIRTXI2D7wYEoT5j0COpAvt3A1L53smECb3xWzkzlb82gINuqpUW5dfhhJ5tQFQ==", + "license": "MIT", + "dependencies": { + "@polkadot-api/metadata-builders": "0.10.2", + "@polkadot-api/polkadot-signer": "0.1.6", + "@polkadot-api/substrate-bindings": "0.11.1", + "@polkadot-api/utils": "0.1.2" + } + }, + "node_modules/@polkadot-api/sm-provider": { + "version": "0.1.7", + "resolved": "https://registry.npmjs.org/@polkadot-api/sm-provider/-/sm-provider-0.1.7.tgz", + "integrity": "sha512-BhNKVeIFZdawpPVadXszLl8IP4EDjcLHe/GchfRRFkvoNFuwS2nNv/npYIqCviXV+dd2R8VnEELxwScsf380Og==", + "license": "MIT", + "dependencies": { + "@polkadot-api/json-rpc-provider": "0.0.4", + "@polkadot-api/json-rpc-provider-proxy": "0.2.4" + }, + "peerDependencies": { + "@polkadot-api/smoldot": ">=0.3" + } + }, + "node_modules/@polkadot-api/smoldot": { + "version": "0.3.8", + "resolved": "https://registry.npmjs.org/@polkadot-api/smoldot/-/smoldot-0.3.8.tgz", + "integrity": "sha512-dbJSMRFtELDW+rZIWRwKE/K8oy7+gYaGl+DvaOjARoBW2n80rJ7RAMOCCu+b5h2zgl3elftFBwMNAuAWgHT/Zg==", + "license": "MIT", + "dependencies": { + "@types/node": "^22.9.0", + "smoldot": "2.0.34" + } + }, + "node_modules/@polkadot-api/smoldot/node_modules/smoldot": { + "version": "2.0.34", + "resolved": "https://registry.npmjs.org/smoldot/-/smoldot-2.0.34.tgz", + "integrity": "sha512-mw9tCbGEhEp0koMqLL0jBEixVY1MIN/xI3pE6ZY1TuOPU+LnYy8FloODVyzkvzQPaBYrETXJdRlmA/+k6g3gow==", + "license": "GPL-3.0-or-later WITH Classpath-exception-2.0", + "dependencies": { + "ws": "^8.8.1" + } + }, + "node_modules/@polkadot-api/substrate-bindings": { + "version": "0.11.1", + "resolved": "https://registry.npmjs.org/@polkadot-api/substrate-bindings/-/substrate-bindings-0.11.1.tgz", + "integrity": "sha512-+oqAZB7y18KrP/DqKmU2P3nNmRzjCY7edtW7tyA1g1jPouF7HhRr/Q13lJseDX9sdE2FZGrKZtivzsw8XeXBng==", + "license": "MIT", + "dependencies": { + "@noble/hashes": "^1.7.1", + "@polkadot-api/utils": "0.1.2", + "@scure/base": "^1.2.4", + "scale-ts": "^1.6.1" + } + }, + 
"node_modules/@polkadot-api/substrate-client": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/@polkadot-api/substrate-client/-/substrate-client-0.3.0.tgz", + "integrity": "sha512-0hEvQLKH2zhaFzE8DPkWehvJilec8u2O2wbIEUStm0OJ8jIFtJ40MFjXQfB01dXBWUz1KaVBqS6xd3sZA90Dpw==", + "license": "MIT", + "dependencies": { + "@polkadot-api/json-rpc-provider": "0.0.4", + "@polkadot-api/utils": "0.1.2" + } + }, + "node_modules/@polkadot-api/utils": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/@polkadot-api/utils/-/utils-0.1.2.tgz", + "integrity": "sha512-yhs5k2a8N1SBJcz7EthZoazzLQUkZxbf+0271Xzu42C5AEM9K9uFLbsB+ojzHEM72O5X8lPtSwGKNmS7WQyDyg==", + "license": "MIT" + }, + "node_modules/@polkadot-api/wasm-executor": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/@polkadot-api/wasm-executor/-/wasm-executor-0.1.2.tgz", + "integrity": "sha512-a5wGenltB3EFPdf72u8ewi6HsUg2qubUAf3ekJprZf24lTK3+w8a/GUF/y6r08LJF35MALZ32SAtLqtVTIOGnQ==", + "license": "MIT" + }, + "node_modules/@polkadot-api/ws-provider": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/@polkadot-api/ws-provider/-/ws-provider-0.4.0.tgz", + "integrity": "sha512-ZurjUHHAlQ1Ux8HiZz7mtkg1qjq6LmqxcHljcZxne0U7foCZrXdWHsohwlV8kUQUir5kXuDsNvdZN/MFCUMaVw==", + "license": "MIT", + "dependencies": { + "@polkadot-api/json-rpc-provider": "0.0.4", + "@polkadot-api/json-rpc-provider-proxy": "0.2.4", + "ws": "^8.18.1" + } + }, + "node_modules/@polkadot-labs/hdkd": { + "version": "0.0.10", + "resolved": "https://registry.npmjs.org/@polkadot-labs/hdkd/-/hdkd-0.0.10.tgz", + "integrity": "sha512-jD8l+Ls/kZjvZja4T2Y0G6Be3rfGn0qNs3hvcNeV2CmOMtI7yRkkWPXI7WiJ8AyEoBwBuZt0rm6yzGla6o2HXQ==", + "license": "MIT", + "dependencies": { + "@polkadot-labs/hdkd-helpers": "0.0.10" + } + }, + "node_modules/@polkadot-labs/hdkd-helpers": { + "version": "0.0.11", + "resolved": "https://registry.npmjs.org/@polkadot-labs/hdkd-helpers/-/hdkd-helpers-0.0.11.tgz", + "integrity": "sha512-qPlWqC3NNV/2NYc5GEy+Ovi4UBAgkMGvMfyiYuj2BQN4lW59Q1T9coNx0Yp6XzsnJ1ddaF9PWaUtxj3LdM0IDw==", + "license": "MIT", + "dependencies": { + "@noble/curves": "^1.8.1", + "@noble/hashes": "^1.7.1", + "@scure/base": "^1.2.4", + "micro-sr25519": "^0.1.0", + "scale-ts": "^1.6.1" + } + }, + "node_modules/@polkadot-labs/hdkd/node_modules/@polkadot-labs/hdkd-helpers": { + "version": "0.0.10", + "resolved": "https://registry.npmjs.org/@polkadot-labs/hdkd-helpers/-/hdkd-helpers-0.0.10.tgz", + "integrity": "sha512-wBKenhN7TjNiMXxBvQWzFf+su8xTaRGqyOKAlAfpyY9oWTOt3G05yMvDHEZ4g/NRLoE4P3fQYQ0bdcMKl7KkDw==", + "license": "MIT", + "dependencies": { + "@noble/curves": "^1.7.0", + "@noble/hashes": "^1.6.1", + "@scure/base": "^1.2.1", + "micro-sr25519": "^0.1.0", + "scale-ts": "^1.6.1" + } + }, + "node_modules/@polkadot/api": { + "version": "15.1.1", + "resolved": "https://registry.npmjs.org/@polkadot/api/-/api-15.1.1.tgz", + "integrity": "sha512-n3QeQ1CXlzjqyh2eFbEQPcnkXO3J4QYNTIj0Lnz/XFUpzKimHPDA2iUfaXuy5dXjnzS21jFANGSUFoZ+XKi/8g==", + "license": "Apache-2.0", + "dependencies": { + "@polkadot/api-augment": "15.1.1", + "@polkadot/api-base": "15.1.1", + "@polkadot/api-derive": "15.1.1", + "@polkadot/keyring": "^13.2.3", + "@polkadot/rpc-augment": "15.1.1", + "@polkadot/rpc-core": "15.1.1", + "@polkadot/rpc-provider": "15.1.1", + "@polkadot/types": "15.1.1", + "@polkadot/types-augment": "15.1.1", + "@polkadot/types-codec": "15.1.1", + "@polkadot/types-create": "15.1.1", + "@polkadot/types-known": "15.1.1", + "@polkadot/util": "^13.2.3", + "@polkadot/util-crypto": 
"^13.2.3", + "eventemitter3": "^5.0.1", + "rxjs": "^7.8.1", + "tslib": "^2.8.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@polkadot/api-augment": { + "version": "15.1.1", + "resolved": "https://registry.npmjs.org/@polkadot/api-augment/-/api-augment-15.1.1.tgz", + "integrity": "sha512-tYASON7vVLz7FGcXVX9dWSd/9pR6FckayEkc08Z6RyjH7HfjtCZ3/Dz7MlGRNql4SnPi4+xpjSD6rwrZcETU1g==", + "license": "Apache-2.0", + "dependencies": { + "@polkadot/api-base": "15.1.1", + "@polkadot/rpc-augment": "15.1.1", + "@polkadot/types": "15.1.1", + "@polkadot/types-augment": "15.1.1", + "@polkadot/types-codec": "15.1.1", + "@polkadot/util": "^13.2.3", + "tslib": "^2.8.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@polkadot/api-base": { + "version": "15.1.1", + "resolved": "https://registry.npmjs.org/@polkadot/api-base/-/api-base-15.1.1.tgz", + "integrity": "sha512-OXLZ7/k2RXLIA8hKA8oyii6o8MuGlqujIDcLVaMdtWnQsBg26h8pv/mujT2YSz2OguLxrfdvD+lUGtwZC8kw4A==", + "license": "Apache-2.0", + "dependencies": { + "@polkadot/rpc-core": "15.1.1", + "@polkadot/types": "15.1.1", + "@polkadot/util": "^13.2.3", + "rxjs": "^7.8.1", + "tslib": "^2.8.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@polkadot/api-derive": { + "version": "15.1.1", + "resolved": "https://registry.npmjs.org/@polkadot/api-derive/-/api-derive-15.1.1.tgz", + "integrity": "sha512-UPcKr9FplfYKPaP7FYEF917Sm1rKnQFX4AzQJn3f8ySp7DDf6EYiHrNICtGifPEAoANTSW+YHlSchhtnvfSIhw==", + "license": "Apache-2.0", + "dependencies": { + "@polkadot/api": "15.1.1", + "@polkadot/api-augment": "15.1.1", + "@polkadot/api-base": "15.1.1", + "@polkadot/rpc-core": "15.1.1", + "@polkadot/types": "15.1.1", + "@polkadot/types-codec": "15.1.1", + "@polkadot/util": "^13.2.3", + "@polkadot/util-crypto": "^13.2.3", + "rxjs": "^7.8.1", + "tslib": "^2.8.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@polkadot/keyring": { + "version": "13.4.3", + "resolved": "https://registry.npmjs.org/@polkadot/keyring/-/keyring-13.4.3.tgz", + "integrity": "sha512-2ePNcvBTznDN2luKbZM5fdxgAnj7V8m276qSTgrHlqKVvg9FsQpRCR6CAU+AjhnHzpe7uiZO+UH+jlXWefI3AA==", + "license": "Apache-2.0", + "dependencies": { + "@polkadot/util": "13.4.3", + "@polkadot/util-crypto": "13.4.3", + "tslib": "^2.8.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@polkadot/util": "13.4.3", + "@polkadot/util-crypto": "13.4.3" + } + }, + "node_modules/@polkadot/networks": { + "version": "13.4.3", + "resolved": "https://registry.npmjs.org/@polkadot/networks/-/networks-13.4.3.tgz", + "integrity": "sha512-Z+YZkltBt//CtkVH8ZYJ1z66qYxdI0yPamzkzZAqw6gj3gjgSxKtxB4baA/rcAw05QTvN2R3dLkkmKr2mnHovQ==", + "license": "Apache-2.0", + "dependencies": { + "@polkadot/util": "13.4.3", + "@substrate/ss58-registry": "^1.51.0", + "tslib": "^2.8.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@polkadot/rpc-augment": { + "version": "15.1.1", + "resolved": "https://registry.npmjs.org/@polkadot/rpc-augment/-/rpc-augment-15.1.1.tgz", + "integrity": "sha512-s6i4nTy7/1Q5svIMT4TR55GLRv9asG7xbJcntHEsQ2nDs8zZV/mvPWfEUxgup0xVO8sDgyrf6KTTVRKJjySjUg==", + "license": "Apache-2.0", + "dependencies": { + "@polkadot/rpc-core": "15.1.1", + "@polkadot/types": "15.1.1", + "@polkadot/types-codec": "15.1.1", + "@polkadot/util": "^13.2.3", + "tslib": "^2.8.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@polkadot/rpc-core": { + "version": "15.1.1", + "resolved": "https://registry.npmjs.org/@polkadot/rpc-core/-/rpc-core-15.1.1.tgz", + "integrity": 
"sha512-KErbVgPChps7NsxcGch5JCArZHNqs81fDEzs+XoHnD05nzuxcO38v4Yu+M04lHLax2m8ky8K6o3gurBglJENlA==", + "license": "Apache-2.0", + "dependencies": { + "@polkadot/rpc-augment": "15.1.1", + "@polkadot/rpc-provider": "15.1.1", + "@polkadot/types": "15.1.1", + "@polkadot/util": "^13.2.3", + "rxjs": "^7.8.1", + "tslib": "^2.8.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@polkadot/rpc-provider": { + "version": "15.1.1", + "resolved": "https://registry.npmjs.org/@polkadot/rpc-provider/-/rpc-provider-15.1.1.tgz", + "integrity": "sha512-9OWV1dyX+vmAbKkhMU8J7Q0sCaovPrkwZqo2ejmEpZ/Lr12Hw5JAk4gdvB869QEVP7zj0gH3HuYVajmsxesYKg==", + "license": "Apache-2.0", + "dependencies": { + "@polkadot/keyring": "^13.2.3", + "@polkadot/types": "15.1.1", + "@polkadot/types-support": "15.1.1", + "@polkadot/util": "^13.2.3", + "@polkadot/util-crypto": "^13.2.3", + "@polkadot/x-fetch": "^13.2.3", + "@polkadot/x-global": "^13.2.3", + "@polkadot/x-ws": "^13.2.3", + "eventemitter3": "^5.0.1", + "mock-socket": "^9.3.1", + "nock": "^13.5.5", + "tslib": "^2.8.0" + }, + "engines": { + "node": ">=18" + }, + "optionalDependencies": { + "@substrate/connect": "0.8.11" + } + }, + "node_modules/@polkadot/types": { + "version": "15.1.1", + "resolved": "https://registry.npmjs.org/@polkadot/types/-/types-15.1.1.tgz", + "integrity": "sha512-n6lg/quhLp3Zmt/6gHAg2uoSmMmXk3NR19I7qCyeDJ30pP1UhOjtmuWOQDl6SwSEwuHtudLp3p2nCJsymXjgsw==", + "license": "Apache-2.0", + "dependencies": { + "@polkadot/keyring": "^13.2.3", + "@polkadot/types-augment": "15.1.1", + "@polkadot/types-codec": "15.1.1", + "@polkadot/types-create": "15.1.1", + "@polkadot/util": "^13.2.3", + "@polkadot/util-crypto": "^13.2.3", + "rxjs": "^7.8.1", + "tslib": "^2.8.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@polkadot/types-augment": { + "version": "15.1.1", + "resolved": "https://registry.npmjs.org/@polkadot/types-augment/-/types-augment-15.1.1.tgz", + "integrity": "sha512-6v/FsN/JYCupyGYW+MbS0iOCiWvf6PXJ5+m8ORYYYDPFgQqaQPxKMKWJpnO0s9cCR33QcyNYhErPGuZ62UMJjw==", + "license": "Apache-2.0", + "dependencies": { + "@polkadot/types": "15.1.1", + "@polkadot/types-codec": "15.1.1", + "@polkadot/util": "^13.2.3", + "tslib": "^2.8.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@polkadot/types-codec": { + "version": "15.1.1", + "resolved": "https://registry.npmjs.org/@polkadot/types-codec/-/types-codec-15.1.1.tgz", + "integrity": "sha512-cm99CFvDf4UXmw7DeMkRqa/hf7wEgjJZoZZW/B12Js0ObwRmSXMk/gDbyiT6hqPnQ81sU726E72p39DolaEatQ==", + "license": "Apache-2.0", + "dependencies": { + "@polkadot/util": "^13.2.3", + "@polkadot/x-bigint": "^13.2.3", + "tslib": "^2.8.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@polkadot/types-create": { + "version": "15.1.1", + "resolved": "https://registry.npmjs.org/@polkadot/types-create/-/types-create-15.1.1.tgz", + "integrity": "sha512-AOgz+UsUqsGSENrc+p/dHyXH2TC9qVtUTAxlqaHfOnwqjMWfEqc78mc5a1mk0a+RqxmIHw8nQNSdBdhv+UdtyQ==", + "license": "Apache-2.0", + "dependencies": { + "@polkadot/types-codec": "15.1.1", + "@polkadot/util": "^13.2.3", + "tslib": "^2.8.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@polkadot/types-known": { + "version": "15.1.1", + "resolved": "https://registry.npmjs.org/@polkadot/types-known/-/types-known-15.1.1.tgz", + "integrity": "sha512-L934pYxXdHB3GHlVu57ihO6llhxuggSuQZuJ9kHunG0I6tezXLIgAhwaPgACMVbmBYlkJPqm4Nr6pC3kpIsGow==", + "license": "Apache-2.0", + "dependencies": { + "@polkadot/networks": "^13.2.3", + "@polkadot/types": "15.1.1", + 
"@polkadot/types-codec": "15.1.1", + "@polkadot/types-create": "15.1.1", + "@polkadot/util": "^13.2.3", + "tslib": "^2.8.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@polkadot/types-support": { + "version": "15.1.1", + "resolved": "https://registry.npmjs.org/@polkadot/types-support/-/types-support-15.1.1.tgz", + "integrity": "sha512-uyn5N7XERHosVq0+aCpEwYnkUroOr7OX8B8/00UkgmfVOXskp/cukEVcGlmI/YGAS+9+V2BZN2GBX7Lz0eeKmw==", + "license": "Apache-2.0", + "dependencies": { + "@polkadot/util": "^13.2.3", + "tslib": "^2.8.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@polkadot/util": { + "version": "13.4.3", + "resolved": "https://registry.npmjs.org/@polkadot/util/-/util-13.4.3.tgz", + "integrity": "sha512-6v2zvg8l7W22XvjYf7qv9tPQdYl2E6aXY94M4TZKsXZxmlS5BoG+A9Aq0+Gw8zBUjupjEmUkA6Y//msO8Zisug==", + "license": "Apache-2.0", + "dependencies": { + "@polkadot/x-bigint": "13.4.3", + "@polkadot/x-global": "13.4.3", + "@polkadot/x-textdecoder": "13.4.3", + "@polkadot/x-textencoder": "13.4.3", + "@types/bn.js": "^5.1.6", + "bn.js": "^5.2.1", + "tslib": "^2.8.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@polkadot/util-crypto": { + "version": "13.4.3", + "resolved": "https://registry.npmjs.org/@polkadot/util-crypto/-/util-crypto-13.4.3.tgz", + "integrity": "sha512-Ml0mjhKVetMrRCIosmVNMa6lbFPa3fSAeOggf34NsDIIQOKt9FL644iGz1ZSMOnBwN9qk2qHYmcFMTDXX2yKVQ==", + "license": "Apache-2.0", + "dependencies": { + "@noble/curves": "^1.3.0", + "@noble/hashes": "^1.3.3", + "@polkadot/networks": "13.4.3", + "@polkadot/util": "13.4.3", + "@polkadot/wasm-crypto": "^7.4.1", + "@polkadot/wasm-util": "^7.4.1", + "@polkadot/x-bigint": "13.4.3", + "@polkadot/x-randomvalues": "13.4.3", + "@scure/base": "^1.1.7", + "tslib": "^2.8.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@polkadot/util": "13.4.3" + } + }, + "node_modules/@polkadot/wasm-bridge": { + "version": "7.4.1", + "resolved": "https://registry.npmjs.org/@polkadot/wasm-bridge/-/wasm-bridge-7.4.1.tgz", + "integrity": "sha512-tdkJaV453tezBxhF39r4oeG0A39sPKGDJmN81LYLf+Fihb7astzwju+u75BRmDrHZjZIv00un3razJEWCxze6g==", + "license": "Apache-2.0", + "dependencies": { + "@polkadot/wasm-util": "7.4.1", + "tslib": "^2.7.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@polkadot/util": "*", + "@polkadot/x-randomvalues": "*" + } + }, + "node_modules/@polkadot/wasm-crypto": { + "version": "7.4.1", + "resolved": "https://registry.npmjs.org/@polkadot/wasm-crypto/-/wasm-crypto-7.4.1.tgz", + "integrity": "sha512-kHN/kF7hYxm1y0WeFLWeWir6oTzvcFmR4N8fJJokR+ajYbdmrafPN+6iLgQVbhZnDdxyv9jWDuRRsDnBx8tPMQ==", + "license": "Apache-2.0", + "dependencies": { + "@polkadot/wasm-bridge": "7.4.1", + "@polkadot/wasm-crypto-asmjs": "7.4.1", + "@polkadot/wasm-crypto-init": "7.4.1", + "@polkadot/wasm-crypto-wasm": "7.4.1", + "@polkadot/wasm-util": "7.4.1", + "tslib": "^2.7.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@polkadot/util": "*", + "@polkadot/x-randomvalues": "*" + } + }, + "node_modules/@polkadot/wasm-crypto-asmjs": { + "version": "7.4.1", + "resolved": "https://registry.npmjs.org/@polkadot/wasm-crypto-asmjs/-/wasm-crypto-asmjs-7.4.1.tgz", + "integrity": "sha512-pwU8QXhUW7IberyHJIQr37IhbB6DPkCG5FhozCiNTq4vFBsFPjm9q8aZh7oX1QHQaiAZa2m2/VjIVE+FHGbvHQ==", + "license": "Apache-2.0", + "dependencies": { + "tslib": "^2.7.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@polkadot/util": "*" + } + }, + "node_modules/@polkadot/wasm-crypto-init": { + 
"version": "7.4.1", + "resolved": "https://registry.npmjs.org/@polkadot/wasm-crypto-init/-/wasm-crypto-init-7.4.1.tgz", + "integrity": "sha512-AVka33+f7MvXEEIGq5U0dhaA2SaXMXnxVCQyhJTaCnJ5bRDj0Xlm3ijwDEQUiaDql7EikbkkRtmlvs95eSUWYQ==", + "license": "Apache-2.0", + "dependencies": { + "@polkadot/wasm-bridge": "7.4.1", + "@polkadot/wasm-crypto-asmjs": "7.4.1", + "@polkadot/wasm-crypto-wasm": "7.4.1", + "@polkadot/wasm-util": "7.4.1", + "tslib": "^2.7.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@polkadot/util": "*", + "@polkadot/x-randomvalues": "*" + } + }, + "node_modules/@polkadot/wasm-crypto-wasm": { + "version": "7.4.1", + "resolved": "https://registry.npmjs.org/@polkadot/wasm-crypto-wasm/-/wasm-crypto-wasm-7.4.1.tgz", + "integrity": "sha512-PE1OAoupFR0ZOV2O8tr7D1FEUAwaggzxtfs3Aa5gr+yxlSOaWUKeqsOYe1KdrcjmZVV3iINEAXxgrbzCmiuONg==", + "license": "Apache-2.0", + "dependencies": { + "@polkadot/wasm-util": "7.4.1", + "tslib": "^2.7.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@polkadot/util": "*" + } + }, + "node_modules/@polkadot/wasm-util": { + "version": "7.4.1", + "resolved": "https://registry.npmjs.org/@polkadot/wasm-util/-/wasm-util-7.4.1.tgz", + "integrity": "sha512-RAcxNFf3zzpkr+LX/ItAsvj+QyM56TomJ0xjUMo4wKkHjwsxkz4dWJtx5knIgQz/OthqSDMR59VNEycQeNuXzA==", + "license": "Apache-2.0", + "dependencies": { + "tslib": "^2.7.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@polkadot/util": "*" + } + }, + "node_modules/@polkadot/x-bigint": { + "version": "13.4.3", + "resolved": "https://registry.npmjs.org/@polkadot/x-bigint/-/x-bigint-13.4.3.tgz", + "integrity": "sha512-8NbjF5Q+5lflhvDFve58wULjCVcvXa932LKFtI5zL2gx5VDhMgyfkNcYRjHB18Ecl21963JuGzvGVTZNkh/i6g==", + "license": "Apache-2.0", + "dependencies": { + "@polkadot/x-global": "13.4.3", + "tslib": "^2.8.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@polkadot/x-fetch": { + "version": "13.4.3", + "resolved": "https://registry.npmjs.org/@polkadot/x-fetch/-/x-fetch-13.4.3.tgz", + "integrity": "sha512-EwhcwROqWa7mvNTbLVNH71Hbyp5PW5j9lV2UpII5MZzRO95eYwV4oP/xgtTxC+60nC8lrvzAw0JxEHrmNzmtlg==", + "license": "Apache-2.0", + "dependencies": { + "@polkadot/x-global": "13.4.3", + "node-fetch": "^3.3.2", + "tslib": "^2.8.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@polkadot/x-global": { + "version": "13.4.3", + "resolved": "https://registry.npmjs.org/@polkadot/x-global/-/x-global-13.4.3.tgz", + "integrity": "sha512-6c98kxZdoGRct3ua9Dz6/qz8wb3XFRUkaY+4+RzIgehKMPhu19pGWTrzmbJSyY9FtIpThuWKuDaBEvd5KgSxjA==", + "license": "Apache-2.0", + "dependencies": { + "tslib": "^2.8.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@polkadot/x-randomvalues": { + "version": "13.4.3", + "resolved": "https://registry.npmjs.org/@polkadot/x-randomvalues/-/x-randomvalues-13.4.3.tgz", + "integrity": "sha512-pskXP/S2jROZ6aASExsUFlNp7GbJvQikKogvyvMMCzNIbUYLxpLuquLRa3MOORx2c0SNsENg90cx/zHT+IjPRQ==", + "license": "Apache-2.0", + "dependencies": { + "@polkadot/x-global": "13.4.3", + "tslib": "^2.8.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@polkadot/util": "13.4.3", + "@polkadot/wasm-util": "*" + } + }, + "node_modules/@polkadot/x-textdecoder": { + "version": "13.4.3", + "resolved": "https://registry.npmjs.org/@polkadot/x-textdecoder/-/x-textdecoder-13.4.3.tgz", + "integrity": "sha512-k7Wg6csAPxfNtpBt3k5yUuPHYmRl/nl7H2OMr40upMjbZXbQ1RJW9Z3GBkLmQczG7NwwfAXHwQE9FYOMUtbuRQ==", + "license": "Apache-2.0", + "dependencies": { + 
"@polkadot/x-global": "13.4.3", + "tslib": "^2.8.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@polkadot/x-textencoder": { + "version": "13.4.3", + "resolved": "https://registry.npmjs.org/@polkadot/x-textencoder/-/x-textencoder-13.4.3.tgz", + "integrity": "sha512-byl2LbN1rnEXKmnsCzEDaIjSIHAr+1ciSe2yj3M0K+oWEEcaFZEovJaf/uoyzkcjn+/l8rDv3nget6mPuQ/DSw==", + "license": "Apache-2.0", + "dependencies": { + "@polkadot/x-global": "13.4.3", + "tslib": "^2.8.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@polkadot/x-ws": { + "version": "13.4.3", + "resolved": "https://registry.npmjs.org/@polkadot/x-ws/-/x-ws-13.4.3.tgz", + "integrity": "sha512-GS0I6MYLD/xNAAjODZi/pbG7Ba0e/5sbvDIrT01iKH3SPGN+PZoyAsc04t2IOXA6QmPa1OBHnaU3N4K8gGmJ+w==", + "license": "Apache-2.0", + "dependencies": { + "@polkadot/x-global": "13.4.3", + "tslib": "^2.8.0", + "ws": "^8.18.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@rollup/rollup-linux-x64-gnu": { + "version": "4.34.8", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.34.8.tgz", + "integrity": "sha512-8y7ED8gjxITUltTUEJLQdgpbPh1sUQ0kMTmufRF/Ns5tI9TNMNlhWtmPKKHCU0SilX+3MJkZ0zERYYGIVBYHIA==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-musl": { + "version": "4.34.8", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.34.8.tgz", + "integrity": "sha512-SCXcP0ZpGFIe7Ge+McxY5zKxiEI5ra+GT3QRxL0pMMtxPfpyLAKleZODi1zdRHkz5/BhueUrYtYVgubqe9JBNQ==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rx-state/core": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/@rx-state/core/-/core-0.1.4.tgz", + "integrity": "sha512-Z+3hjU2xh1HisLxt+W5hlYX/eGSDaXXP+ns82gq/PLZpkXLu0uwcNUh9RLY3Clq4zT+hSsA3vcpIGt6+UAb8rQ==", + "license": "MIT", + "peerDependencies": { + "rxjs": ">=7" + } + }, + "node_modules/@scure/base": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/@scure/base/-/base-1.2.4.tgz", + "integrity": "sha512-5Yy9czTO47mqz+/J8GM6GIId4umdCk1wc1q8rKERQulIoc8VP9pzDcghv10Tl2E7R96ZUx/PhND3ESYUQX8NuQ==", + "license": "MIT", + "funding": { + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/@scure/bip32": { + "version": "1.6.2", + "resolved": "https://registry.npmjs.org/@scure/bip32/-/bip32-1.6.2.tgz", + "integrity": "sha512-t96EPDMbtGgtb7onKKqxRLfE5g05k7uHnHRM2xdE6BP/ZmxaLtPek4J4KfVn/90IQNrU1IOAqMgiDtUdtbe3nw==", + "license": "MIT", + "dependencies": { + "@noble/curves": "~1.8.1", + "@noble/hashes": "~1.7.1", + "@scure/base": "~1.2.2" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/@scure/bip39": { + "version": "1.5.4", + "resolved": "https://registry.npmjs.org/@scure/bip39/-/bip39-1.5.4.tgz", + "integrity": "sha512-TFM4ni0vKvCfBpohoh+/lY05i9gRbSwXWngAsF4CABQxoaOHijxuaZ2R6cStDQ5CHtHO9aGJTr4ksVJASRRyMA==", + "license": "MIT", + "dependencies": { + "@noble/hashes": "~1.7.1", + "@scure/base": "~1.2.4" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/@sec-ant/readable-stream": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/@sec-ant/readable-stream/-/readable-stream-0.4.1.tgz", + "integrity": "sha512-831qok9r2t8AlxLko40y2ebgSDhenenCatLVeW/uBtnHPyhHOvG0C7TvfgecV+wHzIm5KUICgzmVpWS+IMEAeg==", + "license": "MIT" + }, + "node_modules/@sindresorhus/merge-streams": { + 
"version": "4.0.0", + "resolved": "https://registry.npmjs.org/@sindresorhus/merge-streams/-/merge-streams-4.0.0.tgz", + "integrity": "sha512-tlqY9xq5ukxTUZBmoOp+m61cqwQD5pHJtFY3Mn8CA8ps6yghLH/Hw8UPdqg4OLmFW3IFlcXnQNmo/dh8HzXYIQ==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@substrate/connect": { + "version": "0.8.11", + "resolved": "https://registry.npmjs.org/@substrate/connect/-/connect-0.8.11.tgz", + "integrity": "sha512-ofLs1PAO9AtDdPbdyTYj217Pe+lBfTLltdHDs3ds8no0BseoLeAGxpz1mHfi7zB4IxI3YyAiLjH6U8cw4pj4Nw==", + "license": "GPL-3.0-only", + "optional": true, + "dependencies": { + "@substrate/connect-extension-protocol": "^2.0.0", + "@substrate/connect-known-chains": "^1.1.5", + "@substrate/light-client-extension-helpers": "^1.0.0", + "smoldot": "2.0.26" + } + }, + "node_modules/@substrate/connect-extension-protocol": { + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/@substrate/connect-extension-protocol/-/connect-extension-protocol-2.2.2.tgz", + "integrity": "sha512-t66jwrXA0s5Goq82ZtjagLNd7DPGCNjHeehRlE/gcJmJ+G56C0W+2plqOMRicJ8XGR1/YFnUSEqUFiSNbjGrAA==", + "license": "GPL-3.0-only", + "optional": true + }, + "node_modules/@substrate/connect-known-chains": { + "version": "1.9.2", + "resolved": "https://registry.npmjs.org/@substrate/connect-known-chains/-/connect-known-chains-1.9.2.tgz", + "integrity": "sha512-uEmm+rKJQQhhbforvmcg74TsDHKFVBkstjPwblGT1RdHMxUKR7Gq7F8vbkGnr5ce9tMK2Ylil760Z7vtX013hw==", + "license": "GPL-3.0-only", + "optional": true + }, + "node_modules/@substrate/light-client-extension-helpers": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/@substrate/light-client-extension-helpers/-/light-client-extension-helpers-1.0.0.tgz", + "integrity": "sha512-TdKlni1mBBZptOaeVrKnusMg/UBpWUORNDv5fdCaJklP4RJiFOzBCrzC+CyVI5kQzsXBisZ+2pXm+rIjS38kHg==", + "license": "MIT", + "optional": true, + "dependencies": { + "@polkadot-api/json-rpc-provider": "^0.0.1", + "@polkadot-api/json-rpc-provider-proxy": "^0.1.0", + "@polkadot-api/observable-client": "^0.3.0", + "@polkadot-api/substrate-client": "^0.1.2", + "@substrate/connect-extension-protocol": "^2.0.0", + "@substrate/connect-known-chains": "^1.1.5", + "rxjs": "^7.8.1" + }, + "peerDependencies": { + "smoldot": "2.x" + } + }, + "node_modules/@substrate/light-client-extension-helpers/node_modules/@polkadot-api/json-rpc-provider": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/@polkadot-api/json-rpc-provider/-/json-rpc-provider-0.0.1.tgz", + "integrity": "sha512-/SMC/l7foRjpykLTUTacIH05H3mr9ip8b5xxfwXlVezXrNVLp3Cv0GX6uItkKd+ZjzVPf3PFrDF2B2/HLSNESA==", + "license": "MIT", + "optional": true + }, + "node_modules/@substrate/light-client-extension-helpers/node_modules/@polkadot-api/json-rpc-provider-proxy": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/@polkadot-api/json-rpc-provider-proxy/-/json-rpc-provider-proxy-0.1.0.tgz", + "integrity": "sha512-8GSFE5+EF73MCuLQm8tjrbCqlgclcHBSRaswvXziJ0ZW7iw3UEMsKkkKvELayWyBuOPa2T5i1nj6gFOeIsqvrg==", + "license": "MIT", + "optional": true + }, + "node_modules/@substrate/light-client-extension-helpers/node_modules/@polkadot-api/metadata-builders": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/@polkadot-api/metadata-builders/-/metadata-builders-0.3.2.tgz", + "integrity": "sha512-TKpfoT6vTb+513KDzMBTfCb/ORdgRnsS3TDFpOhAhZ08ikvK+hjHMt5plPiAX/OWkm1Wc9I3+K6W0hX5Ab7MVg==", + "license": "MIT", + "optional": true, + 
"dependencies": { + "@polkadot-api/substrate-bindings": "0.6.0", + "@polkadot-api/utils": "0.1.0" + } + }, + "node_modules/@substrate/light-client-extension-helpers/node_modules/@polkadot-api/observable-client": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/@polkadot-api/observable-client/-/observable-client-0.3.2.tgz", + "integrity": "sha512-HGgqWgEutVyOBXoGOPp4+IAq6CNdK/3MfQJmhCJb8YaJiaK4W6aRGrdQuQSTPHfERHCARt9BrOmEvTXAT257Ug==", + "license": "MIT", + "optional": true, + "dependencies": { + "@polkadot-api/metadata-builders": "0.3.2", + "@polkadot-api/substrate-bindings": "0.6.0", + "@polkadot-api/utils": "0.1.0" + }, + "peerDependencies": { + "@polkadot-api/substrate-client": "0.1.4", + "rxjs": ">=7.8.0" + } + }, + "node_modules/@substrate/light-client-extension-helpers/node_modules/@polkadot-api/substrate-bindings": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/@polkadot-api/substrate-bindings/-/substrate-bindings-0.6.0.tgz", + "integrity": "sha512-lGuhE74NA1/PqdN7fKFdE5C1gNYX357j1tWzdlPXI0kQ7h3kN0zfxNOpPUN7dIrPcOFZ6C0tRRVrBylXkI6xPw==", + "license": "MIT", + "optional": true, + "dependencies": { + "@noble/hashes": "^1.3.1", + "@polkadot-api/utils": "0.1.0", + "@scure/base": "^1.1.1", + "scale-ts": "^1.6.0" + } + }, + "node_modules/@substrate/light-client-extension-helpers/node_modules/@polkadot-api/substrate-client": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/@polkadot-api/substrate-client/-/substrate-client-0.1.4.tgz", + "integrity": "sha512-MljrPobN0ZWTpn++da9vOvt+Ex+NlqTlr/XT7zi9sqPtDJiQcYl+d29hFAgpaeTqbeQKZwz3WDE9xcEfLE8c5A==", + "license": "MIT", + "optional": true, + "dependencies": { + "@polkadot-api/json-rpc-provider": "0.0.1", + "@polkadot-api/utils": "0.1.0" + } + }, + "node_modules/@substrate/light-client-extension-helpers/node_modules/@polkadot-api/utils": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/@polkadot-api/utils/-/utils-0.1.0.tgz", + "integrity": "sha512-MXzWZeuGxKizPx2Xf/47wx9sr/uxKw39bVJUptTJdsaQn/TGq+z310mHzf1RCGvC1diHM8f593KrnDgc9oNbJA==", + "license": "MIT", + "optional": true + }, + "node_modules/@substrate/ss58-registry": { + "version": "1.51.0", + "resolved": "https://registry.npmjs.org/@substrate/ss58-registry/-/ss58-registry-1.51.0.tgz", + "integrity": "sha512-TWDurLiPxndFgKjVavCniytBIw+t4ViOi7TYp9h/D0NMmkEc9klFTo+827eyEJ0lELpqO207Ey7uGxUa+BS1jQ==", + "license": "Apache-2.0" + }, + "node_modules/@tsconfig/node10": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/@tsconfig/node10/-/node10-1.0.11.tgz", + "integrity": "sha512-DcRjDCujK/kCk/cUe8Xz8ZSpm8mS3mNNpta+jGCA6USEDfktlNvm1+IuZ9eTcDbNk41BHwpHHeW+N1lKCz4zOw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@tsconfig/node12": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/@tsconfig/node12/-/node12-1.0.11.tgz", + "integrity": "sha512-cqefuRsh12pWyGsIoBKJA9luFu3mRxCA+ORZvA4ktLSzIuCUtWVxGIuXigEwO5/ywWFMZ2QEGKWvkZG1zDMTag==", + "dev": true, + "license": "MIT" + }, + "node_modules/@tsconfig/node14": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@tsconfig/node14/-/node14-1.0.3.tgz", + "integrity": "sha512-ysT8mhdixWK6Hw3i1V2AeRqZ5WfXg1G43mqoYlM2nc6388Fq5jcXyr5mRsqViLx/GJYdoL0bfXD8nmF+Zn/Iow==", + "dev": true, + "license": "MIT" + }, + "node_modules/@tsconfig/node16": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@tsconfig/node16/-/node16-1.0.4.tgz", + "integrity": 
"sha512-vxhUy4J8lyeyinH7Azl1pdd43GJhZH/tP2weN8TntQblOY+A0XbT8DJk1/oCPuOOyg/Ja757rG0CgHcWC8OfMA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/bn.js": { + "version": "5.1.6", + "resolved": "https://registry.npmjs.org/@types/bn.js/-/bn.js-5.1.6.tgz", + "integrity": "sha512-Xh8vSwUeMKeYYrj3cX4lGQgFSF/N03r+tv4AiLl1SucqV+uTQpxRcnM8AkXKHwYP9ZPXOYXRr2KPXpVlIvqh9w==", + "license": "MIT", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/bun": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@types/bun/-/bun-1.2.3.tgz", + "integrity": "sha512-054h79ipETRfjtsCW9qJK8Ipof67Pw9bodFWmkfkaUaRiIQ1dIV2VTlheshlBx3mpKr0KeK8VqnMMCtgN9rQtw==", + "dev": true, + "license": "MIT", + "dependencies": { + "bun-types": "1.2.3" + } + }, + "node_modules/@types/chai": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/@types/chai/-/chai-5.0.1.tgz", + "integrity": "sha512-5T8ajsg3M/FOncpLYW7sdOcD6yf4+722sze/tc4KQV0P8Z2rAr3SAuHCIkYmYpt8VbcQlnz8SxlOlPQYefe4cA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/deep-eql": "*" + } + }, + "node_modules/@types/deep-eql": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/@types/deep-eql/-/deep-eql-4.0.2.tgz", + "integrity": "sha512-c9h9dVVMigMPc4bwTvC5dxqtqJZwQPePsWjPlpSOnojbor6pGqdk541lfA7AqFQr5pB1BRdq0juY9db81BwyFw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/estree": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.6.tgz", + "integrity": "sha512-AYnb1nQyY49te+VRAVgmzfcgjYS91mY5P0TKUDCLEM+gNnA+3T6rWITXRLYCpahpqSQbN5cE+gHpnPyXjHWxcw==", + "license": "MIT" + }, + "node_modules/@types/mocha": { + "version": "10.0.10", + "resolved": "https://registry.npmjs.org/@types/mocha/-/mocha-10.0.10.tgz", + "integrity": "sha512-xPyYSz1cMPnJQhl0CLMH68j3gprKZaTjG3s5Vi+fDgx+uhG9NOXwbVt52eFS8ECyXhyKcjDLCBEqBExKuiZb7Q==", + "license": "MIT" + }, + "node_modules/@types/node": { + "version": "22.14.1", + "resolved": "https://registry.npmjs.org/@types/node/-/node-22.14.1.tgz", + "integrity": "sha512-u0HuPQwe/dHrItgHHpmw3N2fYCR6x4ivMNbPHRkBVP4CvN+kiRrKHWk3i8tXiO/joPwXLMYvF9TTF0eqgHIuOw==", + "license": "MIT", + "dependencies": { + "undici-types": "~6.21.0" + } + }, + "node_modules/@types/normalize-package-data": { + "version": "2.4.4", + "resolved": "https://registry.npmjs.org/@types/normalize-package-data/-/normalize-package-data-2.4.4.tgz", + "integrity": "sha512-37i+OaWTh9qeK4LSHPsyRC7NahnGotNuZvjLSgcPzblpHB3rrCJxAOgI5gCdKm7coonsaX1Of0ILiTcnZjbfxA==", + "license": "MIT" + }, + "node_modules/@types/ws": { + "version": "8.5.14", + "resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.5.14.tgz", + "integrity": "sha512-bd/YFLW+URhBzMXurx7lWByOu+xzU9+kb3RboOteXYDfW+tr+JZa99OyNmPINEGB/ahzKrEuc8rcv4gnpJmxTw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/abitype": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/abitype/-/abitype-1.0.8.tgz", + "integrity": "sha512-ZeiI6h3GnW06uYDLx0etQtX/p8E24UaHHBj57RSjK7YBFe7iuVn07EDpOeP451D06sF27VOz9JJPlIKJmXgkEg==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/wevm" + }, + "peerDependencies": { + "typescript": ">=5.0.4", + "zod": "^3 >=3.22.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + }, + "zod": { + "optional": true + } + } + }, + "node_modules/acorn": { + "version": "8.14.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.14.0.tgz", + "integrity": 
"sha512-cl669nCJTZBsL97OF4kUQm5g5hC2uihk0NxY3WENAC0TYdILVkAyHymAntgxGkl7K+t0cXIrH5siy5S4XkFycA==", + "dev": true, + "license": "MIT", + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-walk": { + "version": "8.3.4", + "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.3.4.tgz", + "integrity": "sha512-ueEepnujpqee2o5aIYnvHU6C0A42MNdsIDeqy5BydrkuC5R1ZuUFnm27EeFJGoEHJQgn3uleRvmTXaJgfXbt4g==", + "dev": true, + "license": "MIT", + "dependencies": { + "acorn": "^8.11.0" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/aes-js": { + "version": "4.0.0-beta.5", + "resolved": "https://registry.npmjs.org/aes-js/-/aes-js-4.0.0-beta.5.tgz", + "integrity": "sha512-G965FqalsNyrPqgEGON7nIx1e/OVENSgiEIzyC63haUMuvNnwIgIjMs52hlTCKhkBny7A2ORNlfY9Zu+jmGk1Q==", + "license": "MIT" + }, + "node_modules/ansi-colors": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/ansi-colors/-/ansi-colors-4.1.3.tgz", + "integrity": "sha512-/6w/C21Pm1A7aZitlI5Ni/2J6FFQN8i1Cvz3kHABAAbw93v/NlvKdVOqz7CCWz/3iv/JplRSEEZ83XION15ovw==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/any-promise": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/any-promise/-/any-promise-1.3.0.tgz", + "integrity": "sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A==", + "license": "MIT" + }, + "node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "license": "ISC", + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/arg": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/arg/-/arg-4.1.3.tgz", + "integrity": "sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA==", + "dev": true, + "license": "MIT" + }, + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "license": "Python-2.0" + }, + "node_modules/assert": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/assert/-/assert-2.1.0.tgz", + "integrity": "sha512-eLHpSK/Y4nhMJ07gDaAzoX/XAKS8PSaojml3M0DM4JpV1LAi5JOJ/p6H/XWrl8L+DzVEvVCW1z3vWAaB9oTsQw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "is-nan": "^1.3.2", + "object-is": "^1.1.5", + "object.assign": "^4.1.4", + "util": "^0.12.5" + } + }, + "node_modules/assertion-error": { + 
"version": "2.0.1", + "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-2.0.1.tgz", + "integrity": "sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + } + }, + "node_modules/available-typed-arrays": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/available-typed-arrays/-/available-typed-arrays-1.0.7.tgz", + "integrity": "sha512-wvUjBtSGN7+7SjNpq/9M2Tg350UZD3q62IFZLbRAR1bSMlCo1ZaeW+BJ+D090e4hIIZLBcTDWe4Mh4jvUDajzQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "possible-typed-array-names": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "license": "MIT" + }, + "node_modules/binary-extensions": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz", + "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==", + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/bn.js": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-5.2.1.tgz", + "integrity": "sha512-eXRvHzWyYPBuB4NBy0cmYQjGitUrtqwbvlzP3G6VFnNRbsZQIxQ10PbKKHt8gZ/HW/D/747aDl+QkDqg3KQLMQ==", + "license": "MIT" + }, + "node_modules/brace-expansion": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/braces": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "license": "MIT", + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/browser-stdout": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/browser-stdout/-/browser-stdout-1.3.1.tgz", + "integrity": "sha512-qhAVI1+Av2X7qelOfAIYwXONood6XlZE/fXaBSmW/T5SzLAmCgzi+eiWE7fUvbHaeNBQH13UftjpXxsfLkMpgw==", + "license": "ISC" + }, + "node_modules/bun-types": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/bun-types/-/bun-types-1.2.3.tgz", + "integrity": "sha512-P7AeyTseLKAvgaZqQrvp3RqFM3yN9PlcLuSTe7SoJOfZkER73mLdT2vEQi8U64S1YvM/ldcNiQjn0Sn7H9lGgg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/node": "*", + "@types/ws": "~8.5.10" + } + }, + "node_modules/bundle-require": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/bundle-require/-/bundle-require-5.1.0.tgz", + "integrity": "sha512-3WrrOuZiyaaZPWiEt4G3+IffISVC9HYlWueJEBWED4ZH4aIAC2PnkdnuRrR94M+w6yGWn4AglWtJtBI8YqvgoA==", + "license": "MIT", + "dependencies": { + "load-tsconfig": "^0.2.3" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "peerDependencies": { + "esbuild": ">=0.18" + } + }, + "node_modules/cac": { + "version": "6.7.14", + "resolved": 
"https://registry.npmjs.org/cac/-/cac-6.7.14.tgz", + "integrity": "sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/call-bind": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.8.tgz", + "integrity": "sha512-oKlSFMcMwpUg2ednkhQ454wfWiU/ul3CkJe/PEHcTKuiX6RpbehUiFMXu13HalGZxfUwCQzZG747YXBn1im9ww==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.0", + "es-define-property": "^1.0.0", + "get-intrinsic": "^1.2.4", + "set-function-length": "^1.2.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/call-bind-apply-helpers": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", + "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/call-bound": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.3.tgz", + "integrity": "sha512-YTd+6wGlNlPxSuri7Y6X8tY2dmm12UMH66RpKMhiX6rsk5wXXnYgbUcOt8kiS31/AjfoTOvCsE+w8nZQLQnzHA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.1", + "get-intrinsic": "^1.2.6" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/camelcase": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", + "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/chai": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/chai/-/chai-5.2.0.tgz", + "integrity": "sha512-mCuXncKXk5iCLhfhwTc0izo0gtEmpz5CtG2y8GiOINBlMVS6v8TMRc5TaLWKS6692m9+dVVfzgeVxR5UxWHTYw==", + "dev": true, + "license": "MIT", + "dependencies": { + "assertion-error": "^2.0.1", + "check-error": "^2.1.1", + "deep-eql": "^5.0.1", + "loupe": "^3.1.0", + "pathval": "^2.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/chalk": { + "version": "5.4.1", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-5.4.1.tgz", + "integrity": "sha512-zgVZuo2WcZgfUEmsn6eO3kINexW8RAE4maiQ8QNs8CtpPCSyMiYsULR3HQYkm3w8FIA3SberyMJMSldGsW+U3w==", + "license": "MIT", + "engines": { + "node": "^12.17.0 || ^14.13 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/check-error": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/check-error/-/check-error-2.1.1.tgz", + "integrity": "sha512-OAlb+T7V4Op9OwdkjmguYRqncdlx5JiofwOAUkmTF+jNdHwzTaTs4sRAGpzLF3oOz5xAyDGrPgeIDFQmDOTiJw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 16" + } + }, + "node_modules/chokidar": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz", + "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==", + "license": "MIT", + "dependencies": { + "anymatch": "~3.1.2", + "braces": 
"~3.0.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + }, + "engines": { + "node": ">= 8.10.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + "node_modules/cli-cursor": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-5.0.0.tgz", + "integrity": "sha512-aCj4O5wKyszjMmDT4tZj93kxyydN/K5zPWSCe6/0AV/AA1pqe5ZBIw0a2ZfPQV7lL5/yb5HsUreJ6UFAF1tEQw==", + "license": "MIT", + "dependencies": { + "restore-cursor": "^5.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/cli-spinners": { + "version": "2.9.2", + "resolved": "https://registry.npmjs.org/cli-spinners/-/cli-spinners-2.9.2.tgz", + "integrity": "sha512-ywqV+5MmyL4E7ybXgKys4DugZbX0FC6LnwrhjuykIjnK9k8OQacQ7axGKnjDXWNhns0xot3bZI5h55H8yo9cJg==", + "license": "MIT", + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/cliui": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", + "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", + "license": "ISC", + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.1", + "wrap-ansi": "^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "license": "MIT", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "license": "MIT" + }, + "node_modules/commander": { + "version": "13.1.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-13.1.0.tgz", + "integrity": "sha512-/rFeCpNJQbhSZjGVwO9RFV3xPqbnERS8MmIQzCtD/zl6gpJuV/bMLuN92oG3F7d8oDEHHRrujSXNUr8fpjntKw==", + "license": "MIT", + "engines": { + "node": ">=18" + } + }, + "node_modules/consola": { + "version": "3.4.2", + "resolved": "https://registry.npmjs.org/consola/-/consola-3.4.2.tgz", + "integrity": "sha512-5IKcdX0nnYavi6G7TtOhwkYzyjfJlatbjMjuLSfE2kYT5pMDOilZ4OvMhi637CcDICTmz3wARPoyhqyX1Y+XvA==", + "license": "MIT", + "engines": { + "node": "^14.18.0 || >=16.10.0" + } + }, + "node_modules/create-require": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/create-require/-/create-require-1.1.1.tgz", + "integrity": "sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/cross-spawn": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", + "license": "MIT", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/crypto": { + "version": "1.0.1", + "resolved": 
"https://registry.npmjs.org/crypto/-/crypto-1.0.1.tgz", + "integrity": "sha512-VxBKmeNcqQdiUQUW2Tzq0t377b54N2bMtXO/qiLa+6eRRmmC4qT3D4OnTGoT/U6O9aklQ/jTwbOtRMTTY8G0Ig==", + "license": "ISC" + }, + "node_modules/data-uri-to-buffer": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/data-uri-to-buffer/-/data-uri-to-buffer-4.0.1.tgz", + "integrity": "sha512-0R9ikRb668HB7QDxT1vkpuUBtqc53YyAwMwGeUFKRojY/NWKvdZ+9UYtRfGmhqNbRkTSVpMbmyhXipFFv2cb/A==", + "license": "MIT", + "engines": { + "node": ">= 12" + } + }, + "node_modules/debug": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.0.tgz", + "integrity": "sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA==", + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/decamelize": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-4.0.0.tgz", + "integrity": "sha512-9iE1PgSik9HeIIw2JO94IidnE3eBoQrFJ3w7sFuzSX4DpmZ3v5sZpUiV5Swcf6mQEF+Y0ru8Neo+p+nyh2J+hQ==", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/deep-eql": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-5.0.2.tgz", + "integrity": "sha512-h5k/5U50IJJFpzfL6nO9jaaumfjO/f2NjK/oYB2Djzm4p9L+3T9qWpZqZ2hAbLPuuYq9wrU08WQyBTL5GbPk5Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/deepmerge-ts": { + "version": "7.1.5", + "resolved": "https://registry.npmjs.org/deepmerge-ts/-/deepmerge-ts-7.1.5.tgz", + "integrity": "sha512-HOJkrhaYsweh+W+e74Yn7YStZOilkoPb6fycpwNLKzSPtruFs48nYis0zy5yJz1+ktUhHxoRDJ27RQAWLIJVJw==", + "license": "BSD-3-Clause", + "engines": { + "node": ">=16.0.0" + } + }, + "node_modules/define-data-property": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz", + "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "gopd": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/define-properties": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.2.1.tgz", + "integrity": "sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg==", + "dev": true, + "license": "MIT", + "dependencies": { + "define-data-property": "^1.0.1", + "has-property-descriptors": "^1.0.0", + "object-keys": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/detect-indent": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/detect-indent/-/detect-indent-7.0.1.tgz", + "integrity": "sha512-Mc7QhQ8s+cLrnUfU/Ji94vG/r8M26m8f++vyres4ZoojaRDpZ1eSIh/EpzLNwlWuvzSZ3UbDFspjFvTDXe6e/g==", + "license": "MIT", + "engines": { + "node": ">=12.20" + } + }, + "node_modules/diff": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/diff/-/diff-4.0.2.tgz", + "integrity": 
"sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.3.1" + } + }, + "node_modules/dotenv": { + "version": "16.4.7", + "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.4.7.tgz", + "integrity": "sha512-47qPchRCykZC03FhkYAhrvwU4xDBFIj1QPqaarj6mdM/hgUzfPHcpkHJOn3mJAufFeeAxAzeGsr5X0M4k6fLZQ==", + "license": "BSD-2-Clause", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://dotenvx.com" + } + }, + "node_modules/dunder-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", + "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.1", + "es-errors": "^1.3.0", + "gopd": "^1.2.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/eastasianwidth": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", + "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==", + "license": "MIT" + }, + "node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "license": "MIT" + }, + "node_modules/es-define-property": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", + "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-object-atoms": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", + "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/esbuild": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.25.2.tgz", + "integrity": "sha512-16854zccKPnC+toMywC+uKNeYSv+/eXkevRAfwRD/G9Cleq66m8XFIrigkbvauLLlCfDL45Q2cWegSg53gGBnQ==", + "hasInstallScript": true, + "license": "MIT", + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=18" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.25.2", + "@esbuild/android-arm": "0.25.2", + "@esbuild/android-arm64": "0.25.2", + "@esbuild/android-x64": "0.25.2", + "@esbuild/darwin-arm64": "0.25.2", + "@esbuild/darwin-x64": "0.25.2", + "@esbuild/freebsd-arm64": "0.25.2", + "@esbuild/freebsd-x64": "0.25.2", + "@esbuild/linux-arm": "0.25.2", + "@esbuild/linux-arm64": "0.25.2", + "@esbuild/linux-ia32": "0.25.2", + "@esbuild/linux-loong64": "0.25.2", + "@esbuild/linux-mips64el": "0.25.2", + "@esbuild/linux-ppc64": "0.25.2", + "@esbuild/linux-riscv64": "0.25.2", + "@esbuild/linux-s390x": 
"0.25.2", + "@esbuild/linux-x64": "0.25.2", + "@esbuild/netbsd-arm64": "0.25.2", + "@esbuild/netbsd-x64": "0.25.2", + "@esbuild/openbsd-arm64": "0.25.2", + "@esbuild/openbsd-x64": "0.25.2", + "@esbuild/sunos-x64": "0.25.2", + "@esbuild/win32-arm64": "0.25.2", + "@esbuild/win32-ia32": "0.25.2", + "@esbuild/win32-x64": "0.25.2" + } + }, + "node_modules/escalade": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ethers": { + "version": "6.13.5", + "resolved": "https://registry.npmjs.org/ethers/-/ethers-6.13.5.tgz", + "integrity": "sha512-+knKNieu5EKRThQJWwqaJ10a6HE9sSehGeqWN65//wE7j47ZpFhKAnHB/JJFibwwg61I/koxaPsXbXpD/skNOQ==", + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/ethers-io/" + }, + { + "type": "individual", + "url": "https://www.buymeacoffee.com/ricmoo" + } + ], + "license": "MIT", + "dependencies": { + "@adraffy/ens-normalize": "1.10.1", + "@noble/curves": "1.2.0", + "@noble/hashes": "1.3.2", + "@types/node": "22.7.5", + "aes-js": "4.0.0-beta.5", + "tslib": "2.7.0", + "ws": "8.17.1" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/ethers/node_modules/@noble/curves": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@noble/curves/-/curves-1.2.0.tgz", + "integrity": "sha512-oYclrNgRaM9SsBUBVbb8M6DTV7ZHRTKugureoYEncY5c65HOmRzvSiTE3y5CYaPYJA/GVkrhXEoF0M3Ya9PMnw==", + "license": "MIT", + "dependencies": { + "@noble/hashes": "1.3.2" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/ethers/node_modules/@noble/hashes": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/@noble/hashes/-/hashes-1.3.2.tgz", + "integrity": "sha512-MVC8EAQp7MvEcm30KWENFjgR+Mkmf+D189XJTkFIlwohU5hcBbn1ZkKq7KVTi2Hme3PMGF390DaL52beVrIihQ==", + "license": "MIT", + "engines": { + "node": ">= 16" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/ethers/node_modules/@types/node": { + "version": "22.7.5", + "resolved": "https://registry.npmjs.org/@types/node/-/node-22.7.5.tgz", + "integrity": "sha512-jML7s2NAzMWc//QSJ1a3prpk78cOPchGvXJsC3C6R6PSMoooztvRVQEz89gmBTBY1SPMaqo5teB4uNHPdetShQ==", + "license": "MIT", + "dependencies": { + "undici-types": "~6.19.2" + } + }, + "node_modules/ethers/node_modules/tslib": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.7.0.tgz", + "integrity": "sha512-gLXCKdN1/j47AiHiOkJN69hJmcbGTHI0ImLmbYLHykhgeN0jVGola9yVjFgzCUklsZQMW55o+dW7IXv3RCXDzA==", + "license": "0BSD" + }, + "node_modules/ethers/node_modules/undici-types": { + "version": "6.19.8", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.19.8.tgz", + "integrity": "sha512-ve2KP6f/JnbPBFyobGHuerC9g1FYGn/F8n1LWTwNxCEzd6IfqTwUQcNXgEtmmQ6DlRrC1hrSrBnCZPokRrDHjw==", + "license": "MIT" + }, + "node_modules/ethers/node_modules/ws": { + "version": "8.17.1", + "resolved": 
"https://registry.npmjs.org/ws/-/ws-8.17.1.tgz", + "integrity": "sha512-6XQFvXTkbfUOZOKKILFG1PDK2NDQs4azKQl26T0YS5CxqWLgXajbPZ+h4gZekJyRqFU8pvnbAbbs/3TgRPy+GQ==", + "license": "MIT", + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": ">=5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + }, + "node_modules/eventemitter3": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-5.0.1.tgz", + "integrity": "sha512-GWkBvjiSZK87ELrYOSESUYeVIc9mvLLf/nXalMOS5dYrgZq9o5OVkbZAVM06CVxYsCwH9BDZFPlQTlPA1j4ahA==", + "license": "MIT" + }, + "node_modules/execa": { + "version": "9.5.2", + "resolved": "https://registry.npmjs.org/execa/-/execa-9.5.2.tgz", + "integrity": "sha512-EHlpxMCpHWSAh1dgS6bVeoLAXGnJNdR93aabr4QCGbzOM73o5XmRfM/e5FUqsw3aagP8S8XEWUWFAxnRBnAF0Q==", + "license": "MIT", + "dependencies": { + "@sindresorhus/merge-streams": "^4.0.0", + "cross-spawn": "^7.0.3", + "figures": "^6.1.0", + "get-stream": "^9.0.0", + "human-signals": "^8.0.0", + "is-plain-obj": "^4.1.0", + "is-stream": "^4.0.1", + "npm-run-path": "^6.0.0", + "pretty-ms": "^9.0.0", + "signal-exit": "^4.1.0", + "strip-final-newline": "^4.0.0", + "yoctocolors": "^2.0.0" + }, + "engines": { + "node": "^18.19.0 || >=20.5.0" + }, + "funding": { + "url": "https://github.com/sindresorhus/execa?sponsor=1" + } + }, + "node_modules/fetch-blob": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/fetch-blob/-/fetch-blob-3.2.0.tgz", + "integrity": "sha512-7yAQpD2UMJzLi1Dqv7qFYnPbaPx7ZfFK6PiIxQ4PfkGPyNyl2Ugx+a/umUonmKqjhM4DnfbMvdX6otXq83soQQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/jimmywarting" + }, + { + "type": "paypal", + "url": "https://paypal.me/jimmywarting" + } + ], + "license": "MIT", + "dependencies": { + "node-domexception": "^1.0.0", + "web-streams-polyfill": "^3.0.3" + }, + "engines": { + "node": "^12.20 || >= 14.13" + } + }, + "node_modules/figures": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/figures/-/figures-6.1.0.tgz", + "integrity": "sha512-d+l3qxjSesT4V7v2fh+QnmFnUWv9lSpjarhShNTgBOfA0ttejbQUAlHLitbjkoRiDulW0OPoQPYIGhIC8ohejg==", + "license": "MIT", + "dependencies": { + "is-unicode-supported": "^2.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/fill-range": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "license": "MIT", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "license": "MIT", + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/flat": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/flat/-/flat-5.0.2.tgz", + "integrity": "sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ==", + "license": "BSD-3-Clause", + "bin": { + 
"flat": "cli.js" + } + }, + "node_modules/for-each": { + "version": "0.3.5", + "resolved": "https://registry.npmjs.org/for-each/-/for-each-0.3.5.tgz", + "integrity": "sha512-dKx12eRCVIzqCxFGplyFKJMPvLEWgmNtUrpTiJIR5u97zEhRG8ySrtboPHZXx7daLxQVrl643cTzbab2tkQjxg==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-callable": "^1.2.7" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/foreground-child": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.3.1.tgz", + "integrity": "sha512-gIXjKqtFuWEgzFRJA9WCQeSJLZDjgJUOMCMzxtvFq/37KojM1BFGufqsCy0r4qSQmYLsZYMeyRqzIWOMup03sw==", + "license": "ISC", + "dependencies": { + "cross-spawn": "^7.0.6", + "signal-exit": "^4.0.1" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/formdata-polyfill": { + "version": "4.0.10", + "resolved": "https://registry.npmjs.org/formdata-polyfill/-/formdata-polyfill-4.0.10.tgz", + "integrity": "sha512-buewHzMvYL29jdeQTVILecSaZKnt/RJWjoZCF5OW60Z67/GmSLBkOFM7qh1PI3zFNtJbaZL5eQu1vLfazOwj4g==", + "license": "MIT", + "dependencies": { + "fetch-blob": "^3.1.2" + }, + "engines": { + "node": ">=12.20.0" + } + }, + "node_modules/fs.promises.exists": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/fs.promises.exists/-/fs.promises.exists-1.1.4.tgz", + "integrity": "sha512-lJzUGWbZn8vhGWBedA+RYjB/BeJ+3458ljUfmplqhIeb6ewzTFWNPCR1HCiYCkXV9zxcHz9zXkJzMsEgDLzh3Q==", + "license": "MIT", + "funding": { + "url": "https://github.com/privatenumber/fs.promises.exists?sponsor=1" + } + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-caller-file": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "license": "ISC", + "engines": { + "node": "6.* || 8.* || >= 10.*" + } + }, + "node_modules/get-east-asian-width": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/get-east-asian-width/-/get-east-asian-width-1.3.0.tgz", + "integrity": "sha512-vpeMIQKxczTD/0s2CdEWHcb0eeJe6TFjxb+J5xgX7hScxqrGuyjmv4c1D4A/gelKfyox0gJJwIHF+fLjeaM8kQ==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/get-intrinsic": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", + "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "es-define-property": "^1.0.1", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.1.1", + "function-bind": "^1.1.2", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-symbols": "^1.1.0", + "hasown": "^2.0.2", + "math-intrinsics": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-proto": { + "version": 
"1.0.1", + "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", + "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", + "dev": true, + "license": "MIT", + "dependencies": { + "dunder-proto": "^1.0.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/get-stream": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-9.0.1.tgz", + "integrity": "sha512-kVCxPF3vQM/N0B1PmoqVUqgHP+EeVjmZSQn+1oCRPxd2P21P2F19lIgbR3HBosbB1PUhOAoctJnfEn2GbN2eZA==", + "license": "MIT", + "dependencies": { + "@sec-ant/readable-stream": "^0.4.1", + "is-stream": "^4.0.1" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/glob": { + "version": "10.4.5", + "resolved": "https://registry.npmjs.org/glob/-/glob-10.4.5.tgz", + "integrity": "sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==", + "license": "ISC", + "dependencies": { + "foreground-child": "^3.1.0", + "jackspeak": "^3.1.2", + "minimatch": "^9.0.4", + "minipass": "^7.1.2", + "package-json-from-dist": "^1.0.0", + "path-scurry": "^1.11.1" + }, + "bin": { + "glob": "dist/esm/bin.mjs" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/glob/node_modules/minimatch": { + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/gopd": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", + "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/has-property-descriptors": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz", + "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-define-property": "^1.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-symbols": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", + "integrity": 
"sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-tostringtag": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz", + "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-symbols": "^1.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/he": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz", + "integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==", + "license": "MIT", + "bin": { + "he": "bin/he" + } + }, + "node_modules/hosted-git-info": { + "version": "7.0.2", + "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-7.0.2.tgz", + "integrity": "sha512-puUZAUKT5m8Zzvs72XWy3HtvVbTWljRE66cP60bxJzAqf2DgICo7lYTY2IHUmLnNpjYvw5bvmoHvPc0QO2a62w==", + "license": "ISC", + "dependencies": { + "lru-cache": "^10.0.1" + }, + "engines": { + "node": "^16.14.0 || >=18.0.0" + } + }, + "node_modules/human-signals": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-8.0.1.tgz", + "integrity": "sha512-eKCa6bwnJhvxj14kZk5NCPc6Hb6BdsU9DZcOnmQKSnO1VKrfV0zCvtttPZUsBvjmNDn8rpcJfpwSYnHBjc95MQ==", + "license": "Apache-2.0", + "engines": { + "node": ">=18.18.0" + } + }, + "node_modules/imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + "license": "MIT", + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/index-to-position": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/index-to-position/-/index-to-position-1.1.0.tgz", + "integrity": "sha512-XPdx9Dq4t9Qk1mTMbWONJqU7boCoumEH7fRET37HX5+khDUl3J2W6PdALxhILYlIYx2amlwYcRPp28p0tSiojg==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/is-arguments": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/is-arguments/-/is-arguments-1.2.0.tgz", + "integrity": "sha512-7bVbi0huj/wrIAOzb8U1aszg9kdi3KN/CyU19CTI7tAoZYEZoL9yCDXpbXN+uPsuWnP02cyug1gleqq+TU+YCA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "has-tostringtag": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-binary-path": { 
+ "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", + "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "license": "MIT", + "dependencies": { + "binary-extensions": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-callable": { + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.2.7.tgz", + "integrity": "sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-generator-function": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/is-generator-function/-/is-generator-function-1.1.0.tgz", + "integrity": "sha512-nPUB5km40q9e8UfN/Zc24eLlzdSf9OfKByBw9CIdw4H1giPMeA0OIJvbchsCu4npfI2QcMVBsGEBHKZ7wLTWmQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3", + "get-proto": "^1.0.0", + "has-tostringtag": "^1.0.2", + "safe-regex-test": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "license": "MIT", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-interactive": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-interactive/-/is-interactive-2.0.0.tgz", + "integrity": "sha512-qP1vozQRI+BMOPcjFzrjXuQvdak2pHNUMZoeG2eRbiSqyvbEf/wQtEOTOX1guk6E3t36RkaqiSt8A/6YElNxLQ==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-nan": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/is-nan/-/is-nan-1.3.2.tgz", + "integrity": "sha512-E+zBKpQ2t6MEo1VsonYmluk9NxGrbzpeeLC2xIViuO2EjU2xsXsBPwTr3Ykv9l08UYEVEdWeRZNouaZqF6RN0w==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.0", + "define-properties": "^1.1.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "license": "MIT", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-plain-obj": { + "version": "4.1.0", + "resolved": 
"https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz", + "integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-regex": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.2.1.tgz", + "integrity": "sha512-MjYsKHO5O7mCsmRGxWcLWheFqN9DJ/2TmngvjKXihe6efViPqc274+Fx/4fYj/r03+ESvBdTXK0V6tA3rgez1g==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "gopd": "^1.2.0", + "has-tostringtag": "^1.0.2", + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-stream": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-4.0.1.tgz", + "integrity": "sha512-Dnz92NInDqYckGEUJv689RbRiTSEHCQ7wOVeALbkOz999YpqT46yMRIGtSNl2iCL1waAZSx40+h59NV/EwzV/A==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-typed-array": { + "version": "1.1.15", + "resolved": "https://registry.npmjs.org/is-typed-array/-/is-typed-array-1.1.15.tgz", + "integrity": "sha512-p3EcsicXjit7SaskXHs1hA91QxgTw46Fv6EFKKGS5DRFLD8yKnohjF3hxoju94b/OcMZoQukzpPpBE9uLVKzgQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "which-typed-array": "^1.1.16" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-unicode-supported": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-unicode-supported/-/is-unicode-supported-2.1.0.tgz", + "integrity": "sha512-mE00Gnza5EEB3Ds0HfMyllZzbBrmLOX3vfWoj9A9PEnTfratQ/BcaJOuMhnkhjXvb2+FkY3VuHqtAGpTPmglFQ==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "license": "ISC" + }, + "node_modules/isows": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/isows/-/isows-1.0.6.tgz", + "integrity": "sha512-lPHCayd40oW98/I0uvgaHKWCSvkzY27LjWLbtzOm64yQ+G3Q5npjjbdppU65iZXkK1Zt+kH9pfegli0AYfwYYw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/wevm" + } + ], + "license": "MIT", + "peerDependencies": { + "ws": "*" + } + }, + "node_modules/jackspeak": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-3.4.3.tgz", + "integrity": "sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==", + "license": "BlueOak-1.0.0", + "dependencies": { + "@isaacs/cliui": "^8.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + }, + "optionalDependencies": { + "@pkgjs/parseargs": "^0.11.0" + } + }, + "node_modules/joycon": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/joycon/-/joycon-3.1.1.tgz", + "integrity": "sha512-34wB/Y7MW7bzjKRjUKTa46I2Z7eV62Rkhva+KkopW7Qvv/OSWBqvkSY7vusOPrNuZcUG3tApvdVgNB8POj3SPw==", + "license": "MIT", + "engines": { + "node": ">=10" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": 
"https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "license": "MIT" + }, + "node_modules/js-yaml": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", + "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "license": "MIT", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/json-stringify-safe": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz", + "integrity": "sha512-ZClg6AaYvamvYEE82d3Iyd3vSSIjQ+odgjaTzRuO3s7toCdFKczob2i0zCh7JE8kWn17yvAWhUVxvqGwUalsRA==", + "license": "ISC" + }, + "node_modules/lilconfig": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-3.1.3.tgz", + "integrity": "sha512-/vlFKAoH5Cgt3Ie+JLhRbwOsCQePABiU3tJ1egGvyQ+33R/vcwM2Zl2QR/LzjsBeItPt3oSVXapn+m4nQDvpzw==", + "license": "MIT", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/antonk52" + } + }, + "node_modules/lines-and-columns": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", + "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==", + "license": "MIT" + }, + "node_modules/load-tsconfig": { + "version": "0.2.5", + "resolved": "https://registry.npmjs.org/load-tsconfig/-/load-tsconfig-0.2.5.tgz", + "integrity": "sha512-IXO6OCs9yg8tMKzfPZ1YmheJbZCiEsnBdcB03l0OcfK9prKnJb96siuHCr5Fl37/yo9DnKU+TLpxzTUspw9shg==", + "license": "MIT", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + } + }, + "node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "license": "MIT", + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lodash.sortby": { + "version": "4.7.0", + "resolved": "https://registry.npmjs.org/lodash.sortby/-/lodash.sortby-4.7.0.tgz", + "integrity": "sha512-HDWXG8isMntAyRF5vZ7xKuEvOhT4AhlRt/3czTSjvGUxjYCBVRQY48ViDHyfYz9VIoBkW4TMGQNapx+l3RUwdA==", + "license": "MIT" + }, + "node_modules/log-symbols": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-4.1.0.tgz", + "integrity": "sha512-8XPvpAA8uyhfteu8pIvQxpJZ7SYYdpUivZpGy6sFsBuKRY/7rQGavedeB8aK+Zkyq6upMFVL/9AW6vOYzfRyLg==", + "license": "MIT", + "dependencies": { + "chalk": "^4.1.0", + "is-unicode-supported": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/log-symbols/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + 
"node_modules/log-symbols/node_modules/is-unicode-supported": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/is-unicode-supported/-/is-unicode-supported-0.1.0.tgz", + "integrity": "sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw==", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/log-symbols/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/loupe": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/loupe/-/loupe-3.1.3.tgz", + "integrity": "sha512-kkIp7XSkP78ZxJEsSxW3712C6teJVoeHHwgo9zJ380de7IYyJ2ISlxojcH2pC5OFLewESmnRi/+XCDIEEVyoug==", + "dev": true, + "license": "MIT" + }, + "node_modules/lru-cache": { + "version": "10.4.3", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.4.3.tgz", + "integrity": "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==", + "license": "ISC" + }, + "node_modules/make-error": { + "version": "1.3.6", + "resolved": "https://registry.npmjs.org/make-error/-/make-error-1.3.6.tgz", + "integrity": "sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==", + "dev": true, + "license": "ISC" + }, + "node_modules/math-intrinsics": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", + "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/micro-sr25519": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/micro-sr25519/-/micro-sr25519-0.1.0.tgz", + "integrity": "sha512-at5zfxiKNhh07v4GPb8Sc6wCW+jd18FMMgPM0ACIQMcgvMfB9a34mfOlXr5B04J4yFZ6imlvJfRaFbOxMA7ytw==", + "license": "MIT", + "dependencies": { + "@noble/curves": "~1.7.0", + "@noble/hashes": "~1.6.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/micro-sr25519/node_modules/@noble/curves": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/@noble/curves/-/curves-1.7.0.tgz", + "integrity": "sha512-UTMhXK9SeDhFJVrHeUJ5uZlI6ajXg10O6Ddocf9S6GjbSBVZsJo88HzKwXznNfGpMTRDyJkqMjNDPYgf0qFWnw==", + "license": "MIT", + "dependencies": { + "@noble/hashes": "1.6.0" + }, + "engines": { + "node": "^14.21.3 || >=16" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/micro-sr25519/node_modules/@noble/curves/node_modules/@noble/hashes": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/@noble/hashes/-/hashes-1.6.0.tgz", + "integrity": "sha512-YUULf0Uk4/mAA89w+k3+yUYh6NrEvxZa5T6SY3wlMvE2chHkxFUUIDI8/XW1QSC357iA5pSnqt7XEhvFOqmDyQ==", + "license": "MIT", + "engines": { + "node": "^14.21.3 || >=16" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/micro-sr25519/node_modules/@noble/hashes": { + "version": "1.6.1", + "resolved": "https://registry.npmjs.org/@noble/hashes/-/hashes-1.6.1.tgz", + "integrity": 
"sha512-pq5D8h10hHBjyqX+cfBm0i8JUXJ0UhczFc4r74zbuT9XgewFo2E3J1cOaGtdZynILNmQ685YWGzGE1Zv6io50w==", + "license": "MIT", + "engines": { + "node": "^14.21.3 || >=16" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/mimic-function": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/mimic-function/-/mimic-function-5.0.1.tgz", + "integrity": "sha512-VP79XUPxV2CigYP3jWwAUFSku2aKqBH7uTAapFWCBqutsbmDo96KY5o8uh6U+/YSIn5OxJnXp73beVkpqMIGhA==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/minimatch": { + "version": "5.1.6", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-5.1.6.tgz", + "integrity": "sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==", + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/mocha": { + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/mocha/-/mocha-11.1.0.tgz", + "integrity": "sha512-8uJR5RTC2NgpY3GrYcgpZrsEd9zKbPDpob1RezyR2upGHRQtHWofmzTMzTMSV6dru3tj5Ukt0+Vnq1qhFEEwAg==", + "license": "MIT", + "dependencies": { + "ansi-colors": "^4.1.3", + "browser-stdout": "^1.3.1", + "chokidar": "^3.5.3", + "debug": "^4.3.5", + "diff": "^5.2.0", + "escape-string-regexp": "^4.0.0", + "find-up": "^5.0.0", + "glob": "^10.4.5", + "he": "^1.2.0", + "js-yaml": "^4.1.0", + "log-symbols": "^4.1.0", + "minimatch": "^5.1.6", + "ms": "^2.1.3", + "serialize-javascript": "^6.0.2", + "strip-json-comments": "^3.1.1", + "supports-color": "^8.1.1", + "workerpool": "^6.5.1", + "yargs": "^17.7.2", + "yargs-parser": "^21.1.1", + "yargs-unparser": "^2.0.0" + }, + "bin": { + "_mocha": "bin/_mocha", + "mocha": "bin/mocha.js" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/mocha/node_modules/diff": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/diff/-/diff-5.2.0.tgz", + "integrity": "sha512-uIFDxqpRZGZ6ThOk84hEfqWoHx2devRFvpTZcTHur85vImfaxUbTW9Ryh4CpCuDnToOP1CEtXKIgytHBPVff5A==", + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.3.1" + } + }, + "node_modules/mock-socket": { + "version": "9.3.1", + "resolved": "https://registry.npmjs.org/mock-socket/-/mock-socket-9.3.1.tgz", + "integrity": "sha512-qxBgB7Qa2sEQgHFjj0dSigq7fX4k6Saisd5Nelwp2q8mlbAFh5dHV9JTTlF8viYJLSSWgMCZFUom8PJcMNBoJw==", + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, + "node_modules/mz": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/mz/-/mz-2.7.0.tgz", + "integrity": "sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==", + "license": "MIT", + "dependencies": { + "any-promise": "^1.0.0", + "object-assign": "^4.0.1", + "thenify-all": "^1.0.0" + } + }, + "node_modules/nanoid": { + "version": "3.3.8", + "resolved": 
"https://registry.npmjs.org/nanoid/-/nanoid-3.3.8.tgz", + "integrity": "sha512-WNLf5Sd8oZxOm+TzppcYk8gVOgP+l58xNy58D0nbUnOxOWRWvlcCV4kUF7ltmI6PsrLl/BgKEyS4mqsGChFN0w==", + "devOptional": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/nock": { + "version": "13.5.6", + "resolved": "https://registry.npmjs.org/nock/-/nock-13.5.6.tgz", + "integrity": "sha512-o2zOYiCpzRqSzPj0Zt/dQ/DqZeYoaQ7TUonc/xUPjCGl9WeHpNbxgVvOquXYAaJzI0M9BXV3HTzG0p8IUAbBTQ==", + "license": "MIT", + "dependencies": { + "debug": "^4.1.0", + "json-stringify-safe": "^5.0.1", + "propagate": "^2.0.0" + }, + "engines": { + "node": ">= 10.13" + } + }, + "node_modules/node-domexception": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/node-domexception/-/node-domexception-1.0.0.tgz", + "integrity": "sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/jimmywarting" + }, + { + "type": "github", + "url": "https://paypal.me/jimmywarting" + } + ], + "license": "MIT", + "engines": { + "node": ">=10.5.0" + } + }, + "node_modules/node-fetch": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-3.3.2.tgz", + "integrity": "sha512-dRB78srN/l6gqWulah9SrxeYnxeddIG30+GOqK/9OlLVyLg3HPnr6SqOWTWOXKRwC2eGYCkZ59NNuSgvSrpgOA==", + "license": "MIT", + "dependencies": { + "data-uri-to-buffer": "^4.0.0", + "fetch-blob": "^3.1.4", + "formdata-polyfill": "^4.0.10" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/node-fetch" + } + }, + "node_modules/normalize-package-data": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-6.0.2.tgz", + "integrity": "sha512-V6gygoYb/5EmNI+MEGrWkC+e6+Rr7mTmfHrxDbLzxQogBkgzo76rkok0Am6thgSF7Mv2nLOajAJj5vDJZEFn7g==", + "license": "BSD-2-Clause", + "dependencies": { + "hosted-git-info": "^7.0.0", + "semver": "^7.3.5", + "validate-npm-package-license": "^3.0.4" + }, + "engines": { + "node": "^16.14.0 || >=18.0.0" + } + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm-run-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-6.0.0.tgz", + "integrity": "sha512-9qny7Z9DsQU8Ou39ERsPU4OZQlSTP47ShQzuKZ6PRXpYLtIFgl/DEBYEXKlvcEa+9tHVcK8CF81Y2V72qaZhWA==", + "license": "MIT", + "dependencies": { + "path-key": "^4.0.0", + "unicorn-magic": "^0.3.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/npm-run-path/node_modules/path-key": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-4.0.0.tgz", + "integrity": "sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + 
"node_modules/object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-is": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/object-is/-/object-is-1.1.6.tgz", + "integrity": "sha512-F8cZ+KfGlSGi09lJT7/Nd6KJZ9ygtvYC0/UYYLI9nmQKLMnydpB9yvbv9K1uSkEu7FU9vYPmVwLg328tX+ot3Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/object-keys": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz", + "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/object.assign": { + "version": "4.1.7", + "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.7.tgz", + "integrity": "sha512-nK28WOo+QIjBkDduTINE4JkF/UJJKyf2EJxvJKfblDpyg0Q+pkOHNTL0Qwy6NP6FhE/EnzV73BxxqcJaXY9anw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.3", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.0.0", + "has-symbols": "^1.1.0", + "object-keys": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/onetime": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-7.0.0.tgz", + "integrity": "sha512-VXJjc87FScF88uafS3JllDgvAm+c/Slfz06lorj2uAY34rlUu0Nt+v8wreiImcrgAjjIHp1rXpTDlLOGw29WwQ==", + "license": "MIT", + "dependencies": { + "mimic-function": "^5.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ora": { + "version": "8.2.0", + "resolved": "https://registry.npmjs.org/ora/-/ora-8.2.0.tgz", + "integrity": "sha512-weP+BZ8MVNnlCm8c0Qdc1WSWq4Qn7I+9CJGm7Qali6g44e/PUzbjNqJX5NJ9ljlNMosfJvg1fKEGILklK9cwnw==", + "license": "MIT", + "dependencies": { + "chalk": "^5.3.0", + "cli-cursor": "^5.0.0", + "cli-spinners": "^2.9.2", + "is-interactive": "^2.0.0", + "is-unicode-supported": "^2.0.0", + "log-symbols": "^6.0.0", + "stdin-discarder": "^0.2.2", + "string-width": "^7.2.0", + "strip-ansi": "^7.1.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ora/node_modules/ansi-regex": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.1.0.tgz", + "integrity": "sha512-7HSX4QQb4CspciLpVFwyRe79O3xsIZDDLER21kERQ71oaPodF8jL725AgJMFAYbooIqolJoRLuM81SpeUkpkvA==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/ora/node_modules/emoji-regex": { + "version": "10.4.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-10.4.0.tgz", + "integrity": "sha512-EC+0oUMY1Rqm4O6LLrgjtYDvcVYTy7chDnM4Q7030tP4Kwj3u/pR6gP9ygnp2CJMK5Gq+9Q2oqmrFJAz01DXjw==", + "license": "MIT" + }, + "node_modules/ora/node_modules/log-symbols": { + "version": "6.0.0", + "resolved": 
"https://registry.npmjs.org/log-symbols/-/log-symbols-6.0.0.tgz", + "integrity": "sha512-i24m8rpwhmPIS4zscNzK6MSEhk0DUWa/8iYQWxhffV8jkI4Phvs3F+quL5xvS0gdQR0FyTCMMH33Y78dDTzzIw==", + "license": "MIT", + "dependencies": { + "chalk": "^5.3.0", + "is-unicode-supported": "^1.3.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ora/node_modules/log-symbols/node_modules/is-unicode-supported": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/is-unicode-supported/-/is-unicode-supported-1.3.0.tgz", + "integrity": "sha512-43r2mRvz+8JRIKnWJ+3j8JtjRKZ6GmjzfaE/qiBJnikNnYv/6bagRJ1kUhNk8R5EX/GkobD+r+sfxCPJsiKBLQ==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ora/node_modules/string-width": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-7.2.0.tgz", + "integrity": "sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ==", + "license": "MIT", + "dependencies": { + "emoji-regex": "^10.3.0", + "get-east-asian-width": "^1.0.0", + "strip-ansi": "^7.1.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ora/node_modules/strip-ansi": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", + "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", + "license": "MIT", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/ox": { + "version": "0.6.7", + "resolved": "https://registry.npmjs.org/ox/-/ox-0.6.7.tgz", + "integrity": "sha512-17Gk/eFsFRAZ80p5eKqv89a57uXjd3NgIf1CaXojATPBuujVc/fQSVhBeAU9JCRB+k7J50WQAyWTxK19T9GgbA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/wevm" + } + ], + "license": "MIT", + "dependencies": { + "@adraffy/ens-normalize": "^1.10.1", + "@noble/curves": "^1.6.0", + "@noble/hashes": "^1.5.0", + "@scure/bip32": "^1.5.0", + "@scure/bip39": "^1.4.0", + "abitype": "^1.0.6", + "eventemitter3": "5.0.1" + }, + "peerDependencies": { + "typescript": ">=5.4.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/ox/node_modules/@adraffy/ens-normalize": { + "version": "1.11.0", + "resolved": "https://registry.npmjs.org/@adraffy/ens-normalize/-/ens-normalize-1.11.0.tgz", + "integrity": "sha512-/3DDPKHqqIqxUULp8yP4zODUY1i+2xvVWsv8A79xGWdCAG+8sb0hRh0Rk2QyOJUnnbyPUAZYcpBuRe3nS2OIUg==", + "license": "MIT" + }, + "node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "license": "MIT", + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "license": "MIT", + "dependencies": { + "p-limit": "^3.0.2" + }, + 
"engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/package-json-from-dist": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/package-json-from-dist/-/package-json-from-dist-1.0.1.tgz", + "integrity": "sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==", + "license": "BlueOak-1.0.0" + }, + "node_modules/parse-json": { + "version": "8.3.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-8.3.0.tgz", + "integrity": "sha512-ybiGyvspI+fAoRQbIPRddCcSTV9/LsJbf0e/S85VLowVGzRmokfneg2kwVW/KU5rOXrPSbF1qAKPMgNTqqROQQ==", + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.26.2", + "index-to-position": "^1.1.0", + "type-fest": "^4.39.1" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/parse-ms": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/parse-ms/-/parse-ms-4.0.0.tgz", + "integrity": "sha512-TXfryirbmq34y8QBwgqCVLi+8oA3oWx2eAnSn62ITyEhEYaWRlVZ2DvMM9eZbMs/RfxPu/PK/aBLyGj4IrqMHw==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-scurry": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.11.1.tgz", + "integrity": "sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==", + "license": "BlueOak-1.0.0", + "dependencies": { + "lru-cache": "^10.2.0", + "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0" + }, + "engines": { + "node": ">=16 || 14 >=14.18" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/pathval": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/pathval/-/pathval-2.0.0.tgz", + "integrity": "sha512-vE7JKRyES09KiunauX7nd2Q9/L7lhok4smP9RZTDeD4MVs72Dp2qNFVz39Nz5a0FVEW0BJR6C0DYrq6unoziZA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 14.16" + } + }, + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "license": "ISC" + }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pirates": { + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.7.tgz", + "integrity": 
"sha512-TfySrs/5nm8fQJDcBDuUng3VOUKsd7S+zqvbOTiGXHfxX4wK31ard+hoNuvkicM/2YFzlpDgABOevKSsB4G/FA==", + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/polkadot-api": { + "version": "1.9.12", + "resolved": "https://registry.npmjs.org/polkadot-api/-/polkadot-api-1.9.12.tgz", + "integrity": "sha512-gYhpef5YnLEPZ3Uxeha5sHIIejINONSGBXTgFyEWsYi4y2DEUlv2ISlNZ9/0AGG6b6ZFDd56mLop/Fohl8vA4Q==", + "license": "MIT", + "dependencies": { + "@polkadot-api/cli": "0.11.9", + "@polkadot-api/ink-contracts": "0.2.6", + "@polkadot-api/json-rpc-provider": "0.0.4", + "@polkadot-api/known-chains": "0.7.3", + "@polkadot-api/logs-provider": "0.0.6", + "@polkadot-api/metadata-builders": "0.10.2", + "@polkadot-api/metadata-compatibility": "0.2.0", + "@polkadot-api/observable-client": "0.8.6", + "@polkadot-api/pjs-signer": "0.6.5", + "@polkadot-api/polkadot-sdk-compat": "2.3.2", + "@polkadot-api/polkadot-signer": "0.1.6", + "@polkadot-api/signer": "0.1.15", + "@polkadot-api/sm-provider": "0.1.7", + "@polkadot-api/smoldot": "0.3.8", + "@polkadot-api/substrate-bindings": "0.11.1", + "@polkadot-api/substrate-client": "0.3.0", + "@polkadot-api/utils": "0.1.2", + "@polkadot-api/ws-provider": "0.4.0", + "@rx-state/core": "^0.1.4" + }, + "bin": { + "papi": "bin/cli.mjs", + "polkadot-api": "bin/cli.mjs" + }, + "peerDependencies": { + "rxjs": ">=7.8.0" + } + }, + "node_modules/possible-typed-array-names": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/possible-typed-array-names/-/possible-typed-array-names-1.1.0.tgz", + "integrity": "sha512-/+5VFTchJDoVj3bhoqi6UeymcD00DAwb1nJwamzPvHEszJ4FpF6SNNbUbOS8yI56qHzdV8eK0qEfOSiodkTdxg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/postcss": { + "version": "8.5.3", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.3.tgz", + "integrity": "sha512-dle9A3yYxlBSrt8Fu+IpjGT8SY8hN0mlaA6GY8t0P5PjIOZemULz/E2Bnm/2dcUOena75OTNkHI76uZBNUUq3A==", + "devOptional": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "nanoid": "^3.3.8", + "picocolors": "^1.1.1", + "source-map-js": "^1.2.1" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/postcss-load-config": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/postcss-load-config/-/postcss-load-config-6.0.1.tgz", + "integrity": "sha512-oPtTM4oerL+UXmx+93ytZVN82RrlY/wPUV8IeDxFrzIjXOLF1pN+EmKPLbubvKHT2HC20xXsCAH2Z+CKV6Oz/g==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "lilconfig": "^3.1.1" + }, + "engines": { + "node": ">= 18" + }, + "peerDependencies": { + "jiti": ">=1.21.0", + "postcss": ">=8.0.9", + "tsx": "^4.8.1", + "yaml": "^2.4.2" + }, + "peerDependenciesMeta": { + "jiti": { + "optional": true + }, + "postcss": { + "optional": true + }, + "tsx": { + "optional": true + }, + "yaml": { + "optional": true + } + } + }, + "node_modules/prettier": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.5.2.tgz", + "integrity": "sha512-lc6npv5PH7hVqozBR7lkBNOGXV9vMwROAPlumdBkX0wTbbzPu/U1hk5yL8p2pt4Xoc+2mkT8t/sow2YrV/M5qg==", + "dev": true, + 
"license": "MIT", + "bin": { + "prettier": "bin/prettier.cjs" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/prettier/prettier?sponsor=1" + } + }, + "node_modules/pretty-ms": { + "version": "9.2.0", + "resolved": "https://registry.npmjs.org/pretty-ms/-/pretty-ms-9.2.0.tgz", + "integrity": "sha512-4yf0QO/sllf/1zbZWYnvWw3NxCQwLXKzIj0G849LSufP15BXKM0rbD2Z3wVnkMfjdn/CB0Dpp444gYAACdsplg==", + "license": "MIT", + "dependencies": { + "parse-ms": "^4.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/propagate": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/propagate/-/propagate-2.0.1.tgz", + "integrity": "sha512-vGrhOavPSTz4QVNuBNdcNXePNdNMaO1xj9yBeH1ScQPjk/rhg9sSlCXPhMkFuaNNW/syTvYqsnbIJxMBfRbbag==", + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/punycode": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", + "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/randombytes": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", + "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", + "license": "MIT", + "dependencies": { + "safe-buffer": "^5.1.0" + } + }, + "node_modules/read-pkg": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-9.0.1.tgz", + "integrity": "sha512-9viLL4/n1BJUCT1NXVTdS1jtm80yDEgR5T4yCelII49Mbj0v1rZdKqj7zCiYdbB0CuCgdrvHcNogAKTFPBocFA==", + "license": "MIT", + "dependencies": { + "@types/normalize-package-data": "^2.4.3", + "normalize-package-data": "^6.0.0", + "parse-json": "^8.0.0", + "type-fest": "^4.6.0", + "unicorn-magic": "^0.1.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/read-pkg/node_modules/unicorn-magic": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/unicorn-magic/-/unicorn-magic-0.1.0.tgz", + "integrity": "sha512-lRfVq8fE8gz6QMBuDM6a+LO3IAzTi05H6gCVaUpir2E1Rwpo4ZUog45KpNXKC/Mn3Yb9UDuHumeFTo9iV/D9FQ==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/readdirp": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", + "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", + "license": "MIT", + "dependencies": { + "picomatch": "^2.2.1" + }, + "engines": { + "node": ">=8.10.0" + } + }, + "node_modules/require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/resolve-from": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", + "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/restore-cursor": { + "version": "5.1.0", 
+ "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-5.1.0.tgz", + "integrity": "sha512-oMA2dcrw6u0YfxJQXm342bFKX/E4sG9rbTzO9ptUcR/e8A33cHuvStiYOwH7fszkZlZ1z/ta9AAoPk2F4qIOHA==", + "license": "MIT", + "dependencies": { + "onetime": "^7.0.0", + "signal-exit": "^4.1.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/rollup": { + "version": "4.34.8", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.34.8.tgz", + "integrity": "sha512-489gTVMzAYdiZHFVA/ig/iYFllCcWFHMvUHI1rpFmkoUtRlQxqh6/yiNqnYibjMZ2b/+FUQwldG+aLsEt6bglQ==", + "license": "MIT", + "dependencies": { + "@types/estree": "1.0.6" + }, + "bin": { + "rollup": "dist/bin/rollup" + }, + "engines": { + "node": ">=18.0.0", + "npm": ">=8.0.0" + }, + "optionalDependencies": { + "@rollup/rollup-android-arm-eabi": "4.34.8", + "@rollup/rollup-android-arm64": "4.34.8", + "@rollup/rollup-darwin-arm64": "4.34.8", + "@rollup/rollup-darwin-x64": "4.34.8", + "@rollup/rollup-freebsd-arm64": "4.34.8", + "@rollup/rollup-freebsd-x64": "4.34.8", + "@rollup/rollup-linux-arm-gnueabihf": "4.34.8", + "@rollup/rollup-linux-arm-musleabihf": "4.34.8", + "@rollup/rollup-linux-arm64-gnu": "4.34.8", + "@rollup/rollup-linux-arm64-musl": "4.34.8", + "@rollup/rollup-linux-loongarch64-gnu": "4.34.8", + "@rollup/rollup-linux-powerpc64le-gnu": "4.34.8", + "@rollup/rollup-linux-riscv64-gnu": "4.34.8", + "@rollup/rollup-linux-s390x-gnu": "4.34.8", + "@rollup/rollup-linux-x64-gnu": "4.34.8", + "@rollup/rollup-linux-x64-musl": "4.34.8", + "@rollup/rollup-win32-arm64-msvc": "4.34.8", + "@rollup/rollup-win32-ia32-msvc": "4.34.8", + "@rollup/rollup-win32-x64-msvc": "4.34.8", + "fsevents": "~2.3.2" + } + }, + "node_modules/rxjs": { + "version": "7.8.2", + "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-7.8.2.tgz", + "integrity": "sha512-dhKf903U/PQZY6boNNtAGdWbG85WAbjT/1xYoZIC7FAY0yWapOBQVsVrDl58W86//e1VpMNBtRV4MaXfdMySFA==", + "license": "Apache-2.0", + "dependencies": { + "tslib": "^2.1.0" + } + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/safe-regex-test": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/safe-regex-test/-/safe-regex-test-1.1.0.tgz", + "integrity": "sha512-x/+Cz4YrimQxQccJf5mKEbIa1NzeCRNI5Ecl/ekmlYaampdNLPalVyIcCZNNH3MvmqBugV5TMYZXv0ljslUlaw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "is-regex": "^1.2.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/scale-ts": { + "version": "1.6.1", + "resolved": "https://registry.npmjs.org/scale-ts/-/scale-ts-1.6.1.tgz", + "integrity": "sha512-PBMc2AWc6wSEqJYBDPcyCLUj9/tMKnLX70jLOSndMtcUoLQucP/DM0vnQo1wJAYjTrQiq8iG9rD0q6wFzgjH7g==", + "license": "MIT" + }, + "node_modules/semver": { + "version": "7.7.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.1.tgz", + "integrity": 
"sha512-hlq8tAfn0m/61p4BVRcPzIGr6LKiMwo4VM6dGi6pt4qcRkmNzTcWq6eCEjEh+qXjkMDvPlOFFSGwQjoEa6gyMA==", + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/serialize-javascript": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.2.tgz", + "integrity": "sha512-Saa1xPByTTq2gdeFZYLLo+RFE35NHZkAbqZeWNd3BpzppeVisAqpDjcp8dyf6uIvEqJRd46jemmyA4iFIeVk8g==", + "license": "BSD-3-Clause", + "dependencies": { + "randombytes": "^2.1.0" + } + }, + "node_modules/set-function-length": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz", + "integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==", + "dev": true, + "license": "MIT", + "dependencies": { + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "gopd": "^1.0.1", + "has-property-descriptors": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "license": "MIT", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/signal-exit": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", + "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", + "license": "ISC", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/smoldot": { + "version": "2.0.26", + "resolved": "https://registry.npmjs.org/smoldot/-/smoldot-2.0.26.tgz", + "integrity": "sha512-F+qYmH4z2s2FK+CxGj8moYcd1ekSIKH8ywkdqlOz88Dat35iB1DIYL11aILN46YSGMzQW/lbJNS307zBSDN5Ig==", + "license": "GPL-3.0-or-later WITH Classpath-exception-2.0", + "optional": true, + "dependencies": { + "ws": "^8.8.1" + } + }, + "node_modules/sort-keys": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/sort-keys/-/sort-keys-5.1.0.tgz", + "integrity": "sha512-aSbHV0DaBcr7u0PVHXzM6NbZNAtrr9sF6+Qfs9UUVG7Ll3jQ6hHi8F/xqIIcn2rvIVbr0v/2zyjSdwSV47AgLQ==", + "license": "MIT", + "dependencies": { + "is-plain-obj": "^4.0.0" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/source-map": { + "version": "0.8.0-beta.0", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.8.0-beta.0.tgz", + "integrity": "sha512-2ymg6oRBpebeZi9UUNsgQ89bhx01TcTkmNTGnNO88imTmbSgy4nfujrgVEFKWpMTEGA11EDkTt7mqObTPdigIA==", + "license": "BSD-3-Clause", + "dependencies": { + "whatwg-url": "^7.0.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/source-map-js": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": 
"sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + "devOptional": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/spdx-correct": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/spdx-correct/-/spdx-correct-3.2.0.tgz", + "integrity": "sha512-kN9dJbvnySHULIluDHy32WHRUu3Og7B9sbY7tsFLctQkIqnMh3hErYgdMjTYuqmcXX+lK5T1lnUt3G7zNswmZA==", + "license": "Apache-2.0", + "dependencies": { + "spdx-expression-parse": "^3.0.0", + "spdx-license-ids": "^3.0.0" + } + }, + "node_modules/spdx-exceptions": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/spdx-exceptions/-/spdx-exceptions-2.5.0.tgz", + "integrity": "sha512-PiU42r+xO4UbUS1buo3LPJkjlO7430Xn5SVAhdpzzsPHsjbYVflnnFdATgabnLude+Cqu25p6N+g2lw/PFsa4w==", + "license": "CC-BY-3.0" + }, + "node_modules/spdx-expression-parse": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/spdx-expression-parse/-/spdx-expression-parse-3.0.1.tgz", + "integrity": "sha512-cbqHunsQWnJNE6KhVSMsMeH5H/L9EpymbzqTQ3uLwNCLZ1Q481oWaofqH7nO6V07xlXwY6PhQdQ2IedWx/ZK4Q==", + "license": "MIT", + "dependencies": { + "spdx-exceptions": "^2.1.0", + "spdx-license-ids": "^3.0.0" + } + }, + "node_modules/spdx-license-ids": { + "version": "3.0.21", + "resolved": "https://registry.npmjs.org/spdx-license-ids/-/spdx-license-ids-3.0.21.tgz", + "integrity": "sha512-Bvg/8F5XephndSK3JffaRqdT+gyhfqIPwDHpX80tJrF8QQRYMo8sNMeaZ2Dp5+jhwKnUmIOyFFQfHRkjJm5nXg==", + "license": "CC0-1.0" + }, + "node_modules/stdin-discarder": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/stdin-discarder/-/stdin-discarder-0.2.2.tgz", + "integrity": "sha512-UhDfHmA92YAlNnCfhmq0VeNL5bDbiZGg7sZ2IvPsXubGkiNa9EC+tUTsjBRsYUAz87btI6/1wf4XoVvQ3uRnmQ==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/string-width-cjs": { + "name": "string-width", + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi-cjs": { + "name": "strip-ansi", + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + 
}, + "node_modules/strip-final-newline": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-4.0.0.tgz", + "integrity": "sha512-aulFJcD6YK8V1G7iRB5tigAP4TsHBZZrOV8pjV++zdUwmeV8uzbY7yn6h9MswN62adStNZFuCIx4haBnRuMDaw==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/sucrase": { + "version": "3.35.0", + "resolved": "https://registry.npmjs.org/sucrase/-/sucrase-3.35.0.tgz", + "integrity": "sha512-8EbVDiu9iN/nESwxeSxDKe0dunta1GOlHufmSSXxMD2z2/tMZpDMpvXQGsc+ajGo8y2uYUmixaSRUc/QPoQ0GA==", + "license": "MIT", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.2", + "commander": "^4.0.0", + "glob": "^10.3.10", + "lines-and-columns": "^1.1.6", + "mz": "^2.7.0", + "pirates": "^4.0.1", + "ts-interface-checker": "^0.1.9" + }, + "bin": { + "sucrase": "bin/sucrase", + "sucrase-node": "bin/sucrase-node" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/sucrase/node_modules/commander": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz", + "integrity": "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==", + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/thenify": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/thenify/-/thenify-3.3.1.tgz", + "integrity": "sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw==", + "license": "MIT", + "dependencies": { + "any-promise": "^1.0.0" + } + }, + "node_modules/thenify-all": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/thenify-all/-/thenify-all-1.6.0.tgz", + "integrity": "sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==", + "license": "MIT", + "dependencies": { + "thenify": ">= 3.1.0 < 4" + }, + "engines": { + "node": ">=0.8" + } + }, + "node_modules/tinyexec": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/tinyexec/-/tinyexec-0.3.2.tgz", + "integrity": "sha512-KQQR9yN7R5+OSwaK0XQoj22pwHoTlgYqmUscPYoknOoWCWfj/5/ABTMRi69FrKU5ffPVh5QcFikpWJI/P1ocHA==", + "license": "MIT" + }, + "node_modules/tinyglobby": { + "version": "0.2.12", + "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.12.tgz", + "integrity": "sha512-qkf4trmKSIiMTs/E63cxH+ojC2unam7rJ0WrauAzpT3ECNTxGRMlaXxVbfxMUC/w0LaYk6jQ4y/nGR9uBO3tww==", + "license": "MIT", + "dependencies": { + "fdir": "^6.4.3", + "picomatch": "^4.0.2" + }, + "engines": { + "node": ">=12.0.0" + }, + "funding": { + "url": 
"https://github.com/sponsors/SuperchupuDev" + } + }, + "node_modules/tinyglobby/node_modules/fdir": { + "version": "6.4.3", + "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.4.3.tgz", + "integrity": "sha512-PMXmW2y1hDDfTSRc9gaXIuCCRpuoz3Kaz8cUelp3smouvfT632ozg2vrT6lJsHKKOF59YLbOGfAWGUcKEfRMQw==", + "license": "MIT", + "peerDependencies": { + "picomatch": "^3 || ^4" + }, + "peerDependenciesMeta": { + "picomatch": { + "optional": true + } + } + }, + "node_modules/tinyglobby/node_modules/picomatch": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.2.tgz", + "integrity": "sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "license": "MIT", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/tr46": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/tr46/-/tr46-1.0.1.tgz", + "integrity": "sha512-dTpowEjclQ7Kgx5SdBkqRzVhERQXov8/l9Ft9dVM9fmg0W0KQSVaXX9T4i6twCPNtYiZM53lpSSUAwJbFPOHxA==", + "license": "MIT", + "dependencies": { + "punycode": "^2.1.0" + } + }, + "node_modules/tree-kill": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/tree-kill/-/tree-kill-1.2.2.tgz", + "integrity": "sha512-L0Orpi8qGpRG//Nd+H90vFB+3iHnue1zSSGmNOOCh1GLJ7rUKVwV2HvijphGQS2UmhUZewS9VgvxYIdgr+fG1A==", + "license": "MIT", + "bin": { + "tree-kill": "cli.js" + } + }, + "node_modules/ts-interface-checker": { + "version": "0.1.13", + "resolved": "https://registry.npmjs.org/ts-interface-checker/-/ts-interface-checker-0.1.13.tgz", + "integrity": "sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA==", + "license": "Apache-2.0" + }, + "node_modules/ts-node": { + "version": "10.9.2", + "resolved": "https://registry.npmjs.org/ts-node/-/ts-node-10.9.2.tgz", + "integrity": "sha512-f0FFpIdcHgn8zcPSbf1dRevwt047YMnaiJM3u2w2RewrB+fob/zePZcrOyQoLMMO7aBIddLcQIEK5dYjkLnGrQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@cspotcode/source-map-support": "^0.8.0", + "@tsconfig/node10": "^1.0.7", + "@tsconfig/node12": "^1.0.7", + "@tsconfig/node14": "^1.0.0", + "@tsconfig/node16": "^1.0.2", + "acorn": "^8.4.1", + "acorn-walk": "^8.1.1", + "arg": "^4.1.0", + "create-require": "^1.1.0", + "diff": "^4.0.1", + "make-error": "^1.1.1", + "v8-compile-cache-lib": "^3.0.1", + "yn": "3.1.1" + }, + "bin": { + "ts-node": "dist/bin.js", + "ts-node-cwd": "dist/bin-cwd.js", + "ts-node-esm": "dist/bin-esm.js", + "ts-node-script": "dist/bin-script.js", + "ts-node-transpile-only": "dist/bin-transpile.js", + "ts-script": "dist/bin-script-deprecated.js" + }, + "peerDependencies": { + "@swc/core": ">=1.2.50", + "@swc/wasm": ">=1.2.50", + "@types/node": "*", + "typescript": ">=2.7" + }, + "peerDependenciesMeta": { + "@swc/core": { + "optional": true + }, + "@swc/wasm": { + "optional": true + } + } + }, + "node_modules/tsc-prog": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/tsc-prog/-/tsc-prog-2.3.0.tgz", + "integrity": "sha512-ycET2d75EgcX7y8EmG4KiZkLAwUzbY4xRhA6NU0uVbHkY4ZjrAAuzTMxXI85kOwATqPnBI5C/7y7rlpY0xdqHA==", + 
"license": "MIT", + "engines": { + "node": ">=12" + }, + "peerDependencies": { + "typescript": ">=4" + } + }, + "node_modules/tslib": { + "version": "2.8.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", + "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", + "license": "0BSD" + }, + "node_modules/tsup": { + "version": "8.4.0", + "resolved": "https://registry.npmjs.org/tsup/-/tsup-8.4.0.tgz", + "integrity": "sha512-b+eZbPCjz10fRryaAA7C8xlIHnf8VnsaRqydheLIqwG/Mcpfk8Z5zp3HayX7GaTygkigHl5cBUs+IhcySiIexQ==", + "license": "MIT", + "dependencies": { + "bundle-require": "^5.1.0", + "cac": "^6.7.14", + "chokidar": "^4.0.3", + "consola": "^3.4.0", + "debug": "^4.4.0", + "esbuild": "^0.25.0", + "joycon": "^3.1.1", + "picocolors": "^1.1.1", + "postcss-load-config": "^6.0.1", + "resolve-from": "^5.0.0", + "rollup": "^4.34.8", + "source-map": "0.8.0-beta.0", + "sucrase": "^3.35.0", + "tinyexec": "^0.3.2", + "tinyglobby": "^0.2.11", + "tree-kill": "^1.2.2" + }, + "bin": { + "tsup": "dist/cli-default.js", + "tsup-node": "dist/cli-node.js" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@microsoft/api-extractor": "^7.36.0", + "@swc/core": "^1", + "postcss": "^8.4.12", + "typescript": ">=4.5.0" + }, + "peerDependenciesMeta": { + "@microsoft/api-extractor": { + "optional": true + }, + "@swc/core": { + "optional": true + }, + "postcss": { + "optional": true + }, + "typescript": { + "optional": true + } + } + }, + "node_modules/tsup/node_modules/chokidar": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-4.0.3.tgz", + "integrity": "sha512-Qgzu8kfBvo+cA4962jnP1KkS6Dop5NS6g7R5LFYJr4b8Ub94PPQXUksCw9PvXoeXPRRddRNC5C1JQUR2SMGtnA==", + "license": "MIT", + "dependencies": { + "readdirp": "^4.0.1" + }, + "engines": { + "node": ">= 14.16.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/tsup/node_modules/readdirp": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-4.1.2.tgz", + "integrity": "sha512-GDhwkLfywWL2s6vEjyhri+eXmfH6j1L7JE27WhqLeYzoh/A3DBaYGEj2H/HFZCn/kMfim73FXxEJTw06WtxQwg==", + "license": "MIT", + "engines": { + "node": ">= 14.18.0" + }, + "funding": { + "type": "individual", + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/type-fest": { + "version": "4.40.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-4.40.0.tgz", + "integrity": "sha512-ABHZ2/tS2JkvH1PEjxFDTUWC8dB5OsIGZP4IFLhR293GqT5Y5qB1WwL2kMPYhQW9DVgVD8Hd7I8gjwPIf5GFkw==", + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/typescript": { + "version": "5.8.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.8.3.tgz", + "integrity": "sha512-p1diW6TqL9L07nNxvRMM7hMMw4c5XOo/1ibL4aAIGmSAt9slTE1Xgw5KWuof2uTOvCg9BY7ZRi+GaF+7sfgPeQ==", + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/undici-types": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz", + "integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==", + "license": "MIT" + }, + "node_modules/unicorn-magic": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/unicorn-magic/-/unicorn-magic-0.3.0.tgz", + 
"integrity": "sha512-+QBBXBCvifc56fsbuxZQ6Sic3wqqc3WWaqxs58gvJrcOuN83HGTCwz3oS5phzU9LthRNE9VrJCFCLUgHeeFnfA==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/util": { + "version": "0.12.5", + "resolved": "https://registry.npmjs.org/util/-/util-0.12.5.tgz", + "integrity": "sha512-kZf/K6hEIrWHI6XqOFUiiMa+79wE/D8Q+NCNAWclkyg3b4d2k7s0QGepNjiABc+aR3N1PAyHL7p6UcLY6LmrnA==", + "dev": true, + "license": "MIT", + "dependencies": { + "inherits": "^2.0.3", + "is-arguments": "^1.0.4", + "is-generator-function": "^1.0.7", + "is-typed-array": "^1.1.3", + "which-typed-array": "^1.1.2" + } + }, + "node_modules/v8-compile-cache-lib": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/v8-compile-cache-lib/-/v8-compile-cache-lib-3.0.1.tgz", + "integrity": "sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg==", + "dev": true, + "license": "MIT" + }, + "node_modules/validate-npm-package-license": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/validate-npm-package-license/-/validate-npm-package-license-3.0.4.tgz", + "integrity": "sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew==", + "license": "Apache-2.0", + "dependencies": { + "spdx-correct": "^3.0.0", + "spdx-expression-parse": "^3.0.0" + } + }, + "node_modules/viem": { + "version": "2.23.4", + "resolved": "https://registry.npmjs.org/viem/-/viem-2.23.4.tgz", + "integrity": "sha512-UQquuolKlS1w5H5e0Fd1KKoUlIPJryIEBzY5AUhGyV1ka+9O6+3uYVhUzj6RbvGK0PtsMKn2ddwPZFwjNDVU/A==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/wevm" + } + ], + "license": "MIT", + "dependencies": { + "@noble/curves": "1.8.1", + "@noble/hashes": "1.7.1", + "@scure/bip32": "1.6.2", + "@scure/bip39": "1.5.4", + "abitype": "1.0.8", + "isows": "1.0.6", + "ox": "0.6.7", + "ws": "8.18.0" + }, + "peerDependencies": { + "typescript": ">=5.0.4" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/viem/node_modules/ws": { + "version": "8.18.0", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.0.tgz", + "integrity": "sha512-8VbfWfHLbbwu3+N6OKsOMpBdT4kXPDDB9cJk2bJ6mh9ucxdlnNvH1e+roYkKmN9Nxw2yjz7VzeO9oOz2zJ04Pw==", + "license": "MIT", + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": ">=5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + }, + "node_modules/vite": { + "version": "5.4.14", + "resolved": "https://registry.npmjs.org/vite/-/vite-5.4.14.tgz", + "integrity": "sha512-EK5cY7Q1D8JNhSaPKVK4pwBFvaTmZxEnoKXLG/U9gmdDcihQGNzFlgIvaxezFR4glP1LsuiedwMBqCXH3wZccA==", + "dev": true, + "license": "MIT", + "dependencies": { + "esbuild": "^0.21.3", + "postcss": "^8.4.43", + "rollup": "^4.20.0" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": "^18.0.0 || >=20.0.0" + }, + "funding": { + "url": "https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + }, + "peerDependencies": { + "@types/node": "^18.0.0 || >=20.0.0", + "less": "*", + "lightningcss": "^1.21.0", + "sass": "*", + "sass-embedded": "*", + "stylus": "*", + "sugarss": "*", + "terser": "^5.4.0" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "less": { + "optional": true + }, + "lightningcss": { + "optional": true + 
}, + "sass": { + "optional": true + }, + "sass-embedded": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + } + } + }, + "node_modules/vite/node_modules/@esbuild/aix-ppc64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.21.5.tgz", + "integrity": "sha512-1SDgH6ZSPTlggy1yI6+Dbkiz8xzpHJEVAlF/AM1tHPLsf5STom9rwtjE4hKAF20FfXXNTFqEYXyJNWh1GiZedQ==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/android-arm": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.21.5.tgz", + "integrity": "sha512-vCPvzSjpPHEi1siZdlvAlsPxXl7WbOVUBBAowWug4rJHb68Ox8KualB+1ocNvT5fjv6wpkX6o/iEpbDrf68zcg==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/android-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.21.5.tgz", + "integrity": "sha512-c0uX9VAUBQ7dTDCjq+wdyGLowMdtR/GoC2U5IYk/7D1H1JYC0qseD7+11iMP2mRLN9RcCMRcjC4YMclCzGwS/A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/android-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.21.5.tgz", + "integrity": "sha512-D7aPRUUNHRBwHxzxRvp856rjUHRFW1SdQATKXH2hqA0kAZb1hKmi02OpYRacl0TxIGz/ZmXWlbZgjwWYaCakTA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/darwin-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.21.5.tgz", + "integrity": "sha512-DwqXqZyuk5AiWWf3UfLiRDJ5EDd49zg6O9wclZ7kUMv2WRFr4HKjXp/5t8JZ11QbQfUS6/cRCKGwYhtNAY88kQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/darwin-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.21.5.tgz", + "integrity": "sha512-se/JjF8NlmKVG4kNIuyWMV/22ZaerB+qaSi5MdrXtd6R08kvs2qCN4C09miupktDitvh8jRFflwGFBQcxZRjbw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/freebsd-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.21.5.tgz", + "integrity": "sha512-5JcRxxRDUJLX8JXp/wcBCy3pENnCgBR9bN6JsY4OmhfUtIHe3ZW0mawA7+RDAcMLrMIZaf03NlQiX9DGyB8h4g==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/freebsd-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.21.5.tgz", + "integrity": "sha512-J95kNBj1zkbMXtHVH29bBriQygMXqoVQOQYA+ISs0/2l3T9/kj42ow2mpqerRBxDJnmkUDCaQT/dfNXWX/ZZCQ==", + "cpu": [ + "x64" + 
], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/linux-arm": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.21.5.tgz", + "integrity": "sha512-bPb5AHZtbeNGjCKVZ9UGqGwo8EUu4cLq68E95A53KlxAPRmUyYv2D6F0uUI65XisGOL1hBP5mTronbgo+0bFcA==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/linux-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.21.5.tgz", + "integrity": "sha512-ibKvmyYzKsBeX8d8I7MH/TMfWDXBF3db4qM6sy+7re0YXya+K1cem3on9XgdT2EQGMu4hQyZhan7TeQ8XkGp4Q==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/linux-ia32": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.21.5.tgz", + "integrity": "sha512-YvjXDqLRqPDl2dvRODYmmhz4rPeVKYvppfGYKSNGdyZkA01046pLWyRKKI3ax8fbJoK5QbxblURkwK/MWY18Tg==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/linux-loong64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.21.5.tgz", + "integrity": "sha512-uHf1BmMG8qEvzdrzAqg2SIG/02+4/DHB6a9Kbya0XDvwDEKCoC8ZRWI5JJvNdUjtciBGFQ5PuBlpEOXQj+JQSg==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/linux-mips64el": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.21.5.tgz", + "integrity": "sha512-IajOmO+KJK23bj52dFSNCMsz1QP1DqM6cwLUv3W1QwyxkyIWecfafnI555fvSGqEKwjMXVLokcV5ygHW5b3Jbg==", + "cpu": [ + "mips64el" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/linux-ppc64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.21.5.tgz", + "integrity": "sha512-1hHV/Z4OEfMwpLO8rp7CvlhBDnjsC3CttJXIhBi+5Aj5r+MBvy4egg7wCbe//hSsT+RvDAG7s81tAvpL2XAE4w==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/linux-riscv64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.21.5.tgz", + "integrity": "sha512-2HdXDMd9GMgTGrPWnJzP2ALSokE/0O5HhTUvWIbD3YdjME8JwvSCnNGBnTThKGEB91OZhzrJ4qIIxk/SBmyDDA==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/linux-s390x": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.21.5.tgz", + "integrity": "sha512-zus5sxzqBJD3eXxwvjN1yQkRepANgxE9lgOW2qLnmr8ikMTphkjgXu1HR01K4FJg8h1kEEDAqDcZQtbrRnB41A==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + 
"linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/linux-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.21.5.tgz", + "integrity": "sha512-1rYdTpyv03iycF1+BhzrzQJCdOuAOtaqHTWJZCWvijKD2N5Xu0TtVC8/+1faWqcP9iBCWOmjmhoH94dH82BxPQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/netbsd-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.21.5.tgz", + "integrity": "sha512-Woi2MXzXjMULccIwMnLciyZH4nCIMpWQAs049KEeMvOcNADVxo0UBIQPfSmxB3CWKedngg7sWZdLvLczpe0tLg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/openbsd-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.21.5.tgz", + "integrity": "sha512-HLNNw99xsvx12lFBUwoT8EVCsSvRNDVxNpjZ7bPn947b8gJPzeHWyNVhFsaerc0n3TsbOINvRP2byTZ5LKezow==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/sunos-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.21.5.tgz", + "integrity": "sha512-6+gjmFpfy0BHU5Tpptkuh8+uw3mnrvgs+dSPQXQOv3ekbordwnzTVEb4qnIvQcYXq6gzkyTnoZ9dZG+D4garKg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/win32-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.21.5.tgz", + "integrity": "sha512-Z0gOTd75VvXqyq7nsl93zwahcTROgqvuAcYDUr+vOv8uHhNSKROyU961kgtCD1e95IqPKSQKH7tBTslnS3tA8A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/win32-ia32": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.21.5.tgz", + "integrity": "sha512-SWXFF1CL2RVNMaVs+BBClwtfZSvDgtL//G/smwAc5oVK/UPu2Gu9tIaRgFmYFFKrmg3SyAjSrElf0TiJ1v8fYA==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/win32-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.21.5.tgz", + "integrity": "sha512-tQd/1efJuzPC6rCFwEvLtci/xNFcTZknmXs98FYDfGE4wP9ClFV98nyKrzJKVPMhdDnjzLhdUyMX4PsQAPjwIw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/esbuild": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.21.5.tgz", + "integrity": "sha512-mg3OPMV4hXywwpoDxu3Qda5xCKQi+vCTZq8S9J/EpkhB2HzKXq4SNFZE3+NK93JYxc8VMSep+lOUSC/RVKaBqw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=12" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.21.5", + "@esbuild/android-arm": "0.21.5", + 
"@esbuild/android-arm64": "0.21.5", + "@esbuild/android-x64": "0.21.5", + "@esbuild/darwin-arm64": "0.21.5", + "@esbuild/darwin-x64": "0.21.5", + "@esbuild/freebsd-arm64": "0.21.5", + "@esbuild/freebsd-x64": "0.21.5", + "@esbuild/linux-arm": "0.21.5", + "@esbuild/linux-arm64": "0.21.5", + "@esbuild/linux-ia32": "0.21.5", + "@esbuild/linux-loong64": "0.21.5", + "@esbuild/linux-mips64el": "0.21.5", + "@esbuild/linux-ppc64": "0.21.5", + "@esbuild/linux-riscv64": "0.21.5", + "@esbuild/linux-s390x": "0.21.5", + "@esbuild/linux-x64": "0.21.5", + "@esbuild/netbsd-x64": "0.21.5", + "@esbuild/openbsd-x64": "0.21.5", + "@esbuild/sunos-x64": "0.21.5", + "@esbuild/win32-arm64": "0.21.5", + "@esbuild/win32-ia32": "0.21.5", + "@esbuild/win32-x64": "0.21.5" + } + }, + "node_modules/web-streams-polyfill": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-3.3.3.tgz", + "integrity": "sha512-d2JWLCivmZYTSIoge9MsgFCZrt571BikcWGYkjC1khllbTeDlGqZ2D8vD8E/lJa8WGWbb7Plm8/XJYV7IJHZZw==", + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/webidl-conversions": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-4.0.2.tgz", + "integrity": "sha512-YQ+BmxuTgd6UXZW3+ICGfyqRyHXVlD5GtQr5+qjiNW7bF0cqrzX500HVXPBOvgXb5YnzDd+h0zqyv61KUD7+Sg==", + "license": "BSD-2-Clause" + }, + "node_modules/whatwg-url": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-7.1.0.tgz", + "integrity": "sha512-WUu7Rg1DroM7oQvGWfOiAK21n74Gg+T4elXEQYkOhtyLeWiJFoOGLXPKI/9gzIie9CtwVLm8wtw6YJdKyxSjeg==", + "license": "MIT", + "dependencies": { + "lodash.sortby": "^4.7.0", + "tr46": "^1.0.1", + "webidl-conversions": "^4.0.2" + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "license": "ISC", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/which-typed-array": { + "version": "1.1.18", + "resolved": "https://registry.npmjs.org/which-typed-array/-/which-typed-array-1.1.18.tgz", + "integrity": "sha512-qEcY+KJYlWyLH9vNbsr6/5j59AXk5ni5aakf8ldzBvGde6Iz4sxZGkJyWSAueTG7QhOvNRYb1lDdFmL5Td0QKA==", + "dev": true, + "license": "MIT", + "dependencies": { + "available-typed-arrays": "^1.0.7", + "call-bind": "^1.0.8", + "call-bound": "^1.0.3", + "for-each": "^0.3.3", + "gopd": "^1.2.0", + "has-tostringtag": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/workerpool": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/workerpool/-/workerpool-6.5.1.tgz", + "integrity": "sha512-Fs4dNYcsdpYSAfVxhnl1L5zTksjvOJxtC5hzMNl+1t9B8hTJTdKDyZ5ju7ztgPy+ft9tBFXoOlDNiOT9WUXZlA==", + "license": "Apache-2.0" + }, + "node_modules/wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrap-ansi-cjs": { 
+ "name": "wrap-ansi", + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/write-file-atomic": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-5.0.1.tgz", + "integrity": "sha512-+QU2zd6OTD8XWIJCbffaiQeH9U73qIqafo1x6V1snCWYGJf6cVE0cDR4D8xRzcEnfI21IFrUPzPGtcPf8AC+Rw==", + "license": "ISC", + "dependencies": { + "imurmurhash": "^0.1.4", + "signal-exit": "^4.0.1" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/write-json-file": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/write-json-file/-/write-json-file-6.0.0.tgz", + "integrity": "sha512-MNHcU3f9WxnNyR6MxsYSj64Jz0+dwIpisWKWq9gqLj/GwmA9INg3BZ3vt70/HB3GEwrnDQWr4RPrywnhNzmUFA==", + "license": "MIT", + "dependencies": { + "detect-indent": "^7.0.1", + "is-plain-obj": "^4.1.0", + "sort-keys": "^5.0.0", + "write-file-atomic": "^5.0.1" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/write-package": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/write-package/-/write-package-7.1.0.tgz", + "integrity": "sha512-DqUx8GI3r9BFWwU2DPKddL1E7xWfbFED82mLVhGXKlFEPe8IkBftzO7WfNwHtk7oGDHDeuH/o8VMpzzfMwmLUA==", + "license": "MIT", + "dependencies": { + "deepmerge-ts": "^7.1.0", + "read-pkg": "^9.0.1", + "sort-keys": "^5.0.0", + "type-fest": "^4.23.0", + "write-json-file": "^6.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ws": { + "version": "8.18.1", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.1.tgz", + "integrity": "sha512-RKW2aJZMXeMxVpnZ6bck+RswznaxmzdULiBr6KY7XkTnW8uvt0iT9H5DkHUChXrc+uurzwa0rVI16n/Xzjdz1w==", + "license": "MIT", + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": ">=5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + }, + "node_modules/y18n": { + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", + "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + "license": "ISC", + "engines": { + "node": ">=10" + } + }, + "node_modules/yargs": { + "version": "17.7.2", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", + "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", + "license": "MIT", + "dependencies": { + "cliui": "^8.0.1", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.3", + "y18n": "^5.0.5", + "yargs-parser": "^21.1.1" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/yargs-parser": { + "version": "21.1.1", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", + "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", + "license": "ISC", + 
"engines": { + "node": ">=12" + } + }, + "node_modules/yargs-unparser": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/yargs-unparser/-/yargs-unparser-2.0.0.tgz", + "integrity": "sha512-7pRTIA9Qc1caZ0bZ6RYRGbHJthJWuakf+WmHK0rVeLkNrrGhfoabBNdue6kdINI6r4if7ocq9aD/n7xwKOdzOA==", + "license": "MIT", + "dependencies": { + "camelcase": "^6.0.0", + "decamelize": "^4.0.0", + "flat": "^5.0.2", + "is-plain-obj": "^2.1.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/yargs-unparser/node_modules/is-plain-obj": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-2.1.0.tgz", + "integrity": "sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/yn": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yn/-/yn-3.1.1.tgz", + "integrity": "sha512-Ux4ygGWsu2c7isFWe8Yu1YluJmqVhxqK2cLXNQA5AcC3QfbGNpM7fu0Y8b/z16pXLnFxZYvWhd3fhBY9DLmC6Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/yoctocolors": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/yoctocolors/-/yoctocolors-2.1.1.tgz", + "integrity": "sha512-GQHQqAopRhwU8Kt1DDM8NjibDXHC8eoh1erhGAJPEyveY9qqVeXvVikNKrDz69sHowPMorbPUrH/mx8c50eiBQ==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + } + } +} diff --git a/evm-tests/package.json b/evm-tests/package.json index a96a2c4a0c..0e90cdb976 100644 --- a/evm-tests/package.json +++ b/evm-tests/package.json @@ -1,28 +1,28 @@ { "scripts": { - "test": "mocha --timeout 999999 --require ts-node/register test/*test.ts" + "test": "mocha --timeout 999999 --retries 3 --file src/setup.ts --require ts-node/register test/*test.ts" }, "keywords": [], "author": "", "license": "ISC", "dependencies": { - "@polkadot-api/descriptors": "file:.papi/descriptors", "@polkadot-labs/hdkd": "^0.0.10", "@polkadot-labs/hdkd-helpers": "^0.0.11", "@polkadot/api": "15.1.1", + "@types/mocha": "^10.0.10", "crypto": "^1.0.1", "dotenv": "16.4.7", "ethers": "^6.13.5", + "mocha": "^11.1.0", "polkadot-api": "^1.9.5", + "scale-ts": "^1.6.1", "viem": "2.23.4" }, "devDependencies": { "@types/bun": "^1.1.13", "@types/chai": "^5.0.1", - "@types/mocha": "^10.0.10", "assert": "^2.1.0", "chai": "^5.2.0", - "mocha": "^11.1.0", "prettier": "^3.3.3", "ts-node": "^10.9.2", "typescript": "^5.7.2", diff --git a/evm-tests/run-ci.sh b/evm-tests/run-ci.sh new file mode 100755 index 0000000000..cd7acb14af --- /dev/null +++ b/evm-tests/run-ci.sh @@ -0,0 +1,41 @@ +#!/bin/bash + +echo "start run-ci.sh" + +scripts/localnet.sh &>/dev/null & + +i=1 +while [ $i -le 1000 ]; do + if nc -z localhost 9944; then + echo "node subtensor is running after $i seconds" + break + fi + sleep 1 + i=$((i + 1)) +done + +# port not available exit with error +if [ "$i" -eq 1000 ]; then + exit 1 +fi + +cd evm-tests + +yarn + +bash get-metadata.sh + +sleep 5 + +yarn run test +TEST_EXIT_CODE=$? 
+ +if [ $TEST_EXIT_CODE -ne 0 ]; then + echo "Tests failed with exit code $TEST_EXIT_CODE" + pkill node-subtensor + exit $TEST_EXIT_CODE +fi + +pkill node-subtensor + +exit 0 \ No newline at end of file diff --git a/evm-tests/src/contracts/staking.ts b/evm-tests/src/contracts/staking.ts index af4422ca96..0ba37c5a94 100644 --- a/evm-tests/src/contracts/staking.ts +++ b/evm-tests/src/contracts/staking.ts @@ -287,5 +287,71 @@ export const IStakingV2ABI = [ "outputs": [], "stateMutability": "nonpayable", "type": "function" - } + }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "hotkey", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "limit_price", + "type": "uint256" + }, + { + "internalType": "bool", + "name": "allow_partial", + "type": "bool" + }, + { + "internalType": "uint256", + "name": "netuid", + "type": "uint256" + } + ], + "name": "addStakeLimit", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "hotkey", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "limit_price", + "type": "uint256" + }, + { + "internalType": "bool", + "name": "allow_partial", + "type": "bool" + }, + { + "internalType": "uint256", + "name": "netuid", + "type": "uint256" + } + ], + "name": "removeStakeLimit", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, ]; \ No newline at end of file diff --git a/evm-tests/src/contracts/subnet.ts b/evm-tests/src/contracts/subnet.ts index 9b6fe00596..eacdaf3aca 100644 --- a/evm-tests/src/contracts/subnet.ts +++ b/evm-tests/src/contracts/subnet.ts @@ -572,6 +572,43 @@ export const ISubnetABI = [ stateMutability: "view", type: "function", }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + ], + name: "getYuma3Enabled", + outputs: [ + { + internalType: "bool", + name: "", + type: "bool", + }, + ], + stateMutability: "view", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + { + internalType: "bool", + name: "yuma3Enabled", + type: "bool", + }, + ], + name: "setYuma3Enabled", + outputs: [], + stateMutability: "payable", + type: "function", + }, { inputs: [ { @@ -886,4 +923,4 @@ export const ISubnetABI = [ stateMutability: "payable", type: "function" }, -]; \ No newline at end of file +]; diff --git a/evm-tests/src/contracts/uidLookup.ts b/evm-tests/src/contracts/uidLookup.ts new file mode 100644 index 0000000000..06c68805e6 --- /dev/null +++ b/evm-tests/src/contracts/uidLookup.ts @@ -0,0 +1,45 @@ +export const IUID_LOOKUP_ADDRESS = "0x0000000000000000000000000000000000000806"; + +export const IUIDLookupABI = [ + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16" + }, + { + internalType: "address", + name: "evm_address", + type: "address" + }, + { + internalType: "uint16", + name: "limit", + type: "uint16" + } + ], + name: "uidLookup", + outputs: [ + { + components: [ + { + internalType: "uint16", + name: "uid", + type: "uint16" + }, + { + internalType: "uint64", + name: "block_associated", + type: "uint64" + } + ], + internalType: "struct LookupItem[]", + name: "", + type: "tuple[]" + } + ], + stateMutability: "view", + type: "function" + } +]; diff --git a/evm-tests/src/eth.ts b/evm-tests/src/eth.ts index ea3ebb9976..a34e33bc2d 
100644 --- a/evm-tests/src/eth.ts +++ b/evm-tests/src/eth.ts @@ -13,5 +13,4 @@ export async function estimateTransactionCost(provider: Provider, tx: Transactio export function getContract(contractAddress: string, abi: {}[], wallet: Wallet) { const contract = new ethers.Contract(contractAddress, abi, wallet); return contract - -} \ No newline at end of file +} diff --git a/evm-tests/src/setup.ts b/evm-tests/src/setup.ts new file mode 100644 index 0000000000..1ef872cd5a --- /dev/null +++ b/evm-tests/src/setup.ts @@ -0,0 +1,19 @@ + +import { createClient, TypedApi, PolkadotClient, Binary } from 'polkadot-api'; +import { SUB_LOCAL_URL } from "./config" +import { getWsProvider } from 'polkadot-api/ws-provider/web'; + +let client: PolkadotClient | undefined = undefined + +export async function getClient() { + if (client === undefined) { + const provider = getWsProvider(SUB_LOCAL_URL); + client = createClient(provider); + } + return client; +} + +after(() => { + client?.destroy() +}); + diff --git a/evm-tests/src/substrate.ts b/evm-tests/src/substrate.ts index ddfdfb626d..bd6d725d48 100644 --- a/evm-tests/src/substrate.ts +++ b/evm-tests/src/substrate.ts @@ -9,22 +9,16 @@ import { getPolkadotSigner } from "polkadot-api/signer" import { randomBytes } from 'crypto'; import { Keyring } from '@polkadot/keyring'; import { SS58_PREFIX, TX_TIMEOUT } from "./config"; - +import { getClient } from "./setup" let api: TypedApi | undefined = undefined // define url string as type to extend in the future // export type ClientUrlType = 'ws://localhost:9944' | 'wss://test.finney.opentensor.ai:443' | 'wss://dev.chain.opentensor.ai:443' | 'wss://archive.chain.opentensor.ai'; export type ClientUrlType = 'ws://localhost:9944' -export async function getClient(url: ClientUrlType) { - const provider = getWsProvider(url); - const client = createClient(provider); - return client -} - export async function getDevnetApi() { if (api === undefined) { - let client = await getClient('ws://localhost:9944') + let client = await getClient() api = client.getTypedApi(devnet) } return api @@ -126,6 +120,29 @@ export function convertPublicKeyToMultiAddress(publicKey: Uint8Array, ss58Format return MultiAddress.Id(address); } +export async function waitForTransactionWithRetry( + api: TypedApi, + tx: Transaction<{}, string, string, void>, + signer: PolkadotSigner, + ) { + let success = false; + let retries = 0; + + // set max retries times + while (!success && retries < 5) { + await waitForTransactionCompletion(api, tx, signer) + .then(() => {success = true}) + .catch((error) => { + console.log(`transaction error ${error}`); + }); + await new Promise((resolve) => setTimeout(resolve, 1000)); + retries += 1; + } + + if (!success) { + console.log("Transaction failed after 5 retries"); + } + } export async function waitForTransactionCompletion(api: TypedApi, tx: Transaction<{}, string, string, void>, signer: PolkadotSigner,) { const transactionPromise = await getTransactionWatchPromise(tx, signer) @@ -156,6 +173,9 @@ export async function getTransactionWatchPromise(tx: Transaction<{}, string, str if (value.type === "finalized") { console.log("Transaction is finalized in block:", value.txHash); subscription.unsubscribe(); + if (!value.ok) { + console.log("Transaction threw an error:", value.dispatchError) + } // Resolve the promise when the transaction is finalized resolve(); diff --git a/evm-tests/src/subtensor.ts b/evm-tests/src/subtensor.ts index 48dc5c83c7..3111d90544 100644 --- a/evm-tests/src/subtensor.ts +++ 
b/evm-tests/src/subtensor.ts @@ -2,9 +2,10 @@ import * as assert from "assert"; import { devnet, MultiAddress } from '@polkadot-api/descriptors'; import { TypedApi, TxCallData } from 'polkadot-api'; import { KeyPair } from "@polkadot-labs/hdkd-helpers" -import { getAliceSigner, waitForTransactionCompletion, getSignerFromKeypair } from './substrate' +import { getAliceSigner, waitForTransactionCompletion, getSignerFromKeypair, waitForTransactionWithRetry } from './substrate' import { convertH160ToSS58, convertPublicKeyToSs58 } from './address-utils' import { tao } from './balance-math' +import internal from "stream"; // create a new subnet and return netuid export async function addNewSubnetwork(api: TypedApi, hotkey: KeyPair, coldkey: KeyPair) { @@ -15,18 +16,16 @@ export async function addNewSubnetwork(api: TypedApi, hotkey: Key if (rateLimit !== BigInt(0)) { const internalCall = api.tx.AdminUtils.sudo_set_network_rate_limit({ rate_limit: BigInt(0) }) const tx = api.tx.Sudo.sudo({ call: internalCall.decodedCall }) - await waitForTransactionCompletion(api, tx, alice) - .then(() => { }) - .catch((error) => { console.log(`transaction error ${error}`) }); + await waitForTransactionWithRetry(api, tx, alice) } const signer = getSignerFromKeypair(coldkey) const registerNetworkTx = api.tx.SubtensorModule.register_network({ hotkey: convertPublicKeyToSs58(hotkey.publicKey) }) - await waitForTransactionCompletion(api, registerNetworkTx, signer) - .then(() => { }) - .catch((error) => { console.log(`transaction error ${error}`) }); + await waitForTransactionWithRetry(api, registerNetworkTx, signer) - assert.equal(totalNetworks + 1, await api.query.SubtensorModule.TotalNetworks.getValue()) + const newTotalNetworks = await api.query.SubtensorModule.TotalNetworks.getValue() + // could create multiple subnetworks during retry, just return the first created one + assert.ok(newTotalNetworks > totalNetworks) return totalNetworks } @@ -37,9 +36,7 @@ export async function forceSetBalanceToSs58Address(api: TypedApi, const internalCall = api.tx.Balances.force_set_balance({ who: MultiAddress.Id(ss58Address), new_free: balance }) const tx = api.tx.Sudo.sudo({ call: internalCall.decodedCall }) - await waitForTransactionCompletion(api, tx, alice) - .then(() => { }) - .catch((error) => { console.log(`transaction error ${error}`) }); + await waitForTransactionWithRetry(api, tx, alice) const balanceOnChain = (await api.query.System.Account.getValue(ss58Address)).data.free // check the balance except for sudo account becasue of tx fee @@ -64,9 +61,7 @@ export async function setCommitRevealWeightsEnabled(api: TypedApi const internalCall = api.tx.AdminUtils.sudo_set_commit_reveal_weights_enabled({ netuid: netuid, enabled: enabled }) const tx = api.tx.Sudo.sudo({ call: internalCall.decodedCall }) - await waitForTransactionCompletion(api, tx, alice) - .then(() => { }) - .catch((error) => { console.log(`transaction error ${error}`) }); + await waitForTransactionWithRetry(api, tx, alice) assert.equal(enabled, await api.query.SubtensorModule.CommitRevealWeightsEnabled.getValue(netuid)) } @@ -80,9 +75,7 @@ export async function setWeightsSetRateLimit(api: TypedApi, netui const internalCall = api.tx.AdminUtils.sudo_set_weights_set_rate_limit({ netuid: netuid, weights_set_rate_limit: rateLimit }) const tx = api.tx.Sudo.sudo({ call: internalCall.decodedCall }) - await waitForTransactionCompletion(api, tx, alice) - .then(() => { }) - .catch((error) => { console.log(`transaction error ${error}`) }); + await 
waitForTransactionWithRetry(api, tx, alice) assert.equal(rateLimit, await api.query.SubtensorModule.WeightsSetRateLimit.getValue(netuid)) } @@ -98,9 +91,7 @@ export async function setTempo(api: TypedApi, netuid: number, tem const internalCall = api.tx.AdminUtils.sudo_set_tempo({ netuid: netuid, tempo: tempo }) const tx = api.tx.Sudo.sudo({ call: internalCall.decodedCall }) - await waitForTransactionCompletion(api, tx, alice) - .then(() => { }) - .catch((error) => { console.log(`transaction error ${error}`) }); + await waitForTransactionWithRetry(api, tx, alice) assert.equal(tempo, await api.query.SubtensorModule.Tempo.getValue(netuid)) } @@ -114,9 +105,7 @@ export async function setCommitRevealWeightsInterval(api: TypedApi { }) - .catch((error) => { console.log(`transaction error ${error}`) }); + await waitForTransactionWithRetry(api, tx, alice) assert.equal(interval, await api.query.SubtensorModule.RevealPeriodEpochs.getValue(netuid)) } @@ -131,9 +120,7 @@ export async function forceSetChainID(api: TypedApi, chainId: big const internalCall = api.tx.AdminUtils.sudo_set_evm_chain_id({ chain_id: chainId }) const tx = api.tx.Sudo.sudo({ call: internalCall.decodedCall }) - await waitForTransactionCompletion(api, tx, alice) - .then(() => { }) - .catch((error) => { console.log(`transaction error ${error}`) }); + await waitForTransactionWithRetry(api, tx, alice) assert.equal(chainId, await api.query.EVMChainId.ChainId.getValue()) } @@ -147,19 +134,23 @@ export async function disableWhiteListCheck(api: TypedApi, disabl const internalCall = api.tx.EVM.disable_whitelist({ disabled: disabled }) const tx = api.tx.Sudo.sudo({ call: internalCall.decodedCall }) - await waitForTransactionCompletion(api, tx, alice) - .then(() => { }) - .catch((error) => { console.log(`transaction error ${error}`) }); + await waitForTransactionWithRetry(api, tx, alice) assert.equal(disabled, await api.query.EVM.DisableWhitelistCheck.getValue()) } export async function burnedRegister(api: TypedApi, netuid: number, ss58Address: string, keypair: KeyPair) { + const registered = await api.query.SubtensorModule.Uids.getValue(netuid, ss58Address); + // just return if already registered + if (registered !== undefined) { + console.log("hotkey ", ss58Address, " already registered in netuid ", netuid) + return; + } + + await new Promise((resolve) => setTimeout(resolve, 1000)); const uids = await api.query.SubtensorModule.SubnetworkN.getValue(netuid) const signer = getSignerFromKeypair(keypair) const tx = api.tx.SubtensorModule.burned_register({ hotkey: ss58Address, netuid: netuid }) - await waitForTransactionCompletion(api, tx, signer) - .then(() => { }) - .catch((error) => { console.log(`transaction error ${error}`) }); + await waitForTransactionWithRetry(api, tx, signer) assert.equal(uids + 1, await api.query.SubtensorModule.SubnetworkN.getValue(netuid)) } @@ -171,9 +162,7 @@ export async function sendProxyCall(api: TypedApi, calldata: TxCa real: MultiAddress.Id(ss58Address), force_proxy_type: undefined }); - await waitForTransactionCompletion(api, tx, signer) - .then(() => { }) - .catch((error) => { console.log(`transaction error ${error}`) }); + await waitForTransactionWithRetry(api, tx, signer) } @@ -188,10 +177,7 @@ export async function setTxRateLimit(api: TypedApi, txRateLimit: const tx = api.tx.Sudo.sudo({ call: internalCall.decodedCall }) - await waitForTransactionCompletion(api, tx, alice) - .then(() => { }) - .catch((error) => { console.log(`transaction error ${error}`) }); - assert.equal(txRateLimit, await 
api.query.SubtensorModule.TxRateLimit.getValue()) + await waitForTransactionWithRetry(api, tx, alice) } export async function setMaxAllowedValidators(api: TypedApi, netuid: number, maxAllowedValidators: number) { @@ -208,9 +194,7 @@ export async function setMaxAllowedValidators(api: TypedApi, netu }) const tx = api.tx.Sudo.sudo({ call: internalCall.decodedCall }) - await waitForTransactionCompletion(api, tx, alice) - .then(() => { }) - .catch((error) => { console.log(`transaction error ${error}`) }); + await waitForTransactionWithRetry(api, tx, alice) assert.equal(maxAllowedValidators, await api.query.SubtensorModule.MaxAllowedValidators.getValue(netuid)) } @@ -227,9 +211,7 @@ export async function setSubnetOwnerCut(api: TypedApi, subnetOwne }) const tx = api.tx.Sudo.sudo({ call: internalCall.decodedCall }) - await waitForTransactionCompletion(api, tx, alice) - .then(() => { }) - .catch((error) => { console.log(`transaction error ${error}`) }); + await waitForTransactionWithRetry(api, tx, alice) assert.equal(subnetOwnerCut, await api.query.SubtensorModule.SubnetOwnerCut.getValue()) } @@ -247,9 +229,7 @@ export async function setActivityCutoff(api: TypedApi, netuid: nu }) const tx = api.tx.Sudo.sudo({ call: internalCall.decodedCall }) - await waitForTransactionCompletion(api, tx, alice) - .then(() => { }) - .catch((error) => { console.log(`transaction error ${error}`) }); + await waitForTransactionWithRetry(api, tx, alice) assert.equal(activityCutoff, await api.query.SubtensorModule.ActivityCutoff.getValue(netuid)) } @@ -267,9 +247,7 @@ export async function setMaxAllowedUids(api: TypedApi, netuid: nu }) const tx = api.tx.Sudo.sudo({ call: internalCall.decodedCall }) - await waitForTransactionCompletion(api, tx, alice) - .then(() => { }) - .catch((error) => { console.log(`transaction error ${error}`) }); + await waitForTransactionWithRetry(api, tx, alice) assert.equal(maxAllowedUids, await api.query.SubtensorModule.MaxAllowedUids.getValue(netuid)) } @@ -286,39 +264,32 @@ export async function setMinDelegateTake(api: TypedApi, minDelega }) const tx = api.tx.Sudo.sudo({ call: internalCall.decodedCall }) - await waitForTransactionCompletion(api, tx, alice) - .then(() => { }) - .catch((error) => { console.log(`transaction error ${error}`) }); + await waitForTransactionWithRetry(api, tx, alice) assert.equal(minDelegateTake, await api.query.SubtensorModule.MinDelegateTake.getValue()) } export async function becomeDelegate(api: TypedApi, ss58Address: string, keypair: KeyPair) { - const singer = getSignerFromKeypair(keypair) + const signer = getSignerFromKeypair(keypair) const tx = api.tx.SubtensorModule.become_delegate({ hotkey: ss58Address }) - await waitForTransactionCompletion(api, tx, singer) - .then(() => { }) - .catch((error) => { console.log(`transaction error ${error}`) }); + await waitForTransactionWithRetry(api, tx, signer) } export async function addStake(api: TypedApi, netuid: number, ss58Address: string, amount_staked: bigint, keypair: KeyPair) { - const singer = getSignerFromKeypair(keypair) + const signer = getSignerFromKeypair(keypair) let tx = api.tx.SubtensorModule.add_stake({ netuid: netuid, hotkey: ss58Address, amount_staked: amount_staked }) - await waitForTransactionCompletion(api, tx, singer) - .then(() => { }) - .catch((error) => { console.log(`transaction error ${error}`) }); - + await waitForTransactionWithRetry(api, tx, signer) } export async function setWeight(api: TypedApi, netuid: number, dests: number[], weights: number[], version_key: bigint, keypair: KeyPair) { - 
const singer = getSignerFromKeypair(keypair) + const signer = getSignerFromKeypair(keypair) let tx = api.tx.SubtensorModule.set_weights({ netuid: netuid, dests: dests, @@ -326,20 +297,50 @@ export async function setWeight(api: TypedApi, netuid: number, de version_key: version_key }) - await waitForTransactionCompletion(api, tx, singer) - .then(() => { }) - .catch((error) => { console.log(`transaction error ${error}`) }); - + await waitForTransactionWithRetry(api, tx, signer) } export async function rootRegister(api: TypedApi, ss58Address: string, keypair: KeyPair) { - const singer = getSignerFromKeypair(keypair) + const signer = getSignerFromKeypair(keypair) let tx = api.tx.SubtensorModule.root_register({ hotkey: ss58Address }) - await waitForTransactionCompletion(api, tx, singer) - .then(() => { }) - .catch((error) => { console.log(`transaction error ${error}`) }); + await waitForTransactionWithRetry(api, tx, signer) +} + +export async function setSubtokenEnable(api: TypedApi, netuid: number, subtokenEnable: boolean) { + const signer = getAliceSigner() + let internalTx = api.tx.AdminUtils.sudo_set_subtoken_enabled({ + netuid: netuid, + subtoken_enabled: subtokenEnable + }) + let tx = api.tx.Sudo.sudo({ call: internalTx.decodedCall }) + + await waitForTransactionWithRetry(api, tx, signer) +} + +export async function startCall(api: TypedApi, netuid: number, keypair: KeyPair) { + const registerBlock = Number(await api.query.SubtensorModule.NetworkRegisteredAt.getValue(netuid)) + let currentBlock = await api.query.System.Number.getValue() + const duration = Number(await api.constants.SubtensorModule.DurationOfStartCall) + + while (currentBlock - registerBlock <= duration) { + await new Promise((resolve) => setTimeout(resolve, 2000)); + currentBlock = await api.query.System.Number.getValue() + } + // wait for chain to run coinbase + await new Promise((resolve) => setTimeout(resolve, 2000)); + + const signer = getSignerFromKeypair(keypair) + let tx = api.tx.SubtensorModule.start_call({ + netuid: netuid, + }) + + await waitForTransactionWithRetry(api, tx, signer) + await new Promise((resolve) => setTimeout(resolve, 1000)); + const callStarted = await api.query.SubtensorModule.FirstEmissionBlockNumber + .getValue(netuid); + assert.notEqual(callStarted, undefined); } \ No newline at end of file diff --git a/evm-tests/src/utils.ts b/evm-tests/src/utils.ts index 36e922b49e..1ba191d833 100644 --- a/evm-tests/src/utils.ts +++ b/evm-tests/src/utils.ts @@ -2,6 +2,8 @@ import { defineChain, http, publicActions, createPublicClient } from "viem" import { privateKeyToAccount, generatePrivateKey } from 'viem/accounts' import { ethers } from "ethers" import { ETH_LOCAL_URL } from "./config" +import { FixedSizeBinary } from "polkadot-api"; +import { hexToU8a } from "@polkadot/util"; export type ClientUrlType = 'http://localhost:9944'; @@ -52,4 +54,16 @@ export function generateRandomEthersWallet() { const wallet = new ethers.Wallet(account.privateKey, provider); return wallet; -} \ No newline at end of file +} + +export function convertToFixedSizeBinary(hexString: string, size: T): FixedSizeBinary { + // Convert hex string to a byte array + const byteArray = hexToU8a(hexString); + + // Ensure the byte array is exactly the specified size + if (byteArray.length !== size) { + throw new Error(`The provided string "${hexString}" does not convert to exactly ${size} bytes.`); + } + + return new FixedSizeBinary(byteArray); +} diff --git a/evm-tests/test/eth.chain-id.test.ts b/evm-tests/test/eth.chain-id.test.ts index 
09174c1212..2e1c18d3d4 100644 --- a/evm-tests/test/eth.chain-id.test.ts +++ b/evm-tests/test/eth.chain-id.test.ts @@ -2,7 +2,7 @@ import * as assert from "assert"; import * as chai from "chai"; -import { getDevnetApi, waitForTransactionCompletion, getRandomSubstrateKeypair } from "../src/substrate" +import { getDevnetApi, waitForTransactionWithRetry, getRandomSubstrateKeypair } from "../src/substrate" import { generateRandomEthWallet, getPublicClient } from "../src/utils"; import { convertPublicKeyToSs58 } from "../src/address-utils" import { ETH_LOCAL_URL } from "../src/config"; @@ -64,9 +64,7 @@ describe("Test the EVM chain ID", () => { ) let tx = api.tx.AdminUtils.sudo_set_evm_chain_id({ chain_id: BigInt(100) }) - await waitForTransactionCompletion(api, tx, signer) - .then(() => { }) - .catch((error) => { console.log(`transaction error ${error}`) }); + await waitForTransactionWithRetry(api, tx, signer) // extrinsic should be failed and chain ID not updated. chainId = await ethClient.getChainId(); diff --git a/evm-tests/test/eth.incremental.deploy.test.ts b/evm-tests/test/eth.incremental.deploy.test.ts index c22187538d..49571b508a 100644 --- a/evm-tests/test/eth.incremental.deploy.test.ts +++ b/evm-tests/test/eth.incremental.deploy.test.ts @@ -14,7 +14,7 @@ import { toViemAddress } from "../src/address-utils"; import { ethers } from "ethers" import { disableWhiteListCheck, forceSetBalanceToEthAddress } from "../src/subtensor"; -describe("bridge token contract deployment", () => { +describe("incremental smart contract deployment", () => { // init eth part const wallet = generateRandomEthersWallet(); let publicClient: PublicClient; diff --git a/evm-tests/test/eth.substrate-transfer.test.ts b/evm-tests/test/eth.substrate-transfer.test.ts index 9e3a2b2050..f039120114 100644 --- a/evm-tests/test/eth.substrate-transfer.test.ts +++ b/evm-tests/test/eth.substrate-transfer.test.ts @@ -1,6 +1,6 @@ import * as assert from "assert"; -import { getDevnetApi, waitForTransactionCompletion, getRandomSubstrateSigner, } from "../src/substrate" +import { getDevnetApi, waitForTransactionCompletion, getRandomSubstrateSigner, waitForTransactionWithRetry} from "../src/substrate" import { getPublicClient } from "../src/utils"; import { ETH_LOCAL_URL, IBALANCETRANSFER_ADDRESS, IBalanceTransferABI } from "../src/config"; import { devnet, MultiAddress } from "@polkadot-api/descriptors" @@ -66,10 +66,7 @@ describe("Balance transfers between substrate and EVM", () => { const transferBalance = tao(1) const tx = api.tx.Balances.transfer_keep_alive({ value: transferBalance, dest: MultiAddress.Id(ss58Address) }) - await waitForTransactionCompletion(api, tx, signer) - .then(() => { }) - .catch((error) => { console.log(`transaction error ${error}`) }); - + await waitForTransactionWithRetry(api, tx, signer) const senderBalanceAfterTransfer = (await api.query.System.Account.getValue(ss58Address)).data.free const receiverBalanceAfterTranser = await publicClient.getBalance({ address: toViemAddress(wallet.address) }) @@ -112,9 +109,7 @@ describe("Balance transfers between substrate and EVM", () => { const tx = api.tx.EVM.withdraw({ address: ethAddresss, value: tao(1) }) const txFee = (await tx.getPaymentInfo(ss58Address)).partial_fee - await waitForTransactionCompletion(api, tx, signer) - .then(() => { }) - .catch((error) => { console.log(`transaction error ${error}`) }); + await waitForTransactionWithRetry(api, tx, signer) const senderBalanceAfterWithdraw = (await api.query.System.Account.getValue(ss58Address)).data.free @@ 
-155,10 +150,7 @@ describe("Balance transfers between substrate and EVM", () => { // txFee not accurate const txFee = (await tx.getPaymentInfo(ss58Address)).partial_fee - await waitForTransactionCompletion(api, tx, signer) - .then(() => { }) - .catch((error) => { console.log(`transaction error ${error}`) }); - + await waitForTransactionWithRetry(api, tx, signer) const receiverBalanceAfterCall = await publicClient.getBalance({ address: toViemAddress(wallet.address) }) assert.equal(receiverBalanceAfterCall, receiverBalance + raoToEth(tao(1))) diff --git a/evm-tests/test/metagraph.precompile.test.ts b/evm-tests/test/metagraph.precompile.test.ts index 94c0df8861..18ab4bd421 100644 --- a/evm-tests/test/metagraph.precompile.test.ts +++ b/evm-tests/test/metagraph.precompile.test.ts @@ -1,15 +1,15 @@ import * as assert from "assert"; -import { getAliceSigner, getClient, getDevnetApi, waitForTransactionCompletion, convertPublicKeyToMultiAddress, getRandomSubstrateKeypair, getSignerFromKeypair } from "../src/substrate" +import { getAliceSigner, getDevnetApi, convertPublicKeyToMultiAddress, getRandomSubstrateKeypair, getSignerFromKeypair, waitForTransactionWithRetry } from "../src/substrate" import { getPublicClient, } from "../src/utils"; -import { ETH_LOCAL_URL, SUB_LOCAL_URL, } from "../src/config"; +import { ETH_LOCAL_URL } from "../src/config"; import { devnet } from "@polkadot-api/descriptors" import { PublicClient } from "viem"; import { PolkadotSigner, TypedApi } from "polkadot-api"; import { toViemAddress, convertPublicKeyToSs58 } from "../src/address-utils" import { IMetagraphABI, IMETAGRAPH_ADDRESS } from "../src/contracts/metagraph" -describe("Test the EVM chain ID", () => { +describe("Test the Metagraph precompile", () => { // init substrate part const hotkey = getRandomSubstrateKeypair(); const coldkey = getRandomSubstrateKeypair(); @@ -26,7 +26,6 @@ describe("Test the EVM chain ID", () => { before(async () => { // init variables got from await and async publicClient = await getPublicClient(ETH_LOCAL_URL) - const subClient = await getClient(SUB_LOCAL_URL) api = await getDevnetApi() alice = await getAliceSigner(); @@ -35,7 +34,7 @@ describe("Test the EVM chain ID", () => { const internalCall = api.tx.Balances.force_set_balance({ who: multiAddress, new_free: BigInt(1e12) }) const tx = api.tx.Sudo.sudo({ call: internalCall.decodedCall }) - await waitForTransactionCompletion(api, tx, alice) + await waitForTransactionWithRetry(api, tx, alice) .then(() => { }) .catch((error) => { console.log(`transaction error ${error}`) }); } @@ -45,14 +44,14 @@ describe("Test the EVM chain ID", () => { const internalCall = api.tx.Balances.force_set_balance({ who: multiAddress, new_free: BigInt(1e12) }) const tx = api.tx.Sudo.sudo({ call: internalCall.decodedCall }) - await waitForTransactionCompletion(api, tx, alice) + await waitForTransactionWithRetry(api, tx, alice) .then(() => { }) .catch((error) => { console.log(`transaction error ${error}`) }); } const signer = getSignerFromKeypair(coldkey) const registerNetworkTx = api.tx.SubtensorModule.register_network({ hotkey: convertPublicKeyToSs58(hotkey.publicKey) }) - await waitForTransactionCompletion(api, registerNetworkTx, signer) + await waitForTransactionWithRetry(api, registerNetworkTx, signer) .then(() => { }) .catch((error) => { console.log(`transaction error ${error}`) }); @@ -64,7 +63,7 @@ describe("Test the EVM chain ID", () => { await api.query.SubtensorModule.SubnetworkN.getValue(subnetId) if (uid_count === 0) { const tx = 
api.tx.SubtensorModule.burned_register({ hotkey: convertPublicKeyToSs58(hotkey.publicKey), netuid: subnetId }) - await waitForTransactionCompletion(api, tx, signer) + await waitForTransactionWithRetry(api, tx, signer) .then(() => { }) .catch((error) => { console.log(`transaction error ${error}`) }); } diff --git a/evm-tests/test/neuron.precompile.emission-check.test.ts b/evm-tests/test/neuron.precompile.emission-check.test.ts index ac609c1e27..e54cb1ec88 100644 --- a/evm-tests/test/neuron.precompile.emission-check.test.ts +++ b/evm-tests/test/neuron.precompile.emission-check.test.ts @@ -1,8 +1,8 @@ import * as assert from "assert"; -import { getAliceSigner, getClient, getDevnetApi, getRandomSubstrateKeypair } from "../src/substrate" +import { getAliceSigner, getDevnetApi, getRandomSubstrateKeypair } from "../src/substrate" import { getPublicClient, } from "../src/utils"; -import { ETH_LOCAL_URL, SUB_LOCAL_URL, } from "../src/config"; +import { ETH_LOCAL_URL } from "../src/config"; import { devnet } from "@polkadot-api/descriptors" import { PublicClient } from "viem"; import { PolkadotSigner, TypedApi } from "polkadot-api"; @@ -10,9 +10,9 @@ import { convertPublicKeyToSs58, } from "../src/address-utils" import { ethers } from "ethers" import { INEURON_ADDRESS, INeuronABI } from "../src/contracts/neuron" import { generateRandomEthersWallet } from "../src/utils" -import { forceSetBalanceToSs58Address, forceSetBalanceToEthAddress, addNewSubnetwork } from "../src/subtensor" +import { forceSetBalanceToSs58Address, forceSetBalanceToEthAddress, addNewSubnetwork, startCall, setSubtokenEnable } from "../src/subtensor" -describe("Test the EVM chain ID", () => { +describe("Test the Neuron precompile with emission", () => { // init eth part const wallet = generateRandomEthersWallet(); @@ -30,7 +30,6 @@ describe("Test the EVM chain ID", () => { before(async () => { // init variables got from await and async publicClient = await getPublicClient(ETH_LOCAL_URL) - const subClient = await getClient(SUB_LOCAL_URL) api = await getDevnetApi() alice = await getAliceSigner(); await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(hotkey.publicKey)) @@ -40,11 +39,13 @@ describe("Test the EVM chain ID", () => { await forceSetBalanceToEthAddress(api, wallet.address) const netuid = await addNewSubnetwork(api, hotkey2, coldkey) + await startCall(api, netuid, coldkey) console.log("test on subnet ", netuid) }) it("Burned register and check emission", async () => { let netuid = (await api.query.SubtensorModule.TotalNetworks.getValue()) - 1 + const uid = await api.query.SubtensorModule.SubnetworkN.getValue(netuid) const contract = new ethers.Contract(INEURON_ADDRESS, INeuronABI, wallet); diff --git a/evm-tests/test/neuron.precompile.reveal-weights.test.ts b/evm-tests/test/neuron.precompile.reveal-weights.test.ts index 85125f0956..8045ac18f1 100644 --- a/evm-tests/test/neuron.precompile.reveal-weights.test.ts +++ b/evm-tests/test/neuron.precompile.reveal-weights.test.ts @@ -12,7 +12,8 @@ import { convertH160ToPublicKey } from "../src/address-utils" import { blake2AsU8a } from "@polkadot/util-crypto" import { forceSetBalanceToEthAddress, forceSetBalanceToSs58Address, addNewSubnetwork, setCommitRevealWeightsEnabled, setWeightsSetRateLimit, burnedRegister, - setTempo, setCommitRevealWeightsInterval + setTempo, setCommitRevealWeightsInterval, + startCall } from "../src/subtensor" // hardcode some values for reveal hash @@ -64,6 +65,7 @@ describe("Test neuron precompile reveal weights", () => { await 
forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(coldkey.publicKey)) await forceSetBalanceToEthAddress(api, wallet.address) let netuid = await addNewSubnetwork(api, hotkey, coldkey) + await startCall(api, netuid, coldkey) console.log("test the case on subnet ", netuid) @@ -97,9 +99,10 @@ describe("Test neuron precompile reveal weights", () => { if (weightsCommit === undefined) { throw new Error("submit weights failed") } - assert.ok(weightsCommit.length > 0) + else { assert.ok(weightsCommit.length > 0) } }) + // Temporarily disable it, there is a type error in CI. it("EVM neuron reveal weights via call precompile", async () => { let totalNetworks = await api.query.SubtensorModule.TotalNetworks.getValue() const netuid = totalNetworks - 1 @@ -129,11 +132,16 @@ describe("Test neuron precompile reveal weights", () => { ss58Address ) + if (neuron_uid === undefined) { + throw new Error("neuron_uid not available onchain or invalid type") + } + const weights = await api.query.SubtensorModule.Weights.getValue(netuid, neuron_uid) - if (weights === undefined) { - throw new Error("weights not available onchain") + if (weights === undefined || !Array.isArray(weights)) { + throw new Error("weights not available onchain or invalid type") } + for (const weight of weights) { assert.equal(weight[0], neuron_uid) assert.ok(weight[1] !== undefined) diff --git a/evm-tests/test/neuron.precompile.serve.axon-prometheus.test.ts b/evm-tests/test/neuron.precompile.serve.axon-prometheus.test.ts index aee84f130c..a80d79d486 100644 --- a/evm-tests/test/neuron.precompile.serve.axon-prometheus.test.ts +++ b/evm-tests/test/neuron.precompile.serve.axon-prometheus.test.ts @@ -1,13 +1,12 @@ import * as assert from "assert"; -import { getAliceSigner, getClient, getDevnetApi, getRandomSubstrateKeypair } from "../src/substrate" -import { SUB_LOCAL_URL, } from "../src/config"; +import { getAliceSigner, getDevnetApi, getRandomSubstrateKeypair } from "../src/substrate" import { devnet } from "@polkadot-api/descriptors" import { PolkadotSigner, TypedApi } from "polkadot-api"; import { convertPublicKeyToSs58, convertH160ToSS58 } from "../src/address-utils" import { ethers } from "ethers" import { INEURON_ADDRESS, INeuronABI } from "../src/contracts/neuron" import { generateRandomEthersWallet } from "../src/utils" -import { forceSetBalanceToEthAddress, forceSetBalanceToSs58Address, addNewSubnetwork, burnedRegister } from "../src/subtensor" +import { forceSetBalanceToEthAddress, forceSetBalanceToSs58Address, addNewSubnetwork, burnedRegister, startCall } from "../src/subtensor" describe("Test neuron precompile Serve Axon Prometheus", () => { // init eth part @@ -25,7 +24,6 @@ describe("Test neuron precompile Serve Axon Prometheus", () => { let alice: PolkadotSigner; before(async () => { // init variables got from await and async - const subClient = await getClient(SUB_LOCAL_URL) api = await getDevnetApi() alice = await getAliceSigner(); @@ -36,6 +34,7 @@ describe("Test neuron precompile Serve Axon Prometheus", () => { await forceSetBalanceToEthAddress(api, wallet2.address) await forceSetBalanceToEthAddress(api, wallet3.address) let netuid = await addNewSubnetwork(api, hotkey, coldkey) + await startCall(api, netuid, coldkey) console.log("test the case on subnet ", netuid) diff --git a/evm-tests/test/neuron.precompile.set-weights.test.ts b/evm-tests/test/neuron.precompile.set-weights.test.ts index 393c2b97b8..1c9f62e773 100644 --- a/evm-tests/test/neuron.precompile.set-weights.test.ts +++ 
b/evm-tests/test/neuron.precompile.set-weights.test.ts @@ -9,7 +9,8 @@ import { INEURON_ADDRESS, INeuronABI } from "../src/contracts/neuron" import { generateRandomEthersWallet } from "../src/utils" import { forceSetBalanceToSs58Address, forceSetBalanceToEthAddress, addNewSubnetwork, burnedRegister, setCommitRevealWeightsEnabled, - setWeightsSetRateLimit + setWeightsSetRateLimit, + startCall } from "../src/subtensor" describe("Test neuron precompile contract, set weights function", () => { @@ -31,6 +32,7 @@ describe("Test neuron precompile contract, set weights function", () => { await forceSetBalanceToEthAddress(api, wallet.address) const netuid = await addNewSubnetwork(api, hotkey, coldkey) + await startCall(api, netuid, coldkey) console.log("test on subnet ", netuid) await burnedRegister(api, netuid, convertH160ToSS58(wallet.address), coldkey) @@ -53,13 +55,17 @@ describe("Test neuron precompile contract, set weights function", () => { const tx = await contract.setWeights(netuid, dests, weights, version_key); await tx.wait(); - const weightsOnChain = await api.query.SubtensorModule.Weights.getValue(netuid, uid) + if (uid === undefined) { + throw new Error("uid not get on chain") + } else { + const weightsOnChain = await api.query.SubtensorModule.Weights.getValue(netuid, uid) - weightsOnChain.forEach((weight, _) => { - const uidInWeight = weight[0]; - const value = weight[1]; - assert.equal(uidInWeight, uid) - assert.ok(value > 0) - }); + weightsOnChain.forEach((weight, _) => { + const uidInWeight = weight[0]; + const value = weight[1]; + assert.equal(uidInWeight, uid) + assert.ok(value > 0) + }); + } }) }); \ No newline at end of file diff --git a/evm-tests/test/staking.precompile.add-remove.test.ts b/evm-tests/test/staking.precompile.add-remove.test.ts index 5387e62428..91bece4c0e 100644 --- a/evm-tests/test/staking.precompile.add-remove.test.ts +++ b/evm-tests/test/staking.precompile.add-remove.test.ts @@ -10,12 +10,13 @@ import { convertH160ToPublicKey } from "../src/address-utils" import { forceSetBalanceToEthAddress, forceSetBalanceToSs58Address, addNewSubnetwork, burnedRegister, sendProxyCall, + startCall, } from "../src/subtensor" import { ETH_LOCAL_URL } from "../src/config"; import { ISTAKING_ADDRESS, ISTAKING_V2_ADDRESS, IStakingABI, IStakingV2ABI } from "../src/contracts/staking" import { PublicClient } from "viem"; -describe("Test neuron precompile reveal weights", () => { +describe("Test neuron precompile add remove stake", () => { // init eth part const wallet1 = generateRandomEthersWallet(); const wallet2 = generateRandomEthersWallet(); @@ -41,6 +42,7 @@ describe("Test neuron precompile reveal weights", () => { await forceSetBalanceToEthAddress(api, wallet1.address) await forceSetBalanceToEthAddress(api, wallet2.address) let netuid = await addNewSubnetwork(api, hotkey, coldkey) + await startCall(api, netuid, coldkey) console.log("test the case on subnet ", netuid) diff --git a/evm-tests/test/staking.precompile.limit.test.ts b/evm-tests/test/staking.precompile.limit.test.ts new file mode 100644 index 0000000000..759aaecce2 --- /dev/null +++ b/evm-tests/test/staking.precompile.limit.test.ts @@ -0,0 +1,113 @@ +import * as assert from "assert"; +import { getDevnetApi, getRandomSubstrateKeypair } from "../src/substrate"; +import { devnet } from "@polkadot-api/descriptors"; +import { TypedApi } from "polkadot-api"; +import { + convertH160ToSS58, + convertPublicKeyToSs58, +} from "../src/address-utils"; +import { tao, raoToEth } from "../src/balance-math"; +import { + 
addNewSubnetwork, + addStake, + forceSetBalanceToEthAddress, + forceSetBalanceToSs58Address, + startCall, +} from "../src/subtensor"; +import { ethers } from "ethers"; +import { generateRandomEthersWallet } from "../src/utils"; +import { ISTAKING_V2_ADDRESS, IStakingV2ABI } from "../src/contracts/staking"; +import { log } from "console"; + +describe("Test staking precompile add remove limit methods", () => { + const hotkey = getRandomSubstrateKeypair(); + const coldkey = getRandomSubstrateKeypair(); + const wallet1 = generateRandomEthersWallet(); + + let api: TypedApi; + + before(async () => { + api = await getDevnetApi(); + await forceSetBalanceToSs58Address( + api, + convertPublicKeyToSs58(hotkey.publicKey), + ); + await forceSetBalanceToSs58Address( + api, + convertPublicKeyToSs58(coldkey.publicKey), + ); + await forceSetBalanceToEthAddress(api, wallet1.address); + await addNewSubnetwork(api, hotkey, coldkey); + let netuid = (await api.query.SubtensorModule.TotalNetworks.getValue()) - 1; + await startCall(api, netuid, coldkey); + console.log("will test in subnet: ", netuid); + }); + + it("Staker add limit", async () => { + let netuid = (await api.query.SubtensorModule.TotalNetworks.getValue()) - 1; + let ss58Address = convertH160ToSS58(wallet1.address); + + const alpha = await api.query.SubtensorModule.Alpha.getValue( + convertPublicKeyToSs58(hotkey.publicKey), + ss58Address, + netuid, + ); + + const contract = new ethers.Contract( + ISTAKING_V2_ADDRESS, + IStakingV2ABI, + wallet1, + ); + + const tx = await contract.addStakeLimit( + hotkey.publicKey, + tao(2000), + tao(1000), + true, + netuid, + ); + await tx.wait(); + + const alphaAfterAddStake = await api.query.SubtensorModule.Alpha.getValue( + convertPublicKeyToSs58(hotkey.publicKey), + ss58Address, + netuid, + ); + + assert.ok(alphaAfterAddStake > alpha); + }); + + it("Staker remove limit", async () => { + let netuid = (await api.query.SubtensorModule.TotalNetworks.getValue()) - 1; + let ss58Address = convertH160ToSS58(wallet1.address); + + const alpha = await api.query.SubtensorModule.Alpha.getValue( + convertPublicKeyToSs58(hotkey.publicKey), + ss58Address, + netuid, + ); + + const contract = new ethers.Contract( + ISTAKING_V2_ADDRESS, + IStakingV2ABI, + wallet1, + ); + + const tx = await contract.removeStakeLimit( + hotkey.publicKey, + tao(100), + tao(1), + true, + netuid, + ); + await tx.wait(); + + const alphaAfterRemoveStake = await api.query.SubtensorModule.Alpha.getValue( + convertPublicKeyToSs58(hotkey.publicKey), + ss58Address, + netuid, + ); + + assert.ok(alphaAfterRemoveStake < alpha); + }); +}); diff --git a/evm-tests/test/staking.precompile.reward.test.ts b/evm-tests/test/staking.precompile.reward.test.ts index 3600a6d08d..3735329ff2 100644 --- a/evm-tests/test/staking.precompile.reward.test.ts +++ b/evm-tests/test/staking.precompile.reward.test.ts @@ -7,10 +7,11 @@ import { tao } from "../src/balance-math" import { forceSetBalanceToSs58Address, addNewSubnetwork, burnedRegister, setTxRateLimit, setTempo, setWeightsSetRateLimit, setSubnetOwnerCut, setMaxAllowedUids, - setMinDelegateTake, becomeDelegate, setActivityCutoff, addStake, setWeight, rootRegister + setMinDelegateTake, becomeDelegate, setActivityCutoff, addStake, setWeight, rootRegister, + startCall } from "../src/subtensor" -describe("Test neuron precompile reveal weights", () => { +describe("Test neuron precompile reward", () => { const hotkey = getRandomSubstrateKeypair(); const coldkey = getRandomSubstrateKeypair(); @@ -35,6 +36,7 @@ describe("Test neuron 
precompile reveal weights", () => { // await forceSetBalanceToEthAddress(api, wallet1.address) // await forceSetBalanceToEthAddress(api, wallet2.address) let netuid = await addNewSubnetwork(api, hotkey, coldkey) + await startCall(api, netuid, coldkey) console.log("test the case on subnet ", netuid) diff --git a/evm-tests/test/staking.precompile.stake-get.test.ts b/evm-tests/test/staking.precompile.stake-get.test.ts index 37a23d8db2..460aeabf32 100644 --- a/evm-tests/test/staking.precompile.stake-get.test.ts +++ b/evm-tests/test/staking.precompile.stake-get.test.ts @@ -6,6 +6,7 @@ import { convertPublicKeyToSs58 } from "../src/address-utils" import { tao } from "../src/balance-math" import { forceSetBalanceToSs58Address, addNewSubnetwork, addStake, + startCall } from "../src/subtensor" import { ethers } from "ethers"; import { generateRandomEthersWallet } from "../src/utils" @@ -23,7 +24,9 @@ describe("Test staking precompile get methods", () => { api = await getDevnetApi() await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(hotkey.publicKey)) await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(coldkey.publicKey)) - let netuid = await addNewSubnetwork(api, hotkey, coldkey) + await addNewSubnetwork(api, hotkey, coldkey) + let netuid = (await api.query.SubtensorModule.TotalNetworks.getValue()) - 1 + await startCall(api, netuid, coldkey) console.log("will test in subnet: ", netuid) }) diff --git a/evm-tests/test/subnet.precompile.hyperparameter.test.ts b/evm-tests/test/subnet.precompile.hyperparameter.test.ts index 1805b85ce9..57efd64f77 100644 --- a/evm-tests/test/subnet.precompile.hyperparameter.test.ts +++ b/evm-tests/test/subnet.precompile.hyperparameter.test.ts @@ -57,386 +57,476 @@ describe("Test the Subnet precompile contract", () => { assert.ok(totalNetwork + 1 === totalNetworkAfterAdd) }); - it("Can set subnet parameter", async () => { + it("Can set servingRateLimit parameter", async () => { const totalNetwork = await api.query.SubtensorModule.TotalNetworks.getValue() const contract = new ethers.Contract(ISUBNET_ADDRESS, ISubnetABI, wallet); const netuid = totalNetwork - 1; - // servingRateLimit hyperparameter - { - const newValue = 100; - const tx = await contract.setServingRateLimit(netuid, newValue); - await tx.wait(); + const newValue = 100; + const tx = await contract.setServingRateLimit(netuid, newValue); + await tx.wait(); + + let onchainValue = await api.query.SubtensorModule.ServingRateLimit.getValue(netuid) + + + let valueFromContract = Number( + await contract.getServingRateLimit(netuid) + ); + + assert.equal(valueFromContract, newValue) + assert.equal(valueFromContract, onchainValue); + }) + + + // minDifficulty hyperparameter + // + // disabled: only by sudo + // + // newValue = 101; + // tx = await contract.setMinDifficulty(netuid, newValue); + // await tx.wait(); - let onchainValue = await api.query.SubtensorModule.ServingRateLimit.getValue(netuid) + // await usingApi(async (api) => { + // onchainValue = Number( + // await api.query.subtensorModule.minDifficulty(netuid) + // ); + // }); + + // valueFromContract = Number(await contract.getMinDifficulty(netuid)); + + // expect(valueFromContract).to.eq(newValue); + // expect(valueFromContract).to.eq(onchainValue); + + it("Can set maxDifficulty parameter", async () => { + + const totalNetwork = await api.query.SubtensorModule.TotalNetworks.getValue() + const contract = new ethers.Contract(ISUBNET_ADDRESS, ISubnetABI, wallet); + const netuid = totalNetwork - 1; + + const newValue = 102; + const tx = 
await contract.setMaxDifficulty(netuid, newValue); + await tx.wait(); + let onchainValue = await api.query.SubtensorModule.MaxDifficulty.getValue(netuid) - let valueFromContract = Number( - await contract.getServingRateLimit(netuid) - ); - assert.equal(valueFromContract, newValue) - assert.equal(valueFromContract, onchainValue); - } + let valueFromContract = Number( + await contract.getMaxDifficulty(netuid) + ); + + assert.equal(valueFromContract, newValue) + assert.equal(valueFromContract, onchainValue); + }) + + + it("Can set weightsVersionKey parameter", async () => { + + const totalNetwork = await api.query.SubtensorModule.TotalNetworks.getValue() + const contract = new ethers.Contract(ISUBNET_ADDRESS, ISubnetABI, wallet); + const netuid = totalNetwork - 1; + + const newValue = 103; + const tx = await contract.setWeightsVersionKey(netuid, newValue); + await tx.wait(); + + let onchainValue = await api.query.SubtensorModule.WeightsVersionKey.getValue(netuid) + + + let valueFromContract = Number( + await contract.getWeightsVersionKey(netuid) + ); + + assert.equal(valueFromContract, newValue) + assert.equal(valueFromContract, onchainValue); + }) - // minDifficulty hyperparameter - // - // disabled: only by sudo - // - // newValue = 101; - // tx = await contract.setMinDifficulty(netuid, newValue); - // await tx.wait(); + // need sudo as origin now + // it("Can set weightsSetRateLimit parameter", async () => { - // await usingApi(async (api) => { - // onchainValue = Number( - // await api.query.subtensorModule.minDifficulty(netuid) - // ); - // }); + // const totalNetwork = await api.query.SubtensorModule.TotalNetworks.getValue() + // const contract = new ethers.Contract(ISUBNET_ADDRESS, ISubnetABI, wallet); + // const netuid = totalNetwork - 1; - // valueFromContract = Number(await contract.getMinDifficulty(netuid)); + // const newValue = 104; + // const tx = await contract.setWeightsSetRateLimit(netuid, newValue); + // await tx.wait(); + + // let onchainValue = await api.query.SubtensorModule.WeightsSetRateLimit.getValue(netuid) + + + // let valueFromContract = Number( + // await contract.getWeightsSetRateLimit(netuid) + // ); + + // assert.equal(valueFromContract, newValue) + // assert.equal(valueFromContract, onchainValue); + // }) + + it("Can set adjustmentAlpha parameter", async () => { + + const totalNetwork = await api.query.SubtensorModule.TotalNetworks.getValue() + const contract = new ethers.Contract(ISUBNET_ADDRESS, ISubnetABI, wallet); + const netuid = totalNetwork - 1; + + const newValue = 105; + const tx = await contract.setAdjustmentAlpha(netuid, newValue); + await tx.wait(); + + let onchainValue = await api.query.SubtensorModule.AdjustmentAlpha.getValue(netuid) + + + let valueFromContract = Number( + await contract.getAdjustmentAlpha(netuid) + ); + + assert.equal(valueFromContract, newValue) + assert.equal(valueFromContract, onchainValue); + }) + + it("Can set maxWeightLimit parameter", async () => { + + const totalNetwork = await api.query.SubtensorModule.TotalNetworks.getValue() + const contract = new ethers.Contract(ISUBNET_ADDRESS, ISubnetABI, wallet); + const netuid = totalNetwork - 1; + + const newValue = 106; + const tx = await contract.setMaxWeightLimit(netuid, newValue); + await tx.wait(); + + let onchainValue = await api.query.SubtensorModule.MaxWeightsLimit.getValue(netuid) + + + let valueFromContract = Number( + await contract.getMaxWeightLimit(netuid) + ); + + assert.equal(valueFromContract, newValue) + assert.equal(valueFromContract, onchainValue); + }) + + 
it("Can set immunityPeriod parameter", async () => { + + const totalNetwork = await api.query.SubtensorModule.TotalNetworks.getValue() + const contract = new ethers.Contract(ISUBNET_ADDRESS, ISubnetABI, wallet); + const netuid = totalNetwork - 1; - // expect(valueFromContract).to.eq(newValue); - // expect(valueFromContract).to.eq(onchainValue); + const newValue = 107; + const tx = await contract.setImmunityPeriod(netuid, newValue); + await tx.wait(); - // maxDifficulty hyperparameter + let onchainValue = await api.query.SubtensorModule.ImmunityPeriod.getValue(netuid) - { - const newValue = 102; - const tx = await contract.setMaxDifficulty(netuid, newValue); - await tx.wait(); - let onchainValue = await api.query.SubtensorModule.MaxDifficulty.getValue(netuid) + let valueFromContract = Number( + await contract.getImmunityPeriod(netuid) + ); + assert.equal(valueFromContract, newValue) + assert.equal(valueFromContract, onchainValue); + }) - let valueFromContract = Number( - await contract.getMaxDifficulty(netuid) - ); + it("Can set minAllowedWeights parameter", async () => { - assert.equal(valueFromContract, newValue) - assert.equal(valueFromContract, onchainValue); - } + const totalNetwork = await api.query.SubtensorModule.TotalNetworks.getValue() + const contract = new ethers.Contract(ISUBNET_ADDRESS, ISubnetABI, wallet); + const netuid = totalNetwork - 1; - // weightsVersionKey hyperparameter - { - const newValue = 103; - const tx = await contract.setWeightsVersionKey(netuid, newValue); - await tx.wait(); + const newValue = 108; + const tx = await contract.setMinAllowedWeights(netuid, newValue); + await tx.wait(); - let onchainValue = await api.query.SubtensorModule.WeightsVersionKey.getValue(netuid) + let onchainValue = await api.query.SubtensorModule.MinAllowedWeights.getValue(netuid) - let valueFromContract = Number( - await contract.getWeightsVersionKey(netuid) - ); + let valueFromContract = Number( + await contract.getMinAllowedWeights(netuid) + ); - assert.equal(valueFromContract, newValue) - assert.equal(valueFromContract, onchainValue); - } - // weightsSetRateLimit hyperparameter - { - const newValue = 104; - const tx = await contract.setWeightsSetRateLimit(netuid, newValue); - await tx.wait(); + assert.equal(valueFromContract, newValue) + assert.equal(valueFromContract, onchainValue); + }) - let onchainValue = await api.query.SubtensorModule.WeightsSetRateLimit.getValue(netuid) + it("Can set kappa parameter", async () => { + const totalNetwork = await api.query.SubtensorModule.TotalNetworks.getValue() + const contract = new ethers.Contract(ISUBNET_ADDRESS, ISubnetABI, wallet); + const netuid = totalNetwork - 1; - let valueFromContract = Number( - await contract.getWeightsSetRateLimit(netuid) - ); + const newValue = 109; + const tx = await contract.setKappa(netuid, newValue); + await tx.wait(); - assert.equal(valueFromContract, newValue) - assert.equal(valueFromContract, onchainValue); - } + let onchainValue = await api.query.SubtensorModule.Kappa.getValue(netuid) - // adjustmentAlpha hyperparameter - { - const newValue = 105; - const tx = await contract.setAdjustmentAlpha(netuid, newValue); - await tx.wait(); - let onchainValue = await api.query.SubtensorModule.AdjustmentAlpha.getValue(netuid) + let valueFromContract = Number( + await contract.getKappa(netuid) + ); + assert.equal(valueFromContract, newValue) + assert.equal(valueFromContract, onchainValue); + }) - let valueFromContract = Number( - await contract.getAdjustmentAlpha(netuid) - ); + it("Can set rho parameter", async () 
=> { - assert.equal(valueFromContract, newValue) - assert.equal(valueFromContract, onchainValue); - } + const totalNetwork = await api.query.SubtensorModule.TotalNetworks.getValue() + const contract = new ethers.Contract(ISUBNET_ADDRESS, ISubnetABI, wallet); + const netuid = totalNetwork - 1; - // maxWeightLimit hyperparameter - { - const newValue = 106; - const tx = await contract.setMaxWeightLimit(netuid, newValue); - await tx.wait(); + const newValue = 110; + const tx = await contract.setRho(netuid, newValue); + await tx.wait(); - let onchainValue = await api.query.SubtensorModule.MaxWeightsLimit.getValue(netuid) + let onchainValue = await api.query.SubtensorModule.Rho.getValue(netuid) - let valueFromContract = Number( - await contract.getMaxWeightLimit(netuid) - ); + let valueFromContract = Number( + await contract.getRho(netuid) + ); - assert.equal(valueFromContract, newValue) - assert.equal(valueFromContract, onchainValue); - } - // immunityPeriod hyperparameter - { - const newValue = 107; - const tx = await contract.setImmunityPeriod(netuid, newValue); - await tx.wait(); + assert.equal(valueFromContract, newValue) + assert.equal(valueFromContract, onchainValue); + }) - let onchainValue = await api.query.SubtensorModule.ImmunityPeriod.getValue(netuid) + it("Can set activityCutoff parameter", async () => { + const totalNetwork = await api.query.SubtensorModule.TotalNetworks.getValue() + const contract = new ethers.Contract(ISUBNET_ADDRESS, ISubnetABI, wallet); + const netuid = totalNetwork - 1; + const newValue = await api.query.SubtensorModule.MinActivityCutoff.getValue() + 1; + const tx = await contract.setActivityCutoff(netuid, newValue); + await tx.wait(); - let valueFromContract = Number( - await contract.getImmunityPeriod(netuid) - ); + let onchainValue = await api.query.SubtensorModule.ActivityCutoff.getValue(netuid) - assert.equal(valueFromContract, newValue) - assert.equal(valueFromContract, onchainValue); - } - // minAllowedWeights hyperparameter - { - const newValue = 108; - const tx = await contract.setMinAllowedWeights(netuid, newValue); - await tx.wait(); + let valueFromContract = Number( + await contract.getActivityCutoff(netuid) + ); - let onchainValue = await api.query.SubtensorModule.MinAllowedWeights.getValue(netuid) + assert.equal(valueFromContract, newValue) + assert.equal(valueFromContract, onchainValue); + }) + it("Can set networkRegistrationAllowed parameter", async () => { - let valueFromContract = Number( - await contract.getMinAllowedWeights(netuid) - ); + const totalNetwork = await api.query.SubtensorModule.TotalNetworks.getValue() + const contract = new ethers.Contract(ISUBNET_ADDRESS, ISubnetABI, wallet); + const netuid = totalNetwork - 1; - assert.equal(valueFromContract, newValue) - assert.equal(valueFromContract, onchainValue); - } + const newValue = true; + const tx = await contract.setNetworkRegistrationAllowed(netuid, newValue); + await tx.wait(); - // kappa hyperparameter - { - const newValue = 109; - const tx = await contract.setKappa(netuid, newValue); - await tx.wait(); + let onchainValue = await api.query.SubtensorModule.NetworkRegistrationAllowed.getValue(netuid) - let onchainValue = await api.query.SubtensorModule.Kappa.getValue(netuid) + let valueFromContract = Boolean( + await contract.getNetworkRegistrationAllowed(netuid) + ); - let valueFromContract = Number( - await contract.getKappa(netuid) - ); + assert.equal(valueFromContract, newValue) + assert.equal(valueFromContract, onchainValue); + }) - assert.equal(valueFromContract, newValue) - 
assert.equal(valueFromContract, onchainValue); - } + it("Can set networkPowRegistrationAllowed parameter", async () => { - // rho hyperparameter - { - const newValue = 110; - const tx = await contract.setRho(netuid, newValue); - await tx.wait(); + const totalNetwork = await api.query.SubtensorModule.TotalNetworks.getValue() + const contract = new ethers.Contract(ISUBNET_ADDRESS, ISubnetABI, wallet); + const netuid = totalNetwork - 1; - let onchainValue = await api.query.SubtensorModule.Rho.getValue(netuid) + const newValue = true; + const tx = await contract.setNetworkPowRegistrationAllowed(netuid, newValue); + await tx.wait(); + let onchainValue = await api.query.SubtensorModule.NetworkPowRegistrationAllowed.getValue(netuid) - let valueFromContract = Number( - await contract.getRho(netuid) - ); - assert.equal(valueFromContract, newValue) - assert.equal(valueFromContract, onchainValue); - } + let valueFromContract = Boolean( + await contract.getNetworkPowRegistrationAllowed(netuid) + ); - // activityCutoff hyperparameter - { - const newValue = 111; - const tx = await contract.setActivityCutoff(netuid, newValue); - await tx.wait(); + assert.equal(valueFromContract, newValue) + assert.equal(valueFromContract, onchainValue); + }) - let onchainValue = await api.query.SubtensorModule.ActivityCutoff.getValue(netuid) + // minBurn hyperparameter. only sudo can set it now + // newValue = 112; + // tx = await contract.setMinBurn(netuid, newValue); + // await tx.wait(); - let valueFromContract = Number( - await contract.getActivityCutoff(netuid) - ); + // await usingApi(async (api) => { + // onchainValue = Number( + // await api.query.subtensorModule.minBurn(netuid) + // ); + // }); - assert.equal(valueFromContract, newValue) - assert.equal(valueFromContract, onchainValue); - } + // valueFromContract = Number(await contract.getMinBurn(netuid)); - // networkRegistrationAllowed hyperparameter - { - const newValue = true; - const tx = await contract.setNetworkRegistrationAllowed(netuid, newValue); - await tx.wait(); + // expect(valueFromContract).to.eq(newValue); + // expect(valueFromContract).to.eq(onchainValue); - let onchainValue = await api.query.SubtensorModule.NetworkRegistrationAllowed.getValue(netuid) + // maxBurn hyperparameter. 
only sudo can set it now + // it("Can set maxBurn parameter", async () => { + // const totalNetwork = await api.query.SubtensorModule.TotalNetworks.getValue() + // const contract = new ethers.Contract(ISUBNET_ADDRESS, ISubnetABI, wallet); + // const netuid = totalNetwork - 1; - let valueFromContract = Boolean( - await contract.getNetworkRegistrationAllowed(netuid) - ); + // const newValue = 113; + // const tx = await contract.setMaxBurn(netuid, newValue); + // await tx.wait(); - assert.equal(valueFromContract, newValue) - assert.equal(valueFromContract, onchainValue); - } + // let onchainValue = await api.query.SubtensorModule.MaxBurn.getValue(netuid) - // networkPowRegistrationAllowed hyperparameter - { - const newValue = true; - const tx = await contract.setNetworkPowRegistrationAllowed(netuid, newValue); - await tx.wait(); - let onchainValue = await api.query.SubtensorModule.NetworkPowRegistrationAllowed.getValue(netuid) + // let valueFromContract = Number( + // await contract.getMaxBurn(netuid) + // ); + // assert.equal(valueFromContract, newValue) + // assert.equal(valueFromContract, onchainValue); + // }) - let valueFromContract = Boolean( - await contract.getNetworkPowRegistrationAllowed(netuid) - ); - assert.equal(valueFromContract, newValue) - assert.equal(valueFromContract, onchainValue); - } + // difficulty hyperparameter (disabled: sudo only) + // newValue = 114; - // minBurn hyperparameter. only sudo can set it now - // newValue = 112; + // tx = await contract.setDifficulty(netuid, newValue); + // await tx.wait(); - // tx = await contract.setMinBurn(netuid, newValue); - // await tx.wait(); + // await usingApi(async (api) => { + // onchainValue = Number( + // await api.query.subtensorModule.difficulty(netuid) + // ); + // }); - // await usingApi(async (api) => { - // onchainValue = Number( - // await api.query.subtensorModule.minBurn(netuid) - // ); - // }); + // valueFromContract = Number(await contract.getDifficulty(netuid)); - // valueFromContract = Number(await contract.getMinBurn(netuid)); + // expect(valueFromContract).to.eq(newValue); + // expect(valueFromContract).to.eq(onchainValue); - // expect(valueFromContract).to.eq(newValue); - // expect(valueFromContract).to.eq(onchainValue); + it("Can set bondsMovingAverage parameter", async () => { - // maxBurn hyperparameter - { - const newValue = 113; - const tx = await contract.setMaxBurn(netuid, newValue); - await tx.wait(); + const totalNetwork = await api.query.SubtensorModule.TotalNetworks.getValue() + const contract = new ethers.Contract(ISUBNET_ADDRESS, ISubnetABI, wallet); + const netuid = totalNetwork - 1; - let onchainValue = await api.query.SubtensorModule.MaxBurn.getValue(netuid) + const newValue = 115; + const tx = await contract.setBondsMovingAverage(netuid, newValue); + await tx.wait(); + let onchainValue = await api.query.SubtensorModule.BondsMovingAverage.getValue(netuid) - let valueFromContract = Number( - await contract.getMaxBurn(netuid) - ); - assert.equal(valueFromContract, newValue) - assert.equal(valueFromContract, onchainValue); - } + let valueFromContract = Number( + await contract.getBondsMovingAverage(netuid) + ); + assert.equal(valueFromContract, newValue) + assert.equal(valueFromContract, onchainValue); + }) - // difficulty hyperparameter (disabled: sudo only) - // newValue = 114; + it("Can set commitRevealWeightsEnabled parameter", async () => { - // tx = await contract.setDifficulty(netuid, newValue); - // await tx.wait(); + const totalNetwork = await 
api.query.SubtensorModule.TotalNetworks.getValue() + const contract = new ethers.Contract(ISUBNET_ADDRESS, ISubnetABI, wallet); + const netuid = totalNetwork - 1; - // await usingApi(async (api) => { - // onchainValue = Number( - // await api.query.subtensorModule.difficulty(netuid) - // ); - // }); + const newValue = true; + const tx = await contract.setCommitRevealWeightsEnabled(netuid, newValue); + await tx.wait(); - // valueFromContract = Number(await contract.getDifficulty(netuid)); + let onchainValue = await api.query.SubtensorModule.CommitRevealWeightsEnabled.getValue(netuid) - // expect(valueFromContract).to.eq(newValue); - // expect(valueFromContract).to.eq(onchainValue); - // bondsMovingAverage hyperparameter - { - const newValue = 115; - const tx = await contract.setBondsMovingAverage(netuid, newValue); - await tx.wait(); + let valueFromContract = Boolean( + await contract.getCommitRevealWeightsEnabled(netuid) + ); - let onchainValue = await api.query.SubtensorModule.BondsMovingAverage.getValue(netuid) + assert.equal(valueFromContract, newValue) + assert.equal(valueFromContract, onchainValue); + }) + it("Can set liquidAlphaEnabled parameter", async () => { - let valueFromContract = Number( - await contract.getBondsMovingAverage(netuid) - ); + const totalNetwork = await api.query.SubtensorModule.TotalNetworks.getValue() + const contract = new ethers.Contract(ISUBNET_ADDRESS, ISubnetABI, wallet); + const netuid = totalNetwork - 1; - assert.equal(valueFromContract, newValue) - assert.equal(valueFromContract, onchainValue); - } + const newValue = true; + const tx = await contract.setLiquidAlphaEnabled(netuid, newValue); + await tx.wait(); + let onchainValue = await api.query.SubtensorModule.LiquidAlphaOn.getValue(netuid) - // commitRevealWeightsEnabled hyperparameter - { - const newValue = true; - const tx = await contract.setCommitRevealWeightsEnabled(netuid, newValue); - await tx.wait(); - let onchainValue = await api.query.SubtensorModule.CommitRevealWeightsEnabled.getValue(netuid) + let valueFromContract = Boolean( + await contract.getLiquidAlphaEnabled(netuid) + ); + assert.equal(valueFromContract, newValue) + assert.equal(valueFromContract, onchainValue); + }) - let valueFromContract = Boolean( - await contract.getCommitRevealWeightsEnabled(netuid) - ); + it("Can set yuma3Enabled hyperparameter", async () => + { + const totalNetwork = await api.query.SubtensorModule.TotalNetworks.getValue() + const contract = new ethers.Contract(ISUBNET_ADDRESS, ISubnetABI, wallet); + const netuid = totalNetwork - 1; - assert.equal(valueFromContract, newValue) - assert.equal(valueFromContract, onchainValue); - } + const newValue = true; + const tx = await contract.setYuma3Enabled(netuid, newValue); + await tx.wait(); - // liquidAlphaEnabled hyperparameter - { - const newValue = true; - const tx = await contract.setLiquidAlphaEnabled(netuid, newValue); - await tx.wait(); + let onchainValue = await api.query.SubtensorModule.Yuma3On.getValue(netuid) - let onchainValue = await api.query.SubtensorModule.LiquidAlphaOn.getValue(netuid) + let valueFromContract = Boolean( + await contract.getYuma3Enabled(netuid) + ); + assert.equal(valueFromContract, newValue) + assert.equal(valueFromContract, onchainValue); + }) - let valueFromContract = Boolean( - await contract.getLiquidAlphaEnabled(netuid) - ); + it("Can set alphaValues parameter", async () => { + const totalNetwork = await api.query.SubtensorModule.TotalNetworks.getValue() + const contract = new ethers.Contract(ISUBNET_ADDRESS, ISubnetABI, 
wallet); + const netuid = totalNetwork - 1; - assert.equal(valueFromContract, newValue) - assert.equal(valueFromContract, onchainValue); - } + const newValue = [118, 52429]; + const tx = await contract.setAlphaValues(netuid, newValue[0], newValue[1]); + await tx.wait(); - // alphaValues hyperparameter - { - const newValue = [118, 52429]; - const tx = await contract.setAlphaValues(netuid, newValue[0], newValue[1]); - await tx.wait(); + let onchainValue = await api.query.SubtensorModule.AlphaValues.getValue(netuid) - let onchainValue = await api.query.SubtensorModule.AlphaValues.getValue(netuid) + let value = await contract.getAlphaValues(netuid) + let valueFromContract = [Number(value[0]), Number(value[1])] - let value = await contract.getAlphaValues(netuid) - let valueFromContract = [Number(value[0]), Number(value[1])] + assert.equal(valueFromContract[0], newValue[0]) + assert.equal(valueFromContract[1], newValue[1]) + assert.equal(valueFromContract[0], onchainValue[0]); + assert.equal(valueFromContract[1], onchainValue[1]); + }) - assert.equal(valueFromContract[0], newValue[0]) - assert.equal(valueFromContract[1], newValue[1]) - assert.equal(valueFromContract[0], onchainValue[0]); - assert.equal(valueFromContract[1], onchainValue[1]); - } + it("Can set commitRevealWeightsInterval parameter", async () => { + const totalNetwork = await api.query.SubtensorModule.TotalNetworks.getValue() + const contract = new ethers.Contract(ISUBNET_ADDRESS, ISubnetABI, wallet); + const netuid = totalNetwork - 1; - // commitRevealWeightsInterval hyperparameter - { - const newValue = 119; - const tx = await contract.setCommitRevealWeightsInterval(netuid, newValue); - await tx.wait(); + const newValue = 119; + const tx = await contract.setCommitRevealWeightsInterval(netuid, newValue); + await tx.wait(); - let onchainValue = await api.query.SubtensorModule.RevealPeriodEpochs.getValue(netuid) + let onchainValue = await api.query.SubtensorModule.RevealPeriodEpochs.getValue(netuid) - let valueFromContract = Number( - await contract.getCommitRevealWeightsInterval(netuid) - ); + let valueFromContract = Number( + await contract.getCommitRevealWeightsInterval(netuid) + ); - assert.equal(valueFromContract, newValue) - assert.equal(valueFromContract, onchainValue); - } + assert.equal(valueFromContract, newValue) + assert.equal(valueFromContract, onchainValue); }) -}); \ No newline at end of file +}) diff --git a/evm-tests/test/uid.precompile.lookup.test.ts b/evm-tests/test/uid.precompile.lookup.test.ts new file mode 100644 index 0000000000..6e702d612e --- /dev/null +++ b/evm-tests/test/uid.precompile.lookup.test.ts @@ -0,0 +1,90 @@ +import * as assert from "assert"; + +import { getAliceSigner, getDevnetApi, waitForTransactionCompletion, getRandomSubstrateKeypair, getSignerFromKeypair } from "../src/substrate" +import { convertToFixedSizeBinary, generateRandomEthersWallet, getPublicClient } from "../src/utils"; +import { ETH_LOCAL_URL } from "../src/config"; +import { devnet } from "@polkadot-api/descriptors" +import { hexToU8a } from "@polkadot/util"; +import { u64 } from "scale-ts"; +import { PublicClient } from "viem"; +import { PolkadotSigner, TypedApi } from "polkadot-api"; +import { toViemAddress, convertPublicKeyToSs58 } from "../src/address-utils" +import { IUIDLookupABI, IUID_LOOKUP_ADDRESS } from "../src/contracts/uidLookup" +import { keccak256 } from 'ethers'; +import { addNewSubnetwork, forceSetBalanceToSs58Address, startCall } from "../src/subtensor"; + +describe("Test the UID Lookup precompile", () => { + 
const hotkey = getRandomSubstrateKeypair(); + const coldkey = getRandomSubstrateKeypair(); + const evmWallet = generateRandomEthersWallet(); + let publicClient: PublicClient; + + let api: TypedApi + + let alice: PolkadotSigner; + + let uid: number; + let blockNumber: number; + let netuid: number; + let blockNumberAssociated: bigint; + + before(async () => { + publicClient = await getPublicClient(ETH_LOCAL_URL) + api = await getDevnetApi() + alice = await getAliceSigner(); + + await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(alice.publicKey)) + await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(hotkey.publicKey)) + await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(coldkey.publicKey)) + + netuid = await addNewSubnetwork(api, hotkey, coldkey) + await startCall(api, netuid, coldkey) + + const maybeUid = await api.query.SubtensorModule.Uids.getValue(netuid, convertPublicKeyToSs58(hotkey.publicKey)) + + if (maybeUid === undefined) { + throw new Error("UID should be defined") + } + uid = maybeUid + + // Associate EVM key + blockNumber = await api.query.System.Number.getValue(); + const blockNumberBytes = u64.enc(BigInt(blockNumber)); + const blockNumberHash = hexToU8a(keccak256(blockNumberBytes)); + const concatenatedArray = new Uint8Array([...hotkey.publicKey, ...blockNumberHash]); + const signature = await evmWallet.signMessage(concatenatedArray); + const associateEvmKeyTx = api.tx.SubtensorModule.associate_evm_key({ + netuid: netuid, + hotkey: convertPublicKeyToSs58(hotkey.publicKey), + evm_key: convertToFixedSizeBinary(evmWallet.address, 20), + block_number: BigInt(blockNumber), + signature: convertToFixedSizeBinary(signature, 65) + }); + const signer = getSignerFromKeypair(coldkey); + await waitForTransactionCompletion(api, associateEvmKeyTx, signer) + .then(() => { }) + .catch((error) => { console.log(`transaction error ${error}`) }); + + const storedEvmKey = await api.query.SubtensorModule.AssociatedEvmAddress.getValue(netuid, uid) + assert.notEqual(storedEvmKey, undefined, "storedEvmKey should be defined") + if (storedEvmKey !== undefined) { + assert.equal(storedEvmKey[0].asHex(), convertToFixedSizeBinary(evmWallet.address, 20).asHex()) + blockNumberAssociated = storedEvmKey[1] + } + }) + + it("UID lookup via precompile contract works correctly", async () => { + // Get UID for the EVM address + const uidArray = await publicClient.readContract({ + abi: IUIDLookupABI, + address: toViemAddress(IUID_LOOKUP_ADDRESS), + functionName: "uidLookup", + args: [netuid, evmWallet.address, 1024] + }) + + assert.notEqual(uidArray, undefined, "UID should be defined") + assert.ok(Array.isArray(uidArray), `UID should be an array, got ${typeof uidArray}`) + assert.ok(uidArray.length > 0, "UID array should not be empty") + assert.deepStrictEqual(uidArray[0], { uid: uid, block_associated: blockNumberAssociated }) + }) +}); diff --git a/hyperparameters.md b/hyperparameters.md index c8d2ce1106..31d7261608 100644 --- a/hyperparameters.md +++ b/hyperparameters.md @@ -7,6 +7,7 @@ TxRateLimit: u64 = 1; // [1 @ 64,888] ### netuid 1 (text_prompting) ```rust Rho: u16 = 10; +AlphaSigmoidSteepness: u16 = 10.0 Kappa: u16 = 32_767; // 0.5 = 65535/2 MaxAllowedUids: u16 = 1024; Issuance: u64 = 0; @@ -33,6 +34,7 @@ MaxRegistrationsPerBlock: u16 = 1; PruningScore : u16 = u16::MAX; BondsMovingAverage: u64 = 900_000; BondsPenalty: u16 = 0; +BondsResetOn: bool = false; WeightsVersionKey: u64 = 1020; MinDifficulty: u64 = 10_000_000; MaxDifficulty: u64 = u64::MAX / 4; @@ -46,6 +48,7 @@ 
WeightsSetRateLimit: u64 = 100; ### netuid 3 (causallmnext) ```rust Rho: u16 = 10; +AlphaSigmoidSteepness: u16 = 10.0 Kappa: u16 = 32_767; // 0.5 = 65535/2 MaxAllowedUids: u16 = 4096; Issuance: u64 = 0; @@ -72,6 +75,7 @@ MaxRegistrationsPerBlock: u16 = 1; PruningScore : u16 = u16::MAX; BondsMovingAverage: u64 = 900_000; BondsPenalty: u16 = 0; +BondsResetOn: bool = false; WeightsVersionKey: u64 = 400; MinDifficulty: u64 = 10_000_000; MaxDifficulty: u64 = u64::MAX / 4; diff --git a/node/Cargo.toml b/node/Cargo.toml index 6cea8f6950..52ccf20de3 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ -99,6 +99,7 @@ fc-api = { workspace = true } fc-rpc = { workspace = true } fc-rpc-core = { workspace = true } fp-rpc = { workspace = true } +fc-aura = { workspace = true } fc-mapping-sync = { workspace = true } fp-consensus = { workspace = true } thiserror = { workspace = true } diff --git a/node/src/command.rs b/node/src/command.rs index 5b88e03f9a..aea800596e 100644 --- a/node/src/command.rs +++ b/node/src/command.rs @@ -181,9 +181,10 @@ pub fn run() -> sc_cli::Result<()> { // This switch needs to be in the client, since the client decides // which sub-commands it wants to support. match cmd { - BenchmarkCmd::Pallet(cmd) => { - cmd.run_with_spec::, ()>(Some(config.chain_spec)) - } + BenchmarkCmd::Pallet(cmd) => cmd + .run_with_spec::, crate::client::HostFunctions>(Some( + config.chain_spec, + )), BenchmarkCmd::Block(cmd) => cmd.run(client), BenchmarkCmd::Storage(cmd) => { let db = backend.expose_db(); diff --git a/node/src/ethereum.rs b/node/src/ethereum.rs index 158bd84807..c708efd714 100644 --- a/node/src/ethereum.rs +++ b/node/src/ethereum.rs @@ -1,8 +1,9 @@ +use fc_aura::AuraConsensusDataProvider; pub use fc_consensus::FrontierBlockImport; use fc_rpc::{ Debug, DebugApiServer, Eth, EthApiServer, EthConfig, EthDevSigner, EthFilter, EthFilterApiServer, EthPubSub, EthPubSubApiServer, EthSigner, EthTask, Net, NetApiServer, Web3, - Web3ApiServer, pending::AuraConsensusDataProvider, + Web3ApiServer, }; pub use fc_rpc_core::types::{FeeHistoryCache, FeeHistoryCacheLimit, FilterPool}; /// Frontier DB backend type. diff --git a/pallets/admin-utils/src/benchmarking.rs b/pallets/admin-utils/src/benchmarking.rs index c5794e0279..022ea815f9 100644 --- a/pallets/admin-utils/src/benchmarking.rs +++ b/pallets/admin-utils/src/benchmarking.rs @@ -145,7 +145,7 @@ mod benchmarks { pallet_subtensor::Pallet::::init_new_network(1u16 /*netuid*/, 1u16 /*tempo*/); #[extrinsic_call] - _(RawOrigin::Root, 1u16/*netuid*/, 300u16/*activity_cutoff*/)/*sudo_set_activity_cutoff*/; + _(RawOrigin::Root, 1u16/*netuid*/, 361u16/*activity_cutoff*/)/*sudo_set_activity_cutoff*/; } #[benchmark] diff --git a/pallets/admin-utils/src/lib.rs b/pallets/admin-utils/src/lib.rs index 19bbbee73b..2b41539816 100644 --- a/pallets/admin-utils/src/lib.rs +++ b/pallets/admin-utils/src/lib.rs @@ -2,11 +2,8 @@ // extern crate alloc; -pub use pallet::*; -pub mod weights; -pub use weights::WeightInfo; - use frame_system::pallet_prelude::BlockNumberFor; +pub use pallet::*; // - we could replace it with Vec<(AuthorityId, u64)>, but we would need // `sp_consensus_grandpa` for `AuthorityId` anyway // - we could use a type parameter for `AuthorityId`, but there is @@ -66,9 +63,6 @@ pub mod pallet { /// The maximum number of authorities that the pallet can hold. type MaxAuthorities: Get; - /// Weight information for extrinsics in this pallet. 
- type WeightInfo: WeightInfo; - /// Unit of assets type Balance: Balance; } @@ -83,6 +77,13 @@ pub mod pallet { /// Indicates if the precompile operation is enabled or not. enabled: bool, }, + /// Event emitted when the Yuma3 enable is toggled. + Yuma3EnableToggled { + /// The network identifier. + netuid: u16, + /// Indicates if the Yuma3 enable was enabled or disabled. + enabled: bool, + }, } // Errors inform users that something went wrong. @@ -94,6 +95,8 @@ pub mod pallet { MaxValidatorsLargerThanMaxUIds, /// The maximum number of subnet validators must be more than the current number of UIDs already in the subnet. MaxAllowedUIdsLessThanCurrentUIds, + /// The maximum value for bonds moving average is reached + BondsMovingAverageMaxReached, } /// Enum for specifying the type of precompile operation. #[derive(Encode, Decode, TypeInfo, Clone, PartialEq, Eq, Debug, Copy)] @@ -108,6 +111,8 @@ pub mod pallet { Metagraph, /// Enum for neuron precompile Neuron, + /// Enum for UID lookup precompile + UidLookup, } #[pallet::type_value] @@ -134,7 +139,9 @@ pub mod pallet { /// It is only callable by the root account. /// The extrinsic will call the Aura pallet to change the authorities. #[pallet::call_index(0)] - #[pallet::weight(::WeightInfo::swap_authorities(new_authorities.len() as u32))] + #[pallet::weight(Weight::from_parts(6_265_000, 0) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)))] pub fn swap_authorities( origin: OriginFor, new_authorities: BoundedVec<::AuthorityId, T::MaxAuthorities>, @@ -153,7 +160,9 @@ pub mod pallet { /// It is only callable by the root account. /// The extrinsic will call the Subtensor pallet to set the default take. #[pallet::call_index(1)] - #[pallet::weight(::WeightInfo::sudo_set_default_take())] + #[pallet::weight(Weight::from_parts(6_942_000, 0) + .saturating_add(T::DbWeight::get().reads(0_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn sudo_set_default_take(origin: OriginFor, default_take: u16) -> DispatchResult { ensure_root(origin)?; pallet_subtensor::Pallet::::set_max_delegate_take(default_take); @@ -177,7 +186,9 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the serving rate limit. #[pallet::call_index(3)] - #[pallet::weight(::WeightInfo::sudo_set_serving_rate_limit())] + #[pallet::weight(Weight::from_parts(7_815_000, 0) + .saturating_add(T::DbWeight::get().reads(0_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn sudo_set_serving_rate_limit( origin: OriginFor, netuid: u16, @@ -197,7 +208,9 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the minimum difficulty. #[pallet::call_index(4)] - #[pallet::weight(::WeightInfo::sudo_set_min_difficulty())] + #[pallet::weight(Weight::from_parts(19_780_000, 0) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn sudo_set_min_difficulty( origin: OriginFor, netuid: u16, @@ -222,7 +235,9 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the maximum difficulty. 
#[pallet::call_index(5)] - #[pallet::weight(::WeightInfo::sudo_set_max_difficulty())] + #[pallet::weight(Weight::from_parts(20_050_000, 0) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn sudo_set_max_difficulty( origin: OriginFor, netuid: u16, @@ -247,7 +262,9 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the weights version key. #[pallet::call_index(6)] - #[pallet::weight(::WeightInfo::sudo_set_weights_version_key())] + #[pallet::weight(Weight::from_parts(19_990_000, 0) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn sudo_set_weights_version_key( origin: OriginFor, netuid: u16, @@ -295,7 +312,9 @@ pub mod pallet { /// It is only callable by the root account. /// The extrinsic will call the Subtensor pallet to set the weights set rate limit. #[pallet::call_index(7)] - #[pallet::weight(::WeightInfo::sudo_set_weights_set_rate_limit())] + #[pallet::weight(Weight::from_parts(20_050_000, 0) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn sudo_set_weights_set_rate_limit( origin: OriginFor, netuid: u16, @@ -323,7 +342,9 @@ pub mod pallet { /// It is only callable by the root account, not changeable by the subnet owner. /// The extrinsic will call the Subtensor pallet to set the adjustment interval. #[pallet::call_index(8)] - #[pallet::weight(::WeightInfo::sudo_set_adjustment_interval())] + #[pallet::weight(Weight::from_parts(20_010_000, 0) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn sudo_set_adjustment_interval( origin: OriginFor, netuid: u16, @@ -378,7 +399,9 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the adjustment beta. #[pallet::call_index(12)] - #[pallet::weight(::WeightInfo::sudo_set_max_weight_limit())] + #[pallet::weight(Weight::from_parts(19_240_000, 0) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn sudo_set_max_weight_limit( origin: OriginFor, netuid: u16, @@ -403,7 +426,9 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the immunity period. #[pallet::call_index(13)] - #[pallet::weight(::WeightInfo::sudo_set_immunity_period())] + #[pallet::weight(Weight::from_parts(19_380_000, 0) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn sudo_set_immunity_period( origin: OriginFor, netuid: u16, @@ -428,7 +453,9 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the minimum allowed weights. #[pallet::call_index(14)] - #[pallet::weight(::WeightInfo::sudo_set_min_allowed_weights())] + #[pallet::weight(Weight::from_parts(19_770_000, 0) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn sudo_set_min_allowed_weights( origin: OriginFor, netuid: u16, @@ -453,7 +480,9 @@ pub mod pallet { /// It is only callable by the root account. /// The extrinsic will call the Subtensor pallet to set the maximum allowed UIDs for a subnet. 
#[pallet::call_index(15)] - #[pallet::weight(::WeightInfo::sudo_set_max_allowed_uids())] + #[pallet::weight(Weight::from_parts(23_820_000, 0) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn sudo_set_max_allowed_uids( origin: OriginFor, netuid: u16, @@ -481,7 +510,9 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the kappa. #[pallet::call_index(16)] - #[pallet::weight(::WeightInfo::sudo_set_kappa())] + #[pallet::weight(Weight::from_parts(19_590_000, 0) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn sudo_set_kappa(origin: OriginFor, netuid: u16, kappa: u16) -> DispatchResult { pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; @@ -498,7 +529,9 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the rho. #[pallet::call_index(17)] - #[pallet::weight(::WeightInfo::sudo_set_rho())] + #[pallet::weight(Weight::from_parts(16_420_000, 0) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn sudo_set_rho(origin: OriginFor, netuid: u16, rho: u16) -> DispatchResult { pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; @@ -515,7 +548,9 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the activity cutoff. #[pallet::call_index(18)] - #[pallet::weight(::WeightInfo::sudo_set_activity_cutoff())] + #[pallet::weight(Weight::from_parts(22_600_000, 0) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn sudo_set_activity_cutoff( origin: OriginFor, netuid: u16, @@ -547,8 +582,8 @@ pub mod pallet { /// The extrinsic will call the Subtensor pallet to set the network registration allowed. #[pallet::call_index(19)] #[pallet::weight(( - Weight::from_parts(4_000_000, 0) - .saturating_add(Weight::from_parts(0, 0)) + Weight::from_parts(8_696_000, 0) + .saturating_add(T::DbWeight::get().reads(0)) .saturating_add(T::DbWeight::get().writes(1)), DispatchClass::Operational, Pays::No @@ -603,7 +638,9 @@ pub mod pallet { /// It is only callable by the root account. /// The extrinsic will call the Subtensor pallet to set the target registrations per interval. #[pallet::call_index(21)] - #[pallet::weight(::WeightInfo::sudo_set_target_registrations_per_interval())] + #[pallet::weight(Weight::from_parts(19_830_000, 0) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn sudo_set_target_registrations_per_interval( origin: OriginFor, netuid: u16, @@ -631,7 +668,9 @@ pub mod pallet { /// It is only callable by the root account. /// The extrinsic will call the Subtensor pallet to set the minimum burn. #[pallet::call_index(22)] - #[pallet::weight(::WeightInfo::sudo_set_min_burn())] + #[pallet::weight(Weight::from_parts(19_840_000, 0) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn sudo_set_min_burn( origin: OriginFor, netuid: u16, @@ -656,13 +695,15 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the maximum burn. 
#[pallet::call_index(23)] - #[pallet::weight(::WeightInfo::sudo_set_max_burn())] + #[pallet::weight(Weight::from_parts(19_740_000, 0) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn sudo_set_max_burn( origin: OriginFor, netuid: u16, max_burn: u64, ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; + ensure_root(origin)?; ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), @@ -681,7 +722,9 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the difficulty. #[pallet::call_index(24)] - #[pallet::weight(::WeightInfo::sudo_set_difficulty())] + #[pallet::weight(Weight::from_parts(20_280_000, 0) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn sudo_set_difficulty( origin: OriginFor, netuid: u16, @@ -705,7 +748,9 @@ pub mod pallet { /// It is only callable by the root account. /// The extrinsic will call the Subtensor pallet to set the maximum allowed validators. #[pallet::call_index(25)] - #[pallet::weight(::WeightInfo::sudo_set_max_allowed_validators())] + #[pallet::weight(Weight::from_parts(25_210_000, 0) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn sudo_set_max_allowed_validators( origin: OriginFor, netuid: u16, @@ -738,13 +783,22 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the bonds moving average. #[pallet::call_index(26)] - #[pallet::weight(::WeightInfo::sudo_set_bonds_moving_average())] + #[pallet::weight(Weight::from_parts(20_270_000, 0) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn sudo_set_bonds_moving_average( origin: OriginFor, netuid: u16, bonds_moving_average: u64, ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; + pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin.clone(), netuid)?; + + if pallet_subtensor::Pallet::::ensure_subnet_owner(origin, netuid).is_ok() { + ensure!( + bonds_moving_average <= 975000, + Error::::BondsMovingAverageMaxReached + ) + } ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), @@ -763,7 +817,9 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the bonds penalty. #[pallet::call_index(60)] - #[pallet::weight(::WeightInfo::sudo_set_bonds_penalty())] + #[pallet::weight(Weight::from_parts(20_030_000, 0) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn sudo_set_bonds_penalty( origin: OriginFor, netuid: u16, @@ -788,7 +844,9 @@ pub mod pallet { /// It is only callable by the root account. /// The extrinsic will call the Subtensor pallet to set the maximum registrations per block. #[pallet::call_index(27)] - #[pallet::weight(::WeightInfo::sudo_set_max_registrations_per_block())] + #[pallet::weight(Weight::from_parts(19_680_000, 0) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn sudo_set_max_registrations_per_block( origin: OriginFor, netuid: u16, @@ -859,7 +917,9 @@ pub mod pallet { /// It is only callable by the root account. /// The extrinsic will call the Subtensor pallet to set the tempo. 
#[pallet::call_index(30)] - #[pallet::weight(::WeightInfo::sudo_set_tempo())] + #[pallet::weight(Weight::from_parts(19_900_000, 0) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn sudo_set_tempo(origin: OriginFor, netuid: u16, tempo: u16) -> DispatchResult { ensure_root(origin)?; ensure!( @@ -1077,7 +1137,9 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the value. #[pallet::call_index(49)] - #[pallet::weight(::WeightInfo::sudo_set_commit_reveal_weights_enabled())] + #[pallet::weight(Weight::from_parts(19_480_000, 0) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn sudo_set_commit_reveal_weights_enabled( origin: OriginFor, netuid: u16, @@ -1276,7 +1338,9 @@ pub mod pallet { /// # Weight /// Weight is handled by the `#[pallet::weight]` attribute. #[pallet::call_index(57)] - #[pallet::weight(::WeightInfo::sudo_set_commit_reveal_weights_interval())] + #[pallet::weight(Weight::from_parts(20_490_000, 0) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn sudo_set_commit_reveal_weights_interval( origin: OriginFor, netuid: u16, @@ -1310,7 +1374,9 @@ pub mod pallet { /// # Weight /// Weight is handled by the `#[pallet::weight]` attribute. #[pallet::call_index(58)] - #[pallet::weight(::WeightInfo::sudo_set_evm_chain_id())] + #[pallet::weight(Weight::from_parts(27_199_000, 0) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn sudo_set_evm_chain_id(origin: OriginFor, chain_id: u64) -> DispatchResult { // Ensure the call is made by the root account ensure_root(origin)?; @@ -1335,7 +1401,9 @@ pub mod pallet { /// No change should be signaled while any change is pending. Returns an error if a change /// is already pending. #[pallet::call_index(59)] - #[pallet::weight(::WeightInfo::swap_authorities(next_authorities.len() as u32))] + #[pallet::weight(Weight::from_parts(11_550_000, 0) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn schedule_grandpa_change( origin: OriginFor, // grandpa ID is always the same type, so we don't need to parametrize it via `Config` @@ -1476,6 +1544,133 @@ pub mod pallet { ); Ok(()) } + + /// + /// + /// # Arguments + /// * `origin` - The origin of the call, which must be the root account. + /// * `netuid` - The unique identifier for the subnet. + /// * `steepness` - The new steepness for the alpha sigmoid function. + /// + /// # Errors + /// * `BadOrigin` - If the caller is not the root account. + /// # Weight + /// Weight is handled by the `#[pallet::weight]` attribute. + #[pallet::call_index(68)] + #[pallet::weight((0, DispatchClass::Operational, Pays::No))] + pub fn sudo_set_alpha_sigmoid_steepness( + origin: OriginFor, + netuid: u16, + steepness: u16, + ) -> DispatchResult { + ensure_root(origin)?; + pallet_subtensor::Pallet::::set_alpha_sigmoid_steepness(netuid, steepness); + + log::debug!( + "AlphaSigmoidSteepnessSet( netuid: {:?}, steepness: {:?} )", + netuid, + steepness + ); + Ok(()) + } + + /// Enables or disables Yuma3 for a given subnet. + /// + /// # Parameters + /// - `origin`: The origin of the call, which must be the root account or subnet owner. + /// - `netuid`: The unique identifier for the subnet. + /// - `enabled`: A boolean flag to enable or disable Yuma3. 
+ /// + /// # Weight + /// This function has a fixed weight of 0 and is classified as an operational transaction that does not incur any fees. + #[pallet::call_index(69)] + #[pallet::weight((0, DispatchClass::Operational, Pays::No))] + pub fn sudo_set_yuma3_enabled( + origin: OriginFor, + netuid: u16, + enabled: bool, + ) -> DispatchResult { + pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; + pallet_subtensor::Pallet::::set_yuma3_enabled(netuid, enabled); + + Self::deposit_event(Event::Yuma3EnableToggled { netuid, enabled }); + log::debug!( + "Yuma3EnableToggled( netuid: {:?}, Enabled: {:?} ) ", + netuid, + enabled + ); + Ok(()) + } + + /// Sets or updates the hotkey account associated with the owner of a specific subnet. + /// + /// This function allows either the root origin or the current subnet owner to set or update + /// the hotkey for a given subnet. The subnet must already exist. To prevent abuse, the call is + /// rate-limited to once per configured interval (default: one week) per subnet. + /// + /// # Parameters + /// - `origin`: The dispatch origin of the call. Must be either root or the current owner of the subnet. + /// - `netuid`: The unique identifier of the subnet whose owner hotkey is being set. + /// - `hotkey`: The new hotkey account to associate with the subnet owner. + /// + /// # Returns + /// - `DispatchResult`: Returns `Ok(())` if the hotkey was successfully set, or an appropriate error otherwise. + /// + /// # Errors + /// - `Error::SubnetNotExists`: If the specified subnet does not exist. + /// - `Error::TxRateLimitExceeded`: If the function is called more frequently than the allowed rate limit. + /// + /// # Access Control + /// Only callable by: + /// - Root origin, or + /// - The coldkey account that owns the subnet. + /// + /// # Storage + /// - Updates [`SubnetOwnerHotkey`] for the given `netuid`. + /// - Reads and updates [`LastRateLimitedBlock`] for rate-limiting. + /// - Reads [`DefaultSetSNOwnerHotkeyRateLimit`] to determine the interval between allowed updates. + /// + /// # Rate Limiting + /// This function is rate-limited to one call per subnet per interval (e.g., one week). + #[pallet::call_index(67)] + #[pallet::weight((0, DispatchClass::Operational, Pays::No))] + pub fn sudo_set_sn_owner_hotkey( + origin: OriginFor, + netuid: u16, + hotkey: T::AccountId, + ) -> DispatchResult { + pallet_subtensor::Pallet::::do_set_sn_owner_hotkey(origin, netuid, &hotkey) + } + + /// Enables or disables subtoken trading for a given subnet. + /// + /// # Arguments + /// * `origin` - The origin of the call, which must be the root account. + /// * `netuid` - The unique identifier of the subnet. + /// * `subtoken_enabled` - A boolean indicating whether subtoken trading should be enabled or disabled. + /// + /// # Errors + /// * `BadOrigin` - If the caller is not the root account. + /// + /// # Weight + /// Weight is handled by the `#[pallet::weight]` attribute. 
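The Yuma3 toggle added above is gated by `ensure_subnet_owner_or_root`, so a signer that is neither root nor the subnet owner should be rejected while root (or the owner) succeeds and `Yuma3EnableToggled` is emitted. A small sketch of that expectation, reusing the same scaffolding as the sn-owner-hotkey tests further down; the `BadOrigin` error and helper names are assumptions drawn from those tests, not part of this diff:

// Sketch only: expected origin gating for sudo_set_yuma3_enabled.
#[test]
fn yuma3_toggle_origin_gating_sketch() {
    new_test_ext().execute_with(|| {
        let netuid: u16 = 1;
        add_network(netuid, 10);

        // A signer that is neither root nor the subnet owner is rejected.
        assert_err!(
            AdminUtils::sudo_set_yuma3_enabled(
                <<Test as frame_system::Config>::RuntimeOrigin>::signed(U256::from(4)),
                netuid,
                true
            ),
            DispatchError::BadOrigin
        );

        // Root may flip the flag.
        assert_ok!(AdminUtils::sudo_set_yuma3_enabled(
            <<Test as frame_system::Config>::RuntimeOrigin>::root(),
            netuid,
            true
        ));
    });
}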
+ #[pallet::call_index(66)] + #[pallet::weight((0, DispatchClass::Operational, Pays::No))] + pub fn sudo_set_subtoken_enabled( + origin: OriginFor, + netuid: u16, + subtoken_enabled: bool, + ) -> DispatchResult { + ensure_root(origin)?; + pallet_subtensor::SubtokenEnabled::::set(netuid, subtoken_enabled); + + log::debug!( + "SubtokenEnabled( netuid: {:?}, subtoken_enabled: {:?} )", + netuid, + subtoken_enabled + ); + Ok(()) + } } } diff --git a/pallets/admin-utils/src/tests/mock.rs b/pallets/admin-utils/src/tests/mock.rs index 99c11b7165..f8b3e6a9b6 100644 --- a/pallets/admin-utils/src/tests/mock.rs +++ b/pallets/admin-utils/src/tests/mock.rs @@ -80,6 +80,7 @@ parameter_types! { pub const TransactionByteFee: Balance = 100; pub const SDebug:u64 = 1; pub const InitialRho: u16 = 30; + pub const InitialAlphaSigmoidSteepness: u16 = 10; pub const InitialKappa: u16 = 32_767; pub const InitialTempo: u16 = 0; pub const SelfOwnership: u64 = 2; @@ -87,6 +88,7 @@ parameter_types! { pub const InitialMaxAllowedUids: u16 = 2; pub const InitialBondsMovingAverage: u64 = 900_000; pub const InitialBondsPenalty: u16 = u16::MAX; + pub const InitialBondsResetOn: bool = false; pub const InitialStakePruningMin: u16 = 0; pub const InitialFoundationDistribution: u64 = 0; pub const InitialDefaultDelegateTake: u16 = 11_796; // 18% honest number. @@ -129,9 +131,11 @@ parameter_types! { pub const InitialAlphaHigh: u16 = 58982; // Represents 0.9 as per the production default pub const InitialAlphaLow: u16 = 45875; // Represents 0.7 as per the production default pub const InitialLiquidAlphaOn: bool = false; // Default value for LiquidAlphaOn + pub const InitialYuma3On: bool = false; // Default value for Yuma3On // pub const InitialHotkeyEmissionTempo: u64 = 1; // (DEPRECATED) // pub const InitialNetworkMaxStake: u64 = u64::MAX; // (DEPRECATED) pub const InitialColdkeySwapScheduleDuration: u64 = 5 * 24 * 60 * 60 / 12; // 5 days + pub const InitialColdkeySwapRescheduleDuration: u64 = 24 * 60 * 60 / 12; // 1 day pub const InitialDissolveNetworkScheduleDuration: u64 = 5 * 24 * 60 * 60 / 12; // 5 days pub const InitialTaoWeight: u64 = u64::MAX/10; // 10% global weight. 
pub const InitialEmaPriceHalvingPeriod: u64 = 201_600_u64; // 4 weeks @@ -157,6 +161,7 @@ impl pallet_subtensor::Config for Test { type InitialAdjustmentAlpha = InitialAdjustmentAlpha; type InitialTargetRegistrationsPerInterval = InitialTargetRegistrationsPerInterval; type InitialRho = InitialRho; + type InitialAlphaSigmoidSteepness = InitialAlphaSigmoidSteepness; type InitialKappa = InitialKappa; type InitialMaxAllowedUids = InitialMaxAllowedUids; type InitialValidatorPruneLen = InitialValidatorPruneLen; @@ -167,6 +172,7 @@ impl pallet_subtensor::Config for Test { type InitialPruningScore = InitialPruningScore; type InitialBondsMovingAverage = InitialBondsMovingAverage; type InitialBondsPenalty = InitialBondsPenalty; + type InitialBondsResetOn = InitialBondsResetOn; type InitialMaxAllowedValidators = InitialMaxAllowedValidators; type InitialDefaultDelegateTake = InitialDefaultDelegateTake; type InitialMinDelegateTake = InitialMinDelegateTake; @@ -195,8 +201,10 @@ impl pallet_subtensor::Config for Test { type AlphaHigh = InitialAlphaHigh; type AlphaLow = InitialAlphaLow; type LiquidAlphaOn = InitialLiquidAlphaOn; + type Yuma3On = InitialYuma3On; type Preimages = (); type InitialColdkeySwapScheduleDuration = InitialColdkeySwapScheduleDuration; + type InitialColdkeySwapRescheduleDuration = InitialColdkeySwapRescheduleDuration; type InitialDissolveNetworkScheduleDuration = InitialDissolveNetworkScheduleDuration; type InitialTaoWeight = InitialTaoWeight; type InitialEmaPriceHalvingPeriod = InitialEmaPriceHalvingPeriod; @@ -285,7 +293,6 @@ impl crate::Config for Test { type Aura = (); type Grandpa = GrandpaInterfaceImpl; type Balance = Balance; - type WeightInfo = (); } parameter_types! { @@ -311,7 +318,6 @@ impl pallet_scheduler::Config for Test { impl pallet_evm_chain_id::Config for Test {} impl pallet_drand::Config for Test { type RuntimeEvent = RuntimeEvent; - type WeightInfo = pallet_drand::weights::SubstrateWeight; type AuthorityId = TestAuthId; type Verifier = pallet_drand::verifier::QuicknetVerifier; type UnsignedPriority = ConstU64<{ 1 << 20 }>; diff --git a/pallets/admin-utils/src/tests/mod.rs b/pallets/admin-utils/src/tests/mod.rs index 2f4c3f2b51..bb813ce117 100644 --- a/pallets/admin-utils/src/tests/mod.rs +++ b/pallets/admin-utils/src/tests/mod.rs @@ -1711,3 +1711,72 @@ fn test_sudo_set_ema_halving() { assert_eq!(value_after_2, to_be_set); }); } + +// cargo test --package pallet-admin-utils --lib -- tests::test_set_sn_owner_hotkey --exact --show-output +#[test] +fn test_set_sn_owner_hotkey_owner() { + new_test_ext().execute_with(|| { + let netuid: u16 = 1; + let hotkey: U256 = U256::from(3); + let bad_origin_coldkey: U256 = U256::from(4); + add_network(netuid, 10); + + let owner = U256::from(10); + pallet_subtensor::SubnetOwner::::insert(netuid, owner); + + // Non-owner and non-root cannot set the sn owner hotkey + assert_eq!( + AdminUtils::sudo_set_sn_owner_hotkey( + <::RuntimeOrigin>::signed(bad_origin_coldkey), + netuid, + hotkey + ), + Err(DispatchError::BadOrigin) + ); + + // SN owner can set the hotkey + assert_ok!(AdminUtils::sudo_set_sn_owner_hotkey( + <::RuntimeOrigin>::signed(owner), + netuid, + hotkey + )); + + // Check the value + let actual_hotkey = pallet_subtensor::SubnetOwnerHotkey::::get(netuid); + assert_eq!(actual_hotkey, hotkey); + + // Cannot set again (rate limited) + assert_err!( + AdminUtils::sudo_set_sn_owner_hotkey( + <::RuntimeOrigin>::signed(owner), + netuid, + hotkey + ), + pallet_subtensor::Error::::TxRateLimitExceeded + ); + }); +} + +// cargo test 
--package pallet-admin-utils --lib -- tests::test_set_sn_owner_hotkey_root --exact --show-output +#[test] +fn test_set_sn_owner_hotkey_root() { + new_test_ext().execute_with(|| { + let netuid: u16 = 1; + let hotkey: U256 = U256::from(3); + add_network(netuid, 10); + + let owner = U256::from(10); + pallet_subtensor::SubnetOwner::::insert(netuid, owner); + + // Root can set the hotkey + assert_ok!(AdminUtils::sudo_set_sn_owner_hotkey( + <::RuntimeOrigin>::root(), + netuid, + hotkey + )); + + // Check the value + let actual_hotkey = pallet_subtensor::SubnetOwnerHotkey::::get(netuid); + assert_eq!(actual_hotkey, hotkey); + }); +} diff --git a/pallets/admin-utils/src/weights.rs b/pallets/admin-utils/src/weights.rs deleted file mode 100644 index 6ef9523546..0000000000 --- a/pallets/admin-utils/src/weights.rs +++ /dev/null @@ -1,854 +0,0 @@ - -//! Autogenerated weights for `pallet_admin_utils` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-12-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `morpheus`, CPU: `AMD EPYC 7513 32-Core Processor` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("local")`, DB CACHE: `1024` - -// Executed Command: -// ./target/release/node-subtensor -// benchmark -// pallet -// --chain=local -// --execution=wasm -// --wasm-execution=compiled -// --pallet=pallet_admin_utils -// --extrinsic=* -// --steps -// 50 -// --repeat -// 20 -// --output=pallets/admin-utils/src/weights.rs -// --template=./.maintain/frame-weight-template.hbs - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use core::marker::PhantomData; - -/// Weight functions needed for `pallet_admin_utils`. -pub trait WeightInfo { - fn swap_authorities(a: u32, ) -> Weight; - fn sudo_set_min_delegate_take() -> Weight; - fn sudo_set_default_take() -> Weight; - fn sudo_set_serving_rate_limit() -> Weight; - fn sudo_set_max_difficulty() -> Weight; - fn sudo_set_min_difficulty() -> Weight; - fn sudo_set_weights_set_rate_limit() -> Weight; - fn sudo_set_weights_version_key() -> Weight; - fn sudo_set_bonds_moving_average() -> Weight; - fn sudo_set_bonds_penalty() -> Weight; - fn sudo_set_max_allowed_validators() -> Weight; - fn sudo_set_difficulty() -> Weight; - fn sudo_set_adjustment_interval() -> Weight; - fn sudo_set_target_registrations_per_interval() -> Weight; - fn sudo_set_activity_cutoff() -> Weight; - fn sudo_set_rho() -> Weight; - fn sudo_set_kappa() -> Weight; - fn sudo_set_max_allowed_uids() -> Weight; - fn sudo_set_min_allowed_weights() -> Weight; - fn sudo_set_validator_prune_len() -> Weight; - fn sudo_set_scaling_law_power() -> Weight; - fn sudo_set_immunity_period() -> Weight; - fn sudo_set_max_weight_limit() -> Weight; - fn sudo_set_max_registrations_per_block() -> Weight; - fn sudo_set_max_burn() -> Weight; - fn sudo_set_min_burn() -> Weight; - fn sudo_set_network_registration_allowed() -> Weight; - fn sudo_set_tempo() -> Weight; - fn sudo_set_commit_reveal_weights_interval() -> Weight; - fn sudo_set_commit_reveal_weights_enabled() -> Weight; - fn sudo_set_evm_chain_id() -> Weight; - fn schedule_grandpa_change(a: u32) -> Weight; -} - -/// Weights for `pallet_admin_utils` using the Substrate node and recommended hardware. 
-pub struct SubstrateWeight(PhantomData); -impl WeightInfo for SubstrateWeight { - /// Storage: System Digest (r:1 w:1) - /// Proof Skipped: System Digest (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Aura Authorities (r:0 w:1) - /// Proof: Aura Authorities (max_values: Some(1), max_size: Some(1025), added: 1520, mode: MaxEncodedLen) - /// The range of component `a` is `[0, 32]`. - fn swap_authorities(a: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `632` - // Estimated: `1127` - // Minimum execution time: 11_490_000 picoseconds. - Weight::from_parts(20_410_228, 1127) - // Standard Error: 8_309 - .saturating_add(Weight::from_parts(199_399, 0).saturating_mul(a.into())) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) - } - /// Storage: SubtensorModule DefaultTake (r:0 w:1) - /// Proof Skipped: SubtensorModule DefaultTake (max_values: Some(1), max_size: None, mode: Measured) - fn sudo_set_default_take() -> Weight { - // Proof Size summary in bytes: - // Measured: `655` - // Estimated: `655` - // Minimum execution time: 26_770_000 picoseconds. - Weight::from_parts(27_199_000, 655) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule DefaultTake (r:0 w:1) - /// Proof Skipped: SubtensorModule DefaultTake (max_values: Some(1), max_size: None, mode: Measured) - fn sudo_set_min_delegate_take() -> Weight { - // Proof Size summary in bytes: - // Measured: `655` - // Estimated: `655` - // Minimum execution time: 26_770_000 picoseconds. - Weight::from_parts(27_199_000, 655) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule ServingRateLimit (r:0 w:1) - /// Proof Skipped: SubtensorModule ServingRateLimit (max_values: None, max_size: None, mode: Measured) - fn sudo_set_serving_rate_limit() -> Weight { - // Proof Size summary in bytes: - // Measured: `655` - // Estimated: `655` - // Minimum execution time: 27_700_000 picoseconds. - Weight::from_parts(28_290_000, 655) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MaxDifficulty (r:0 w:1) - /// Proof Skipped: SubtensorModule MaxDifficulty (max_values: None, max_size: None, mode: Measured) - fn sudo_set_max_difficulty() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 46_450_000 picoseconds. - Weight::from_parts(47_279_000, 4697) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MinDifficulty (r:0 w:1) - /// Proof Skipped: SubtensorModule MinDifficulty (max_values: None, max_size: None, mode: Measured) - fn sudo_set_min_difficulty() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 46_110_000 picoseconds. 
- Weight::from_parts(46_909_000, 4697) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule WeightsSetRateLimit (r:0 w:1) - /// Proof Skipped: SubtensorModule WeightsSetRateLimit (max_values: None, max_size: None, mode: Measured) - fn sudo_set_weights_set_rate_limit() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 46_349_000 picoseconds. - Weight::from_parts(46_970_000, 4697) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule WeightsVersionKey (r:0 w:1) - /// Proof Skipped: SubtensorModule WeightsVersionKey (max_values: None, max_size: None, mode: Measured) - fn sudo_set_weights_version_key() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_940_000 picoseconds. - Weight::from_parts(47_460_000, 4697) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule BondsMovingAverage (r:0 w:1) - /// Proof Skipped: SubtensorModule BondsMovingAverage (max_values: None, max_size: None, mode: Measured) - fn sudo_set_bonds_moving_average() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 46_099_000 picoseconds. - Weight::from_parts(47_510_000, 4697) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule BondsPenalty (r:0 w:1) - /// Proof Skipped: SubtensorModule BondsPenalty (max_values: None, max_size: None, mode: Measured) - fn sudo_set_bonds_penalty() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 46_099_000 picoseconds. - Weight::from_parts(47_510_000, 4697) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MaxAllowedUids (r:1 w:0) - /// Proof Skipped: SubtensorModule MaxAllowedUids (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MaxAllowedValidators (r:0 w:1) - /// Proof Skipped: SubtensorModule MaxAllowedValidators (max_values: None, max_size: None, mode: Measured) - fn sudo_set_max_allowed_validators() -> Weight { - // Proof Size summary in bytes: - // Measured: `1154` - // Estimated: `8412` - // Minimum execution time: 52_599_000 picoseconds. 
- Weight::from_parts(53_640_000, 8412) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule Difficulty (r:0 w:1) - /// Proof Skipped: SubtensorModule Difficulty (max_values: None, max_size: None, mode: Measured) - fn sudo_set_difficulty() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 46_240_000 picoseconds. - Weight::from_parts(47_130_000, 4697) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule AdjustmentInterval (r:0 w:1) - /// Proof Skipped: SubtensorModule AdjustmentInterval (max_values: None, max_size: None, mode: Measured) - fn sudo_set_adjustment_interval() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_430_000 picoseconds. - Weight::from_parts(46_790_000, 4697) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule TargetRegistrationsPerInterval (r:0 w:1) - /// Proof Skipped: SubtensorModule TargetRegistrationsPerInterval (max_values: None, max_size: None, mode: Measured) - fn sudo_set_target_registrations_per_interval() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_899_000 picoseconds. - Weight::from_parts(47_099_000, 4697) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule ActivityCutoff (r:0 w:1) - /// Proof Skipped: SubtensorModule ActivityCutoff (max_values: None, max_size: None, mode: Measured) - fn sudo_set_activity_cutoff() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 46_029_000 picoseconds. - Weight::from_parts(46_759_000, 4697) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule Rho (r:0 w:1) - /// Proof Skipped: SubtensorModule Rho (max_values: None, max_size: None, mode: Measured) - fn sudo_set_rho() -> Weight { - // Proof Size summary in bytes: - // Measured: `903` - // Estimated: `4281` - // Minimum execution time: 30_980_000 picoseconds. 
- Weight::from_parts(31_820_000, 4281) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule Kappa (r:0 w:1) - /// Proof Skipped: SubtensorModule Kappa (max_values: None, max_size: None, mode: Measured) - fn sudo_set_kappa() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_620_000 picoseconds. - Weight::from_parts(46_440_000, 4697) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule SubnetworkN (r:1 w:0) - /// Proof Skipped: SubtensorModule SubnetworkN (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MaxAllowedUids (r:0 w:1) - /// Proof Skipped: SubtensorModule MaxAllowedUids (max_values: None, max_size: None, mode: Measured) - fn sudo_set_max_allowed_uids() -> Weight { - // Proof Size summary in bytes: - // Measured: `1117` - // Estimated: `8301` - // Minimum execution time: 50_270_000 picoseconds. - Weight::from_parts(51_149_000, 8301) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MinAllowedWeights (r:0 w:1) - /// Proof Skipped: SubtensorModule MinAllowedWeights (max_values: None, max_size: None, mode: Measured) - fn sudo_set_min_allowed_weights() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_990_000 picoseconds. - Weight::from_parts(47_390_000, 4697) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule ValidatorPruneLen (r:0 w:1) - /// Proof Skipped: SubtensorModule ValidatorPruneLen (max_values: None, max_size: None, mode: Measured) - fn sudo_set_validator_prune_len() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_939_000 picoseconds. - Weight::from_parts(46_960_000, 4697) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule ScalingLawPower (r:0 w:1) - /// Proof Skipped: SubtensorModule ScalingLawPower (max_values: None, max_size: None, mode: Measured) - fn sudo_set_scaling_law_power() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_480_000 picoseconds. 
- Weight::from_parts(46_590_000, 4697) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule ImmunityPeriod (r:0 w:1) - /// Proof Skipped: SubtensorModule ImmunityPeriod (max_values: None, max_size: None, mode: Measured) - fn sudo_set_immunity_period() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_289_000 picoseconds. - Weight::from_parts(46_679_000, 4697) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MaxWeightsLimit (r:0 w:1) - /// Proof Skipped: SubtensorModule MaxWeightsLimit (max_values: None, max_size: None, mode: Measured) - fn sudo_set_max_weight_limit() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_850_000 picoseconds. - Weight::from_parts(46_589_000, 4697) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MaxRegistrationsPerBlock (r:0 w:1) - /// Proof Skipped: SubtensorModule MaxRegistrationsPerBlock (max_values: None, max_size: None, mode: Measured) - fn sudo_set_max_registrations_per_block() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_330_000 picoseconds. - Weight::from_parts(46_490_000, 4697) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MaxBurn (r:0 w:1) - /// Proof Skipped: SubtensorModule MaxBurn (max_values: None, max_size: None, mode: Measured) - fn sudo_set_max_burn() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_390_000 picoseconds. - Weight::from_parts(46_339_000, 4697) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MinBurn (r:0 w:1) - /// Proof Skipped: SubtensorModule MinBurn (max_values: None, max_size: None, mode: Measured) - fn sudo_set_min_burn() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_189_000 picoseconds. 
- Weight::from_parts(46_109_000, 4697) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworkPowRegistrationAllowed (r:0 w:1) - /// Proof Skipped: SubtensorModule NetworkPowRegistrationAllowed (max_values: None, max_size: None, mode: Measured) - fn sudo_set_network_registration_allowed() -> Weight { - // Proof Size summary in bytes: - // Measured: `655` - // Estimated: `655` - // Minimum execution time: 33_600_000 picoseconds. - Weight::from_parts(34_599_000, 655) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule Tempo (r:0 w:1) - /// Proof Skipped: SubtensorModule Tempo (max_values: None, max_size: None, mode: Measured) - fn sudo_set_tempo() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 44_739_000 picoseconds. - Weight::from_parts(45_489_000, 4697) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - fn sudo_set_commit_reveal_weights_interval() -> Weight { - // Proof Size summary in bytes: - // Measured: `456` - // Estimated: `3921` - // Minimum execution time: 19_070_000 picoseconds. - Weight::from_parts(19_380_000, 456) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - fn sudo_set_commit_reveal_weights_enabled() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 46_450_000 picoseconds. - Weight::from_parts(47_279_000, 4697) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - fn sudo_set_evm_chain_id() -> Weight { - Weight::from_parts(20_200_000, 0) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - - fn schedule_grandpa_change(_a: u32) -> Weight { - // TODO should be replaced by benchmarked weights - Weight::default() - } -} - -// For backwards compatibility and tests. -impl WeightInfo for () { - /// Storage: System Digest (r:1 w:1) - /// Proof Skipped: System Digest (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Aura Authorities (r:0 w:1) - /// Proof: Aura Authorities (max_values: Some(1), max_size: Some(1025), added: 1520, mode: MaxEncodedLen) - /// The range of component `a` is `[0, 32]`. - fn swap_authorities(a: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `632` - // Estimated: `1127` - // Minimum execution time: 11_490_000 picoseconds. - Weight::from_parts(20_410_228, 1127) - // Standard Error: 8_309 - .saturating_add(Weight::from_parts(199_399, 0).saturating_mul(a.into())) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) - } - /// Storage: SubtensorModule DefaultTake (r:0 w:1) - /// Proof Skipped: SubtensorModule DefaultTake (max_values: Some(1), max_size: None, mode: Measured) - fn sudo_set_default_take() -> Weight { - // Proof Size summary in bytes: - // Measured: `655` - // Estimated: `655` - // Minimum execution time: 26_770_000 picoseconds. 
- Weight::from_parts(27_199_000, 655) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule DefaultTake (r:0 w:1) - /// Proof Skipped: SubtensorModule DefaultTake (max_values: Some(1), max_size: None, mode: Measured) - fn sudo_set_min_delegate_take() -> Weight { - // Proof Size summary in bytes: - // Measured: `655` - // Estimated: `655` - // Minimum execution time: 26_770_000 picoseconds. - Weight::from_parts(27_199_000, 655) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule ServingRateLimit (r:0 w:1) - /// Proof Skipped: SubtensorModule ServingRateLimit (max_values: None, max_size: None, mode: Measured) - fn sudo_set_serving_rate_limit() -> Weight { - // Proof Size summary in bytes: - // Measured: `655` - // Estimated: `655` - // Minimum execution time: 27_700_000 picoseconds. - Weight::from_parts(28_290_000, 655) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MaxDifficulty (r:0 w:1) - /// Proof Skipped: SubtensorModule MaxDifficulty (max_values: None, max_size: None, mode: Measured) - fn sudo_set_max_difficulty() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 46_450_000 picoseconds. - Weight::from_parts(47_279_000, 4697) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MinDifficulty (r:0 w:1) - /// Proof Skipped: SubtensorModule MinDifficulty (max_values: None, max_size: None, mode: Measured) - fn sudo_set_min_difficulty() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 46_110_000 picoseconds. - Weight::from_parts(46_909_000, 4697) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule WeightsSetRateLimit (r:0 w:1) - /// Proof Skipped: SubtensorModule WeightsSetRateLimit (max_values: None, max_size: None, mode: Measured) - fn sudo_set_weights_set_rate_limit() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 46_349_000 picoseconds. - Weight::from_parts(46_970_000, 4697) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule WeightsVersionKey (r:0 w:1) - /// Proof Skipped: SubtensorModule WeightsVersionKey (max_values: None, max_size: None, mode: Measured) - fn sudo_set_weights_version_key() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_940_000 picoseconds. 
- Weight::from_parts(47_460_000, 4697) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule BondsMovingAverage (r:0 w:1) - /// Proof Skipped: SubtensorModule BondsMovingAverage (max_values: None, max_size: None, mode: Measured) - fn sudo_set_bonds_moving_average() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 46_099_000 picoseconds. - Weight::from_parts(47_510_000, 4697) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule BondsPenalty (r:0 w:1) - /// Proof Skipped: SubtensorModule BondsPenalty (max_values: None, max_size: None, mode: Measured) - fn sudo_set_bonds_penalty() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 46_099_000 picoseconds. - Weight::from_parts(47_510_000, 4697) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MaxAllowedUids (r:1 w:0) - /// Proof Skipped: SubtensorModule MaxAllowedUids (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MaxAllowedValidators (r:0 w:1) - /// Proof Skipped: SubtensorModule MaxAllowedValidators (max_values: None, max_size: None, mode: Measured) - fn sudo_set_max_allowed_validators() -> Weight { - // Proof Size summary in bytes: - // Measured: `1154` - // Estimated: `8412` - // Minimum execution time: 52_599_000 picoseconds. - Weight::from_parts(53_640_000, 8412) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule Difficulty (r:0 w:1) - /// Proof Skipped: SubtensorModule Difficulty (max_values: None, max_size: None, mode: Measured) - fn sudo_set_difficulty() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 46_240_000 picoseconds. - Weight::from_parts(47_130_000, 4697) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule AdjustmentInterval (r:0 w:1) - /// Proof Skipped: SubtensorModule AdjustmentInterval (max_values: None, max_size: None, mode: Measured) - fn sudo_set_adjustment_interval() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_430_000 picoseconds. 
- Weight::from_parts(46_790_000, 4697) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule TargetRegistrationsPerInterval (r:0 w:1) - /// Proof Skipped: SubtensorModule TargetRegistrationsPerInterval (max_values: None, max_size: None, mode: Measured) - fn sudo_set_target_registrations_per_interval() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_899_000 picoseconds. - Weight::from_parts(47_099_000, 4697) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule ActivityCutoff (r:0 w:1) - /// Proof Skipped: SubtensorModule ActivityCutoff (max_values: None, max_size: None, mode: Measured) - fn sudo_set_activity_cutoff() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 46_029_000 picoseconds. - Weight::from_parts(46_759_000, 4697) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule Rho (r:0 w:1) - /// Proof Skipped: SubtensorModule Rho (max_values: None, max_size: None, mode: Measured) - fn sudo_set_rho() -> Weight { - // Proof Size summary in bytes: - // Measured: `903` - // Estimated: `4281` - // Minimum execution time: 30_980_000 picoseconds. - Weight::from_parts(31_820_000, 4281) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule Kappa (r:0 w:1) - /// Proof Skipped: SubtensorModule Kappa (max_values: None, max_size: None, mode: Measured) - fn sudo_set_kappa() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_620_000 picoseconds. - Weight::from_parts(46_440_000, 4697) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule SubnetworkN (r:1 w:0) - /// Proof Skipped: SubtensorModule SubnetworkN (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MaxAllowedUids (r:0 w:1) - /// Proof Skipped: SubtensorModule MaxAllowedUids (max_values: None, max_size: None, mode: Measured) - fn sudo_set_max_allowed_uids() -> Weight { - // Proof Size summary in bytes: - // Measured: `1117` - // Estimated: `8301` - // Minimum execution time: 50_270_000 picoseconds. 
- Weight::from_parts(51_149_000, 8301) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MinAllowedWeights (r:0 w:1) - /// Proof Skipped: SubtensorModule MinAllowedWeights (max_values: None, max_size: None, mode: Measured) - fn sudo_set_min_allowed_weights() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_990_000 picoseconds. - Weight::from_parts(47_390_000, 4697) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule ValidatorPruneLen (r:0 w:1) - /// Proof Skipped: SubtensorModule ValidatorPruneLen (max_values: None, max_size: None, mode: Measured) - fn sudo_set_validator_prune_len() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_939_000 picoseconds. - Weight::from_parts(46_960_000, 4697) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule ScalingLawPower (r:0 w:1) - /// Proof Skipped: SubtensorModule ScalingLawPower (max_values: None, max_size: None, mode: Measured) - fn sudo_set_scaling_law_power() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_480_000 picoseconds. - Weight::from_parts(46_590_000, 4697) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule ImmunityPeriod (r:0 w:1) - /// Proof Skipped: SubtensorModule ImmunityPeriod (max_values: None, max_size: None, mode: Measured) - fn sudo_set_immunity_period() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_289_000 picoseconds. - Weight::from_parts(46_679_000, 4697) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MaxWeightsLimit (r:0 w:1) - /// Proof Skipped: SubtensorModule MaxWeightsLimit (max_values: None, max_size: None, mode: Measured) - fn sudo_set_max_weight_limit() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_850_000 picoseconds. 
- Weight::from_parts(46_589_000, 4697) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MaxRegistrationsPerBlock (r:0 w:1) - /// Proof Skipped: SubtensorModule MaxRegistrationsPerBlock (max_values: None, max_size: None, mode: Measured) - fn sudo_set_max_registrations_per_block() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_330_000 picoseconds. - Weight::from_parts(46_490_000, 4697) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MaxBurn (r:0 w:1) - /// Proof Skipped: SubtensorModule MaxBurn (max_values: None, max_size: None, mode: Measured) - fn sudo_set_max_burn() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_390_000 picoseconds. - Weight::from_parts(46_339_000, 4697) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MinBurn (r:0 w:1) - /// Proof Skipped: SubtensorModule MinBurn (max_values: None, max_size: None, mode: Measured) - fn sudo_set_min_burn() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_189_000 picoseconds. - Weight::from_parts(46_109_000, 4697) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworkPowRegistrationAllowed (r:0 w:1) - /// Proof Skipped: SubtensorModule NetworkPowRegistrationAllowed (max_values: None, max_size: None, mode: Measured) - fn sudo_set_network_registration_allowed() -> Weight { - // Proof Size summary in bytes: - // Measured: `655` - // Estimated: `655` - // Minimum execution time: 33_600_000 picoseconds. - Weight::from_parts(34_599_000, 655) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule Tempo (r:0 w:1) - /// Proof Skipped: SubtensorModule Tempo (max_values: None, max_size: None, mode: Measured) - fn sudo_set_tempo() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 44_739_000 picoseconds. 
- Weight::from_parts(45_489_000, 4697) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - fn sudo_set_commit_reveal_weights_interval() -> Weight { - // -- Extrinsic Time -- - // Model: - // Time ~= 19.38 - // µs - // Reads = 1 - // Writes = 1 - // Recorded proof Size = 456 - Weight::from_parts(19_380_000, 456) - .saturating_add(RocksDbWeight::get().reads(1)) - .saturating_add(RocksDbWeight::get().writes(1)) - } - fn sudo_set_commit_reveal_weights_enabled() -> Weight { - // -- Extrinsic Time -- - // Model: - // Time ~= 19.78 - // µs - // Reads = 1 - // Writes = 1 - // Recorded proof Size = 456 - Weight::from_parts(19_780_000, 456) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - fn sudo_set_evm_chain_id() -> Weight { - Weight::from_parts(20_200_000, 0) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - fn schedule_grandpa_change(_a: u32) -> Weight { - // TODO should be replaced by benchmarked weights - Weight::default() - } -} diff --git a/pallets/commitments/src/benchmarking.rs b/pallets/commitments/src/benchmarking.rs index 54247bb9d6..e66f2a07e8 100644 --- a/pallets/commitments/src/benchmarking.rs +++ b/pallets/commitments/src/benchmarking.rs @@ -35,7 +35,6 @@ mod benchmarks { #[benchmark] fn set_commitment() { - // The target user let netuid = 1; let caller: T::AccountId = whitelisted_caller(); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); @@ -56,5 +55,15 @@ mod benchmarks { ); } + #[benchmark] + fn set_max_space() { + let new_space: u32 = 1_000; + + #[extrinsic_call] + _(RawOrigin::Root, new_space); + + assert_eq!(MaxSpace::::get(), new_space); + } + //impl_benchmark_test_suite!(Commitments, crate::tests::new_test_ext(), crate::tests::Test); } diff --git a/pallets/commitments/src/lib.rs b/pallets/commitments/src/lib.rs index 11e1ae76ee..5b1ff7f3a8 100644 --- a/pallets/commitments/src/lib.rs +++ b/pallets/commitments/src/lib.rs @@ -47,7 +47,7 @@ pub mod pallet { /// Because this pallet emits events, it depends on the runtime's definition of an event. type RuntimeEvent: From> + IsType<::RuntimeEvent>; - /// Currency type that will be used to place deposits on neurons + ///Currency type that will be used to reserve deposits for commitments type Currency: ReservableCurrency + Send + Sync; /// Weight information for extrinsics in this pallet. @@ -56,6 +56,9 @@ pub mod pallet { /// Interface to access-limit metadata commitments type CanCommit: CanCommit; + /// Interface to trigger other pallets when metadata is committed + type OnMetadataCommitment: OnMetadataCommitment; + /// The maximum number of additional fields that can be added to a commitment #[pallet::constant] type MaxFields: Get + TypeInfo + 'static; @@ -68,15 +71,11 @@ pub mod pallet { #[pallet::constant] type FieldDeposit: Get>; - /// The rate limit for commitments - #[pallet::constant] - type DefaultRateLimit: Get>; - - /// Used to retreive the given subnet's tempo + /// Used to retrieve the given subnet's tempo type TempoInterface: GetTempoInterface; } - /// Used to retreive the given subnet's tempo + /// Used to retrieve the given subnet's tempo pub trait GetTempoInterface { /// Used to retreive the epoch index for the given subnet. 
fn get_epoch_index(netuid: u16, cur_block: u64) -> u64; @@ -114,26 +113,14 @@ pub mod pallet { pub enum Error { /// Account passed too many additional fields to their commitment TooManyFieldsInCommitmentInfo, - /// Account is not allow to make commitments to the chain + /// Account is not allowed to make commitments to the chain AccountNotAllowedCommit, - /// Account is trying to commit data too fast, rate limit exceeded - CommitmentSetRateLimitExceeded, /// Space Limit Exceeded for the current interval SpaceLimitExceeded, /// Indicates that unreserve returned a leftover, which is unexpected. UnexpectedUnreserveLeftover, } - #[pallet::type_value] - /// *DEPRECATED* Default value for commitment rate limit. - pub fn DefaultRateLimit() -> BlockNumberFor { - T::DefaultRateLimit::get() - } - - /// *DEPRECATED* The rate limit for commitments - #[pallet::storage] - pub type RateLimit = StorageValue<_, BlockNumberFor, ValueQuery, DefaultRateLimit>; - /// Tracks all CommitmentOf that have at least one timelocked field. #[pallet::storage] #[pallet::getter(fn timelocked_index)] @@ -164,6 +151,19 @@ pub mod pallet { BlockNumberFor, OptionQuery, >; + + #[pallet::storage] + #[pallet::getter(fn last_bonds_reset)] + pub(super) type LastBondsReset = StorageDoubleMap< + _, + Identity, + u16, + Twox64Concat, + T::AccountId, + BlockNumberFor, + OptionQuery, + >; + #[pallet::storage] #[pallet::getter(fn revealed_commitments)] pub(super) type RevealedCommitments = StorageDoubleMap< @@ -198,7 +198,9 @@ pub mod pallet { /// Set the commitment for a given netuid #[pallet::call_index(0)] #[pallet::weight(( - ::WeightInfo::set_commitment(), + Weight::from_parts(38_000_000, 0) + .saturating_add(T::DbWeight::get().reads(5_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)), DispatchClass::Operational, Pays::No ))] @@ -207,7 +209,7 @@ pub mod pallet { netuid: u16, info: Box>, ) -> DispatchResult { - let who = ensure_signed(origin)?; + let who = ensure_signed(origin.clone())?; ensure!( T::CanCommit::can_commit(netuid, &who), Error::::AccountNotAllowedCommit @@ -238,6 +240,16 @@ pub mod pallet { usage.used_space = 0; } + // check if ResetBondsFlag is set in the fields + for field in info.fields.iter() { + if let Data::ResetBondsFlag = field { + // track when bonds reset was last triggered + >::insert(netuid, &who, cur_block); + T::OnMetadataCommitment::on_metadata_commitment(netuid, &who); + break; + } + } + let max_allowed = MaxSpace::::get() as u64; ensure!( usage.used_space.saturating_add(required_space) <= max_allowed, @@ -306,23 +318,27 @@ pub mod pallet { Ok(()) } - /// Sudo-set the commitment rate limit + /// *DEPRECATED* Sudo-set the commitment rate limit #[pallet::call_index(1)] #[pallet::weight(( - ::WeightInfo::set_rate_limit(), - DispatchClass::Operational, - Pays::No - ))] - pub fn set_rate_limit(origin: OriginFor, rate_limit_blocks: u32) -> DispatchResult { + Weight::from_parts(3_596_000, 0) + .saturating_add(T::DbWeight::get().reads(0_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)), + DispatchClass::Operational, + Pays::No + ))] + pub fn set_rate_limit(origin: OriginFor, _rate_limit_blocks: u32) -> DispatchResult { ensure_root(origin)?; - RateLimit::::set(rate_limit_blocks.into()); + // RateLimit::::set(rate_limit_blocks.into()); Ok(()) } /// Sudo-set MaxSpace #[pallet::call_index(2)] #[pallet::weight(( - ::WeightInfo::set_rate_limit(), + Weight::from_parts(3_556_000, 0) + .saturating_add(T::DbWeight::get().reads(0_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)), 
DispatchClass::Operational, Pays::No ))] @@ -359,6 +375,14 @@ impl CanCommit for () { } } +pub trait OnMetadataCommitment { + fn on_metadata_commitment(netuid: u16, account: &AccountId); +} + +impl OnMetadataCommitment for () { + fn on_metadata_commitment(_: u16, _: &A) {} +} + /************************************************************ CallType definition ************************************************************/ diff --git a/pallets/commitments/src/mock.rs b/pallets/commitments/src/mock.rs index c8f6b1e1b2..4e6aa123bd 100644 --- a/pallets/commitments/src/mock.rs +++ b/pallets/commitments/src/mock.rs @@ -100,8 +100,8 @@ impl pallet_commitments::Config for Test { type CanCommit = TestCanCommit; type FieldDeposit = ConstU64<0>; type InitialDeposit = ConstU64<0>; - type DefaultRateLimit = ConstU64<0>; type TempoInterface = MockTempoInterface; + type OnMetadataCommitment = (); } pub struct MockTempoInterface; @@ -118,7 +118,6 @@ impl pallet_commitments::GetTempoInterface for MockTempoInterface { impl pallet_drand::Config for Test { type RuntimeEvent = RuntimeEvent; - type WeightInfo = pallet_drand::weights::SubstrateWeight; type AuthorityId = test_crypto::TestAuthId; type Verifier = pallet_drand::verifier::QuicknetVerifier; type UnsignedPriority = ConstU64<{ 1 << 20 }>; diff --git a/pallets/commitments/src/tests.rs b/pallets/commitments/src/tests.rs index c9b14d188b..62e9444b76 100644 --- a/pallets/commitments/src/tests.rs +++ b/pallets/commitments/src/tests.rs @@ -3,7 +3,7 @@ use sp_std::prelude::*; #[cfg(test)] use crate::{ - CommitmentInfo, CommitmentOf, Config, Data, Error, Event, MaxSpace, Pallet, RateLimit, + BalanceOf, CommitmentInfo, CommitmentOf, Config, Data, Error, Event, MaxSpace, Pallet, Registration, RevealedCommitments, TimelockedIndex, UsedSpaceOf, mock::{ Balances, DRAND_QUICKNET_SIG_2000_HEX, DRAND_QUICKNET_SIG_HEX, RuntimeEvent, RuntimeOrigin, @@ -34,6 +34,8 @@ fn manual_data_type_info() { Data::ShaThree256(_) => "ShaThree256".to_string(), Data::Raw(bytes) => format!("Raw{}", bytes.len()), Data::TimelockEncrypted { .. 
} => "TimelockEncrypted".to_string(), + Data::ResetBondsFlag => "ResetBondsFlag".to_string(), + Data::BigRaw(_) => "BigRaw".to_string(), }; if let scale_info::TypeDef::Variant(variant) = &type_info.type_def { let variant = variant @@ -50,6 +52,7 @@ fn manual_data_type_info() { let expected_len = match data { Data::None => 0, Data::Raw(bytes) => bytes.len() as u32, + Data::BigRaw(bytes) => bytes.len() as u32, Data::BlakeTwo256(_) | Data::Sha256(_) | Data::Keccak256(_) @@ -63,6 +66,7 @@ fn manual_data_type_info() { let reveal_round_len = reveal_round.encode().len() as u32; // Typically 8 bytes encrypted_len + reveal_round_len } + Data::ResetBondsFlag => 0, }; assert_eq!( encoded.len() as u32 - 1, // Subtract variant byte @@ -89,6 +93,7 @@ fn manual_data_type_info() { Data::Sha256(Default::default()), Data::Keccak256(Default::default()), Data::ShaThree256(Default::default()), + Data::ResetBondsFlag, ]; // Add Raw instances for all possible sizes @@ -150,39 +155,6 @@ fn set_commitment_too_many_fields_panics() { }); } -// DEPRECATED -// #[test] -// fn set_commitment_rate_limit_exceeded() { -// new_test_ext().execute_with(|| { -// let rate_limit = ::DefaultRateLimit::get(); -// System::::set_block_number(1); -// let info = Box::new(CommitmentInfo { -// fields: BoundedVec::try_from(vec![]).expect("Expected not to panic"), -// }); - -// assert_ok!(Pallet::::set_commitment( -// RuntimeOrigin::signed(1), -// 1, -// info.clone() -// )); - -// // Set block number to just before rate limit expires -// System::::set_block_number(rate_limit); -// assert_noop!( -// Pallet::::set_commitment(RuntimeOrigin::signed(1), 1, info.clone()), -// Error::::CommitmentSetRateLimitExceeded -// ); - -// // Set block number to after rate limit -// System::::set_block_number(rate_limit + 1); -// assert_ok!(Pallet::::set_commitment( -// RuntimeOrigin::signed(1), -// 1, -// info -// )); -// }); -// } - #[test] fn set_commitment_updates_deposit() { new_test_ext().execute_with(|| { @@ -226,22 +198,6 @@ fn set_commitment_updates_deposit() { }); } -#[test] -fn set_rate_limit_works() { - new_test_ext().execute_with(|| { - let default_rate_limit: u64 = ::DefaultRateLimit::get(); - assert_eq!(RateLimit::::get(), default_rate_limit); - - assert_ok!(Pallet::::set_rate_limit(RuntimeOrigin::root(), 200)); - assert_eq!(RateLimit::::get(), 200); - - assert_noop!( - Pallet::::set_rate_limit(RuntimeOrigin::signed(1), 300), - sp_runtime::DispatchError::BadOrigin - ); - }); -} - #[test] fn event_emission_works() { new_test_ext().execute_with(|| { @@ -1791,3 +1747,441 @@ fn usage_respects_minimum_of_100_bytes() { ); }); } + +#[test] +fn set_commitment_works_with_multiple_raw_fields() { + new_test_ext().execute_with(|| { + let cur_block = 10u64.into(); + System::::set_block_number(cur_block); + let initial_deposit: BalanceOf = ::InitialDeposit::get(); + let field_deposit: BalanceOf = ::FieldDeposit::get(); + + let field1 = Data::Raw(vec![0u8; 10].try_into().expect("<=128 bytes is OK")); + let field2 = Data::Raw(vec![1u8; 20].try_into().expect("<=128 bytes is OK")); + let field3 = Data::Raw(vec![2u8; 50].try_into().expect("<=128 bytes is OK")); + + let info_multiple = CommitmentInfo { + fields: BoundedVec::try_from(vec![field1.clone(), field2.clone(), field3.clone()]) + .expect("<= MaxFields"), + }; + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(12345), + 99, + Box::new(info_multiple) + )); + + let expected_deposit: BalanceOf = initial_deposit + 3u64 * field_deposit; + let stored = CommitmentOf::::get(99, 
12345).expect("Should be stored"); + assert_eq!( + stored.deposit, expected_deposit, + "Deposit must equal initial + 3 * field_deposit" + ); + + assert_eq!(stored.block, cur_block, "Stored block must match cur_block"); + + let usage = UsedSpaceOf::::get(99, 12345).expect("Expected to not panic"); + assert_eq!( + usage.used_space, 100, + "Usage is clamped to 100 when sum of fields is < 100" + ); + + let next_block = 11u64.into(); + System::::set_block_number(next_block); + + let info_two_fields = CommitmentInfo { + fields: BoundedVec::try_from(vec![field1.clone(), field2.clone()]) + .expect("<= MaxFields"), + }; + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(12345), + 99, + Box::new(info_two_fields) + )); + + let expected_deposit2: BalanceOf = initial_deposit + 2u64 * field_deposit; + let stored2 = CommitmentOf::::get(99, 12345).expect("Should be stored"); + assert_eq!( + stored2.deposit, expected_deposit2, + "Deposit must have decreased after removing one field" + ); + + let usage2 = UsedSpaceOf::::get(99, 12345).expect("Expected to not panic"); + let expected_usage2 = 200u64; + assert_eq!( + usage2.used_space, expected_usage2, + "Usage accumulates in the same epoch, respecting the min usage of 100 each time" + ); + + let events = System::::events(); + let found_commitment_event = events.iter().any(|e| { + matches!( + e.event, + RuntimeEvent::Commitments(Event::Commitment { + netuid: 99, + who: 12345 + }) + ) + }); + assert!( + found_commitment_event, + "Expected at least one Event::Commitment to be emitted" + ); + }); +} + +#[allow(clippy::indexing_slicing)] +#[test] +fn multiple_timelocked_commitments_reveal_works() { + new_test_ext().execute_with(|| { + // ------------------------------------------- + // 1) Set up initial block number and user + // ------------------------------------------- + let cur_block = 5u64.into(); + System::::set_block_number(cur_block); + + let who = 123; + let netuid = 999; + + // ------------------------------------------- + // 2) Create multiple TLE fields referencing + // two known valid Drand rounds: 1000, 2000 + // ------------------------------------------- + + let round_1000 = 1000; + let round_2000 = 2000; + + // 2.a) TLE #1 => round=1000 + let tle_1_plaintext = b"Timelock #1 => round=1000"; + let ciphertext_1 = produce_ciphertext(tle_1_plaintext, round_1000); + let tle_1 = Data::TimelockEncrypted { + encrypted: ciphertext_1, + reveal_round: round_1000, + }; + + // 2.b) TLE #2 => round=1000 + let tle_2_plaintext = b"Timelock #2 => round=1000"; + let ciphertext_2 = produce_ciphertext(tle_2_plaintext, round_1000); + let tle_2 = Data::TimelockEncrypted { + encrypted: ciphertext_2, + reveal_round: round_1000, + }; + + // 2.c) TLE #3 => round=2000 + let tle_3_plaintext = b"Timelock #3 => round=2000"; + let ciphertext_3 = produce_ciphertext(tle_3_plaintext, round_2000); + let tle_3 = Data::TimelockEncrypted { + encrypted: ciphertext_3, + reveal_round: round_2000, + }; + + // 2.d) TLE #4 => round=2000 + let tle_4_plaintext = b"Timelock #4 => round=2000"; + let ciphertext_4 = produce_ciphertext(tle_4_plaintext, round_2000); + let tle_4 = Data::TimelockEncrypted { + encrypted: ciphertext_4, + reveal_round: round_2000, + }; + + // ------------------------------------------- + // 3) Insert all TLEs in a single CommitmentInfo + // ------------------------------------------- + let fields = vec![tle_1, tle_2, tle_3, tle_4]; + let fields_bounded = BoundedVec::try_from(fields).expect("Must not exceed MaxFields"); + let info = CommitmentInfo { + 
fields: fields_bounded, + }; + + // ------------------------------------------- + // 4) set_commitment => user is now in TimelockedIndex + // ------------------------------------------- + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid, + Box::new(info) + )); + assert!( + TimelockedIndex::::get().contains(&(netuid, who)), + "User must appear in TimelockedIndex since they have TLE fields" + ); + + // Confirm the stored fields are as expected + let stored = CommitmentOf::::get(netuid, who).expect("Should be stored"); + assert_eq!( + stored.info.fields.len(), + 4, + "All 4 timelock fields must be stored" + ); + + // ------------------------------------------- + // 5) Insert valid Drand pulse => round=1000 + // ------------------------------------------- + let drand_sig_1000 = hex::decode(DRAND_QUICKNET_SIG_HEX).expect("decode signature"); + insert_drand_pulse(round_1000, &drand_sig_1000); + + // Reveal at block=6 => should remove TLE #1 and TLE #2, leaving TLE #3, #4 + System::::set_block_number(6u64.into()); + assert_ok!(Pallet::::reveal_timelocked_commitments()); + + // Check leftover => TLE #3, TLE #4 remain + let leftover_after_1000 = CommitmentOf::::get(netuid, who).expect("Must exist"); + assert_eq!( + leftover_after_1000.info.fields.len(), + 2, + "After revealing round=1000, 2 timelocks remain (#3, #4)" + ); + + // Check partial reveals => TLE #1 & #2 in revealed storage + let revealed_1000 = RevealedCommitments::::get(netuid, who) + .expect("Should have partial reveals"); + assert_eq!( + revealed_1000.len(), + 2, + "We revealed exactly 2 items at round=1000" + ); + { + let (bytes_a, _) = &revealed_1000[0]; + let (bytes_b, _) = &revealed_1000[1]; + let txt_a = sp_std::str::from_utf8(bytes_a).expect("utf-8 expected"); + let txt_b = sp_std::str::from_utf8(bytes_b).expect("utf-8 expected"); + assert!( + txt_a.contains("Timelock #1") || txt_a.contains("Timelock #2"), + "Revealed #1 or #2" + ); + assert!( + txt_b.contains("Timelock #1") || txt_b.contains("Timelock #2"), + "Revealed #1 or #2" + ); + } + + assert!( + TimelockedIndex::::get().contains(&(netuid, who)), + "TLE left" + ); + + // ------------------------------------------- + // 6) Insert valid Drand pulse => round=2000 + // ------------------------------------------- + let drand_sig_2000_hex = + "b6cb8f482a0b15d45936a4c4ea08e98a087e71787caee3f4d07a8a9843b1bc5423c6b3c22f446488b3137eaca799c77e"; + let drand_sig_2000 = hex::decode(drand_sig_2000_hex).expect("decode signature"); + insert_drand_pulse(round_2000, &drand_sig_2000); + + // Reveal at block=7 => should remove TLE #3 and TLE #4 + System::::set_block_number(7u64.into()); + assert_ok!(Pallet::::reveal_timelocked_commitments()); + + // After revealing these last two timelocks => leftover is none + let leftover_after_2000 = CommitmentOf::::get(netuid, who); + assert!( + leftover_after_2000.is_none(), + "All timelocks revealed => leftover none => entry removed" + ); + + // Because the user has no timelocks left => removed from TimelockedIndex + assert!( + !TimelockedIndex::::get().contains(&(netuid, who)), + "No TLE left => user removed from index" + ); + + // Check TLE #3 and #4 were appended to revealed + let revealed_final = RevealedCommitments::::get(netuid, who) + .expect("Should exist with final reveals"); + assert_eq!( + revealed_final.len(), + 4, + "We should have all 4 TLE items revealed in total" + ); + + // The final two items in `revealed_final` must be #3, #4 + let (third_bytes, _) = &revealed_final[2]; + let (fourth_bytes, _) = 
&revealed_final[3]; + let third_txt = sp_std::str::from_utf8(third_bytes).expect("utf-8 expected"); + let fourth_txt = sp_std::str::from_utf8(fourth_bytes).expect("utf-8 expected"); + + assert!( + third_txt.contains("Timelock #3"), + "Expected TLE #3 among final reveals" + ); + assert!( + fourth_txt.contains("Timelock #4"), + "Expected TLE #4 among final reveals" + ); + }); +} + +#[allow(clippy::indexing_slicing)] +#[test] +fn mixed_timelocked_and_raw_fields_works() { + new_test_ext().execute_with(|| { + // ------------------------------------------- + // 1) Setup initial block number and user + // ------------------------------------------- + let cur_block = 3u64.into(); + System::::set_block_number(cur_block); + + let who = 77; + let netuid = 501; + + // ------------------------------------------- + // 2) Create raw fields and timelocked fields + // ------------------------------------------- + // We'll use 2 raw fields, and 2 timelocked fields referencing + // 2 Drand rounds (1000 and 2000) that we know have valid signatures. + + // Round constants: + let round_1000 = 1000; + let round_2000 = 2000; + + // (a) Timelock #1 => round=1000 + let tle_1_plaintext = b"TLE #1 => round=1000"; + let ciphertext_1 = produce_ciphertext(tle_1_plaintext, round_1000); + let tle_1 = Data::TimelockEncrypted { + encrypted: ciphertext_1, + reveal_round: round_1000, + }; + + // (b) Timelock #2 => round=2000 + let tle_2_plaintext = b"TLE #2 => round=2000"; + let ciphertext_2 = produce_ciphertext(tle_2_plaintext, round_2000); + let tle_2 = Data::TimelockEncrypted { + encrypted: ciphertext_2, + reveal_round: round_2000, + }; + + // (c) Two Raw fields + let raw_1 = Data::Raw(b"Raw field #1".to_vec().try_into().expect("<= 128 bytes")); + let raw_2 = Data::Raw(b"Raw field #2".to_vec().try_into().expect("<= 128 bytes")); + + // We'll put them in a single vector: [TLE #1, raw_1, TLE #2, raw_2] + let all_fields = vec![tle_1, raw_1.clone(), tle_2, raw_2.clone()]; + let fields_bounded = BoundedVec::try_from(all_fields).expect("<= MaxFields"); + + // ------------------------------------------- + // 3) Submit the single commitment + // ------------------------------------------- + let info = CommitmentInfo { fields: fields_bounded }; + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid, + Box::new(info) + )); + + // The user should appear in TimelockedIndex because they have timelocked fields. + assert!( + TimelockedIndex::::get().contains(&(netuid, who)), + "User must be in TimelockedIndex with TLE fields" + ); + + // Check the stored data + let stored = CommitmentOf::::get(netuid, who).expect("Should exist in storage"); + assert_eq!( + stored.info.fields.len(), + 4, + "We have 2 raw + 2 TLE fields in total" + ); + + // ------------------------------------------- + // 4) Insert Drand signature for round=1000 => partial reveal + // ------------------------------------------- + let drand_sig_1000 = hex::decode(DRAND_QUICKNET_SIG_HEX).expect("decode signature"); + insert_drand_pulse(round_1000, &drand_sig_1000); + + System::::set_block_number(4u64.into()); + assert_ok!(Pallet::::reveal_timelocked_commitments()); + + // => TLE #1 (round=1000) is revealed. TLE #2 (round=2000) remains locked. + // => The two raw fields remain untouched. 
+ let leftover_after_1000 = CommitmentOf::::get(netuid, who).expect("Must still exist"); + assert_eq!( + leftover_after_1000.info.fields.len(), + 3, + "One TLE removed => leftover=3 fields: TLE #2 + raw_1 + raw_2" + ); + + // Make sure user is still in TimelockedIndex (they still have TLE #2) + assert!( + TimelockedIndex::::get().contains(&(netuid, who)), + "Still has leftover TLE #2 => remains in index" + ); + + // Check partial reveal + let revealed_1000 = RevealedCommitments::::get(netuid, who) + .expect("Should have partial reveals"); + assert_eq!( + revealed_1000.len(), + 1, + "We revealed exactly 1 item at round=1000" + ); + let (revealed_bytes_1, _block_1) = &revealed_1000[0]; + let revealed_str_1 = + sp_std::str::from_utf8(revealed_bytes_1).expect("Should parse as UTF-8"); + assert!( + revealed_str_1.contains("TLE #1 => round=1000"), + "Check that TLE #1 was revealed" + ); + + // ------------------------------------------- + // 5) Insert Drand signature for round=2000 => final TLE reveal + // ------------------------------------------- + let drand_sig_2000_hex = + "b6cb8f482a0b15d45936a4c4ea08e98a087e71787caee3f4d07a8a9843b1bc5423c6b3c22f446488b3137eaca799c77e"; + let drand_sig_2000 = hex::decode(drand_sig_2000_hex).expect("decode signature"); + insert_drand_pulse(round_2000, &drand_sig_2000); + + System::::set_block_number(5u64.into()); + assert_ok!(Pallet::::reveal_timelocked_commitments()); + + // => TLE #2 is now revealed. The two raw fields remain. + let leftover_after_2000 = CommitmentOf::::get(netuid, who).expect("Still exists"); + let leftover_fields = &leftover_after_2000.info.fields; + assert_eq!( + leftover_fields.len(), + 2, + "Only the 2 raw fields remain after TLE #2 is revealed" + ); + + assert_eq!( + leftover_fields[0], + raw_1, + "Leftover field[0] must match raw_1" + ); + assert_eq!( + leftover_fields[1], + raw_2, + "Leftover field[1] must match raw_2" + ); + + // The user has no leftover timelocks => removed from TimelockedIndex + assert!( + !TimelockedIndex::::get().contains(&(netuid, who)), + "No more TLE => user removed from index" + ); + + // But the record is still present in storage (because raw fields remain) + // => leftover_fields must match our original raw fields. + let [f1, f2] = &leftover_fields[..] else { + panic!("Expected exactly 2 fields leftover"); + }; + assert_eq!(f1, &raw_1, "Raw field #1 remains unaltered"); + assert_eq!(f2, &raw_2, "Raw field #2 remains unaltered"); + + // Check that TLE #2 was appended to revealed data + let revealed_final = RevealedCommitments::::get(netuid, who) + .expect("Should have final reveals"); + assert_eq!( + revealed_final.len(), + 2, + "Now we have 2 revealed TLE items total (TLE #1 and TLE #2)." + ); + let (revealed_bytes_2, _block_2) = &revealed_final[1]; + let revealed_str_2 = + sp_std::str::from_utf8(revealed_bytes_2).expect("Should parse as UTF-8"); + assert!( + revealed_str_2.contains("TLE #2 => round=2000"), + "Check that TLE #2 was revealed" + ); + }); +} diff --git a/pallets/commitments/src/types.rs b/pallets/commitments/src/types.rs index 0f1d2302a5..543eb08cd1 100644 --- a/pallets/commitments/src/types.rs +++ b/pallets/commitments/src/types.rs @@ -31,15 +31,18 @@ use sp_runtime::{ use sp_std::{fmt::Debug, iter::once, prelude::*}; use subtensor_macros::freeze_struct; -/// Either underlying data blob if it is at most 32 bytes, or a hash of it. If the data is greater -/// than 32-bytes then it will be truncated when encoding. -/// -/// Can also be `None`. 
+/// Represents stored data which can be: +/// - `Raw`: a direct blob up to 128 bytes +/// - `BigRaw`: a larger blob up to 512 bytes +/// - A cryptographic hash (BlakeTwo256, Sha256, Keccak256, ShaThree256) +/// - A timelock-encrypted blob with a reveal round +/// - A reset flag (`ResetBondsFlag`) +/// Can also be `None`. #[derive(Clone, Eq, PartialEq, RuntimeDebug, MaxEncodedLen)] pub enum Data { /// No data here. None, - /// The data is stored directly. + /// The data is stored directly (up to 128 bytes). Raw(BoundedVec>), /// Only the Blake2 hash of the data is stored. The preimage of the hash may be retrieved /// through some hash-lookup service. @@ -58,6 +61,10 @@ pub enum Data { encrypted: BoundedVec>, reveal_round: u64, }, + /// Flag to trigger bonds reset for subnet + ResetBondsFlag, + /// The data is stored directly (up to 512 bytes). + BigRaw(BoundedVec>), } impl Data { @@ -79,6 +86,8 @@ impl Data { | Data::Keccak256(arr) | Data::ShaThree256(arr) => arr.len() as u64, Data::TimelockEncrypted { encrypted, .. } => encrypted.len() as u64, + Data::ResetBondsFlag => 0, + Data::BigRaw(bytes) => bytes.len() as u64, } } } @@ -108,6 +117,12 @@ impl Decode for Data { reveal_round, } } + 135 => Data::ResetBondsFlag, + 136 => { + let bigvec = + BoundedVec::>::decode(input)?; + Data::BigRaw(bigvec) + } _ => return Err(codec::Error::from("invalid leading byte")), }) } @@ -136,6 +151,12 @@ impl Encode for Data { r.extend_from_slice(&reveal_round.encode()); r } + Data::ResetBondsFlag => vec![135], + Data::BigRaw(bigvec) => { + let mut r = vec![136]; + r.extend_from_slice(&bigvec.encode()); + r + } } } } @@ -321,6 +342,12 @@ impl TypeInfo for Data { }) .field(|f| f.name("reveal_round").ty::()), ) + }) + .variant("ResetBondsFlag", |v| v.index(135)) + .variant("BigRaw", |v| { + v.index(136).fields(Fields::unnamed().field(|f| { + f.ty::>>() + })) }); Type::builder() @@ -348,6 +375,7 @@ pub struct CommitmentInfo> { /// Maximum size of the serialized timelock commitment in bytes pub const MAX_TIMELOCK_COMMITMENT_SIZE_BYTES: u32 = 1024; +pub const MAX_BIGRAW_COMMITMENT_SIZE_BYTES: u32 = 512; /// Contains the decrypted data of a revealed commitment. #[freeze_struct("bf575857b57f9bef")] diff --git a/pallets/commitments/src/weights.rs b/pallets/commitments/src/weights.rs index b91017e050..e1bd05fcc7 100644 --- a/pallets/commitments/src/weights.rs +++ b/pallets/commitments/src/weights.rs @@ -53,7 +53,7 @@ impl WeightInfo for SubstrateWeight { fn set_rate_limit() -> Weight { Weight::from_parts(10_000_000, 2000) .saturating_add(RocksDbWeight::get().reads(1_u64)) - } + } } // For backwards compatibility and tests. 
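// Editor's illustrative sketch, not part of the patch: the `Data` changes in
// pallets/commitments/src/types.rs above give `ResetBondsFlag` the leading byte 135 and
// `BigRaw` the byte 136 followed by the SCALE encoding of its bounded bytes. A round-trip
// check along these lines could exercise the manual Encode/Decode impls; the module and
// test names are assumptions (the shipped coverage lives in tests.rs), and it assumes
// `Data` is usable here without extra type parameters.
#[cfg(test)]
mod data_codec_sketch {
    use super::Data;
    use codec::{Decode, Encode};

    #[test]
    fn reset_bonds_flag_and_big_raw_round_trip() {
        // ResetBondsFlag carries no payload: only the variant byte is written.
        let flag = Data::ResetBondsFlag;
        let encoded = flag.encode();
        assert_eq!(encoded, vec![135]);
        assert_eq!(Data::decode(&mut &encoded[..]).expect("decodes"), flag);

        // BigRaw stores up to 512 bytes directly; the payload follows the variant byte.
        let big = Data::BigRaw(vec![7u8; 300].try_into().expect("<= 512 bytes"));
        let encoded = big.encode();
        assert_eq!(encoded[0], 136);
        assert_eq!(Data::decode(&mut &encoded[..]).expect("decodes"), big);
    }
}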
@@ -76,5 +76,5 @@ impl WeightInfo for () { fn set_rate_limit() -> Weight { Weight::from_parts(10_000_000, 2000) .saturating_add(RocksDbWeight::get().reads(1_u64)) - } -} \ No newline at end of file + } +} diff --git a/pallets/crowdloan/Cargo.toml b/pallets/crowdloan/Cargo.toml new file mode 100644 index 0000000000..e8d582fa44 --- /dev/null +++ b/pallets/crowdloan/Cargo.toml @@ -0,0 +1,62 @@ +[package] +name = "pallet-crowdloan" +version = "0.1.0" +edition = "2024" +authors = ["Bittensor Nucleus Team"] +license = "Apache-2.0" +homepage = "https://bittensor.com" +description = "FRAME crowdloan pallet" +publish = false +repository = "https://github.com/opentensor/subtensor" + +[lints] +workspace = true + +[dependencies] +subtensor-macros.workspace = true +scale-info = { workspace = true, features = ["derive"] } +codec = { workspace = true, features = ["max-encoded-len"] } +frame-benchmarking = { optional = true, workspace = true } +frame-support.workspace = true +frame-system.workspace = true +sp-runtime.workspace = true +sp-std.workspace = true +log = { workspace = true } + +[dev-dependencies] +pallet-balances = { default-features = true, workspace = true } +pallet-preimage = { default-features = true, workspace = true } +sp-core = { default-features = true, workspace = true } +sp-io = { default-features = true, workspace = true } + +[features] +default = ["std"] +std = [ + "codec/std", + "frame-benchmarking?/std", + "frame-support/std", + "frame-system/std", + "scale-info/std", + "sp-runtime/std", + "sp-std/std", + "sp-io/std", + "log/std", + "sp-core/std", + "pallet-balances/std", + "pallet-preimage/std", +] +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-preimage/runtime-benchmarks", +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "sp-runtime/try-runtime", + "pallet-balances/try-runtime", + "pallet-preimage/try-runtime", +] diff --git a/pallets/crowdloan/README.md b/pallets/crowdloan/README.md new file mode 100644 index 0000000000..3d67fee33a --- /dev/null +++ b/pallets/crowdloan/README.md @@ -0,0 +1,19 @@ +# Crowdloan Pallet + +A pallet that enables the creation and management of generic crowdloans for transferring funds and executing an arbitrary call. + +Users of this pallet can create a crowdloan by providing a deposit, a cap, an end block, an optional target address and an optional call. + +Users can contribute to a crowdloan by providing funds to the crowdloan they choose to support. A contribution can be withdrawn as long as the crowdloan has not been finalized. + +Once the crowdloan is finalized, the raised funds are transferred to the target address if one was provided; otherwise, the provided call is expected to move the funds on-chain when it is dispatched. The call is dispatched with the current crowdloan ID stored as a temporary storage item. + +If the crowdloan fails to reach the cap, the creator can decide to refund all contributors and dissolve the crowdloan. The initial deposit will be refunded. + +## Overview + +## Interface + +## Dispatchable Functions + +License: Apache-2.0 diff --git a/pallets/crowdloan/src/benchmarking.rs b/pallets/crowdloan/src/benchmarking.rs new file mode 100644 index 0000000000..0891baf5af --- /dev/null +++ b/pallets/crowdloan/src/benchmarking.rs @@ -0,0 +1,498 @@ +//! 
Benchmarks for Crowdloan Pallet +#![cfg(feature = "runtime-benchmarks")] +#![allow( + clippy::arithmetic_side_effects, + clippy::indexing_slicing, + clippy::unwrap_used +)] +use crate::{BalanceOf, CrowdloanId, CrowdloanInfo, CurrencyOf, pallet::*}; +use frame_benchmarking::{account, v2::*}; +use frame_support::traits::{Get, StorePreimage, fungible::*}; +use frame_system::{RawOrigin, pallet_prelude::BlockNumberFor}; + +extern crate alloc; + +const SEED: u32 = 0; + +use alloc::{boxed::Box, vec}; + +fn assert_last_event(generic_event: ::RuntimeEvent) { + let events = frame_system::Pallet::::events(); + let system_event: ::RuntimeEvent = generic_event.into(); + // compare to the last event record + let frame_system::EventRecord { event, .. } = &events[events.len() - 1]; + assert_eq!(event, &system_event); +} + +#[benchmarks] +mod benchmarks { + use super::*; + + #[benchmark] + fn create() { + let creator: T::AccountId = account::("creator", 0, SEED); + let deposit = T::MinimumDeposit::get(); + let min_contribution = T::AbsoluteMinimumContribution::get(); + let cap = deposit + deposit; + let now = frame_system::Pallet::::block_number(); + let end = now + T::MaximumBlockDuration::get(); + let target_address = account::("target_address", 0, SEED); + let call: Box<::RuntimeCall> = + Box::new(frame_system::Call::::remark { remark: vec![] }.into()); + let _ = CurrencyOf::::set_balance(&creator, deposit); + + #[extrinsic_call] + _( + RawOrigin::Signed(creator.clone()), + deposit, + min_contribution, + cap, + end, + Some(call.clone()), + Some(target_address.clone()), + ); + + // ensure the crowdloan is stored correctly + let crowdloan_id = 0; + let funds_account = Pallet::::funds_account(crowdloan_id); + assert_eq!( + Crowdloans::::get(crowdloan_id), + Some(CrowdloanInfo { + creator: creator.clone(), + deposit, + min_contribution, + cap, + end, + funds_account: funds_account.clone(), + raised: deposit, + target_address: Some(target_address.clone()), + call: Some(T::Preimages::bound(*call).unwrap()), + finalized: false, + contributors_count: 1, + }) + ); + // ensure the creator has been deducted the deposit + assert!(CurrencyOf::::balance(&creator) == 0); + // ensure the initial deposit is stored correctly as contribution + assert_eq!( + Contributions::::get(crowdloan_id, &creator), + Some(deposit) + ); + // ensure the raised amount is updated correctly + assert!(Crowdloans::::get(crowdloan_id).is_some_and(|c| c.raised == deposit)); + // ensure the crowdloan account has the deposit + assert_eq!(CurrencyOf::::balance(&funds_account), deposit); + // ensure the event is emitted + assert_last_event::( + Event::::Created { + crowdloan_id, + creator, + end, + cap, + } + .into(), + ); + // ensure next crowdloan id is incremented + assert_eq!(NextCrowdloanId::::get(), crowdloan_id + 1); + } + + #[benchmark] + fn contribute() { + // create a crowdloan + let creator: T::AccountId = account::("creator", 0, SEED); + let deposit = T::MinimumDeposit::get(); + let min_contribution = T::AbsoluteMinimumContribution::get(); + let cap = deposit + deposit; + let now = frame_system::Pallet::::block_number(); + let end = now + T::MaximumBlockDuration::get(); + let target_address: T::AccountId = account::("target_address", 0, SEED); + let call: Box<::RuntimeCall> = + Box::new(frame_system::Call::::remark { remark: vec![] }.into()); + let _ = CurrencyOf::::set_balance(&creator, deposit); + let _ = Pallet::::create( + RawOrigin::Signed(creator.clone()).into(), + deposit, + min_contribution, + cap, + end, + Some(call), + 
Some(target_address), + ); + + // setup contributor + let contributor: T::AccountId = account::("contributor", 0, SEED); + let amount: BalanceOf = min_contribution; + let crowdloan_id: CrowdloanId = 0; + let _ = CurrencyOf::::set_balance(&contributor, amount); + + #[extrinsic_call] + _(RawOrigin::Signed(contributor.clone()), crowdloan_id, amount); + + // ensure the contribution is stored correctly + assert_eq!( + Contributions::::get(crowdloan_id, &contributor), + Some(amount) + ); + // ensure the contributor has been deducted the amount + assert!(CurrencyOf::::balance(&contributor) == 0); + // ensure the crowdloan raised amount is updated correctly + assert!(Crowdloans::::get(crowdloan_id).is_some_and(|c| c.raised == deposit + amount)); + // ensure the contribution is present in the crowdloan account + assert_eq!( + CurrencyOf::::balance(&Pallet::::funds_account(crowdloan_id)), + deposit + amount + ); + // ensure the event is emitted + assert_last_event::( + Event::::Contributed { + contributor, + crowdloan_id, + amount, + } + .into(), + ); + } + + #[benchmark] + fn withdraw() { + // create a crowdloan + let creator: T::AccountId = account::("creator", 0, SEED); + let deposit = T::MinimumDeposit::get(); + let min_contribution = T::AbsoluteMinimumContribution::get(); + let cap = deposit + deposit; + let now = frame_system::Pallet::::block_number(); + let end = now + T::MaximumBlockDuration::get(); + let target_address: T::AccountId = account::("target_address", 0, SEED); + let call: Box<::RuntimeCall> = + Box::new(frame_system::Call::::remark { remark: vec![] }.into()); + let _ = CurrencyOf::::set_balance(&creator, deposit); + let _ = Pallet::::create( + RawOrigin::Signed(creator.clone()).into(), + deposit, + min_contribution, + cap, + end, + Some(call), + Some(target_address), + ); + + // create contribution + let contributor: T::AccountId = account::("contributor", 0, SEED); + let amount: BalanceOf = min_contribution; + let crowdloan_id: CrowdloanId = 0; + let _ = CurrencyOf::::set_balance(&contributor, amount); + let _ = Pallet::::contribute( + RawOrigin::Signed(contributor.clone()).into(), + crowdloan_id, + amount, + ); + + // run to the end of the contribution period + frame_system::Pallet::::set_block_number(end); + + #[extrinsic_call] + _(RawOrigin::Signed(contributor.clone()), crowdloan_id); + + // ensure the creator contribution has been removed + assert_eq!(Contributions::::get(crowdloan_id, &contributor), None); + // ensure the contributor has his contribution back in his balance + assert_eq!(CurrencyOf::::balance(&contributor), amount); + // ensure the crowdloan account has been deducted the contribution + assert_eq!( + CurrencyOf::::balance(&Pallet::::funds_account(crowdloan_id)), + deposit + ); + // ensure the crowdloan raised amount is updated correctly + assert!(Crowdloans::::get(crowdloan_id).is_some_and(|c| c.raised == deposit)); + // ensure the event is emitted + assert_last_event::( + Event::::Withdrew { + contributor, + crowdloan_id, + amount, + } + .into(), + ); + } + + #[benchmark] + fn finalize() { + // create a crowdloan + let creator: T::AccountId = account::("creator", 0, SEED); + let deposit = T::MinimumDeposit::get(); + let min_contribution = T::AbsoluteMinimumContribution::get(); + let cap = deposit + deposit; + let now = frame_system::Pallet::::block_number(); + let end = now + T::MaximumBlockDuration::get(); + let target_address: T::AccountId = account::("target_address", 0, SEED); + let call: Box<::RuntimeCall> = + Box::new(frame_system::Call::::remark { 
remark: vec![] }.into()); + let _ = CurrencyOf::::set_balance(&creator, deposit); + let _ = Pallet::::create( + RawOrigin::Signed(creator.clone()).into(), + deposit, + min_contribution, + cap, + end, + Some(call), + Some(target_address.clone()), + ); + + // create contribution fulfilling the cap + let crowdloan_id: CrowdloanId = 0; + let contributor: T::AccountId = account::("contributor", 0, SEED); + let amount: BalanceOf = cap - deposit; + let _ = CurrencyOf::::set_balance(&contributor, amount); + let _ = Pallet::::contribute( + RawOrigin::Signed(contributor.clone()).into(), + crowdloan_id, + amount, + ); + + // run to the end of the contribution period + frame_system::Pallet::::set_block_number(end); + + #[extrinsic_call] + _(RawOrigin::Signed(creator.clone()), crowdloan_id); + + // ensure the target address has received the raised amount + assert_eq!(CurrencyOf::::balance(&target_address), deposit + amount); + // ensure the crowdloan has been finalized + assert!(Crowdloans::::get(crowdloan_id).is_some_and(|c| c.finalized)); + // ensure the event is emitted + assert_last_event::(Event::::Finalized { crowdloan_id }.into()); + } + + #[benchmark] + fn refund(k: Linear<3, { T::RefundContributorsLimit::get() }>) { + // create a crowdloan + let creator: T::AccountId = account::("creator", 0, SEED); + let deposit = T::MinimumDeposit::get(); + let min_contribution = T::AbsoluteMinimumContribution::get(); + let cap = deposit + deposit; + let now = frame_system::Pallet::::block_number(); + let end = now + T::MaximumBlockDuration::get(); + let target_address: T::AccountId = account::("target_address", 0, SEED); + let call: Box<::RuntimeCall> = + Box::new(frame_system::Call::::remark { remark: vec![] }.into()); + let _ = CurrencyOf::::set_balance(&creator, deposit); + let _ = Pallet::::create( + RawOrigin::Signed(creator.clone()).into(), + deposit, + min_contribution, + cap, + end, + Some(call), + Some(target_address), + ); + + let crowdloan_id: CrowdloanId = 0; + let amount: BalanceOf = min_contribution; + // create the worst case count of contributors k to be refunded minus the creator + // who is already a contributor + let contributors = k - 1; + for i in 0..contributors { + let contributor: T::AccountId = account::("contributor", i, SEED); + let _ = CurrencyOf::::set_balance(&contributor, amount); + let _ = Pallet::::contribute( + RawOrigin::Signed(contributor.clone()).into(), + crowdloan_id, + amount, + ); + } + + // run to the end of the contribution period + frame_system::Pallet::::set_block_number(end); + + #[extrinsic_call] + _(RawOrigin::Signed(creator.clone()), crowdloan_id); + + // ensure the creator has not been refunded and their contribution is still the initial deposit + assert_eq!(CurrencyOf::::balance(&creator), 0); + assert_eq!( + Contributions::::get(crowdloan_id, &creator), + Some(deposit) + ); + // ensure each contributor has been refunded and their contributions are removed + for i in 0..contributors { + let contributor: T::AccountId = account::("contributor", i, SEED); + assert_eq!(CurrencyOf::::balance(&contributor), amount); + assert_eq!(Contributions::::get(crowdloan_id, &contributor), None); + } + // ensure the crowdloan account has been deducted the contributions + assert_eq!( + CurrencyOf::::balance(&Pallet::::funds_account(crowdloan_id)), + deposit + ); + // ensure the raised amount is updated correctly + assert!(Crowdloans::::get(crowdloan_id).is_some_and(|c| c.raised == deposit)); + // ensure the event is emitted + assert_last_event::(Event::::AllRefunded { crowdloan_id 
}.into()); + } + + #[benchmark] + fn dissolve() { + // create a crowdloan + let creator: T::AccountId = account::("creator", 0, SEED); + let deposit = T::MinimumDeposit::get(); + let min_contribution = T::AbsoluteMinimumContribution::get(); + let cap = deposit + deposit; + let now = frame_system::Pallet::::block_number(); + let end = now + T::MaximumBlockDuration::get(); + let target_address: T::AccountId = account::("target_address", 0, SEED); + let call: Box<::RuntimeCall> = + Box::new(frame_system::Call::::remark { remark: vec![] }.into()); + let _ = CurrencyOf::::set_balance(&creator, deposit); + let _ = Pallet::::create( + RawOrigin::Signed(creator.clone()).into(), + deposit, + min_contribution, + cap, + end, + Some(call), + Some(target_address), + ); + + // run to the end of the contribution period + frame_system::Pallet::::set_block_number(end); + + // refund the contributions + let crowdloan_id: CrowdloanId = 0; + let _ = Pallet::::refund(RawOrigin::Signed(creator.clone()).into(), crowdloan_id); + + #[extrinsic_call] + _(RawOrigin::Signed(creator.clone()), crowdloan_id); + + // ensure the crowdloan has been dissolved + assert!(Crowdloans::::get(crowdloan_id).is_none()); + // ensure the event is emitted + assert_last_event::(Event::::Dissolved { crowdloan_id }.into()); + } + + #[benchmark] + fn update_min_contribution() { + // create a crowdloan + let creator: T::AccountId = account::("creator", 0, SEED); + let deposit = T::MinimumDeposit::get(); + let min_contribution = T::AbsoluteMinimumContribution::get(); + let cap = deposit + deposit; + let end = frame_system::Pallet::::block_number() + T::MaximumBlockDuration::get(); + let call: Box<::RuntimeCall> = + Box::new(frame_system::Call::::remark { remark: vec![] }.into()); + let _ = CurrencyOf::::set_balance(&creator, deposit); + let _ = Pallet::::create( + RawOrigin::Signed(creator.clone()).into(), + deposit, + min_contribution, + cap, + end, + Some(call), + None, + ); + + let crowdloan_id: CrowdloanId = 0; + let new_min_contribution: BalanceOf = min_contribution + min_contribution; + + #[extrinsic_call] + _( + RawOrigin::Signed(creator.clone()), + crowdloan_id, + new_min_contribution, + ); + + // ensure the min contribution is updated correctly + assert!( + Crowdloans::::get(crowdloan_id) + .is_some_and(|c| c.min_contribution == new_min_contribution) + ); + // ensure the event is emitted + assert_last_event::( + Event::::MinContributionUpdated { + crowdloan_id, + new_min_contribution, + } + .into(), + ); + } + + #[benchmark] + fn update_end() { + // create a crowdloan + let creator: T::AccountId = account::("creator", 0, SEED); + let deposit = T::MinimumDeposit::get(); + let min_contribution = T::AbsoluteMinimumContribution::get(); + let cap = deposit + deposit; + let now = frame_system::Pallet::::block_number(); + let end = now + T::MinimumBlockDuration::get(); + let call: Box<::RuntimeCall> = + Box::new(frame_system::Call::::remark { remark: vec![] }.into()); + let _ = CurrencyOf::::set_balance(&creator, deposit); + let _ = Pallet::::create( + RawOrigin::Signed(creator.clone()).into(), + deposit, + min_contribution, + cap, + end, + Some(call), + None, + ); + + let crowdloan_id: CrowdloanId = 0; + let new_end: BlockNumberFor = now + T::MaximumBlockDuration::get(); + + #[extrinsic_call] + _(RawOrigin::Signed(creator.clone()), crowdloan_id, new_end); + + // ensure the end is updated correctly + assert!(Crowdloans::::get(crowdloan_id).is_some_and(|c| c.end == new_end)); + // ensure the event is emitted + assert_last_event::( + 
Event::::EndUpdated { + crowdloan_id, + new_end, + } + .into(), + ); + } + + #[benchmark] + fn update_cap() { + // create a crowdloan + let creator: T::AccountId = account::("creator", 0, SEED); + let deposit = T::MinimumDeposit::get(); + let min_contribution = T::AbsoluteMinimumContribution::get(); + let cap = deposit + deposit; + let end = frame_system::Pallet::::block_number() + T::MaximumBlockDuration::get(); + let call: Box<::RuntimeCall> = + Box::new(frame_system::Call::::remark { remark: vec![] }.into()); + let _ = CurrencyOf::::set_balance(&creator, deposit); + let _ = Pallet::::create( + RawOrigin::Signed(creator.clone()).into(), + deposit, + min_contribution, + cap, + end, + Some(call), + None, + ); + + let crowdloan_id: CrowdloanId = 0; + let new_cap: BalanceOf = cap + cap; + + #[extrinsic_call] + _(RawOrigin::Signed(creator.clone()), crowdloan_id, new_cap); + + // ensure the cap is updated correctly + assert!(Crowdloans::::get(crowdloan_id).is_some_and(|c| c.cap == new_cap)); + // ensure the event is emitted + assert_last_event::( + Event::::CapUpdated { + crowdloan_id, + new_cap, + } + .into(), + ); + } + + impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test); +} diff --git a/pallets/crowdloan/src/lib.rs b/pallets/crowdloan/src/lib.rs new file mode 100644 index 0000000000..1d4ed4e263 --- /dev/null +++ b/pallets/crowdloan/src/lib.rs @@ -0,0 +1,891 @@ +//! # Crowdloan Pallet +//! +//! A pallet allowing users to create generic crowdloans and contribute to them, +//! the raised funds are then transferred to a target address and an extrinsic +//! is dispatched, making it reusable for any crowdloan type. +#![cfg_attr(not(feature = "std"), no_std)] + +extern crate alloc; + +use alloc::{boxed::Box, vec}; +use codec::{Decode, Encode}; +use frame_support::{ + PalletId, + dispatch::GetDispatchInfo, + pallet_prelude::*, + sp_runtime::{ + RuntimeDebug, + traits::{AccountIdConversion, Dispatchable, Zero}, + }, + traits::{ + Bounded, Defensive, Get, IsSubType, QueryPreimage, StorePreimage, fungible, fungible::*, + tokens::Preservation, + }, +}; +use frame_system::pallet_prelude::*; +use scale_info::TypeInfo; +use sp_runtime::traits::CheckedSub; +use sp_std::vec::Vec; +use weights::WeightInfo; + +pub use pallet::*; +use subtensor_macros::freeze_struct; + +pub type CrowdloanId = u32; + +mod benchmarking; +mod migrations; +mod mock; +mod tests; +pub mod weights; + +pub type CurrencyOf = ::Currency; + +pub type BalanceOf = + as fungible::Inspect<::AccountId>>::Balance; + +// Define a maximum length for the migration key +type MigrationKeyMaxLen = ConstU32<128>; + +pub type BoundedCallOf = + Bounded<::RuntimeCall, ::Hashing>; + +/// A struct containing the information about a crowdloan. +#[freeze_struct("5db9538284491545")] +#[derive(Encode, Decode, Eq, PartialEq, Ord, PartialOrd, RuntimeDebug, TypeInfo, MaxEncodedLen)] +pub struct CrowdloanInfo { + /// The creator of the crowdloan. + pub creator: AccountId, + /// The initial deposit of the crowdloan from the creator. + pub deposit: Balance, + /// Minimum contribution to the crowdloan. + pub min_contribution: Balance, + /// The end block of the crowdloan. + pub end: BlockNumber, + /// The cap to raise. + pub cap: Balance, + /// The account holding the funds for this crowdloan. Derived on chain but put here for ease of use. + pub funds_account: AccountId, + /// The amount raised so far. 
+ pub raised: Balance, + /// The optional target address to transfer the raised funds to; if not + /// provided, the funds are expected to be transferred by on-chain logic + /// inside the provided call when it is dispatched. + pub target_address: Option, + /// The optional call to dispatch when the crowdloan is finalized. + pub call: Option, + /// Whether the crowdloan has been finalized. + pub finalized: bool, + /// The number of contributors to the crowdloan. + pub contributors_count: u32, +} + +pub type CrowdloanInfoOf = CrowdloanInfo< + ::AccountId, + BalanceOf, + BlockNumberFor, + BoundedCallOf, +>; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + + #[pallet::pallet] + pub struct Pallet(_); + + /// Configuration trait. + #[pallet::config] + pub trait Config: frame_system::Config { + /// The overarching event type. + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + + /// The overarching call type. + type RuntimeCall: Parameter + + Dispatchable + + GetDispatchInfo + + From> + + IsSubType> + + IsType<::RuntimeCall>; + + /// The currency mechanism. + type Currency: fungible::Balanced + + fungible::Mutate; + + /// The weight information for the pallet. + type WeightInfo: WeightInfo; + + /// The preimage provider which will be used to store the call to dispatch. + type Preimages: QueryPreimage + StorePreimage; + + /// The pallet id that will be used to derive crowdloan account ids. + #[pallet::constant] + type PalletId: Get; + + /// The minimum deposit required to create a crowdloan. + #[pallet::constant] + type MinimumDeposit: Get>; + + /// The absolute minimum contribution required to contribute to a crowdloan. + #[pallet::constant] + type AbsoluteMinimumContribution: Get>; + + /// The minimum block duration for a crowdloan. + #[pallet::constant] + type MinimumBlockDuration: Get>; + + /// The maximum block duration for a crowdloan. + #[pallet::constant] + type MaximumBlockDuration: Get>; + + /// The maximum number of contributors that can be refunded in a single refund. + #[pallet::constant] + type RefundContributorsLimit: Get; + + /// The maximum number of contributors that can contribute to a crowdloan. + #[pallet::constant] + type MaxContributors: Get; + } + + /// A map of crowdloan ids to their information. + #[pallet::storage] + pub type Crowdloans = + StorageMap<_, Twox64Concat, CrowdloanId, CrowdloanInfoOf, OptionQuery>; + + /// The next incrementing crowdloan id. + #[pallet::storage] + pub type NextCrowdloanId = StorageValue<_, CrowdloanId, ValueQuery, ConstU32<0>>; + + /// A map of crowdloan ids to their contributors and their contributions. + #[pallet::storage] + pub type Contributions = StorageDoubleMap< + _, + Twox64Concat, + CrowdloanId, + Identity, + T::AccountId, + BalanceOf, + OptionQuery, + >; + + /// The current crowdloan id that will be set during the finalize call, making it + /// temporarily accessible to the dispatched call. + #[pallet::storage] + pub type CurrentCrowdloanId = StorageValue<_, CrowdloanId, OptionQuery>; + + /// Storage for the migration run status. + #[pallet::storage] + pub type HasMigrationRun = + StorageMap<_, Identity, BoundedVec, bool, ValueQuery>; + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// A crowdloan was created. + Created { + crowdloan_id: CrowdloanId, + creator: T::AccountId, + end: BlockNumberFor, + cap: BalanceOf, + }, + /// A contribution was made to an active crowdloan. 
+ Contributed { + crowdloan_id: CrowdloanId, + contributor: T::AccountId, + amount: BalanceOf, + }, + /// A contribution was withdrawn from a failed crowdloan. + Withdrew { + crowdloan_id: CrowdloanId, + contributor: T::AccountId, + amount: BalanceOf, + }, + /// A refund was partially processed for a failed crowdloan. + PartiallyRefunded { crowdloan_id: CrowdloanId }, + /// A refund was fully processed for a failed crowdloan. + AllRefunded { crowdloan_id: CrowdloanId }, + /// A crowdloan was finalized, funds were transferred and the call was dispatched. + Finalized { crowdloan_id: CrowdloanId }, + /// A crowdloan was dissolved. + Dissolved { crowdloan_id: CrowdloanId }, + /// The minimum contribution was updated. + MinContributionUpdated { + crowdloan_id: CrowdloanId, + new_min_contribution: BalanceOf, + }, + /// The end was updated. + EndUpdated { + crowdloan_id: CrowdloanId, + new_end: BlockNumberFor, + }, + /// The cap was updated. + CapUpdated { + crowdloan_id: CrowdloanId, + new_cap: BalanceOf, + }, + } + + #[pallet::error] + pub enum Error { + /// The crowdloan initial deposit is too low. + DepositTooLow, + /// The crowdloan cap is too low. + CapTooLow, + /// The minimum contribution is too low. + MinimumContributionTooLow, + /// The crowdloan cannot end in the past. + CannotEndInPast, + /// The crowdloan block duration is too short. + BlockDurationTooShort, + /// The block duration is too long. + BlockDurationTooLong, + /// The account does not have enough balance to pay for the initial deposit/contribution. + InsufficientBalance, + /// An overflow occurred. + Overflow, + /// The crowdloan id is invalid. + InvalidCrowdloanId, + /// The crowdloan cap has been fully raised. + CapRaised, + /// The contribution period has ended. + ContributionPeriodEnded, + /// The contribution is too low. + ContributionTooLow, + /// The origin of this call is invalid. + InvalidOrigin, + /// The crowdloan has already been finalized. + AlreadyFinalized, + /// The crowdloan contribution period has not ended yet. + ContributionPeriodNotEnded, + /// The contributor has no contribution for this crowdloan. + NoContribution, + /// The crowdloan cap has not been raised. + CapNotRaised, + /// An underflow occurred. + Underflow, + /// Call to dispatch was not found in the preimage storage. + CallUnavailable, + /// The crowdloan is not ready to be dissolved, it still has contributions. + NotReadyToDissolve, + /// The deposit cannot be withdrawn from the crowdloan. + DepositCannotBeWithdrawn, + /// The maximum number of contributors has been reached. + MaxContributorsReached, + } + + #[pallet::hooks] + impl Hooks> for Pallet { + fn on_runtime_upgrade() -> frame_support::weights::Weight { + let mut weight = frame_support::weights::Weight::from_parts(0, 0); + + weight = weight + // Add the contributors count for each crowdloan + .saturating_add(migrations::migrate_add_contributors_count::()); + + weight + } + } + + #[pallet::call] + impl Pallet { + /// Create a crowdloan that will raise funds up to a maximum cap and, if successful, + /// will transfer funds to the target address if provided and dispatch the call + /// (using creator origin). + /// + /// The initial deposit will be transferred to the crowdloan account and will be refunded + /// in case the crowdloan fails to raise the cap. Additionally, the creator will pay for + /// the execution of the call. + /// + /// The dispatch origin for this call must be _Signed_. + /// + /// Parameters: + /// - `deposit`: The initial deposit from the creator. 
+ /// - `min_contribution`: The minimum contribution required to contribute to the crowdloan. + /// - `cap`: The maximum amount of funds that can be raised. + /// - `end`: The block number at which the crowdloan will end. + /// - `call`: The call to dispatch when the crowdloan is finalized. + /// - `target_address`: The address to transfer the raised funds to if provided. + #[pallet::call_index(0)] + #[pallet::weight({ + let di = call.as_ref().map(|c| c.get_dispatch_info()); + let inner_call_weight = match di { + Some(di) => di.weight, + None => Weight::zero(), + }; + let base_weight = T::WeightInfo::create(); + (base_weight.saturating_add(inner_call_weight), Pays::Yes) + })] + pub fn create( + origin: OriginFor, + #[pallet::compact] deposit: BalanceOf, + #[pallet::compact] min_contribution: BalanceOf, + #[pallet::compact] cap: BalanceOf, + #[pallet::compact] end: BlockNumberFor, + call: Option::RuntimeCall>>, + target_address: Option, + ) -> DispatchResult { + let creator = ensure_signed(origin)?; + let now = frame_system::Pallet::::block_number(); + + // Ensure the deposit is at least the minimum deposit, the cap is greater than the deposit + // and the minimum contribution is at least the absolute minimum contribution. + ensure!( + deposit >= T::MinimumDeposit::get(), + Error::::DepositTooLow + ); + ensure!(cap > deposit, Error::::CapTooLow); + ensure!( + min_contribution >= T::AbsoluteMinimumContribution::get(), + Error::::MinimumContributionTooLow + ); + + Self::ensure_valid_end(now, end)?; + + // Ensure the creator has enough balance to pay the initial deposit + ensure!( + CurrencyOf::::balance(&creator) >= deposit, + Error::::InsufficientBalance + ); + + let crowdloan_id = NextCrowdloanId::::get(); + let next_crowdloan_id = crowdloan_id.checked_add(1).ok_or(Error::::Overflow)?; + NextCrowdloanId::::put(next_crowdloan_id); + + // Derive the funds account and keep track of it + let funds_account = Self::funds_account(crowdloan_id); + frame_system::Pallet::::inc_providers(&funds_account); + + // If the call is provided, bound it and store it in the preimage storage + let call = if let Some(call) = call { + Some(T::Preimages::bound(*call)?) + } else { + None + }; + + let crowdloan = CrowdloanInfo { + creator: creator.clone(), + deposit, + min_contribution, + end, + cap, + funds_account, + raised: deposit, + target_address, + call, + finalized: false, + contributors_count: 1, + }; + Crowdloans::::insert(crowdloan_id, &crowdloan); + + // Transfer the deposit to the funds account + CurrencyOf::::transfer( + &creator, + &crowdloan.funds_account, + deposit, + Preservation::Expendable, + )?; + + Contributions::::insert(crowdloan_id, &creator, deposit); + + Self::deposit_event(Event::::Created { + crowdloan_id, + creator, + end, + cap, + }); + + Ok(()) + } + + /// Contribute to an active crowdloan. + /// + /// The contribution will be transferred to the crowdloan account and will be refunded + /// if the crowdloan fails to raise the cap. If the contribution would raise the amount above the cap, + /// the contribution will be set to the amount that is left to be raised. + /// + /// The dispatch origin for this call must be _Signed_. + /// + /// Parameters: + /// - `crowdloan_id`: The id of the crowdloan to contribute to. + /// - `amount`: The amount to contribute. 
+ #[pallet::call_index(1)] + #[pallet::weight(T::WeightInfo::contribute())] + pub fn contribute( + origin: OriginFor, + #[pallet::compact] crowdloan_id: CrowdloanId, + #[pallet::compact] amount: BalanceOf, + ) -> DispatchResult { + let contributor = ensure_signed(origin)?; + let now = frame_system::Pallet::::block_number(); + + let mut crowdloan = Self::ensure_crowdloan_exists(crowdloan_id)?; + + // Ensure crowdloan has not ended and has not raised cap + ensure!(now < crowdloan.end, Error::::ContributionPeriodEnded); + ensure!(crowdloan.raised < crowdloan.cap, Error::::CapRaised); + + // Ensure contribution is at least the minimum contribution + ensure!( + amount >= crowdloan.min_contribution, + Error::::ContributionTooLow + ); + + // Ensure the crowdloan has not reached the maximum number of contributors + ensure!( + crowdloan.contributors_count < T::MaxContributors::get(), + Error::::MaxContributorsReached + ); + + // Ensure contribution does not overflow the actual raised amount + // and it does not exceed the cap + let left_to_raise = crowdloan + .cap + .checked_sub(crowdloan.raised) + .ok_or(Error::::Underflow)?; + + // If the contribution would raise the amount above the cap, + // set the contribution to the amount that is left to be raised + let amount = amount.min(left_to_raise); + + // Ensure contribution does not overflow the actual raised amount + crowdloan.raised = crowdloan + .raised + .checked_add(amount) + .ok_or(Error::::Overflow)?; + + // Compute the new total contribution and ensure it does not overflow, we + // also increment the contributor count if the contribution is new. + let contribution = + if let Some(contribution) = Contributions::::get(crowdloan_id, &contributor) { + contribution + .checked_add(amount) + .ok_or(Error::::Overflow)? + } else { + // We have a new contribution + crowdloan.contributors_count = crowdloan + .contributors_count + .checked_add(1) + .ok_or(Error::::Overflow)?; + amount + }; + + // Ensure contributor has enough balance to pay + ensure!( + CurrencyOf::::balance(&contributor) >= amount, + Error::::InsufficientBalance + ); + + CurrencyOf::::transfer( + &contributor, + &crowdloan.funds_account, + amount, + Preservation::Expendable, + )?; + + Contributions::::insert(crowdloan_id, &contributor, contribution); + Crowdloans::::insert(crowdloan_id, &crowdloan); + + Self::deposit_event(Event::::Contributed { + crowdloan_id, + contributor, + amount, + }); + + Ok(()) + } + + /// Withdraw a contribution from an active (not yet finalized or dissolved) crowdloan. + /// + /// Only contributions over the deposit can be withdrawn by the creator. + /// + /// The dispatch origin for this call must be _Signed_. + /// + /// Parameters: + /// - `crowdloan_id`: The id of the crowdloan to withdraw from. 
+ #[pallet::call_index(2)] + #[pallet::weight(T::WeightInfo::withdraw())] + pub fn withdraw( + origin: OriginFor, + #[pallet::compact] crowdloan_id: CrowdloanId, + ) -> DispatchResult { + let who = ensure_signed(origin)?; + + let mut crowdloan = Self::ensure_crowdloan_exists(crowdloan_id)?; + ensure!(!crowdloan.finalized, Error::::AlreadyFinalized); + + // Ensure contributor has balance left in the crowdloan account + let mut amount = Contributions::::get(crowdloan_id, &who).unwrap_or_else(Zero::zero); + ensure!(amount > Zero::zero(), Error::::NoContribution); + + if who == crowdloan.creator { + // Ensure the deposit is kept + amount = amount.saturating_sub(crowdloan.deposit); + ensure!(amount > Zero::zero(), Error::::DepositCannotBeWithdrawn); + Contributions::::insert(crowdloan_id, &who, crowdloan.deposit); + } else { + Contributions::::remove(crowdloan_id, &who); + crowdloan.contributors_count = crowdloan + .contributors_count + .checked_sub(1) + .ok_or(Error::::Underflow)?; + } + + CurrencyOf::::transfer( + &crowdloan.funds_account, + &who, + amount, + Preservation::Expendable, + )?; + + // Update the crowdloan raised amount to reflect the withdrawal. + crowdloan.raised = crowdloan.raised.saturating_sub(amount); + Crowdloans::::insert(crowdloan_id, &crowdloan); + + Self::deposit_event(Event::::Withdrew { + contributor: who, + crowdloan_id, + amount, + }); + + Ok(()) + } + + /// Finalize a successful crowdloan. + /// + /// The call will transfer the raised amount to the target address if it was provided when the crowdloan was created + /// and dispatch the call that was provided using the creator origin. The CurrentCrowdloanId will be set to the + /// crowdloan id being finalized so the dispatched call can access it temporarily by accessing + /// the `CurrentCrowdloanId` storage item. + /// + /// The dispatch origin for this call must be _Signed_ and must be the creator of the crowdloan. + /// + /// Parameters: + /// - `crowdloan_id`: The id of the crowdloan to finalize. + #[pallet::call_index(3)] + #[pallet::weight(T::WeightInfo::finalize())] + pub fn finalize( + origin: OriginFor, + #[pallet::compact] crowdloan_id: CrowdloanId, + ) -> DispatchResult { + let who = ensure_signed(origin)?; + let now = frame_system::Pallet::::block_number(); + + let mut crowdloan = Self::ensure_crowdloan_exists(crowdloan_id)?; + + // Ensure the origin is the creator of the crowdloan and the crowdloan has ended, + // raised the cap and is not finalized. + ensure!(who == crowdloan.creator, Error::::InvalidOrigin); + ensure!(now >= crowdloan.end, Error::::ContributionPeriodNotEnded); + ensure!(crowdloan.raised == crowdloan.cap, Error::::CapNotRaised); + ensure!(!crowdloan.finalized, Error::::AlreadyFinalized); + + // If the target address is provided, transfer the raised amount to it. + if let Some(ref target_address) = crowdloan.target_address { + CurrencyOf::::transfer( + &crowdloan.funds_account, + target_address, + crowdloan.raised, + Preservation::Expendable, + )?; + } + + // If the call is provided, dispatch it. 
+ if let Some(ref call) = crowdloan.call { + // Set the current crowdloan id so the dispatched call + // can access it temporarily + CurrentCrowdloanId::::put(crowdloan_id); + + // Retrieve the call from the preimage storage + let stored_call = match T::Preimages::peek(call) { + Ok((call, _)) => call, + Err(_) => { + // If the call is not found, we drop it from the preimage storage + // because it's not needed anymore + T::Preimages::drop(call); + return Err(Error::::CallUnavailable)?; + } + }; + + // Dispatch the call with creator origin + stored_call + .dispatch(frame_system::RawOrigin::Signed(who).into()) + .map(|_| ()) + .map_err(|e| e.error)?; + + // Clear the current crowdloan id + CurrentCrowdloanId::::kill(); + } + + crowdloan.finalized = true; + Crowdloans::::insert(crowdloan_id, &crowdloan); + + Self::deposit_event(Event::::Finalized { crowdloan_id }); + + Ok(()) + } + + /// Refund a failed crowdloan. + /// + /// The call will try to refund all contributors (excluding the creator) up to the limit defined by the `RefundContributorsLimit`. + /// If the limit is reached, the call will stop and the crowdloan will be marked as partially refunded. + /// It may be needed to dispatch this call multiple times to refund all contributors. + /// + /// The dispatch origin for this call must be _Signed_ and doesn't need to be the creator of the crowdloan. + /// + /// Parameters: + /// - `crowdloan_id`: The id of the crowdloan to refund. + #[pallet::call_index(4)] + #[pallet::weight(T::WeightInfo::refund(T::RefundContributorsLimit::get()))] + pub fn refund( + origin: OriginFor, + #[pallet::compact] crowdloan_id: CrowdloanId, + ) -> DispatchResultWithPostInfo { + let now = frame_system::Pallet::::block_number(); + ensure_signed(origin)?; + + let mut crowdloan = Self::ensure_crowdloan_exists(crowdloan_id)?; + + // Ensure the crowdloan has ended and is not finalized + ensure!(now >= crowdloan.end, Error::::ContributionPeriodNotEnded); + ensure!(!crowdloan.finalized, Error::::AlreadyFinalized); + + let mut refunded_contributors: Vec = vec![]; + let mut refund_count = 0; + + // Assume everyone can be refunded + let mut all_refunded = true; + + // We try to refund all contributors (excluding the creator) + let contributions = Contributions::::iter_prefix(crowdloan_id) + .filter(|(contributor, _)| *contributor != crowdloan.creator); + for (contributor, amount) in contributions { + if refund_count >= T::RefundContributorsLimit::get() { + // Not everyone can be refunded + all_refunded = false; + break; + } + + CurrencyOf::::transfer( + &crowdloan.funds_account, + &contributor, + amount, + Preservation::Expendable, + )?; + + refunded_contributors.push(contributor); + crowdloan.raised = crowdloan.raised.saturating_sub(amount); + refund_count = refund_count.checked_add(1).ok_or(Error::::Overflow)?; + } + + crowdloan.contributors_count = crowdloan + .contributors_count + .checked_sub(refund_count) + .ok_or(Error::::Underflow)?; + Crowdloans::::insert(crowdloan_id, &crowdloan); + + // Clear refunded contributors + for contributor in refunded_contributors { + Contributions::::remove(crowdloan_id, &contributor); + } + + if all_refunded { + Self::deposit_event(Event::::AllRefunded { crowdloan_id }); + // The loop didn't run fully, we refund the unused weights. + Ok(Some(T::WeightInfo::refund(refund_count)).into()) + } else { + Self::deposit_event(Event::::PartiallyRefunded { crowdloan_id }); + // The loop ran fully, we don't refund anything. + Ok(().into()) + } + } + + /// Dissolve a crowdloan. 
+ /// + /// The crowdloan will be removed from the storage. + /// All contributions must have been refunded before the crowdloan can be dissolved (except the creator's one). + /// + /// The dispatch origin for this call must be _Signed_ and must be the creator of the crowdloan. + /// + /// Parameters: + /// - `crowdloan_id`: The id of the crowdloan to dissolve. + #[pallet::call_index(5)] + #[pallet::weight(T::WeightInfo::dissolve())] + pub fn dissolve( + origin: OriginFor, + #[pallet::compact] crowdloan_id: CrowdloanId, + ) -> DispatchResult { + let who = ensure_signed(origin)?; + + let crowdloan = Self::ensure_crowdloan_exists(crowdloan_id)?; + ensure!(!crowdloan.finalized, Error::::AlreadyFinalized); + + // Only the creator can dissolve the crowdloan + ensure!(who == crowdloan.creator, Error::::InvalidOrigin); + + // It can only be dissolved if the raised amount is the creator's contribution, + // meaning there is no contributions or every contribution has been refunded + let creator_contribution = Contributions::::get(crowdloan_id, &crowdloan.creator) + .ok_or(Error::::NoContribution)?; + ensure!( + creator_contribution == crowdloan.raised, + Error::::NotReadyToDissolve + ); + + // Refund the creator's contribution + CurrencyOf::::transfer( + &crowdloan.funds_account, + &crowdloan.creator, + creator_contribution, + Preservation::Expendable, + )?; + Contributions::::remove(crowdloan_id, &crowdloan.creator); + + // Clear the call from the preimage storage + if let Some(call) = crowdloan.call { + T::Preimages::drop(&call); + } + + // Remove the crowdloan + let _ = frame_system::Pallet::::dec_providers(&crowdloan.funds_account).defensive(); + Crowdloans::::remove(crowdloan_id); + + Self::deposit_event(Event::::Dissolved { crowdloan_id }); + Ok(()) + } + + /// Update the minimum contribution of a non-finalized crowdloan. + /// + /// The dispatch origin for this call must be _Signed_ and must be the creator of the crowdloan. + /// + /// Parameters: + /// - `crowdloan_id`: The id of the crowdloan to update the minimum contribution of. + /// - `new_min_contribution`: The new minimum contribution. + #[pallet::call_index(6)] + #[pallet::weight(T::WeightInfo::update_min_contribution())] + pub fn update_min_contribution( + origin: OriginFor, + #[pallet::compact] crowdloan_id: CrowdloanId, + #[pallet::compact] new_min_contribution: BalanceOf, + ) -> DispatchResult { + let who = ensure_signed(origin)?; + + let mut crowdloan = Self::ensure_crowdloan_exists(crowdloan_id)?; + ensure!(!crowdloan.finalized, Error::::AlreadyFinalized); + + // Only the creator can update the min contribution. + ensure!(who == crowdloan.creator, Error::::InvalidOrigin); + + // The new min contribution should be greater than absolute minimum contribution. + ensure!( + new_min_contribution >= T::AbsoluteMinimumContribution::get(), + Error::::MinimumContributionTooLow + ); + + crowdloan.min_contribution = new_min_contribution; + Crowdloans::::insert(crowdloan_id, &crowdloan); + + Self::deposit_event(Event::::MinContributionUpdated { + crowdloan_id, + new_min_contribution, + }); + Ok(()) + } + + /// Update the end block of a non-finalized crowdloan. + /// + /// The dispatch origin for this call must be _Signed_ and must be the creator of the crowdloan. + /// + /// Parameters: + /// - `crowdloan_id`: The id of the crowdloan to update the end block of. + /// - `new_end`: The new end block. 
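Looking back at the `refund` call above: because each call processes at most `RefundContributorsLimit` contributors, clearing a failed crowdloan may take several calls. A small sketch of the expected call count; the names are illustrative, and the limit of 5 comes from the test mock later in this patch.

// Number of `refund` calls needed to clear every non-creator contributor,
// given the per-call limit (RefundContributorsLimit in the pallet config).
fn refund_calls_needed(non_creator_contributors: u32, limit: u32) -> u32 {
    non_creator_contributors.div_ceil(limit)
}

fn main() {
    // Six contributors with a limit of 5 need two calls: the first emits
    // PartiallyRefunded, the second AllRefunded (see `test_refund_succeeds`).
    assert_eq!(refund_calls_needed(6, 5), 2);
    // Fewer contributors than the limit are cleared in a single call.
    assert_eq!(refund_calls_needed(3, 5), 1);
}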
+ #[pallet::call_index(7)] + #[pallet::weight(T::WeightInfo::update_end())] + pub fn update_end( + origin: OriginFor, + #[pallet::compact] crowdloan_id: CrowdloanId, + #[pallet::compact] new_end: BlockNumberFor, + ) -> DispatchResult { + let who = ensure_signed(origin)?; + let now = frame_system::Pallet::::block_number(); + + let mut crowdloan = Self::ensure_crowdloan_exists(crowdloan_id)?; + ensure!(!crowdloan.finalized, Error::::AlreadyFinalized); + + // Only the creator can update the min contribution. + ensure!(who == crowdloan.creator, Error::::InvalidOrigin); + + Self::ensure_valid_end(now, new_end)?; + + crowdloan.end = new_end; + Crowdloans::::insert(crowdloan_id, &crowdloan); + + Self::deposit_event(Event::::EndUpdated { + crowdloan_id, + new_end, + }); + Ok(()) + } + + /// Update the cap of a non-finalized crowdloan. + /// + /// The dispatch origin for this call must be _Signed_ and must be the creator of the crowdloan. + /// + /// Parameters: + /// - `crowdloan_id`: The id of the crowdloan to update the cap of. + /// - `new_cap`: The new cap. + #[pallet::call_index(8)] + #[pallet::weight(T::WeightInfo::update_cap())] + pub fn update_cap( + origin: OriginFor, + #[pallet::compact] crowdloan_id: CrowdloanId, + #[pallet::compact] new_cap: BalanceOf, + ) -> DispatchResult { + let who = ensure_signed(origin)?; + + // The cap can only be updated if the crowdloan has not been finalized. + let mut crowdloan = Self::ensure_crowdloan_exists(crowdloan_id)?; + ensure!(!crowdloan.finalized, Error::::AlreadyFinalized); + + // Only the creator can update the cap. + ensure!(who == crowdloan.creator, Error::::InvalidOrigin); + + // The new cap should be greater than the actual raised amount. + ensure!(new_cap >= crowdloan.raised, Error::::CapTooLow); + + crowdloan.cap = new_cap; + Crowdloans::::insert(crowdloan_id, &crowdloan); + + Self::deposit_event(Event::::CapUpdated { + crowdloan_id, + new_cap, + }); + Ok(()) + } + } +} + +impl Pallet { + fn funds_account(id: CrowdloanId) -> T::AccountId { + T::PalletId::get().into_sub_account_truncating(id) + } + + fn ensure_crowdloan_exists(crowdloan_id: CrowdloanId) -> Result, Error> { + Crowdloans::::get(crowdloan_id).ok_or(Error::::InvalidCrowdloanId) + } + + // Ensure the provided end block is after the current block and the duration is + // between the minimum and maximum block duration + fn ensure_valid_end(now: BlockNumberFor, end: BlockNumberFor) -> Result<(), Error> { + ensure!(now < end, Error::::CannotEndInPast); + let block_duration = end.checked_sub(&now).ok_or(Error::::Underflow)?; + ensure!( + block_duration >= T::MinimumBlockDuration::get(), + Error::::BlockDurationTooShort + ); + ensure!( + block_duration <= T::MaximumBlockDuration::get(), + Error::::BlockDurationTooLong + ); + Ok(()) + } +} diff --git a/pallets/crowdloan/src/migrations/migrate_add_contributors_count.rs b/pallets/crowdloan/src/migrations/migrate_add_contributors_count.rs new file mode 100644 index 0000000000..3b094843ce --- /dev/null +++ b/pallets/crowdloan/src/migrations/migrate_add_contributors_count.rs @@ -0,0 +1,188 @@ +use alloc::string::String; +use frame_support::{BoundedVec, migration::storage_key_iter, traits::Get, weights::Weight}; +use subtensor_macros::freeze_struct; + +use crate::*; + +mod old_storage { + use super::*; + + #[freeze_struct("84bcbf9b8d3f0ddf")] + #[derive(Encode, Decode, Debug)] + pub struct OldCrowdloanInfo { + pub creator: AccountId, + pub deposit: Balance, + pub min_contribution: Balance, + pub end: BlockNumber, + pub cap: Balance, + pub 
funds_account: AccountId, + pub raised: Balance, + pub target_address: Option, + pub call: Option, + pub finalized: bool, + } +} + +pub fn migrate_add_contributors_count() -> Weight { + let migration_name = BoundedVec::truncate_from(b"migrate_add_contributors_count".to_vec()); + let mut weight = T::DbWeight::get().reads(1); + + if HasMigrationRun::::get(&migration_name) { + log::info!( + "Migration '{:?}' has already run. Skipping.", + migration_name + ); + return weight; + } + + log::info!( + "Running migration '{}'", + String::from_utf8_lossy(&migration_name) + ); + + let pallet_name = b"Crowdloan"; + let item_name = b"Crowdloans"; + let crowdloans = storage_key_iter::< + CrowdloanId, + old_storage::OldCrowdloanInfo< + T::AccountId, + BalanceOf, + BlockNumberFor, + BoundedCallOf, + >, + Twox64Concat, + >(pallet_name, item_name) + .collect::>(); + weight = weight.saturating_add(T::DbWeight::get().reads(crowdloans.len() as u64)); + + for (id, crowdloan) in crowdloans { + let contributions = Contributions::::iter_key_prefix(id) + .collect::>() + .len(); + weight = weight.saturating_add(T::DbWeight::get().reads(contributions as u64)); + + Crowdloans::::insert( + id, + CrowdloanInfo { + creator: crowdloan.creator, + deposit: crowdloan.deposit, + min_contribution: crowdloan.min_contribution, + end: crowdloan.end, + cap: crowdloan.cap, + funds_account: crowdloan.funds_account, + raised: crowdloan.raised, + target_address: crowdloan.target_address, + call: crowdloan.call, + finalized: crowdloan.finalized, + contributors_count: contributions as u32, + }, + ); + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + } + + HasMigrationRun::::insert(&migration_name, true); + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + + log::info!( + "Migration '{:?}' completed successfully.", + String::from_utf8_lossy(&migration_name) + ); + + weight +} + +#[cfg(test)] +mod tests { + use frame_support::{Hashable, storage::unhashed::put_raw}; + use sp_core::U256; + use sp_io::hashing::twox_128; + + use super::*; + use crate::mock::{Test, TestState}; + + #[test] + fn test_migrate_add_contributors_count_works() { + TestState::default().build_and_execute(|| { + let pallet_name = twox_128(b"Crowdloan"); + let storage_name = twox_128(b"Crowdloans"); + let prefix = [pallet_name, storage_name].concat(); + + let items = vec![ + ( + old_storage::OldCrowdloanInfo { + creator: U256::from(1), + deposit: 100u64, + min_contribution: 10u64, + end: 100u64, + cap: 1000u64, + funds_account: U256::from(2), + raised: 0u64, + target_address: None, + call: None::>, + finalized: false, + }, + vec![(U256::from(1), 100)], + ), + ( + old_storage::OldCrowdloanInfo { + creator: U256::from(1), + deposit: 100u64, + min_contribution: 10u64, + end: 100u64, + cap: 1000u64, + funds_account: U256::from(2), + raised: 0u64, + target_address: None, + call: None::>, + finalized: false, + }, + vec![ + (U256::from(1), 100), + (U256::from(2), 100), + (U256::from(3), 100), + ], + ), + ( + old_storage::OldCrowdloanInfo { + creator: U256::from(1), + deposit: 100u64, + min_contribution: 10u64, + end: 100u64, + cap: 1000u64, + funds_account: U256::from(2), + raised: 0u64, + target_address: None, + call: None::>, + finalized: false, + }, + vec![ + (U256::from(1), 100), + (U256::from(2), 100), + (U256::from(3), 100), + (U256::from(4), 100), + (U256::from(5), 100), + ], + ), + ]; + + for (id, (crowdloan, contributions)) in items.into_iter().enumerate() { + let key = [prefix.clone(), (id as u32).twox_64_concat()].concat(); + 
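// The raw key assembled above follows the standard FRAME layout for a
// Twox64Concat storage map: twox_128(pallet prefix) ++ twox_128(item prefix)
// ++ twox_64(SCALE-encoded key) ++ SCALE-encoded key. `twox_64_concat()`
// produces the last two parts, so the `put_raw` below writes the old struct
// to exactly the slot that `Crowdloans::<T>::get(id)` reads after the
// migration has run.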
put_raw(&key, &crowdloan.encode()); + + for (contributor, amount) in contributions { + Contributions::::insert(id as u32, contributor, amount); + } + } + + migrate_add_contributors_count::(); + + assert!(Crowdloans::::get(0).is_some_and(|c| c.contributors_count == 1)); + assert!(Crowdloans::::get(1).is_some_and(|c| c.contributors_count == 3)); + assert!(Crowdloans::::get(2).is_some_and(|c| c.contributors_count == 5)); + + assert!(HasMigrationRun::::get(BoundedVec::truncate_from( + b"migrate_add_contributors_count".to_vec() + ))); + }); + } +} diff --git a/pallets/crowdloan/src/migrations/mod.rs b/pallets/crowdloan/src/migrations/mod.rs new file mode 100644 index 0000000000..f6701fb83a --- /dev/null +++ b/pallets/crowdloan/src/migrations/mod.rs @@ -0,0 +1,2 @@ +mod migrate_add_contributors_count; +pub use migrate_add_contributors_count::*; diff --git a/pallets/crowdloan/src/mock.rs b/pallets/crowdloan/src/mock.rs new file mode 100644 index 0000000000..78cf15717c --- /dev/null +++ b/pallets/crowdloan/src/mock.rs @@ -0,0 +1,272 @@ +#![cfg(test)] +#![allow(clippy::arithmetic_side_effects, clippy::unwrap_used)] +use frame_support::{ + PalletId, derive_impl, parameter_types, + traits::{OnFinalize, OnInitialize, fungible, fungible::*, tokens::Preservation}, + weights::Weight, +}; +use frame_system::{EnsureRoot, pallet_prelude::BlockNumberFor}; +use sp_core::U256; +use sp_runtime::{BuildStorage, traits::IdentityLookup}; + +use crate::{BalanceOf, CrowdloanId, pallet as pallet_crowdloan, weights::WeightInfo}; + +type Block = frame_system::mocking::MockBlock; +pub(crate) type AccountOf = ::AccountId; + +frame_support::construct_runtime!( + pub enum Test + { + System: frame_system = 1, + Balances: pallet_balances = 2, + Crowdloan: pallet_crowdloan = 3, + Preimage: pallet_preimage = 4, + TestPallet: pallet_test = 5, + } +); + +#[allow(unused)] +pub(crate) fn new_test_ext() -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::::default() + .build_storage() + .expect("Expected to not panic"); + pallet_balances::GenesisConfig:: { + balances: vec![ + (U256::from(1), 10), + (U256::from(2), 10), + (U256::from(3), 10), + (U256::from(4), 10), + (U256::from(5), 3), + ], + } + .assimilate_storage(&mut t) + .expect("Expected to not panic"); + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| System::set_block_number(1)); + ext +} + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] +impl frame_system::Config for Test { + type Block = Block; + type AccountId = U256; + type AccountData = pallet_balances::AccountData; + type Lookup = IdentityLookup; +} + +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] +impl pallet_balances::Config for Test { + type AccountStore = System; +} + +pub struct TestWeightInfo; +impl WeightInfo for TestWeightInfo { + fn create() -> Weight { + Weight::zero() + } + fn contribute() -> Weight { + Weight::zero() + } + fn withdraw() -> Weight { + Weight::zero() + } + fn refund(_k: u32) -> Weight { + Weight::zero() + } + fn finalize() -> Weight { + Weight::zero() + } + fn dissolve() -> Weight { + Weight::zero() + } + fn update_min_contribution() -> Weight { + Weight::zero() + } + fn update_end() -> Weight { + Weight::zero() + } + fn update_cap() -> Weight { + Weight::zero() + } +} + +parameter_types! 
{ + pub const PreimageMaxSize: u32 = 4096 * 1024; + pub const PreimageBaseDeposit: u64 = 1; + pub const PreimageByteDeposit: u64 = 1; +} + +impl pallet_preimage::Config for Test { + type WeightInfo = pallet_preimage::weights::SubstrateWeight; + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type ManagerOrigin = EnsureRoot>; + type Consideration = (); +} + +parameter_types! { + pub const CrowdloanPalletId: PalletId = PalletId(*b"bt/cloan"); + pub const MinimumDeposit: u64 = 50; + pub const AbsoluteMinimumContribution: u64 = 10; + pub const MinimumBlockDuration: u64 = 20; + pub const MaximumBlockDuration: u64 = 100; + pub const RefundContributorsLimit: u32 = 5; + pub const MaxContributors: u32 = 10; +} + +impl pallet_crowdloan::Config for Test { + type PalletId = CrowdloanPalletId; + type Currency = Balances; + type RuntimeCall = RuntimeCall; + type RuntimeEvent = RuntimeEvent; + type WeightInfo = TestWeightInfo; + type Preimages = Preimage; + type MinimumDeposit = MinimumDeposit; + type AbsoluteMinimumContribution = AbsoluteMinimumContribution; + type MinimumBlockDuration = MinimumBlockDuration; + type MaximumBlockDuration = MaximumBlockDuration; + type RefundContributorsLimit = RefundContributorsLimit; + type MaxContributors = MaxContributors; +} + +// A test pallet used to test some behavior of the crowdloan pallet +#[allow(unused)] +#[frame_support::pallet(dev_mode)] +pub(crate) mod pallet_test { + use super::*; + use frame_support::{ + dispatch::DispatchResult, + pallet_prelude::{OptionQuery, StorageValue}, + }; + use frame_system::pallet_prelude::OriginFor; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config + pallet_crowdloan::Config { + type Currency: fungible::Balanced + + fungible::Mutate; + } + + #[pallet::error] + pub enum Error { + ShouldFail, + MissingCurrentCrowdloanId, + CrowdloanDoesNotExist, + } + + #[pallet::storage] + pub type PassedCrowdloanId = StorageValue<_, CrowdloanId, OptionQuery>; + + #[pallet::call] + impl Pallet { + #[pallet::call_index(0)] + pub fn noop(origin: OriginFor) -> DispatchResult { + Ok(()) + } + + #[pallet::call_index(1)] + pub fn transfer_funds(origin: OriginFor, dest: AccountOf) -> DispatchResult { + let crowdloan_id = pallet_crowdloan::CurrentCrowdloanId::::get() + .ok_or(Error::::MissingCurrentCrowdloanId)?; + let crowdloan = pallet_crowdloan::Crowdloans::::get(crowdloan_id) + .ok_or(Error::::CrowdloanDoesNotExist)?; + + PassedCrowdloanId::::put(crowdloan_id); + + ::Currency::transfer( + &crowdloan.funds_account, + &dest, + crowdloan.raised, + Preservation::Expendable, + )?; + + Ok(()) + } + + #[pallet::call_index(2)] + pub fn set_passed_crowdloan_id(origin: OriginFor) -> DispatchResult { + let crowdloan_id = pallet_crowdloan::CurrentCrowdloanId::::get() + .ok_or(Error::::MissingCurrentCrowdloanId)?; + + PassedCrowdloanId::::put(crowdloan_id); + + Ok(()) + } + + #[pallet::call_index(3)] + pub fn failing_extrinsic(origin: OriginFor) -> DispatchResult { + Err(Error::::ShouldFail.into()) + } + } +} + +impl pallet_test::Config for Test { + type Currency = Balances; +} + +pub(crate) struct TestState { + block_number: BlockNumberFor, + balances: Vec<(AccountOf, BalanceOf)>, +} + +impl Default for TestState { + fn default() -> Self { + Self { + block_number: 1, + balances: vec![], + } + } +} + +impl TestState { + pub(crate) fn with_block_number(mut self, block_number: BlockNumberFor) -> Self { + self.block_number = block_number; + self + } + + pub(crate) fn with_balance(mut self, 
who: AccountOf, balance: BalanceOf) -> Self { + self.balances.push((who, balance)); + self + } + + pub(crate) fn build_and_execute(self, test: impl FnOnce()) { + let mut t = frame_system::GenesisConfig::::default() + .build_storage() + .unwrap(); + + pallet_balances::GenesisConfig:: { + balances: self + .balances + .iter() + .map(|(who, balance)| (*who, *balance)) + .collect::>(), + } + .assimilate_storage(&mut t) + .unwrap(); + + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| System::set_block_number(self.block_number)); + ext.execute_with(test); + } +} + +pub(crate) fn last_event() -> RuntimeEvent { + System::events().pop().expect("RuntimeEvent expected").event +} + +pub(crate) fn run_to_block(n: u64) { + while System::block_number() < n { + System::on_finalize(System::block_number()); + Balances::on_finalize(System::block_number()); + System::reset_events(); + System::set_block_number(System::block_number() + 1); + Balances::on_initialize(System::block_number()); + System::on_initialize(System::block_number()); + } +} + +pub(crate) fn noop_call() -> Box { + Box::new(RuntimeCall::TestPallet(pallet_test::Call::::noop {})) +} diff --git a/pallets/crowdloan/src/tests.rs b/pallets/crowdloan/src/tests.rs new file mode 100644 index 0000000000..1e03854b1f --- /dev/null +++ b/pallets/crowdloan/src/tests.rs @@ -0,0 +1,2614 @@ +#![cfg(test)] +#![allow(clippy::arithmetic_side_effects, clippy::unwrap_used)] + +use frame_support::{StorageDoubleMap, assert_err, assert_ok, traits::StorePreimage}; +use frame_system::pallet_prelude::BlockNumberFor; +use sp_core::U256; +use sp_runtime::DispatchError; + +use crate::{BalanceOf, CrowdloanId, CrowdloanInfo, mock::*, pallet as pallet_crowdloan}; + +#[test] +fn test_create_succeeds() { + TestState::default() + .with_balance(U256::from(1), 100) + .build_and_execute(|| { + let creator: AccountOf = U256::from(1); + let deposit: BalanceOf = 50; + let min_contribution: BalanceOf = 10; + let cap: BalanceOf = 300; + let end: BlockNumberFor = 50; + + assert_ok!(Crowdloan::create( + RuntimeOrigin::signed(creator), + deposit, + min_contribution, + cap, + end, + Some(noop_call()), + None, + )); + + let crowdloan_id = 0; + let funds_account = pallet_crowdloan::Pallet::::funds_account(crowdloan_id); + // ensure the crowdloan is stored correctly + let call = pallet_preimage::Pallet::::bound(*noop_call()).unwrap(); + assert_eq!( + pallet_crowdloan::Crowdloans::::get(crowdloan_id), + Some(CrowdloanInfo { + creator, + deposit, + min_contribution, + cap, + end, + funds_account, + raised: deposit, + target_address: None, + call: Some(call), + finalized: false, + contributors_count: 1, + }) + ); + // ensure the crowdloan account has the deposit + assert_eq!(Balances::free_balance(funds_account), deposit); + // ensure the creator has been deducted the deposit + assert_eq!(Balances::free_balance(creator), 100 - deposit); + // ensure the contributions have been updated + assert_eq!( + pallet_crowdloan::Contributions::::iter_prefix(crowdloan_id) + .collect::>(), + vec![(creator, deposit)] + ); + // ensure the raised amount is updated correctly + assert!( + pallet_crowdloan::Crowdloans::::get(crowdloan_id) + .is_some_and(|c| c.raised == deposit) + ); + // ensure the event is emitted + assert_eq!( + last_event(), + pallet_crowdloan::Event::::Created { + crowdloan_id, + creator, + end, + cap, + } + .into() + ); + // ensure next crowdloan id is incremented + assert_eq!( + pallet_crowdloan::NextCrowdloanId::::get(), + crowdloan_id + 1 + ); + }); +} + +#[test] +fn 
test_create_fails_if_bad_origin() { + TestState::default().build_and_execute(|| { + let deposit: BalanceOf = 50; + let min_contribution: BalanceOf = 10; + let cap: BalanceOf = 300; + let end: BlockNumberFor = 50; + + assert_err!( + Crowdloan::create( + RuntimeOrigin::none(), + deposit, + min_contribution, + cap, + end, + Some(noop_call()), + None + ), + DispatchError::BadOrigin + ); + + assert_err!( + Crowdloan::create( + RuntimeOrigin::root(), + deposit, + min_contribution, + cap, + end, + Some(noop_call()), + None + ), + DispatchError::BadOrigin + ); + }); +} + +#[test] +fn test_create_fails_if_deposit_is_too_low() { + TestState::default() + .with_balance(U256::from(1), 100) + .build_and_execute(|| { + let creator: AccountOf = U256::from(1); + let deposit: BalanceOf = 20; + let min_contribution: BalanceOf = 10; + let cap: BalanceOf = 300; + let end: BlockNumberFor = 50; + + assert_err!( + Crowdloan::create( + RuntimeOrigin::signed(creator), + deposit, + min_contribution, + cap, + end, + Some(noop_call()), + None + ), + pallet_crowdloan::Error::::DepositTooLow + ); + }); +} + +#[test] +fn test_create_fails_if_cap_is_not_greater_than_deposit() { + TestState::default() + .with_balance(U256::from(1), 100) + .build_and_execute(|| { + let creator: AccountOf = U256::from(1); + let deposit: BalanceOf = 50; + let min_contribution: BalanceOf = 10; + let cap: BalanceOf = 40; + let end: BlockNumberFor = 50; + + assert_err!( + Crowdloan::create( + RuntimeOrigin::signed(creator), + deposit, + min_contribution, + cap, + end, + Some(noop_call()), + None + ), + pallet_crowdloan::Error::::CapTooLow + ); + }); +} + +#[test] +fn test_create_fails_if_min_contribution_is_too_low() { + TestState::default() + .with_balance(U256::from(1), 100) + .build_and_execute(|| { + let creator: AccountOf = U256::from(1); + let deposit: BalanceOf = 50; + let min_contribution: BalanceOf = 5; + let cap: BalanceOf = 300; + let end: BlockNumberFor = 50; + + assert_err!( + Crowdloan::create( + RuntimeOrigin::signed(creator), + deposit, + min_contribution, + cap, + end, + Some(noop_call()), + None + ), + pallet_crowdloan::Error::::MinimumContributionTooLow + ); + }); +} + +#[test] +fn test_create_fails_if_end_is_in_the_past() { + let current_block_number: BlockNumberFor = 10; + + TestState::default() + .with_block_number(current_block_number) + .with_balance(U256::from(1), 100) + .build_and_execute(|| { + let creator: AccountOf = U256::from(1); + let deposit: BalanceOf = 50; + let min_contribution: BalanceOf = 10; + let cap: BalanceOf = 300; + let end: BlockNumberFor = current_block_number - 5; + + assert_err!( + Crowdloan::create( + RuntimeOrigin::signed(creator), + deposit, + min_contribution, + cap, + end, + Some(noop_call()), + None + ), + pallet_crowdloan::Error::::CannotEndInPast + ); + }); +} + +#[test] +fn test_create_fails_if_block_duration_is_too_short() { + TestState::default() + .with_balance(U256::from(1), 100) + .build_and_execute(|| { + let creator: AccountOf = U256::from(1); + let deposit: BalanceOf = 50; + let min_contribution: BalanceOf = 10; + let cap: BalanceOf = 300; + let end: BlockNumberFor = 11; + + assert_err!( + Crowdloan::create( + RuntimeOrigin::signed(creator), + deposit, + min_contribution, + cap, + end, + Some(noop_call()), + None + ), + pallet_crowdloan::Error::::BlockDurationTooShort + ); + }); +} + +#[test] +fn test_create_fails_if_block_duration_is_too_long() { + TestState::default() + .with_balance(U256::from(1), 100) + .build_and_execute(|| { + let creator: AccountOf = U256::from(1); + let 
deposit: BalanceOf = 50; + let min_contribution: BalanceOf = 10; + let cap: BalanceOf = 300; + let end: BlockNumberFor = 1000; + + assert_err!( + Crowdloan::create( + RuntimeOrigin::signed(creator), + deposit, + min_contribution, + cap, + end, + Some(noop_call()), + None + ), + pallet_crowdloan::Error::::BlockDurationTooLong + ); + }); +} + +#[test] +fn test_create_fails_if_creator_has_insufficient_balance() { + TestState::default() + .with_balance(U256::from(1), 100) + .build_and_execute(|| { + let creator: AccountOf = U256::from(1); + let deposit: BalanceOf = 200; + let min_contribution: BalanceOf = 10; + let cap: BalanceOf = 300; + let end: BlockNumberFor = 50; + + assert_err!( + Crowdloan::create( + RuntimeOrigin::signed(creator), + deposit, + min_contribution, + cap, + end, + Some(noop_call()), + None + ), + pallet_crowdloan::Error::::InsufficientBalance + ); + }); +} + +#[test] +fn test_contribute_succeeds() { + TestState::default() + .with_balance(U256::from(1), 200) + .with_balance(U256::from(2), 500) + .with_balance(U256::from(3), 200) + .build_and_execute(|| { + // create a crowdloan + let creator: AccountOf = U256::from(1); + let initial_deposit: BalanceOf = 50; + let min_contribution: BalanceOf = 10; + let cap: BalanceOf = 300; + let end: BlockNumberFor = 50; + + assert_ok!(Crowdloan::create( + RuntimeOrigin::signed(creator), + initial_deposit, + min_contribution, + cap, + end, + Some(noop_call()), + None + )); + + // run some blocks + run_to_block(10); + + let crowdloan_id: CrowdloanId = 0; + + // only the creator has contributed so far + assert!( + pallet_crowdloan::Crowdloans::::get(crowdloan_id) + .is_some_and(|c| c.contributors_count == 1) + ); + + // first contribution to the crowdloan from creator + let amount: BalanceOf = 50; + assert_ok!(Crowdloan::contribute( + RuntimeOrigin::signed(creator), + crowdloan_id, + amount + )); + assert_eq!( + last_event(), + pallet_crowdloan::Event::::Contributed { + crowdloan_id, + contributor: creator, + amount, + } + .into() + ); + assert_eq!( + pallet_crowdloan::Contributions::::get(crowdloan_id, creator), + Some(100) + ); + assert!( + pallet_crowdloan::Crowdloans::::get(crowdloan_id) + .is_some_and(|c| c.contributors_count == 1) + ); + assert_eq!( + Balances::free_balance(creator), + 200 - amount - initial_deposit + ); + + // second contribution to the crowdloan + let contributor1: AccountOf = U256::from(2); + let amount: BalanceOf = 100; + assert_ok!(Crowdloan::contribute( + RuntimeOrigin::signed(contributor1), + crowdloan_id, + amount + )); + assert_eq!( + last_event(), + pallet_crowdloan::Event::::Contributed { + crowdloan_id, + contributor: contributor1, + amount, + } + .into() + ); + assert_eq!( + pallet_crowdloan::Contributions::::get(crowdloan_id, contributor1), + Some(100) + ); + assert!( + pallet_crowdloan::Crowdloans::::get(crowdloan_id) + .is_some_and(|c| c.contributors_count == 2) + ); + assert_eq!(Balances::free_balance(contributor1), 500 - amount); + + // third contribution to the crowdloan + let contributor2: AccountOf = U256::from(3); + let amount: BalanceOf = 50; + assert_ok!(Crowdloan::contribute( + RuntimeOrigin::signed(contributor2), + crowdloan_id, + amount + )); + assert_eq!( + last_event(), + pallet_crowdloan::Event::::Contributed { + crowdloan_id, + contributor: contributor2, + amount, + } + .into() + ); + assert_eq!( + pallet_crowdloan::Contributions::::get(crowdloan_id, contributor2), + Some(50) + ); + assert!( + pallet_crowdloan::Crowdloans::::get(crowdloan_id) + .is_some_and(|c| c.contributors_count == 3) 
+ ); + assert_eq!(Balances::free_balance(contributor2), 200 - amount); + + // ensure the contributions are present in the funds account + let funds_account = pallet_crowdloan::Pallet::::funds_account(crowdloan_id); + assert_eq!(Balances::free_balance(funds_account), 250); + + // ensure the crowdloan raised amount is updated correctly + assert!( + pallet_crowdloan::Crowdloans::::get(crowdloan_id) + .is_some_and(|c| c.raised == 250) + ); + }); +} + +#[test] +fn test_contribute_succeeds_if_contribution_will_make_the_raised_amount_exceed_the_cap() { + TestState::default() + .with_balance(U256::from(1), 200) + .with_balance(U256::from(2), 500) + .build_and_execute(|| { + // create a crowdloan + let creator: AccountOf = U256::from(1); + let initial_deposit: BalanceOf = 50; + let min_contribution: BalanceOf = 10; + let cap: BalanceOf = 300; + let end: BlockNumberFor = 50; + + assert_ok!(Crowdloan::create( + RuntimeOrigin::signed(creator), + initial_deposit, + min_contribution, + cap, + end, + Some(noop_call()), + None + )); + + // run some blocks + run_to_block(10); + + // first contribution to the crowdloan from creator + let crowdloan_id: CrowdloanId = 0; + let amount: BalanceOf = 50; + assert_ok!(Crowdloan::contribute( + RuntimeOrigin::signed(creator), + crowdloan_id, + amount + )); + assert_eq!( + last_event(), + pallet_crowdloan::Event::::Contributed { + crowdloan_id, + contributor: creator, + amount, + } + .into() + ); + assert_eq!( + pallet_crowdloan::Contributions::::get(crowdloan_id, creator), + Some(100) + ); + assert_eq!( + Balances::free_balance(creator), + 200 - amount - initial_deposit + ); + + // second contribution to the crowdloan above the cap + let contributor1: AccountOf = U256::from(2); + let amount: BalanceOf = 300; + assert_ok!(Crowdloan::contribute( + RuntimeOrigin::signed(contributor1), + crowdloan_id, + amount + )); + assert_eq!( + last_event(), + pallet_crowdloan::Event::::Contributed { + crowdloan_id, + contributor: contributor1, + amount: 200, // the amount is capped at the cap + } + .into() + ); + assert_eq!( + pallet_crowdloan::Contributions::::get(crowdloan_id, contributor1), + Some(200) + ); + assert_eq!(Balances::free_balance(contributor1), 500 - 200); + + // ensure the contributions are present in the crowdloan account up to the cap + let funds_account = pallet_crowdloan::Pallet::::funds_account(crowdloan_id); + assert_eq!(Balances::free_balance(funds_account), 300); + + // ensure the crowdloan raised amount is updated correctly + assert!( + pallet_crowdloan::Crowdloans::::get(crowdloan_id) + .is_some_and(|c| c.raised == 300) + ); + }); +} + +#[test] +fn test_contribute_fails_if_bad_origin() { + TestState::default().build_and_execute(|| { + let crowdloan_id: CrowdloanId = 0; + let amount: BalanceOf = 100; + + assert_err!( + Crowdloan::contribute(RuntimeOrigin::none(), crowdloan_id, amount), + DispatchError::BadOrigin + ); + + assert_err!( + Crowdloan::contribute(RuntimeOrigin::root(), crowdloan_id, amount), + DispatchError::BadOrigin + ); + }); +} + +#[test] +fn test_contribute_fails_if_crowdloan_does_not_exist() { + TestState::default() + .with_balance(U256::from(1), 100) + .build_and_execute(|| { + let contributor: AccountOf = U256::from(1); + let crowdloan_id: CrowdloanId = 0; + let amount: BalanceOf = 20; + + assert_err!( + Crowdloan::contribute(RuntimeOrigin::signed(contributor), crowdloan_id, amount), + pallet_crowdloan::Error::::InvalidCrowdloanId + ); + }); +} + +#[test] +fn test_contribute_fails_if_contribution_period_ended() { + TestState::default() + 
.with_balance(U256::from(1), 100) + .with_balance(U256::from(2), 100) + .build_and_execute(|| { + // create a crowdloan + let creator: AccountOf = U256::from(1); + let initial_deposit: BalanceOf = 50; + let min_contribution: BalanceOf = 10; + let cap: BalanceOf = 300; + let end: BlockNumberFor = 50; + + assert_ok!(Crowdloan::create( + RuntimeOrigin::signed(creator), + initial_deposit, + min_contribution, + cap, + end, + Some(noop_call()), + None + )); + + // run past the end of the crowdloan + run_to_block(60); + + // contribute to the crowdloan + let contributor: AccountOf = U256::from(2); + let crowdloan_id: CrowdloanId = 0; + let amount: BalanceOf = 20; + assert_err!( + Crowdloan::contribute(RuntimeOrigin::signed(contributor), crowdloan_id, amount), + pallet_crowdloan::Error::::ContributionPeriodEnded + ); + }); +} + +#[test] +fn test_contribute_fails_if_cap_has_been_raised() { + TestState::default() + .with_balance(U256::from(1), 100) + .with_balance(U256::from(2), 1000) + .with_balance(U256::from(3), 100) + .build_and_execute(|| { + // create a crowdloan + let creator: AccountOf = U256::from(1); + let initial_deposit: BalanceOf = 50; + let min_contribution: BalanceOf = 10; + let cap: BalanceOf = 300; + let end: BlockNumberFor = 50; + + assert_ok!(Crowdloan::create( + RuntimeOrigin::signed(creator), + initial_deposit, + min_contribution, + cap, + end, + Some(noop_call()), + None + )); + + // run some blocks + run_to_block(10); + + // first contribution to the crowdloan fully raise the cap + let crowdloan_id: CrowdloanId = 0; + let contributor1: AccountOf = U256::from(2); + let amount: BalanceOf = cap - initial_deposit; + assert_ok!(Crowdloan::contribute( + RuntimeOrigin::signed(contributor1), + crowdloan_id, + amount + )); + + // second contribution to the crowdloan + let contributor2: AccountOf = U256::from(3); + let amount: BalanceOf = 10; + assert_err!( + Crowdloan::contribute(RuntimeOrigin::signed(contributor2), crowdloan_id, amount), + pallet_crowdloan::Error::::CapRaised + ); + }); +} + +#[test] +fn test_contribute_fails_if_contribution_is_below_minimum_contribution() { + TestState::default() + .with_balance(U256::from(1), 100) + .with_balance(U256::from(2), 100) + .build_and_execute(|| { + // create a crowdloan + let creator: AccountOf = U256::from(1); + let initial_deposit: BalanceOf = 50; + let min_contribution: BalanceOf = 10; + let cap: BalanceOf = 300; + let end: BlockNumberFor = 50; + + assert_ok!(Crowdloan::create( + RuntimeOrigin::signed(creator), + initial_deposit, + min_contribution, + cap, + end, + Some(noop_call()), + None + )); + + // run some blocks + run_to_block(10); + + // contribute to the crowdloan + let contributor: AccountOf = U256::from(2); + let crowdloan_id: CrowdloanId = 0; + let amount: BalanceOf = 5; + assert_err!( + Crowdloan::contribute(RuntimeOrigin::signed(contributor), crowdloan_id, amount), + pallet_crowdloan::Error::::ContributionTooLow + ) + }); +} + +#[test] +fn test_contribute_fails_if_max_contributors_has_been_reached() { + TestState::default() + .with_balance(U256::from(1), 100) + .with_balance(U256::from(2), 100) + .with_balance(U256::from(3), 100) + .with_balance(U256::from(4), 100) + .with_balance(U256::from(5), 100) + .with_balance(U256::from(6), 100) + .with_balance(U256::from(7), 100) + .with_balance(U256::from(8), 100) + .with_balance(U256::from(9), 100) + .with_balance(U256::from(10), 100) + .with_balance(U256::from(11), 100) + .build_and_execute(|| { + // create a crowdloan + let creator: AccountOf = U256::from(1); + let 
initial_deposit: BalanceOf = 50; + let min_contribution: BalanceOf = 10; + let cap: BalanceOf = 1000; + let end: BlockNumberFor = 50; + + assert_ok!(Crowdloan::create( + RuntimeOrigin::signed(creator), + initial_deposit, + min_contribution, + cap, + end, + Some(noop_call()), + None + )); + + // run some blocks + run_to_block(10); + + // contribute to the crowdloan + let crowdloan_id: CrowdloanId = 0; + let amount: BalanceOf = 20; + for i in 2..=10 { + let contributor: AccountOf = U256::from(i); + assert_ok!(Crowdloan::contribute( + RuntimeOrigin::signed(contributor), + crowdloan_id, + amount + )); + } + + // try to contribute + let contributor: AccountOf = U256::from(10); + assert_err!( + Crowdloan::contribute(RuntimeOrigin::signed(contributor), crowdloan_id, amount), + pallet_crowdloan::Error::::MaxContributorsReached + ); + }); +} + +#[test] +fn test_contribute_fails_if_contributor_has_insufficient_balance() { + TestState::default() + .with_balance(U256::from(1), 100) + .with_balance(U256::from(2), 50) + .build_and_execute(|| { + // create a crowdloan + let creator: AccountOf = U256::from(1); + let initial_deposit: BalanceOf = 50; + let min_contribution: BalanceOf = 10; + let cap: BalanceOf = 300; + let end: BlockNumberFor = 50; + + assert_ok!(Crowdloan::create( + RuntimeOrigin::signed(creator), + initial_deposit, + min_contribution, + cap, + end, + Some(noop_call()), + None + )); + + // run some blocks + run_to_block(10); + + // contribute to the crowdloan + let crowdloan_id: CrowdloanId = 0; + let contributor: AccountOf = U256::from(2); + let amount: BalanceOf = 100; + + assert_err!( + Crowdloan::contribute(RuntimeOrigin::signed(contributor), crowdloan_id, amount), + pallet_crowdloan::Error::::InsufficientBalance + ); + }); +} + +#[test] +fn test_withdraw_from_contributor_succeeds() { + TestState::default() + .with_balance(U256::from(1), 100) + .with_balance(U256::from(2), 100) + .with_balance(U256::from(3), 100) + .build_and_execute(|| { + // create a crowdloan + let creator: AccountOf = U256::from(1); + let initial_deposit: BalanceOf = 50; + let min_contribution: BalanceOf = 10; + let cap: BalanceOf = 300; + let end: BlockNumberFor = 50; + + assert_ok!(Crowdloan::create( + RuntimeOrigin::signed(creator), + initial_deposit, + min_contribution, + cap, + end, + Some(noop_call()), + None + )); + + // run some blocks + run_to_block(10); + + // contribute to the crowdloan + let crowdloan_id: CrowdloanId = 0; + + let contributor1: AccountOf = U256::from(2); + let amount1: BalanceOf = 100; + assert_ok!(Crowdloan::contribute( + RuntimeOrigin::signed(contributor1), + crowdloan_id, + amount1 + )); + + let contributor2: AccountOf = U256::from(3); + let amount2: BalanceOf = 100; + assert_ok!(Crowdloan::contribute( + RuntimeOrigin::signed(contributor2), + crowdloan_id, + amount2 + )); + + // run some more blocks past the end of the contribution period + run_to_block(60); + + // ensure the contributor count is correct + assert!( + pallet_crowdloan::Crowdloans::::get(crowdloan_id) + .is_some_and(|c| c.contributors_count == 3) + ); + + // withdraw from contributor1 + assert_ok!(Crowdloan::withdraw( + RuntimeOrigin::signed(contributor1), + crowdloan_id + )); + // ensure the contributor1 contribution has been removed + assert_eq!( + pallet_crowdloan::Contributions::::get(crowdloan_id, contributor1), + None, + ); + assert!( + pallet_crowdloan::Crowdloans::::get(crowdloan_id) + .is_some_and(|c| c.contributors_count == 2) + ); + // ensure the contributor1 has the correct amount + assert_eq!( + 
pallet_balances::Pallet::::free_balance(contributor1), + 100 + ); + + // withdraw from contributor2 + assert_ok!(Crowdloan::withdraw( + RuntimeOrigin::signed(contributor2), + crowdloan_id + )); + // ensure the contributor2 contribution has been removed + assert_eq!( + pallet_crowdloan::Contributions::::get(crowdloan_id, contributor2), + None, + ); + assert!( + pallet_crowdloan::Crowdloans::::get(crowdloan_id) + .is_some_and(|c| c.contributors_count == 1) + ); + // ensure the contributor2 has the correct amount + assert_eq!( + pallet_balances::Pallet::::free_balance(contributor2), + 100 + ); + + // ensure the crowdloan account has the correct amount + let funds_account = pallet_crowdloan::Pallet::::funds_account(crowdloan_id); + assert_eq!(Balances::free_balance(funds_account), initial_deposit); + // ensure the crowdloan raised amount is updated correctly + assert!( + pallet_crowdloan::Crowdloans::::get(crowdloan_id) + .is_some_and(|c| c.raised == initial_deposit) + ); + }); +} + +#[test] +fn test_withdraw_from_creator_with_contribution_over_deposit_succeeds() { + TestState::default() + .with_balance(U256::from(1), 200) + .build_and_execute(|| { + // create a crowdloan + let creator: AccountOf = U256::from(1); + let initial_deposit: BalanceOf = 50; + let min_contribution: BalanceOf = 10; + let cap: BalanceOf = 300; + let end: BlockNumberFor = 50; + + assert_ok!(Crowdloan::create( + RuntimeOrigin::signed(creator), + initial_deposit, + min_contribution, + cap, + end, + Some(noop_call()), + None + )); + + // contribute to the crowdloan as the creator + let crowdloan_id: CrowdloanId = 0; + + let amount: BalanceOf = 100; + assert_ok!(Crowdloan::contribute( + RuntimeOrigin::signed(creator), + crowdloan_id, + amount + )); + + // ensure the contributor count is correct + assert!( + pallet_crowdloan::Crowdloans::::get(crowdloan_id) + .is_some_and(|c| c.contributors_count == 1) + ); + + // withdraw + let crowdloan_id: CrowdloanId = 0; + assert_ok!(Crowdloan::withdraw( + RuntimeOrigin::signed(creator), + crowdloan_id + )); + + // ensure the creator has the correct amount + assert_eq!( + pallet_balances::Pallet::::free_balance(creator), + 200 - initial_deposit + ); + // ensure the creator contribution has been removed + assert_eq!( + pallet_crowdloan::Contributions::::get(crowdloan_id, creator), + Some(initial_deposit), + ); + // ensure the contributor count hasn't changed because deposit is kept + assert!( + pallet_crowdloan::Crowdloans::::get(crowdloan_id) + .is_some_and(|c| c.contributors_count == 1) + ); + + // ensure the crowdloan account has the correct amount + let funds_account = pallet_crowdloan::Pallet::::funds_account(crowdloan_id); + assert_eq!(Balances::free_balance(funds_account), initial_deposit); + // ensure the crowdloan raised amount is updated correctly + assert!( + pallet_crowdloan::Crowdloans::::get(crowdloan_id) + .is_some_and(|c| c.raised == initial_deposit) + ); + }); +} +#[test] +fn test_withdraw_fails_from_creator_with_no_contribution_over_deposit() { + TestState::default() + .with_balance(U256::from(1), 100) + .with_balance(U256::from(2), 200) + .build_and_execute(|| { + // create a crowdloan + let creator: AccountOf = U256::from(1); + let initial_deposit: BalanceOf = 50; + let min_contribution: BalanceOf = 10; + let cap: BalanceOf = 300; + let end: BlockNumberFor = 50; + + assert_ok!(Crowdloan::create( + RuntimeOrigin::signed(creator), + initial_deposit, + min_contribution, + cap, + end, + Some(noop_call()), + None + )); + + // try to withdraw + let crowdloan_id: CrowdloanId 
= 0; + assert_err!( + Crowdloan::withdraw(RuntimeOrigin::signed(creator), crowdloan_id), + pallet_crowdloan::Error::::DepositCannotBeWithdrawn + ); + + // ensure the crowdloan account has the correct amount + let funds_account = pallet_crowdloan::Pallet::::funds_account(crowdloan_id); + assert_eq!(Balances::free_balance(funds_account), initial_deposit); + // ensure the crowdloan raised amount is updated correctly + assert!( + pallet_crowdloan::Crowdloans::::get(crowdloan_id) + .is_some_and(|c| c.raised == initial_deposit) + ); + }); +} + +#[test] +fn test_withdraw_fails_if_bad_origin() { + TestState::default().build_and_execute(|| { + let crowdloan_id: CrowdloanId = 0; + + assert_err!( + Crowdloan::withdraw(RuntimeOrigin::none(), crowdloan_id), + DispatchError::BadOrigin + ); + + assert_err!( + Crowdloan::withdraw(RuntimeOrigin::root(), crowdloan_id), + DispatchError::BadOrigin + ); + }); +} + +#[test] +fn test_withdraw_fails_if_crowdloan_does_not_exists() { + TestState::default().build_and_execute(|| { + let contributor: AccountOf = U256::from(1); + let crowdloan_id: CrowdloanId = 0; + + assert_err!( + Crowdloan::withdraw(RuntimeOrigin::signed(contributor), crowdloan_id), + pallet_crowdloan::Error::::InvalidCrowdloanId + ); + }); +} + +#[test] +fn test_withdraw_fails_if_crowdloan_has_already_been_finalized() { + TestState::default() + .with_balance(U256::from(1), 100) + .with_balance(U256::from(2), 200) + .build_and_execute(|| { + // create a crowdloan + let creator: AccountOf = U256::from(1); + let deposit: BalanceOf = 50; + let min_contribution: BalanceOf = 10; + let cap: BalanceOf = 100; + let end: BlockNumberFor = 50; + + assert_ok!(Crowdloan::create( + RuntimeOrigin::signed(creator), + deposit, + min_contribution, + cap, + end, + Some(noop_call()), + None, + )); + + // some contribution + let crowdloan_id: CrowdloanId = 0; + let contributor: AccountOf = U256::from(2); + let amount: BalanceOf = 50; + + assert_ok!(Crowdloan::contribute( + RuntimeOrigin::signed(contributor), + crowdloan_id, + amount + )); + + // run some more blocks past the end of the contribution period + run_to_block(60); + + // finalize the crowdloan + assert_ok!(Crowdloan::finalize( + RuntimeOrigin::signed(creator), + crowdloan_id + )); + + // try to withdraw + assert_err!( + Crowdloan::withdraw(RuntimeOrigin::signed(creator), crowdloan_id), + pallet_crowdloan::Error::::AlreadyFinalized + ); + }); +} + +#[test] +fn test_withdraw_fails_if_no_contribution_exists() { + TestState::default() + .with_balance(U256::from(1), 100) + .with_balance(U256::from(2), 200) + .build_and_execute(|| { + // create a crowdloan + let creator: AccountOf = U256::from(1); + let initial_deposit: BalanceOf = 50; + let min_contribution: BalanceOf = 10; + let cap: BalanceOf = 300; + let end: BlockNumberFor = 50; + + assert_ok!(Crowdloan::create( + RuntimeOrigin::signed(creator), + initial_deposit, + min_contribution, + cap, + end, + Some(noop_call()), + None + )); + + // run some more blocks past the end of the contribution period + run_to_block(60); + + // try to withdraw + let crowdloan_id: CrowdloanId = 0; + let contributor: AccountOf = U256::from(2); + assert_err!( + Crowdloan::withdraw(RuntimeOrigin::signed(contributor), crowdloan_id), + pallet_crowdloan::Error::::NoContribution + ); + }); +} + +#[test] +fn test_finalize_succeeds() { + TestState::default() + .with_balance(U256::from(1), 100) + .with_balance(U256::from(2), 100) + .build_and_execute(|| { + // create a crowdloan + let creator: AccountOf = U256::from(1); + let deposit: 
BalanceOf = 50; + let min_contribution: BalanceOf = 10; + let cap: BalanceOf = 100; + let end: BlockNumberFor = 50; + let call = Box::new(RuntimeCall::TestPallet( + pallet_test::Call::::transfer_funds { + dest: U256::from(42), + }, + )); + + assert_ok!(Crowdloan::create( + RuntimeOrigin::signed(creator), + deposit, + min_contribution, + cap, + end, + Some(call), + None + )); + + // run some blocks + run_to_block(10); + + // some contribution + let crowdloan_id: CrowdloanId = 0; + let contributor: AccountOf = U256::from(2); + let amount: BalanceOf = 50; + + assert_ok!(Crowdloan::contribute( + RuntimeOrigin::signed(contributor), + crowdloan_id, + amount + )); + + // run some more blocks past the end of the contribution period + run_to_block(60); + + // finalize the crowdloan + assert_ok!(Crowdloan::finalize( + RuntimeOrigin::signed(creator), + crowdloan_id + )); + + // ensure the transfer was a success from the dispatched call + assert_eq!( + pallet_balances::Pallet::::free_balance(U256::from(42)), + 100 + ); + + // ensure the crowdloan is marked as finalized + assert!( + pallet_crowdloan::Crowdloans::::get(crowdloan_id) + .is_some_and(|c| c.finalized) + ); + + // ensure the event is emitted + assert_eq!( + last_event(), + pallet_crowdloan::Event::::Finalized { crowdloan_id }.into() + ); + + // ensure the current crowdloan id was accessible from the dispatched call + assert_eq!( + pallet_test::PassedCrowdloanId::::get(), + Some(crowdloan_id) + ); + }); +} + +#[test] +fn test_finalize_succeeds_with_target_address() { + TestState::default() + .with_balance(U256::from(1), 100) + .with_balance(U256::from(2), 100) + .build_and_execute(|| { + // create a crowdloan + let creator: AccountOf = U256::from(1); + let deposit: BalanceOf = 50; + let min_contribution: BalanceOf = 10; + let cap: BalanceOf = 100; + let end: BlockNumberFor = 50; + let target_address: AccountOf = U256::from(42); + let call = Box::new(RuntimeCall::TestPallet( + pallet_test::Call::::set_passed_crowdloan_id {}, + )); + + assert_ok!(Crowdloan::create( + RuntimeOrigin::signed(creator), + deposit, + min_contribution, + cap, + end, + Some(call), + Some(target_address), + )); + + // run some blocks + run_to_block(10); + + // some contribution + let crowdloan_id: CrowdloanId = 0; + let contributor: AccountOf = U256::from(2); + let amount: BalanceOf = 50; + + assert_ok!(Crowdloan::contribute( + RuntimeOrigin::signed(contributor), + crowdloan_id, + amount + )); + + // run some more blocks past the end of the contribution period + run_to_block(60); + + // finalize the crowdloan + assert_ok!(Crowdloan::finalize( + RuntimeOrigin::signed(creator), + crowdloan_id + )); + + // ensure the target address has received the funds + assert_eq!( + pallet_balances::Pallet::::free_balance(target_address), + 100 + ); + + // ensure the crowdloan is marked as finalized + assert!( + pallet_crowdloan::Crowdloans::::get(crowdloan_id) + .is_some_and(|c| c.finalized) + ); + + // ensure the event is emitted + assert_eq!( + last_event(), + pallet_crowdloan::Event::::Finalized { crowdloan_id }.into() + ); + + // ensure the current crowdloan id was accessible from the dispatched call + assert_eq!( + pallet_test::PassedCrowdloanId::::get(), + Some(crowdloan_id) + ); + }) +} + +#[test] +fn test_finalize_fails_if_bad_origin() { + TestState::default() + .with_balance(U256::from(1), 100) + .build_and_execute(|| { + let crowdloan_id: CrowdloanId = 0; + + assert_err!( + Crowdloan::finalize(RuntimeOrigin::none(), crowdloan_id), + DispatchError::BadOrigin + ); + + 
assert_err!( + Crowdloan::finalize(RuntimeOrigin::root(), crowdloan_id), + DispatchError::BadOrigin + ); + }); +} + +#[test] +fn test_finalize_fails_if_crowdloan_does_not_exist() { + TestState::default() + .with_balance(U256::from(1), 100) + .build_and_execute(|| { + let creator: AccountOf = U256::from(1); + let crowdloan_id: CrowdloanId = 0; + + // try to finalize + assert_err!( + Crowdloan::finalize(RuntimeOrigin::signed(creator), crowdloan_id), + pallet_crowdloan::Error::::InvalidCrowdloanId + ); + }); +} + +#[test] +fn test_finalize_fails_if_not_creator_origin() { + TestState::default() + .with_balance(U256::from(1), 100) + .with_balance(U256::from(2), 100) + .build_and_execute(|| { + // create a crowdloan + let creator: AccountOf = U256::from(1); + let deposit: BalanceOf = 50; + let min_contribution: BalanceOf = 10; + let cap: BalanceOf = 100; + let end: BlockNumberFor = 50; + + assert_ok!(Crowdloan::create( + RuntimeOrigin::signed(creator), + deposit, + min_contribution, + cap, + end, + Some(noop_call()), + None + )); + + // run some blocks + run_to_block(10); + + // some contribution + let crowdloan_id: CrowdloanId = 0; + let contributor: AccountOf = U256::from(2); + let amount: BalanceOf = 50; + assert_ok!(Crowdloan::contribute( + RuntimeOrigin::signed(contributor), + crowdloan_id, + amount + )); + + // run some more blocks past the end of the contribution period + run_to_block(60); + + // try finalize the crowdloan + assert_err!( + Crowdloan::finalize(RuntimeOrigin::signed(contributor), crowdloan_id), + pallet_crowdloan::Error::::InvalidOrigin + ); + }); +} + +#[test] +fn test_finalize_fails_if_crowdloan_has_not_ended() { + TestState::default() + .with_balance(U256::from(1), 100) + .with_balance(U256::from(2), 100) + .build_and_execute(|| { + // create a crowdloan + let creator: AccountOf = U256::from(1); + let deposit: BalanceOf = 50; + let min_contribution: BalanceOf = 10; + let cap: BalanceOf = 100; + let end: BlockNumberFor = 50; + + assert_ok!(Crowdloan::create( + RuntimeOrigin::signed(creator), + deposit, + min_contribution, + cap, + end, + Some(noop_call()), + None, + )); + + // run some blocks + run_to_block(10); + + // some contribution + let crowdloan_id: CrowdloanId = 0; + let contributor: AccountOf = U256::from(2); + let amount: BalanceOf = 50; + + assert_ok!(Crowdloan::contribute( + RuntimeOrigin::signed(contributor), + crowdloan_id, + amount + )); + + // run some more blocks before end of contribution period + run_to_block(10); + + // try to finalize + assert_err!( + Crowdloan::finalize(RuntimeOrigin::signed(creator), crowdloan_id), + pallet_crowdloan::Error::::ContributionPeriodNotEnded + ); + }); +} + +#[test] +fn test_finalize_fails_if_crowdloan_cap_is_not_raised() { + TestState::default() + .with_balance(U256::from(1), 100) + .with_balance(U256::from(2), 100) + .build_and_execute(|| { + // create a crowdloan + let creator: AccountOf = U256::from(1); + let deposit: BalanceOf = 50; + let min_contribution: BalanceOf = 10; + let cap: BalanceOf = 100; + let end: BlockNumberFor = 50; + + assert_ok!(Crowdloan::create( + RuntimeOrigin::signed(creator), + deposit, + min_contribution, + cap, + end, + Some(noop_call()), + None, + )); + + // run some blocks + run_to_block(10); + + // some contribution + let crowdloan_id: CrowdloanId = 0; + let contributor: AccountOf = U256::from(2); + let amount: BalanceOf = 49; // below cap + + assert_ok!(Crowdloan::contribute( + RuntimeOrigin::signed(contributor), + crowdloan_id, + amount + )); + + // run some more blocks past the end of 
the contribution period + run_to_block(60); + + // try finalize the crowdloan + assert_err!( + Crowdloan::finalize(RuntimeOrigin::signed(creator), crowdloan_id), + pallet_crowdloan::Error::::CapNotRaised + ); + }); +} + +#[test] +fn test_finalize_fails_if_crowdloan_has_already_been_finalized() { + TestState::default() + .with_balance(U256::from(1), 100) + .with_balance(U256::from(2), 100) + .build_and_execute(|| { + // create a crowdloan + let creator: AccountOf = U256::from(1); + let deposit: BalanceOf = 50; + let min_contribution: BalanceOf = 10; + let cap: BalanceOf = 100; + let end: BlockNumberFor = 50; + + assert_ok!(Crowdloan::create( + RuntimeOrigin::signed(creator), + deposit, + min_contribution, + cap, + end, + Some(noop_call()), + None, + )); + + // some contribution + let crowdloan_id: CrowdloanId = 0; + let contributor: AccountOf = U256::from(2); + let amount: BalanceOf = 50; + + assert_ok!(Crowdloan::contribute( + RuntimeOrigin::signed(contributor), + crowdloan_id, + amount + )); + + // run some more blocks past the end of the contribution period + run_to_block(60); + + // finalize the crowdloan + assert_ok!(Crowdloan::finalize( + RuntimeOrigin::signed(creator), + crowdloan_id + )); + + // try finalize the crowdloan a second time + assert_err!( + Crowdloan::finalize(RuntimeOrigin::signed(creator), crowdloan_id), + pallet_crowdloan::Error::::AlreadyFinalized + ); + }); +} + +#[test] +fn test_finalize_fails_if_call_fails() { + TestState::default() + .with_balance(U256::from(1), 100) + .with_balance(U256::from(2), 100) + .build_and_execute(|| { + // create a crowdloan + let creator: AccountOf = U256::from(1); + let deposit: BalanceOf = 50; + let min_contribution: BalanceOf = 10; + let cap: BalanceOf = 100; + let end: BlockNumberFor = 50; + let call = Box::new(RuntimeCall::TestPallet( + pallet_test::Call::::failing_extrinsic {}, + )); + + assert_ok!(Crowdloan::create( + RuntimeOrigin::signed(creator), + deposit, + min_contribution, + cap, + end, + Some(call), + None, + )); + + // run some blocks + run_to_block(10); + + // some contribution + let crowdloan_id: CrowdloanId = 0; + let contributor: AccountOf = U256::from(2); + let amount: BalanceOf = 50; + assert_ok!(Crowdloan::contribute( + RuntimeOrigin::signed(contributor), + crowdloan_id, + amount + )); + + // run some more blocks past the end of the contribution period + run_to_block(60); + + // try finalize the crowdloan + assert_err!( + Crowdloan::finalize(RuntimeOrigin::signed(creator), crowdloan_id), + pallet_test::Error::::ShouldFail + ); + }); +} + +#[test] +fn test_refund_succeeds() { + TestState::default() + .with_balance(U256::from(1), 100) + .with_balance(U256::from(2), 100) + .with_balance(U256::from(3), 100) + .with_balance(U256::from(4), 100) + .with_balance(U256::from(5), 100) + .with_balance(U256::from(6), 100) + .with_balance(U256::from(7), 100) + .build_and_execute(|| { + // create a crowdloan + let creator: AccountOf = U256::from(1); + let initial_deposit: BalanceOf = 50; + let min_contribution: BalanceOf = 10; + let cap: BalanceOf = 400; + let end: BlockNumberFor = 50; + assert_ok!(Crowdloan::create( + RuntimeOrigin::signed(creator), + initial_deposit, + min_contribution, + cap, + end, + Some(noop_call()), + None, + )); + + // run some blocks + run_to_block(10); + + // make 6 contributions to reach 350 raised amount (initial deposit + contributions) + let crowdloan_id: CrowdloanId = 0; + let amount: BalanceOf = 50; + for i in 2..8 { + let contributor: AccountOf = U256::from(i); + 
assert_ok!(Crowdloan::contribute( + RuntimeOrigin::signed(contributor), + crowdloan_id, + amount + )); + } + + // ensure the contributor count is correct + assert!( + pallet_crowdloan::Crowdloans::::get(crowdloan_id) + .is_some_and(|c| c.contributors_count == 7) + ); + + // run some more blocks past the end of the contribution period + run_to_block(60); + + // first round of refund + assert_ok!(Crowdloan::refund( + RuntimeOrigin::signed(creator), + crowdloan_id + )); + + // ensure the contributor count is correct, we processed 5 refunds + assert!( + pallet_crowdloan::Crowdloans::::get(crowdloan_id) + .is_some_and(|c| c.contributors_count == 2) + ); + + // ensure the crowdloan account has the correct amount + let funds_account = pallet_crowdloan::Pallet::::funds_account(crowdloan_id); + assert_eq!(Balances::free_balance(funds_account), 350 - 5 * amount); + // ensure raised amount is updated correctly + assert!( + pallet_crowdloan::Crowdloans::::get(crowdloan_id) + .is_some_and(|c| c.raised == 350 - 5 * amount) + ); + // ensure the event is emitted + assert_eq!( + last_event(), + pallet_crowdloan::Event::::PartiallyRefunded { crowdloan_id }.into() + ); + + // run some more blocks + run_to_block(70); + + // second round of refund + assert_ok!(Crowdloan::refund( + RuntimeOrigin::signed(creator), + crowdloan_id + )); + + // ensure the contributor count is correct, we processed 1 more refund + // keeping deposit + assert!( + pallet_crowdloan::Crowdloans::::get(crowdloan_id) + .is_some_and(|c| c.contributors_count == 1) + ); + + // ensure the crowdloan account has the correct amount + assert_eq!( + pallet_balances::Pallet::::free_balance(funds_account), + initial_deposit + ); + // ensure the raised amount is updated correctly + assert!( + pallet_crowdloan::Crowdloans::::get(crowdloan_id) + .is_some_and(|c| c.raised == initial_deposit) + ); + + // ensure creator has the correct amount + assert_eq!( + pallet_balances::Pallet::::free_balance(creator), + initial_deposit + ); + + // ensure each contributor has been refunded and removed from the crowdloan + for i in 2..8 { + let contributor: AccountOf = U256::from(i); + assert_eq!( + pallet_balances::Pallet::::free_balance(contributor), + 100 + ); + assert_eq!( + pallet_crowdloan::Contributions::::get(crowdloan_id, contributor), + None, + ); + } + + // ensure the event is emitted + assert_eq!( + last_event(), + pallet_crowdloan::Event::::AllRefunded { crowdloan_id }.into() + ); + }) +} + +#[test] +fn test_refund_fails_if_bad_origin() { + TestState::default().build_and_execute(|| { + let crowdloan_id: CrowdloanId = 0; + + assert_err!( + Crowdloan::refund(RuntimeOrigin::none(), crowdloan_id), + DispatchError::BadOrigin + ); + + assert_err!( + Crowdloan::refund(RuntimeOrigin::root(), crowdloan_id), + DispatchError::BadOrigin + ); + }); +} + +#[test] +fn test_refund_fails_if_crowdloan_does_not_exist() { + TestState::default() + .with_balance(U256::from(1), 100) + .build_and_execute(|| { + let creator: AccountOf = U256::from(1); + let crowdloan_id: CrowdloanId = 0; + + assert_err!( + Crowdloan::refund(RuntimeOrigin::signed(creator), crowdloan_id), + pallet_crowdloan::Error::::InvalidCrowdloanId + ); + }); +} + +#[test] +fn test_refund_fails_if_crowdloan_has_not_ended() { + TestState::default() + .with_balance(U256::from(1), 100) + .build_and_execute(|| { + // create a crowdloan + let creator: AccountOf = U256::from(1); + let initial_deposit: BalanceOf = 50; + let min_contribution: BalanceOf = 10; + let cap: BalanceOf = 300; + let end: BlockNumberFor = 50; + 
assert_ok!(Crowdloan::create( + RuntimeOrigin::signed(creator), + initial_deposit, + min_contribution, + cap, + end, + Some(noop_call()), + None, + )); + + // run some blocks + run_to_block(10); + + // try to refund + let crowdloan_id: CrowdloanId = 0; + assert_err!( + Crowdloan::refund(RuntimeOrigin::signed(creator), crowdloan_id), + pallet_crowdloan::Error::::ContributionPeriodNotEnded + ); + }); +} + +#[test] +fn test_dissolve_succeeds() { + TestState::default() + .with_balance(U256::from(1), 100) + .build_and_execute(|| { + // create a crowdloan + let creator: AccountOf = U256::from(1); + let deposit: BalanceOf = 50; + let min_contribution: BalanceOf = 10; + let cap: BalanceOf = 100; + let end: BlockNumberFor = 50; + + assert_ok!(Crowdloan::create( + RuntimeOrigin::signed(creator), + deposit, + min_contribution, + cap, + end, + Some(noop_call()), + None, + )); + + // run some blocks past end + run_to_block(60); + + let crowdloan_id: CrowdloanId = 0; + + // ensure the contributor count is correct + assert!( + pallet_crowdloan::Crowdloans::::get(crowdloan_id) + .is_some_and(|c| c.contributors_count == 1) + ); + + // dissolve the crowdloan + assert_ok!(Crowdloan::dissolve( + RuntimeOrigin::signed(creator), + crowdloan_id + )); + + // ensure the crowdloan is removed from the crowdloans map + assert!(pallet_crowdloan::Crowdloans::::get(crowdloan_id).is_none()); + + // ensure the contributions are removed + assert!(!pallet_crowdloan::Contributions::::contains_prefix( + crowdloan_id + )); + + // ensure the event is emitted + assert_eq!( + last_event(), + pallet_crowdloan::Event::::Dissolved { crowdloan_id }.into() + ) + }); +} + +#[test] +fn test_dissolve_fails_if_bad_origin() { + TestState::default().build_and_execute(|| { + let crowdloan_id: CrowdloanId = 0; + + assert_err!( + Crowdloan::dissolve(RuntimeOrigin::none(), crowdloan_id), + DispatchError::BadOrigin + ); + + assert_err!( + Crowdloan::dissolve(RuntimeOrigin::root(), crowdloan_id), + DispatchError::BadOrigin + ); + }); +} + +#[test] +fn test_dissolve_fails_if_crowdloan_does_not_exist() { + TestState::default().build_and_execute(|| { + let crowdloan_id: CrowdloanId = 0; + assert_err!( + Crowdloan::dissolve(RuntimeOrigin::signed(U256::from(1)), crowdloan_id), + pallet_crowdloan::Error::::InvalidCrowdloanId + ); + }); +} + +#[test] +fn test_dissolve_fails_if_crowdloan_has_been_finalized() { + TestState::default() + .with_balance(U256::from(1), 100) + .with_balance(U256::from(2), 100) + .build_and_execute(|| { + let creator: AccountOf = U256::from(1); + let deposit: BalanceOf = 50; + let min_contribution: BalanceOf = 10; + let cap: BalanceOf = 100; + let end: BlockNumberFor = 50; + + assert_ok!(Crowdloan::create( + RuntimeOrigin::signed(creator), + deposit, + min_contribution, + cap, + end, + Some(noop_call()), + None, + )); + + // run some blocks + run_to_block(10); + + // some contribution + let crowdloan_id: CrowdloanId = 0; + let contributor: AccountOf = U256::from(2); + let amount: BalanceOf = 50; + + assert_ok!(Crowdloan::contribute( + RuntimeOrigin::signed(contributor), + crowdloan_id, + amount + )); + + // run some more blocks past the end of the contribution period + run_to_block(60); + + // finalize the crowdloan + assert_ok!(Crowdloan::finalize( + RuntimeOrigin::signed(creator), + crowdloan_id + )); + + // try dissolve the crowdloan + assert_err!( + Crowdloan::dissolve(RuntimeOrigin::signed(creator), crowdloan_id), + pallet_crowdloan::Error::::AlreadyFinalized + ); + }); +} + +#[test] +fn 
test_dissolve_fails_if_origin_is_not_creator() { + TestState::default() + .with_balance(U256::from(1), 100) + .build_and_execute(|| { + let creator: AccountOf = U256::from(1); + let deposit: BalanceOf = 50; + let min_contribution: BalanceOf = 10; + let cap: BalanceOf = 100; + let end: BlockNumberFor = 50; + + assert_ok!(Crowdloan::create( + RuntimeOrigin::signed(creator), + deposit, + min_contribution, + cap, + end, + Some(noop_call()), + None, + )); + + // run some blocks + run_to_block(10); + + // some contribution + let crowdloan_id: CrowdloanId = 0; + + // try dissolve the crowdloan + assert_err!( + Crowdloan::dissolve(RuntimeOrigin::signed(U256::from(2)), crowdloan_id), + pallet_crowdloan::Error::::InvalidOrigin + ); + }); +} + +#[test] +fn test_dissolve_fails_if_not_everyone_has_been_refunded() { + TestState::default() + .with_balance(U256::from(1), 100) + .with_balance(U256::from(2), 100) + .build_and_execute(|| { + // create a crowdloan + let creator: AccountOf = U256::from(1); + let deposit: BalanceOf = 50; + let min_contribution: BalanceOf = 10; + let cap: BalanceOf = 100; + let end: BlockNumberFor = 50; + + assert_ok!(Crowdloan::create( + RuntimeOrigin::signed(creator), + deposit, + min_contribution, + cap, + end, + Some(noop_call()), + None, + )); + + // run some blocks + run_to_block(10); + + // some contribution + let crowdloan_id: CrowdloanId = 0; + let contributor: AccountOf = U256::from(2); + let amount: BalanceOf = 50; + assert_ok!(Crowdloan::contribute( + RuntimeOrigin::signed(contributor), + crowdloan_id, + amount + )); + + // run some blocks + run_to_block(10); + + // try to dissolve the crowdloan + let crowdloan_id = 0; + assert_err!( + Crowdloan::dissolve(RuntimeOrigin::signed(creator), crowdloan_id), + pallet_crowdloan::Error::::NotReadyToDissolve + ); + }); +} + +#[test] +fn test_update_min_contribution_succeeds() { + TestState::default() + .with_balance(U256::from(1), 100) + .build_and_execute(|| { + // create a crowdloan + let creator: AccountOf = U256::from(1); + let deposit: BalanceOf = 50; + let min_contribution: BalanceOf = 10; + let cap: BalanceOf = 100; + let end: BlockNumberFor = 50; + + assert_ok!(Crowdloan::create( + RuntimeOrigin::signed(creator), + deposit, + min_contribution, + cap, + end, + Some(noop_call()), + None, + )); + + let crowdloan_id: CrowdloanId = 0; + let new_min_contribution: BalanceOf = 20; + + // update the min contribution + assert_ok!(Crowdloan::update_min_contribution( + RuntimeOrigin::signed(creator), + crowdloan_id, + new_min_contribution + )); + + // ensure the min contribution is updated + assert!( + pallet_crowdloan::Crowdloans::::get(crowdloan_id) + .is_some_and(|c| c.min_contribution == new_min_contribution) + ); + // ensure the event is emitted + assert_eq!( + last_event(), + pallet_crowdloan::Event::::MinContributionUpdated { + crowdloan_id, + new_min_contribution + } + .into() + ); + }); +} + +#[test] +fn test_update_min_contribution_fails_if_bad_origin() { + TestState::default().build_and_execute(|| { + let crowdloan_id: CrowdloanId = 0; + + assert_err!( + Crowdloan::update_min_contribution(RuntimeOrigin::none(), crowdloan_id, 20), + DispatchError::BadOrigin + ); + + assert_err!( + Crowdloan::update_min_contribution(RuntimeOrigin::root(), crowdloan_id, 20), + DispatchError::BadOrigin + ); + }); +} + +#[test] +fn test_update_min_contribution_fails_if_crowdloan_does_not_exist() { + TestState::default().build_and_execute(|| { + let crowdloan_id: CrowdloanId = 0; + + assert_err!( + Crowdloan::update_min_contribution( + 
RuntimeOrigin::signed(U256::from(1)), + crowdloan_id, + 20 + ), + pallet_crowdloan::Error::::InvalidCrowdloanId + ); + }); +} + +#[test] +fn test_update_min_contribution_fails_if_crowdloan_has_been_finalized() { + TestState::default() + .with_balance(U256::from(1), 100) + .with_balance(U256::from(2), 100) + .build_and_execute(|| { + let creator: AccountOf = U256::from(1); + let deposit: BalanceOf = 50; + let min_contribution: BalanceOf = 10; + let cap: BalanceOf = 100; + let end: BlockNumberFor = 50; + + assert_ok!(Crowdloan::create( + RuntimeOrigin::signed(creator), + deposit, + min_contribution, + cap, + end, + Some(noop_call()), + None, + )); + + // some contribution + let crowdloan_id: CrowdloanId = 0; + let contributor: AccountOf = U256::from(2); + let amount: BalanceOf = 50; + assert_ok!(Crowdloan::contribute( + RuntimeOrigin::signed(contributor), + crowdloan_id, + amount + )); + + // run some blocks + run_to_block(50); + + // finalize the crowdloan + let crowdloan_id: CrowdloanId = 0; + assert_ok!(Crowdloan::finalize( + RuntimeOrigin::signed(creator), + crowdloan_id + )); + + // try update the min contribution + let new_min_contribution: BalanceOf = 20; + assert_err!( + Crowdloan::update_min_contribution( + RuntimeOrigin::signed(creator), + crowdloan_id, + new_min_contribution + ), + pallet_crowdloan::Error::::AlreadyFinalized + ); + }); +} + +#[test] +fn test_update_min_contribution_fails_if_not_creator() { + TestState::default() + .with_balance(U256::from(1), 100) + .with_balance(U256::from(2), 100) + .build_and_execute(|| { + let creator: AccountOf = U256::from(1); + let deposit: BalanceOf = 50; + let min_contribution: BalanceOf = 10; + let cap: BalanceOf = 100; + let end: BlockNumberFor = 50; + + assert_ok!(Crowdloan::create( + RuntimeOrigin::signed(creator), + deposit, + min_contribution, + cap, + end, + Some(noop_call()), + None, + )); + + let crowdloan_id: CrowdloanId = 0; + let new_min_contribution: BalanceOf = 20; + + // try update the min contribution + assert_err!( + Crowdloan::update_min_contribution( + RuntimeOrigin::signed(U256::from(2)), + crowdloan_id, + new_min_contribution + ), + pallet_crowdloan::Error::::InvalidOrigin + ); + }); +} + +#[test] +fn test_update_min_contribution_fails_if_new_min_contribution_is_too_low() { + TestState::default() + .with_balance(U256::from(1), 100) + .build_and_execute(|| { + let creator: AccountOf = U256::from(1); + let deposit: BalanceOf = 50; + let min_contribution: BalanceOf = 10; + let cap: BalanceOf = 100; + let end: BlockNumberFor = 50; + + assert_ok!(Crowdloan::create( + RuntimeOrigin::signed(creator), + deposit, + min_contribution, + cap, + end, + Some(noop_call()), + None, + )); + + let crowdloan_id: CrowdloanId = 0; + let new_min_contribution: BalanceOf = 9; + + // try update the min contribution + assert_err!( + Crowdloan::update_min_contribution( + RuntimeOrigin::signed(creator), + crowdloan_id, + new_min_contribution + ), + pallet_crowdloan::Error::::MinimumContributionTooLow + ); + }); +} + +#[test] +fn test_update_end_succeeds() { + TestState::default() + .with_balance(U256::from(1), 100) + .build_and_execute(|| { + let creator: AccountOf = U256::from(1); + let deposit: BalanceOf = 50; + let min_contribution: BalanceOf = 10; + let cap: BalanceOf = 100; + let end: BlockNumberFor = 50; + + assert_ok!(Crowdloan::create( + RuntimeOrigin::signed(creator), + deposit, + min_contribution, + cap, + end, + Some(noop_call()), + None, + )); + + let crowdloan_id: CrowdloanId = 0; + let new_end: BlockNumberFor = 60; + + // update 
the end + assert_ok!(Crowdloan::update_end( + RuntimeOrigin::signed(creator), + crowdloan_id, + new_end + )); + + // ensure the end is updated + assert!( + pallet_crowdloan::Crowdloans::::get(crowdloan_id) + .is_some_and(|c| c.end == new_end) + ); + // ensure the event is emitted + assert_eq!( + last_event(), + pallet_crowdloan::Event::::EndUpdated { + crowdloan_id, + new_end + } + .into() + ); + }); +} + +#[test] +fn test_update_end_fails_if_bad_origin() { + TestState::default().build_and_execute(|| { + let crowdloan_id: CrowdloanId = 0; + + assert_err!( + Crowdloan::update_end(RuntimeOrigin::none(), crowdloan_id, 60), + DispatchError::BadOrigin + ); + + assert_err!( + Crowdloan::update_end(RuntimeOrigin::root(), crowdloan_id, 60), + DispatchError::BadOrigin + ); + }); +} + +#[test] +fn test_update_end_fails_if_crowdloan_does_not_exist() { + TestState::default().build_and_execute(|| { + let crowdloan_id: CrowdloanId = 0; + + assert_err!( + Crowdloan::update_end(RuntimeOrigin::signed(U256::from(1)), crowdloan_id, 60), + pallet_crowdloan::Error::::InvalidCrowdloanId + ); + }); +} + +#[test] +fn test_update_end_fails_if_crowdloan_has_been_finalized() { + TestState::default() + .with_balance(U256::from(1), 100) + .with_balance(U256::from(2), 100) + .build_and_execute(|| { + let creator: AccountOf = U256::from(1); + let deposit: BalanceOf = 50; + let min_contribution: BalanceOf = 10; + let cap: BalanceOf = 100; + let end: BlockNumberFor = 50; + + assert_ok!(Crowdloan::create( + RuntimeOrigin::signed(creator), + deposit, + min_contribution, + cap, + end, + Some(noop_call()), + None, + )); + + let crowdloan_id: CrowdloanId = 0; + + // some contribution + let contributor: AccountOf = U256::from(2); + let amount: BalanceOf = 50; + assert_ok!(Crowdloan::contribute( + RuntimeOrigin::signed(contributor), + crowdloan_id, + amount + )); + + // run some blocks + run_to_block(60); + + // finalize the crowdloan + assert_ok!(Crowdloan::finalize( + RuntimeOrigin::signed(creator), + crowdloan_id + )); + + // try update the end + let new_end: BlockNumberFor = 60; + assert_err!( + Crowdloan::update_end(RuntimeOrigin::signed(creator), crowdloan_id, new_end), + pallet_crowdloan::Error::::AlreadyFinalized + ); + }); +} + +#[test] +fn test_update_end_fails_if_not_creator() { + TestState::default() + .with_balance(U256::from(1), 100) + .with_balance(U256::from(2), 100) + .build_and_execute(|| { + let creator: AccountOf = U256::from(1); + let deposit: BalanceOf = 50; + let min_contribution: BalanceOf = 10; + let cap: BalanceOf = 100; + let end: BlockNumberFor = 50; + + assert_ok!(Crowdloan::create( + RuntimeOrigin::signed(creator), + deposit, + min_contribution, + cap, + end, + Some(noop_call()), + None, + )); + + let crowdloan_id: CrowdloanId = 0; + let new_end: BlockNumberFor = 60; + + // try update the end + assert_err!( + Crowdloan::update_end(RuntimeOrigin::signed(U256::from(2)), crowdloan_id, new_end), + pallet_crowdloan::Error::::InvalidOrigin + ); + }); +} + +#[test] +fn test_update_end_fails_if_new_end_is_in_past() { + TestState::default() + .with_block_number(50) + .with_balance(U256::from(1), 100) + .build_and_execute(|| { + let creator: AccountOf = U256::from(1); + let deposit: BalanceOf = 50; + let min_contribution: BalanceOf = 10; + let cap: BalanceOf = 100; + let end: BlockNumberFor = 100; + + assert_ok!(Crowdloan::create( + RuntimeOrigin::signed(creator), + deposit, + min_contribution, + cap, + end, + Some(noop_call()), + None, + )); + + let crowdloan_id: CrowdloanId = 0; + let new_end: 
BlockNumberFor = 40; + + // try update the end to a past block number + assert_err!( + Crowdloan::update_end(RuntimeOrigin::signed(creator), crowdloan_id, new_end), + pallet_crowdloan::Error::::CannotEndInPast + ); + }); +} + +#[test] +fn test_update_end_fails_if_block_duration_is_too_short() { + TestState::default() + .with_balance(U256::from(1), 100) + .build_and_execute(|| { + let creator: AccountOf = U256::from(1); + let deposit: BalanceOf = 50; + let min_contribution: BalanceOf = 10; + let cap: BalanceOf = 100; + let end: BlockNumberFor = 50; + + assert_ok!(Crowdloan::create( + RuntimeOrigin::signed(creator), + deposit, + min_contribution, + cap, + end, + Some(noop_call()), + None, + )); + + // run some blocks + run_to_block(50); + + let crowdloan_id: CrowdloanId = 0; + let new_end: BlockNumberFor = 51; + + // try update the end to a block number that is too long + assert_err!( + Crowdloan::update_end(RuntimeOrigin::signed(creator), crowdloan_id, new_end), + pallet_crowdloan::Error::::BlockDurationTooShort + ); + }); +} + +#[test] +fn test_update_end_fails_if_block_duration_is_too_long() { + TestState::default() + .with_balance(U256::from(1), 100) + .build_and_execute(|| { + let creator: AccountOf = U256::from(1); + let deposit: BalanceOf = 50; + let min_contribution: BalanceOf = 10; + let cap: BalanceOf = 100; + let end: BlockNumberFor = 50; + + assert_ok!(Crowdloan::create( + RuntimeOrigin::signed(creator), + deposit, + min_contribution, + cap, + end, + Some(noop_call()), + None, + )); + + let crowdloan_id: CrowdloanId = 0; + let new_end: BlockNumberFor = 1000; + + // try update the end to a block number that is too long + assert_err!( + Crowdloan::update_end(RuntimeOrigin::signed(creator), crowdloan_id, new_end), + pallet_crowdloan::Error::::BlockDurationTooLong + ); + }); +} + +#[test] +fn test_update_cap_succeeds() { + TestState::default() + .with_balance(U256::from(1), 100) + .build_and_execute(|| { + let creator: AccountOf = U256::from(1); + let deposit: BalanceOf = 50; + let min_contribution: BalanceOf = 10; + let cap: BalanceOf = 100; + let end: BlockNumberFor = 50; + + assert_ok!(Crowdloan::create( + RuntimeOrigin::signed(creator), + deposit, + min_contribution, + cap, + end, + Some(noop_call()), + None, + )); + + // try update the cap + let crowdloan_id: CrowdloanId = 0; + let new_cap: BalanceOf = 200; + assert_ok!(Crowdloan::update_cap( + RuntimeOrigin::signed(creator), + crowdloan_id, + new_cap + )); + + // ensure the cap is updated + assert!( + pallet_crowdloan::Crowdloans::::get(crowdloan_id) + .is_some_and(|c| c.cap == new_cap) + ); + // ensure the event is emitted + assert_eq!( + last_event(), + pallet_crowdloan::Event::::CapUpdated { + crowdloan_id, + new_cap + } + .into() + ); + }); +} + +#[test] +fn test_update_cap_fails_if_bad_origin() { + TestState::default().build_and_execute(|| { + let crowdloan_id: CrowdloanId = 0; + + assert_err!( + Crowdloan::update_cap(RuntimeOrigin::none(), crowdloan_id, 200), + DispatchError::BadOrigin + ); + + assert_err!( + Crowdloan::update_cap(RuntimeOrigin::root(), crowdloan_id, 200), + DispatchError::BadOrigin + ); + }); +} + +#[test] +fn test_update_cap_fails_if_crowdloan_does_not_exist() { + TestState::default().build_and_execute(|| { + let crowdloan_id: CrowdloanId = 0; + + assert_err!( + Crowdloan::update_cap(RuntimeOrigin::signed(U256::from(1)), crowdloan_id, 200), + pallet_crowdloan::Error::::InvalidCrowdloanId + ); + }); +} + +#[test] +fn test_update_cap_fails_if_crowdloan_has_been_finalized() { + TestState::default() + 
.with_balance(U256::from(1), 100) + .with_balance(U256::from(2), 100) + .build_and_execute(|| { + let creator: AccountOf = U256::from(1); + let deposit: BalanceOf = 50; + let min_contribution: BalanceOf = 10; + let cap: BalanceOf = 100; + let end: BlockNumberFor = 50; + + assert_ok!(Crowdloan::create( + RuntimeOrigin::signed(creator), + deposit, + min_contribution, + cap, + end, + Some(noop_call()), + None, + )); + + // some contribution + let crowdloan_id: CrowdloanId = 0; + let contributor: AccountOf = U256::from(2); + let amount: BalanceOf = 50; + assert_ok!(Crowdloan::contribute( + RuntimeOrigin::signed(contributor), + crowdloan_id, + amount + )); + + // run some blocks + run_to_block(60); + + // finalize the crowdloan + let crowdloan_id: CrowdloanId = 0; + assert_ok!(Crowdloan::finalize( + RuntimeOrigin::signed(creator), + crowdloan_id + )); + + // try update the cap + let new_cap: BalanceOf = 200; + assert_err!( + Crowdloan::update_cap(RuntimeOrigin::signed(creator), crowdloan_id, new_cap), + pallet_crowdloan::Error::::AlreadyFinalized + ); + }); +} + +#[test] +fn test_update_cap_fails_if_not_creator() { + TestState::default() + .with_balance(U256::from(1), 100) + .with_balance(U256::from(2), 100) + .build_and_execute(|| { + let creator: AccountOf = U256::from(1); + let deposit: BalanceOf = 50; + let min_contribution: BalanceOf = 10; + let cap: BalanceOf = 100; + let end: BlockNumberFor = 50; + + assert_ok!(Crowdloan::create( + RuntimeOrigin::signed(creator), + deposit, + min_contribution, + cap, + end, + Some(noop_call()), + None, + )); + + // try update the cap + let crowdloan_id: CrowdloanId = 0; + let new_cap: BalanceOf = 200; + assert_err!( + Crowdloan::update_cap(RuntimeOrigin::signed(U256::from(2)), crowdloan_id, new_cap), + pallet_crowdloan::Error::::InvalidOrigin + ); + }); +} + +#[test] +fn test_update_cap_fails_if_new_cap_is_too_low() { + TestState::default() + .with_balance(U256::from(1), 100) + .build_and_execute(|| { + let creator: AccountOf = U256::from(1); + let deposit: BalanceOf = 50; + let min_contribution: BalanceOf = 10; + let cap: BalanceOf = 100; + let end: BlockNumberFor = 50; + + assert_ok!(Crowdloan::create( + RuntimeOrigin::signed(creator), + deposit, + min_contribution, + cap, + end, + Some(noop_call()), + None, + )); + + // try update the cap + let crowdloan_id: CrowdloanId = 0; + let new_cap: BalanceOf = 49; + assert_err!( + Crowdloan::update_cap(RuntimeOrigin::signed(creator), crowdloan_id, new_cap), + pallet_crowdloan::Error::::CapTooLow + ); + }); +} diff --git a/pallets/crowdloan/src/weights.rs b/pallets/crowdloan/src/weights.rs new file mode 100644 index 0000000000..927e078d34 --- /dev/null +++ b/pallets/crowdloan/src/weights.rs @@ -0,0 +1,316 @@ + +//! Autogenerated weights for `pallet_crowdloan` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 43.0.0 +//! DATE: 2025-05-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `Ubuntu-2404-noble-amd64-base`, CPU: `AMD Ryzen 9 7950X3D 16-Core Processor` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("local")`, DB CACHE: `1024` + +// Executed Command: +// ./target/production/node-subtensor +// benchmark +// pallet +// --chain=local +// --wasm-execution=compiled +// --pallet=pallet-crowdloan +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --output=pallets/crowdloan/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs +// --allow-missing-host-functions + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for `pallet_crowdloan`. +pub trait WeightInfo { + fn create() -> Weight; + fn contribute() -> Weight; + fn withdraw() -> Weight; + fn finalize() -> Weight; + fn refund(k: u32, ) -> Weight; + fn dissolve() -> Weight; + fn update_min_contribution() -> Weight; + fn update_end() -> Weight; + fn update_cap() -> Weight; +} + +/// Weights for `pallet_crowdloan` using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(104), added: 2579, mode: `MaxEncodedLen`) + /// Storage: `Crowdloan::NextCrowdloanId` (r:1 w:1) + /// Proof: `Crowdloan::NextCrowdloanId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Crowdloan::Contributions` (r:0 w:1) + /// Proof: `Crowdloan::Contributions` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `Crowdloan::Crowdloans` (r:0 w:1) + /// Proof: `Crowdloan::Crowdloans` (`max_values`: None, `max_size`: Some(278), added: 2753, mode: `MaxEncodedLen`) + fn create() -> Weight { + // Proof Size summary in bytes: + // Measured: `156` + // Estimated: `6148` + // Minimum execution time: 42_128_000 picoseconds. + Weight::from_parts(42_930_000, 6148) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(5_u64)) + } + /// Storage: `Crowdloan::Crowdloans` (r:1 w:1) + /// Proof: `Crowdloan::Crowdloans` (`max_values`: None, `max_size`: Some(278), added: 2753, mode: `MaxEncodedLen`) + /// Storage: `Crowdloan::Contributions` (r:1 w:1) + /// Proof: `Crowdloan::Contributions` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(104), added: 2579, mode: `MaxEncodedLen`) + fn contribute() -> Weight { + // Proof Size summary in bytes: + // Measured: `476` + // Estimated: `6148` + // Minimum execution time: 43_161_000 picoseconds. 
+ Weight::from_parts(44_192_000, 6148) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) + } + /// Storage: `Crowdloan::Crowdloans` (r:1 w:1) + /// Proof: `Crowdloan::Crowdloans` (`max_values`: None, `max_size`: Some(278), added: 2753, mode: `MaxEncodedLen`) + /// Storage: `Crowdloan::Contributions` (r:1 w:1) + /// Proof: `Crowdloan::Contributions` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(104), added: 2579, mode: `MaxEncodedLen`) + fn withdraw() -> Weight { + // Proof Size summary in bytes: + // Measured: `436` + // Estimated: `6148` + // Minimum execution time: 40_235_000 picoseconds. + Weight::from_parts(40_907_000, 6148) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) + } + /// Storage: `Crowdloan::Crowdloans` (r:1 w:1) + /// Proof: `Crowdloan::Crowdloans` (`max_values`: None, `max_size`: Some(278), added: 2753, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(104), added: 2579, mode: `MaxEncodedLen`) + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Crowdloan::CurrentCrowdloanId` (r:0 w:1) + /// Proof: `Crowdloan::CurrentCrowdloanId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn finalize() -> Weight { + // Proof Size summary in bytes: + // Measured: `376` + // Estimated: `6148` + // Minimum execution time: 40_986_000 picoseconds. + Weight::from_parts(41_858_000, 6148) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) + } + /// Storage: `Crowdloan::Crowdloans` (r:1 w:1) + /// Proof: `Crowdloan::Crowdloans` (`max_values`: None, `max_size`: Some(278), added: 2753, mode: `MaxEncodedLen`) + /// Storage: `Crowdloan::Contributions` (r:51 w:49) + /// Proof: `Crowdloan::Contributions` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:50 w:50) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(104), added: 2579, mode: `MaxEncodedLen`) + /// The range of component `k` is `[3, 50]`. + fn refund(k: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `372 + k * (49 ±0)` + // Estimated: `3743 + k * (2579 ±0)` + // Minimum execution time: 78_938_000 picoseconds. 
+ Weight::from_parts(2_729_302, 3743) + // Standard Error: 351_422 + .saturating_add(Weight::from_parts(31_033_274, 0).saturating_mul(k.into())) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(k.into()))) + .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(k.into()))) + .saturating_add(Weight::from_parts(0, 2579).saturating_mul(k.into())) + } + /// Storage: `Crowdloan::Crowdloans` (r:1 w:1) + /// Proof: `Crowdloan::Crowdloans` (`max_values`: None, `max_size`: Some(278), added: 2753, mode: `MaxEncodedLen`) + /// Storage: `Crowdloan::Contributions` (r:1 w:0) + /// Proof: `Crowdloan::Contributions` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(104), added: 2579, mode: `MaxEncodedLen`) + fn dissolve() -> Weight { + // Proof Size summary in bytes: + // Measured: `450` + // Estimated: `6148` + // Minimum execution time: 43_341_000 picoseconds. + Weight::from_parts(44_402_000, 6148) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) + } + /// Storage: `Crowdloan::Crowdloans` (r:1 w:1) + /// Proof: `Crowdloan::Crowdloans` (`max_values`: None, `max_size`: Some(278), added: 2753, mode: `MaxEncodedLen`) + fn update_min_contribution() -> Weight { + // Proof Size summary in bytes: + // Measured: `224` + // Estimated: `3743` + // Minimum execution time: 8_876_000 picoseconds. + Weight::from_parts(9_137_000, 3743) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `Crowdloan::Crowdloans` (r:1 w:1) + /// Proof: `Crowdloan::Crowdloans` (`max_values`: None, `max_size`: Some(278), added: 2753, mode: `MaxEncodedLen`) + fn update_end() -> Weight { + // Proof Size summary in bytes: + // Measured: `224` + // Estimated: `3743` + // Minimum execution time: 9_117_000 picoseconds. + Weight::from_parts(9_438_000, 3743) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `Crowdloan::Crowdloans` (r:1 w:1) + /// Proof: `Crowdloan::Crowdloans` (`max_values`: None, `max_size`: Some(278), added: 2753, mode: `MaxEncodedLen`) + fn update_cap() -> Weight { + // Proof Size summary in bytes: + // Measured: `224` + // Estimated: `3743` + // Minimum execution time: 8_766_000 picoseconds. + Weight::from_parts(9_087_000, 3743) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } +} + +// For backwards compatibility and tests. +impl WeightInfo for () { + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(104), added: 2579, mode: `MaxEncodedLen`) + /// Storage: `Crowdloan::NextCrowdloanId` (r:1 w:1) + /// Proof: `Crowdloan::NextCrowdloanId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Crowdloan::Contributions` (r:0 w:1) + /// Proof: `Crowdloan::Contributions` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `Crowdloan::Crowdloans` (r:0 w:1) + /// Proof: `Crowdloan::Crowdloans` (`max_values`: None, `max_size`: Some(278), added: 2753, mode: `MaxEncodedLen`) + fn create() -> Weight { + // Proof Size summary in bytes: + // Measured: `156` + // Estimated: `6148` + // Minimum execution time: 42_128_000 picoseconds. 
+ Weight::from_parts(42_930_000, 6148) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(5_u64)) + } + /// Storage: `Crowdloan::Crowdloans` (r:1 w:1) + /// Proof: `Crowdloan::Crowdloans` (`max_values`: None, `max_size`: Some(278), added: 2753, mode: `MaxEncodedLen`) + /// Storage: `Crowdloan::Contributions` (r:1 w:1) + /// Proof: `Crowdloan::Contributions` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(104), added: 2579, mode: `MaxEncodedLen`) + fn contribute() -> Weight { + // Proof Size summary in bytes: + // Measured: `476` + // Estimated: `6148` + // Minimum execution time: 43_161_000 picoseconds. + Weight::from_parts(44_192_000, 6148) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) + } + /// Storage: `Crowdloan::Crowdloans` (r:1 w:1) + /// Proof: `Crowdloan::Crowdloans` (`max_values`: None, `max_size`: Some(278), added: 2753, mode: `MaxEncodedLen`) + /// Storage: `Crowdloan::Contributions` (r:1 w:1) + /// Proof: `Crowdloan::Contributions` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(104), added: 2579, mode: `MaxEncodedLen`) + fn withdraw() -> Weight { + // Proof Size summary in bytes: + // Measured: `436` + // Estimated: `6148` + // Minimum execution time: 40_235_000 picoseconds. + Weight::from_parts(40_907_000, 6148) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) + } + /// Storage: `Crowdloan::Crowdloans` (r:1 w:1) + /// Proof: `Crowdloan::Crowdloans` (`max_values`: None, `max_size`: Some(278), added: 2753, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(104), added: 2579, mode: `MaxEncodedLen`) + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Crowdloan::CurrentCrowdloanId` (r:0 w:1) + /// Proof: `Crowdloan::CurrentCrowdloanId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn finalize() -> Weight { + // Proof Size summary in bytes: + // Measured: `376` + // Estimated: `6148` + // Minimum execution time: 40_986_000 picoseconds. + Weight::from_parts(41_858_000, 6148) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) + } + /// Storage: `Crowdloan::Crowdloans` (r:1 w:1) + /// Proof: `Crowdloan::Crowdloans` (`max_values`: None, `max_size`: Some(278), added: 2753, mode: `MaxEncodedLen`) + /// Storage: `Crowdloan::Contributions` (r:51 w:49) + /// Proof: `Crowdloan::Contributions` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:50 w:50) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(104), added: 2579, mode: `MaxEncodedLen`) + /// The range of component `k` is `[3, 50]`. + fn refund(k: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `372 + k * (49 ±0)` + // Estimated: `3743 + k * (2579 ±0)` + // Minimum execution time: 78_938_000 picoseconds. 
+ Weight::from_parts(2_729_302, 3743) + // Standard Error: 351_422 + .saturating_add(Weight::from_parts(31_033_274, 0).saturating_mul(k.into())) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(k.into()))) + .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(k.into()))) + .saturating_add(Weight::from_parts(0, 2579).saturating_mul(k.into())) + } + /// Storage: `Crowdloan::Crowdloans` (r:1 w:1) + /// Proof: `Crowdloan::Crowdloans` (`max_values`: None, `max_size`: Some(278), added: 2753, mode: `MaxEncodedLen`) + /// Storage: `Crowdloan::Contributions` (r:1 w:0) + /// Proof: `Crowdloan::Contributions` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(104), added: 2579, mode: `MaxEncodedLen`) + fn dissolve() -> Weight { + // Proof Size summary in bytes: + // Measured: `450` + // Estimated: `6148` + // Minimum execution time: 43_341_000 picoseconds. + Weight::from_parts(44_402_000, 6148) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) + } + /// Storage: `Crowdloan::Crowdloans` (r:1 w:1) + /// Proof: `Crowdloan::Crowdloans` (`max_values`: None, `max_size`: Some(278), added: 2753, mode: `MaxEncodedLen`) + fn update_min_contribution() -> Weight { + // Proof Size summary in bytes: + // Measured: `224` + // Estimated: `3743` + // Minimum execution time: 8_876_000 picoseconds. + Weight::from_parts(9_137_000, 3743) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `Crowdloan::Crowdloans` (r:1 w:1) + /// Proof: `Crowdloan::Crowdloans` (`max_values`: None, `max_size`: Some(278), added: 2753, mode: `MaxEncodedLen`) + fn update_end() -> Weight { + // Proof Size summary in bytes: + // Measured: `224` + // Estimated: `3743` + // Minimum execution time: 9_117_000 picoseconds. + Weight::from_parts(9_438_000, 3743) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `Crowdloan::Crowdloans` (r:1 w:1) + /// Proof: `Crowdloan::Crowdloans` (`max_values`: None, `max_size`: Some(278), added: 2753, mode: `MaxEncodedLen`) + fn update_cap() -> Weight { + // Proof Size summary in bytes: + // Measured: `224` + // Estimated: `3743` + // Minimum execution time: 8_766_000 picoseconds. + Weight::from_parts(9_087_000, 3743) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } +} \ No newline at end of file diff --git a/pallets/drand/src/lib.rs b/pallets/drand/src/lib.rs index 5695f1456d..08d728daa3 100644 --- a/pallets/drand/src/lib.rs +++ b/pallets/drand/src/lib.rs @@ -73,8 +73,6 @@ mod tests; #[cfg(feature = "runtime-benchmarks")] mod benchmarking; -pub mod weights; -pub use weights::*; /// the main drand api endpoint const ENDPOINTS: [&str; 5] = [ @@ -162,8 +160,6 @@ pub mod pallet { type AuthorityId: AppCrypto; /// The overarching runtime event type. type RuntimeEvent: From> + IsType<::RuntimeEvent>; - /// A type representing the weights required by the dispatchables of this pallet. - type WeightInfo: WeightInfo; /// something that knows how to verify beacon pulses type Verifier: Verifier; /// A configuration for base priority of unsigned transactions. 
@@ -313,7 +309,9 @@ pub mod pallet { impl Pallet { /// Verify and write a pulse from the beacon into the runtime #[pallet::call_index(0)] - #[pallet::weight(T::WeightInfo::write_pulse(pulses_payload.pulses.len() as u32))] + #[pallet::weight(Weight::from_parts(5_708_000_000, 0) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)))] pub fn write_pulse( origin: OriginFor, pulses_payload: PulsesPayload>, @@ -367,7 +365,9 @@ pub mod pallet { /// * `origin`: the root user /// * `config`: the beacon configuration #[pallet::call_index(1)] - #[pallet::weight(T::WeightInfo::set_beacon_config())] + #[pallet::weight(Weight::from_parts(9_878_000, 0) + .saturating_add(T::DbWeight::get().reads(0_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)))] pub fn set_beacon_config( origin: OriginFor, config_payload: BeaconConfigurationPayload>, diff --git a/pallets/drand/src/mock.rs b/pallets/drand/src/mock.rs index ba9e16e6f4..6ef1f2bf8a 100644 --- a/pallets/drand/src/mock.rs +++ b/pallets/drand/src/mock.rs @@ -88,7 +88,6 @@ parameter_types! { impl pallet_drand_bridge::Config for Test { type AuthorityId = crypto::TestAuthId; type RuntimeEvent = RuntimeEvent; - type WeightInfo = pallet_drand_bridge::weights::SubstrateWeight; type Verifier = QuicknetVerifier; type UnsignedPriority = UnsignedPriority; type HttpFetchTimeout = ConstU64<1_000>; diff --git a/pallets/drand/src/weights.rs b/pallets/drand/src/weights.rs deleted file mode 100644 index 6ab6e2905d..0000000000 --- a/pallets/drand/src/weights.rs +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright 2024 by Ideal Labs, LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -//! Autogenerated weights for pallet_template -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `Alexs-MacBook-Pro-2.local`, CPU: `` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 - -// Executed Command: -// ../../target/release/node-template -// benchmark -// pallet -// --chain -// dev -// --pallet -// pallet_template -// --extrinsic -// * -// --steps=50 -// --repeat=20 -// --wasm-execution=compiled -// --output -// pallets/template/src/weights.rs -// --template -// ../../.maintain/frame-weight-template.hbs - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use core::marker::PhantomData; - -/// Weight functions needed for pallet_template. -pub trait WeightInfo { - fn write_pulse(pulses_count: u32) -> Weight; - fn set_beacon_config() -> Weight; -} - -/// Weights for pallet_template using the Substrate node and recommended hardware. 
-pub struct SubstrateWeight(PhantomData); -impl WeightInfo for SubstrateWeight { - /// Storage: `Drand::BeaconConfig` (r:0 w:1) - /// Proof: `Drand::BeaconConfig` (`max_values`: Some(1), `max_size`: Some(238), added: 733, mode: `MaxEncodedLen`) - /// Storage: `Drand::NextUnsignedAt` (r:0 w:1) - /// Proof: `Drand::NextUnsignedAt` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn set_beacon_config() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 8_000_000 picoseconds. - Weight::from_parts(8_000_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Drand::BeaconConfig` (r:1 w:0) - /// Proof: `Drand::BeaconConfig` (`max_values`: Some(1), `max_size`: Some(238), added: 733, mode: `MaxEncodedLen`) - fn write_pulse(pulses_count: u32) -> Weight { - // Adjust the weight calculation based on pulses_count - Weight::from_parts(6_000_000 * pulses_count as u64, 0) - .saturating_add(Weight::from_parts(0, 1723 * pulses_count as u64)) - .saturating_add(T::DbWeight::get().reads_writes(1, pulses_count as u64)) - } -} diff --git a/pallets/subtensor/src/benchmarks.rs b/pallets/subtensor/src/benchmarks.rs index 8d4457b0c9..a7cd03e652 100644 --- a/pallets/subtensor/src/benchmarks.rs +++ b/pallets/subtensor/src/benchmarks.rs @@ -4,689 +4,1485 @@ use crate::Pallet as Subtensor; use crate::*; -use frame_benchmarking::{account, benchmarks, whitelisted_caller}; +use codec::Compact; +use frame_benchmarking::v2::*; use frame_support::assert_ok; use frame_system::{RawOrigin, pallet_prelude::BlockNumberFor}; pub use pallet::*; use sp_core::H256; -use sp_runtime::traits::{BlakeTwo256, Hash}; +use sp_runtime::{ + BoundedVec, + traits::{BlakeTwo256, Hash}, +}; use sp_std::vec; -benchmarks! { - // Add individual benchmarks here - benchmark_register { - let netuid: u16 = 1; //11 is the benchmark network. 
- let tempo: u16 = 1; - let modality: u16 = 0; - let hotkey: T::AccountId = account("Alice", 0, 1); - let coldkey: T::AccountId = account("Test", 0, 2); +#[frame_benchmarking::v2::benchmarks] +mod pallet_benchmarks { + use super::*; + + #[benchmark] + fn register() { + let netuid: u16 = 1; + let tempo: u16 = 1; + let hotkey: T::AccountId = account("Alice", 0, 1); + let coldkey: T::AccountId = account("Test", 0, 2); + + Subtensor::::init_new_network(netuid, tempo); + Subtensor::::set_network_registration_allowed(netuid, true); + Subtensor::::set_network_pow_registration_allowed(netuid, true); + + let block_number: u64 = Subtensor::::get_current_block_as_u64(); + let (nonce, work): (u64, Vec) = + Subtensor::::create_work_for_block_number(netuid, block_number, 3, &hotkey); + + #[extrinsic_call] + _( + RawOrigin::Signed(hotkey.clone()), + netuid, + block_number, + nonce, + work, + hotkey.clone(), + coldkey.clone(), + ); + } + + #[benchmark] + fn set_weights() { + let netuid: u16 = 1; + let version_key: u64 = 1; + let tempo: u16 = 1; + + Subtensor::::init_new_network(netuid, tempo); + Subtensor::::set_max_allowed_uids(netuid, 4096); + SubtokenEnabled::::insert(netuid, true); + Subtensor::::set_network_registration_allowed(netuid, true); + Subtensor::::set_max_registrations_per_block(netuid, 4096); + Subtensor::::set_target_registrations_per_interval(netuid, 4096); + + let mut seed: u32 = 1; + let mut dests = Vec::new(); + let mut weights = Vec::new(); + let signer: T::AccountId = account("Alice", 0, seed); + + for _ in 0..4096 { + let hotkey: T::AccountId = account("Alice", 0, seed); + let coldkey: T::AccountId = account("Test", 0, seed); + seed += 1; + + Subtensor::::set_burn(netuid, 1); + let amount_to_be_staked: u64 = 1_000_000; + Subtensor::::add_balance_to_coldkey_account(&coldkey, amount_to_be_staked); + + assert_ok!(Subtensor::::do_burned_registration( + RawOrigin::Signed(coldkey.clone()).into(), + netuid, + hotkey.clone() + )); + let uid = Subtensor::::get_uid_for_net_and_hotkey(netuid, &hotkey).unwrap(); + Subtensor::::set_validator_permit_for_uid(netuid, uid, true); + + dests.push(uid); + weights.push(uid); + } + + #[extrinsic_call] + _( + RawOrigin::Signed(signer.clone()), + netuid, + dests, + weights, + version_key, + ); + } + + #[benchmark] + fn become_delegate() { + let netuid: u16 = 1; + let tempo: u16 = 1; - Subtensor::::init_new_network(netuid, tempo); - Subtensor::::set_network_registration_allowed(netuid, true); - Subtensor::::set_network_pow_registration_allowed(netuid, true); + Subtensor::::init_new_network(netuid, tempo); + SubtokenEnabled::::insert(netuid, true); + Subtensor::::set_burn(netuid, 1); + Subtensor::::set_max_allowed_uids(netuid, 4096); + Subtensor::::set_network_registration_allowed(netuid, true); + + let seed: u32 = 1; + let coldkey: T::AccountId = account("Test", 0, seed); + let hotkey: T::AccountId = account("Alice", 0, seed); + let amount_to_be_staked: u64 = 1_000_000_000; + + Subtensor::::add_balance_to_coldkey_account(&coldkey, amount_to_be_staked); + assert_ok!(Subtensor::::do_burned_registration( + RawOrigin::Signed(coldkey.clone()).into(), + netuid, + hotkey.clone() + )); + + #[extrinsic_call] + _(RawOrigin::Signed(coldkey.clone()), hotkey.clone()); + } - let block_number: u64 = Subtensor::::get_current_block_as_u64(); - let (nonce, work): (u64, Vec) = Subtensor::::create_work_for_block_number( - netuid, - block_number, - 3, - &hotkey, - ); + #[benchmark] + fn add_stake() { + let netuid: u16 = 1; + let tempo: u16 = 1; + 
Subtensor::::init_new_network(netuid, tempo); + SubtokenEnabled::::insert(netuid, true); + Subtensor::::set_burn(netuid, 1); + Subtensor::::set_network_registration_allowed(netuid, true); + Subtensor::::set_max_allowed_uids(netuid, 4096); + + let seed: u32 = 1; + let coldkey: T::AccountId = account("Test", 0, seed); + let hotkey: T::AccountId = account("Alice", 0, seed); + let total_stake: u64 = 1_000_000_000; + let amount: u64 = 60_000_000; + + Subtensor::::add_balance_to_coldkey_account(&coldkey, total_stake); + assert_ok!(Subtensor::::do_burned_registration( + RawOrigin::Signed(coldkey.clone()).into(), + netuid, + hotkey.clone() + )); + + #[extrinsic_call] + _( + RawOrigin::Signed(coldkey.clone()), + hotkey.clone(), + netuid, + amount, + ); + } - }: register( RawOrigin::Signed( hotkey.clone() ), netuid, block_number, nonce, work, hotkey.clone(), coldkey.clone() ) + // #[benchmark] + // fn add_stake_aggregate() { + // let netuid: u16 = 1; + // let tempo: u16 = 1; + // + // Subtensor::::init_new_network(netuid, tempo); + // SubtokenEnabled::::insert(netuid, true); + // Subtensor::::set_burn(netuid, 1); + // Subtensor::::set_network_registration_allowed(netuid, true); + // Subtensor::::set_max_allowed_uids(netuid, 4096); + // + // let seed: u32 = 1; + // let coldkey: T::AccountId = account("Test", 0, seed); + // let hotkey: T::AccountId = account("Alice", 0, seed); + // let total_stake: u64 = 1_000_000_000; + // let amount: u64 = 600_000; + // + // Subtensor::::add_balance_to_coldkey_account(&coldkey, total_stake); + // assert_ok!(Subtensor::::do_burned_registration( + // RawOrigin::Signed(coldkey.clone()).into(), + // netuid, + // hotkey.clone() + // )); + // + // #[extrinsic_call] + // _( + // RawOrigin::Signed(coldkey.clone()), + // hotkey.clone(), + // netuid, + // amount, + // ); + // } + // + // #[benchmark] + // fn remove_stake_limit_aggregate() { + // let netuid: u16 = 1; + // + // Subtensor::::increase_total_stake(1_000_000_000_000); + // Subtensor::::init_new_network(netuid, 1); + // Subtensor::::set_network_registration_allowed(netuid, true); + // SubtokenEnabled::::insert(netuid, true); + // Subtensor::::set_max_allowed_uids(netuid, 4096); + // + // let seed: u32 = 1; + // let coldkey: T::AccountId = account("Test", 0, seed); + // let hotkey: T::AccountId = account("Alice", 0, seed); + // Subtensor::::set_burn(netuid, 1); + // + // let limit: u64 = 1_000_000_000; + // let tao_reserve: u64 = 150_000_000_000; + // let alpha_in: u64 = 100_000_000_000; + // SubnetTAO::::insert(netuid, tao_reserve); + // SubnetAlphaIn::::insert(netuid, alpha_in); + // + // let wallet_bal: u64 = 1_000_000; + // Subtensor::::add_balance_to_coldkey_account(&coldkey, wallet_bal); + // + // assert_ok!(Subtensor::::do_burned_registration( + // RawOrigin::Signed(coldkey.clone()).into(), + // netuid, + // hotkey.clone() + // )); + // + // Subtensor::::add_balance_to_coldkey_account(&coldkey, 100_000_000_000u64); + // assert_ok!(Subtensor::::add_stake( + // RawOrigin::Signed(coldkey.clone()).into(), + // hotkey.clone(), + // netuid, + // 100_000_000_000u64 + // )); + // + // let amount_unstaked: u64 = 30_000_000_000; + // + // #[extrinsic_call] + // _( + // RawOrigin::Signed(coldkey.clone()), + // hotkey.clone(), + // netuid, + // amount_unstaked, + // limit, + // false, + // ); + // } + // + // #[benchmark] + // fn remove_stake_aggregate() { + // let netuid: u16 = 1; + // + // Subtensor::::increase_total_stake(1_000_000_000_000); + // Subtensor::::init_new_network(netuid, 1); + // 
Subtensor::::set_network_registration_allowed(netuid, true); + // SubtokenEnabled::::insert(netuid, true); + // Subtensor::::set_max_allowed_uids(netuid, 4096); + // + // let seed: u32 = 1; + // let coldkey: T::AccountId = account("Test", 0, seed); + // let hotkey: T::AccountId = account("Alice", 0, seed); + // Subtensor::::set_burn(netuid, 1); + // + // let wallet_bal: u64 = 1_000_000; + // Subtensor::::add_balance_to_coldkey_account(&coldkey, wallet_bal); + // + // assert_ok!(Subtensor::::do_burned_registration( + // RawOrigin::Signed(coldkey.clone()).into(), + // netuid, + // hotkey.clone() + // )); + // + // Subtensor::::add_balance_to_coldkey_account(&coldkey, 100_000_000_000u64); + // assert_ok!(Subtensor::::add_stake( + // RawOrigin::Signed(coldkey.clone()).into(), + // hotkey.clone(), + // netuid, + // 100_000_000_000u64 + // )); + // + // let amount_unstaked: u64 = 600_000; + // + // #[extrinsic_call] + // _( + // RawOrigin::Signed(coldkey.clone()), + // hotkey.clone(), + // netuid, + // amount_unstaked, + // ); + // } + // + // #[benchmark] + // fn add_stake_limit_aggregate() { + // let netuid: u16 = 1; + // + // Subtensor::::init_new_network(netuid, 1); + // SubtokenEnabled::::insert(netuid, true); + // Subtensor::::set_burn(netuid, 1); + // Subtensor::::set_network_registration_allowed(netuid, true); + // Subtensor::::set_max_allowed_uids(netuid, 4096); + // + // let seed: u32 = 1; + // let coldkey: T::AccountId = account("Test", 0, seed); + // let hotkey: T::AccountId = account("Alice", 0, seed); + // + // let amount: u64 = 900_000_000_000; + // let limit: u64 = 6_000_000_000; + // let stake_amt: u64 = 440_000_000_000; + // Subtensor::::add_balance_to_coldkey_account(&coldkey, amount); + // + // let tao_reserve: u64 = 150_000_000_000; + // let alpha_in: u64 = 100_000_000_000; + // SubnetTAO::::insert(netuid, tao_reserve); + // SubnetAlphaIn::::insert(netuid, alpha_in); + // + // assert_ok!(Subtensor::::do_burned_registration( + // RawOrigin::Signed(coldkey.clone()).into(), + // netuid, + // hotkey.clone() + // )); + // + // #[extrinsic_call] + // _( + // RawOrigin::Signed(coldkey.clone()), + // hotkey.clone(), + // netuid, + // stake_amt, + // limit, + // false, + // ); + // } + + #[benchmark] + fn serve_axon() { + let netuid: u16 = 1; + let caller: T::AccountId = whitelisted_caller(); + let version: u32 = 2; + let ip: u128 = 1676056785; + let port: u16 = 128; + let ip_type: u8 = 4; + let protocol: u8 = 0; + let placeholder1: u8 = 0; + let placeholder2: u8 = 0; + + Subtensor::::init_new_network(netuid, 1); + SubtokenEnabled::::insert(netuid, true); + Subtensor::::set_max_allowed_uids(netuid, 4096); + + let reg_fee: u64 = Subtensor::::get_burn_as_u64(netuid); + let deposit = reg_fee.saturating_mul(2); + Subtensor::::add_balance_to_coldkey_account(&caller, deposit); + + assert_ok!(Subtensor::::do_burned_registration( + RawOrigin::Signed(caller.clone()).into(), + netuid, + caller.clone() + )); + Subtensor::::set_serving_rate_limit(netuid, 0); + + #[extrinsic_call] + _( + RawOrigin::Signed(caller.clone()), + netuid, + version, + ip, + port, + ip_type, + protocol, + placeholder1, + placeholder2, + ); + } - benchmark_set_weights { + #[benchmark] + fn serve_prometheus() { + let netuid: u16 = 1; + let caller: T::AccountId = whitelisted_caller(); + let version: u32 = 2; + let ip: u128 = 1676056785; + let port: u16 = 128; + let ip_type: u8 = 4; + + Subtensor::::init_new_network(netuid, 1); + SubtokenEnabled::::insert(netuid, true); + Subtensor::::set_max_allowed_uids(netuid, 4096); + + 
let reg_fee: u64 = Subtensor::::get_burn_as_u64(netuid); + let deposit = reg_fee.saturating_mul(2); + Subtensor::::add_balance_to_coldkey_account(&caller, deposit); + + assert_ok!(Subtensor::::do_burned_registration( + RawOrigin::Signed(caller.clone()).into(), + netuid, + caller.clone() + )); + Subtensor::::set_serving_rate_limit(netuid, 0); + + #[extrinsic_call] + _( + RawOrigin::Signed(caller.clone()), + netuid, + version, + ip, + port, + ip_type, + ); + } - // This is a whitelisted caller who can make transaction without weights. - let netuid: u16 = 1; - let version_key: u64 = 1; - let tempo: u16 = 1; - let modality: u16 = 0; + #[benchmark] + fn burned_register() { + let netuid: u16 = 1; + let seed: u32 = 1; + let hotkey: T::AccountId = account("Alice", 0, seed); + let coldkey: T::AccountId = account("Test", 0, seed); - Subtensor::::init_new_network(netuid, tempo); - Subtensor::::set_max_allowed_uids( netuid, 4096 ); + Subtensor::::init_new_network(netuid, 1); + SubtokenEnabled::::insert(netuid, true); + Subtensor::::set_burn(netuid, 1); - Subtensor::::set_network_registration_allowed( netuid, true ); - Subtensor::::set_max_registrations_per_block( netuid, 4096 ); - Subtensor::::set_target_registrations_per_interval( netuid, 4096 ); + let amount: u64 = 1_000_000; + Subtensor::::add_balance_to_coldkey_account(&coldkey, amount); - let mut seed : u32 = 1; - let mut dests: Vec = vec![]; - let mut weights: Vec = vec![]; - let signer : T::AccountId = account("Alice", 0, seed); + #[extrinsic_call] + _(RawOrigin::Signed(coldkey.clone()), netuid, hotkey.clone()); + } - for id in 0..4096_u16 { - let hotkey: T::AccountId = account("Alice", 0, seed); - let coldkey: T::AccountId = account("Test", 0, seed); - seed += 1; + #[benchmark] + fn root_register() { + let netuid: u16 = 1; + let seed: u32 = 1; + let coldkey: T::AccountId = account("Test", 0, seed); + let hotkey: T::AccountId = account("Alice", 0, seed); + Subtensor::::init_new_network(netuid, 1); + SubtokenEnabled::::insert(netuid, true); Subtensor::::set_burn(netuid, 1); - let amount_to_be_staked = 1000000u32.into(); - Subtensor::::add_balance_to_coldkey_account(&coldkey.clone(), amount_to_be_staked); + Subtensor::::set_network_registration_allowed(netuid, true); + Subtensor::::set_max_allowed_uids(netuid, 4096); + assert_eq!(Subtensor::::get_max_allowed_uids(netuid), 4096); - Subtensor::::do_burned_registration(RawOrigin::Signed(coldkey.clone()).into(), netuid, hotkey.clone())?; + let amount: u64 = 100_000_000_000_000; + Subtensor::::add_balance_to_coldkey_account(&coldkey, amount); - let uid = Subtensor::::get_uid_for_net_and_hotkey(netuid, &hotkey.clone()).unwrap(); - Subtensor::::set_validator_permit_for_uid(netuid, uid, true); - dests.push(id); - weights.push(id); + assert_ok!(Subtensor::::do_burned_registration( + RawOrigin::Signed(coldkey.clone()).into(), + netuid, + hotkey.clone() + )); + + #[extrinsic_call] + _(RawOrigin::Signed(coldkey.clone()), hotkey.clone()); } - }: set_weights(RawOrigin::Signed( signer.clone() ), netuid, dests, weights, version_key) + #[benchmark] + fn register_network() { + let seed: u32 = 1; + let coldkey: T::AccountId = account("Test", 0, seed); + let hotkey: T::AccountId = account("TestHotkey", 0, seed); + Subtensor::::set_network_rate_limit(1); + let amount: u64 = 100_000_000_000_000u64.saturating_mul(2); + Subtensor::::add_balance_to_coldkey_account(&coldkey, amount); - benchmark_become_delegate { - // This is a whitelisted caller who can make transaction without weights. 
- let caller: T::AccountId = whitelisted_caller::>(); - let caller_origin = ::RuntimeOrigin::from(RawOrigin::Signed(caller.clone())); - let netuid: u16 = 1; - let version_key: u64 = 1; - let tempo: u16 = 1; - let modality: u16 = 0; - let seed : u32 = 1; + #[extrinsic_call] + _(RawOrigin::Signed(coldkey.clone()), hotkey.clone()); + } - Subtensor::::init_new_network(netuid, tempo); - Subtensor::::set_burn(netuid, 1); - Subtensor::::set_max_allowed_uids( netuid, 4096 ); + #[benchmark] + fn commit_weights() { + let tempo: u16 = 1; + let netuid: u16 = 1; + let version_key: u64 = 0; + let uids: Vec = vec![0]; + let weight_values: Vec = vec![10]; + let hotkey: T::AccountId = account("hot", 0, 1); + let coldkey: T::AccountId = account("cold", 0, 2); + let start_nonce: u64 = 300_000; + + let commit_hash: H256 = BlakeTwo256::hash_of(&( + hotkey.clone(), + netuid, + uids.clone(), + weight_values.clone(), + version_key, + )); + + Subtensor::::init_new_network(netuid, tempo); + Subtensor::::set_network_pow_registration_allowed(netuid, true); + + let block_number: u64 = Subtensor::::get_current_block_as_u64(); + let (nonce, work) = Subtensor::::create_work_for_block_number( + netuid, + block_number, + start_nonce, + &hotkey, + ); + assert_ok!(Subtensor::::register( + RawOrigin::Signed(hotkey.clone()).into(), + netuid, + block_number, + nonce, + work, + hotkey.clone(), + coldkey.clone() + )); + Subtensor::::set_validator_permit_for_uid(netuid, 0, true); + Subtensor::::set_commit_reveal_weights_enabled(netuid, true); + + #[extrinsic_call] + _(RawOrigin::Signed(hotkey.clone()), netuid, commit_hash); + } - Subtensor::::set_network_registration_allowed( netuid, true); - assert_eq!(Subtensor::::get_max_allowed_uids(netuid), 4096); + #[benchmark] + fn reveal_weights() { + let tempo: u16 = 0; + let netuid: u16 = 1; + let version_key: u64 = 0; + let uids: Vec = vec![0]; + let weight_values: Vec = vec![10]; + let salt: Vec = vec![8]; + let hotkey: T::AccountId = account("hot", 0, 1); + let coldkey: T::AccountId = account("cold", 1, 2); + + Subtensor::::init_new_network(netuid, tempo); + Subtensor::::set_network_registration_allowed(netuid, true); + Subtensor::::set_network_pow_registration_allowed(netuid, true); + + let block_number: u64 = Subtensor::::get_current_block_as_u64(); + let (nonce, work) = + Subtensor::::create_work_for_block_number(netuid, block_number, 3, &hotkey); + + let _ = Subtensor::::register( + RawOrigin::Signed(hotkey.clone()).into(), + netuid, + block_number, + nonce, + work.clone(), + hotkey.clone(), + coldkey.clone(), + ); + + Subtensor::::set_validator_permit_for_uid(netuid, 0, true); + Subtensor::::set_commit_reveal_weights_enabled(netuid, true); + + let commit_hash: H256 = BlakeTwo256::hash_of(&( + hotkey.clone(), + netuid, + uids.clone(), + weight_values.clone(), + salt.clone(), + version_key, + )); + let _ = Subtensor::::commit_weights( + RawOrigin::Signed(hotkey.clone()).into(), + netuid, + commit_hash, + ); + + #[extrinsic_call] + _( + RawOrigin::Signed(hotkey.clone()), + netuid, + uids.clone(), + weight_values.clone(), + salt.clone(), + version_key, + ); + } - let coldkey: T::AccountId = account("Test", 0, seed); - let hotkey: T::AccountId = account("Alice", 0, seed); + #[benchmark] + fn schedule_swap_coldkey() { + let old_coldkey: T::AccountId = account("old_cold", 0, 1); + let new_coldkey: T::AccountId = account("new_cold", 1, 2); + let amount: u64 = 100_000_000_000_000; + Subtensor::::add_balance_to_coldkey_account(&old_coldkey, amount); - let amount_to_be_staked = 
1000000000u32.into(); - Subtensor::::add_balance_to_coldkey_account(&coldkey.clone(), amount_to_be_staked); + #[extrinsic_call] + _(RawOrigin::Signed(old_coldkey.clone()), new_coldkey.clone()); + } - assert_ok!(Subtensor::::do_burned_registration(RawOrigin::Signed(coldkey.clone()).into(), netuid, hotkey.clone())); - }: become_delegate(RawOrigin::Signed( coldkey.clone() ), hotkey.clone()) + #[benchmark] + fn sudo_set_tx_childkey_take_rate_limit() { + let new_rate_limit: u64 = 100; - benchmark_add_stake { - let caller: T::AccountId = whitelisted_caller::>(); - let caller_origin = ::RuntimeOrigin::from(RawOrigin::Signed(caller.clone())); - let netuid: u16 = 1; - let version_key: u64 = 1; - let tempo: u16 = 1; - let modality: u16 = 0; - let seed : u32 = 1; + #[extrinsic_call] + _(RawOrigin::Root, new_rate_limit); + } - Subtensor::::init_new_network(netuid, tempo); + #[benchmark] + fn set_childkey_take() { + let netuid: u16 = 1; + let coldkey: T::AccountId = account("Cold", 0, 1); + let hotkey: T::AccountId = account("Hot", 0, 1); + let take: u16 = 1000; + + Subtensor::::init_new_network(netuid, 1); + Subtensor::::set_network_registration_allowed(netuid, true); + SubtokenEnabled::::insert(netuid, true); + + let reg_fee: u64 = Subtensor::::get_burn_as_u64(netuid); + let deposit = reg_fee.saturating_mul(2); + Subtensor::::add_balance_to_coldkey_account(&coldkey, deposit); + + assert_ok!(Subtensor::::do_burned_registration( + RawOrigin::Signed(coldkey.clone()).into(), + netuid, + hotkey.clone() + )); + + #[extrinsic_call] + _( + RawOrigin::Signed(coldkey.clone()), + hotkey.clone(), + netuid, + take, + ); + } - Subtensor::::set_burn(netuid, 1); - Subtensor::::set_network_registration_allowed( netuid, true ); + #[benchmark] + fn swap_coldkey() { + let old_coldkey: T::AccountId = account("old_coldkey", 0, 0); + let new_coldkey: T::AccountId = account("new_coldkey", 0, 0); + let hotkey1: T::AccountId = account("hotkey1", 0, 0); + let netuid: u16 = 1; + let swap_cost: u64 = Subtensor::::get_key_swap_cost(); + let free_balance_old: u64 = 12345 + swap_cost; + + Subtensor::::init_new_network(netuid, 1); + Subtensor::::set_network_registration_allowed(netuid, true); + Subtensor::::set_network_pow_registration_allowed(netuid, true); + + let block_number: u64 = Subtensor::::get_current_block_as_u64(); + let (nonce, work) = + Subtensor::::create_work_for_block_number(netuid, block_number, 3, &hotkey1); + let _ = Subtensor::::register( + RawOrigin::Signed(old_coldkey.clone()).into(), + netuid, + block_number, + nonce, + work.clone(), + hotkey1.clone(), + old_coldkey.clone(), + ); + + Subtensor::::add_balance_to_coldkey_account(&old_coldkey, free_balance_old); + let name: Vec = b"The fourth Coolest Identity".to_vec(); + let identity = ChainIdentity { + name, + url: vec![], + image: vec![], + discord: vec![], + description: vec![], + additional: vec![], + }; + Identities::::insert(&old_coldkey, identity); + + #[extrinsic_call] + _( + RawOrigin::Root, + old_coldkey.clone(), + new_coldkey.clone(), + swap_cost, + ); + } - Subtensor::::set_max_allowed_uids( netuid, 4096 ); - assert_eq!(Subtensor::::get_max_allowed_uids(netuid), 4096); + #[benchmark] + fn batch_reveal_weights() { + let tempo: u16 = 0; + let netuid: u16 = 1; + let num_commits: usize = 10; + + let hotkey: T::AccountId = account("hot", 0, 1); + let coldkey: T::AccountId = account("cold", 0, 2); + + Subtensor::::init_new_network(netuid, tempo); + Subtensor::::set_network_registration_allowed(netuid, true); + 
Subtensor::::set_network_pow_registration_allowed(netuid, true); + Subtensor::::set_commit_reveal_weights_enabled(netuid, true); + Subtensor::::set_weights_set_rate_limit(netuid, 0); + + let block_number: u64 = Subtensor::::get_current_block_as_u64(); + let (nonce, work) = + Subtensor::::create_work_for_block_number(netuid, block_number, 3, &hotkey); + let origin = T::RuntimeOrigin::from(RawOrigin::Signed(hotkey.clone())); + assert_ok!(Subtensor::::register( + origin.clone(), + netuid, + block_number, + nonce, + work.clone(), + hotkey.clone(), + coldkey.clone() + )); + Subtensor::::set_validator_permit_for_uid(netuid, 0, true); + + let mut uids_list = Vec::new(); + let mut values_list = Vec::new(); + let mut salts_list = Vec::new(); + let mut version_keys = Vec::new(); + + for i in 0..num_commits { + let uids = vec![0u16]; + let values = vec![i as u16]; + let salts = vec![i as u16]; + let version_key_i: u64 = i as u64; + + let commit_hash: H256 = BlakeTwo256::hash_of(&( + hotkey.clone(), + netuid, + uids.clone(), + values.clone(), + salts.clone(), + version_key_i, + )); + + assert_ok!(Subtensor::::commit_weights( + RawOrigin::Signed(hotkey.clone()).into(), + netuid, + commit_hash + )); + + uids_list.push(uids); + values_list.push(values); + salts_list.push(salts); + version_keys.push(version_key_i); + } + + #[extrinsic_call] + _( + RawOrigin::Signed(hotkey.clone()), + netuid, + uids_list, + values_list, + salts_list, + version_keys, + ); + } - let coldkey: T::AccountId = account("Test", 0, seed); - let hotkey: T::AccountId = account("Alice", 0, seed); + #[benchmark] + fn recycle_alpha() { + let netuid: u16 = 1; - let amount: u64 = 1; - let amount_to_be_staked = 1000000000u64; - Subtensor::::add_balance_to_coldkey_account(&coldkey.clone(), amount_to_be_staked); + let coldkey: T::AccountId = account("Test", 0, 1); + let hotkey: T::AccountId = account("Alice", 0, 1); - assert_ok!(Subtensor::::do_burned_registration(RawOrigin::Signed(coldkey.clone()).into(), netuid, hotkey.clone())); - }: add_stake(RawOrigin::Signed( coldkey.clone() ), hotkey, netuid, amount) + Subtensor::::init_new_network(netuid, 1); + SubtokenEnabled::::insert(netuid, true); + Subtensor::::set_network_registration_allowed(netuid, true); + Subtensor::::set_burn(netuid, 1); - benchmark_remove_stake{ - let caller: T::AccountId = whitelisted_caller::>(); - let caller_origin = ::RuntimeOrigin::from(RawOrigin::Signed(caller.clone())); - let netuid: u16 = 1; - let version_key: u64 = 1; - let tempo: u16 = 1; - let modality: u16 = 0; - let seed : u32 = 1; + let amount_to_be_staked: u64 = 1_000_000_000; + Subtensor::::add_balance_to_coldkey_account(&coldkey, amount_to_be_staked); + assert_ok!(Subtensor::::do_burned_registration( + RawOrigin::Signed(coldkey.clone()).into(), + netuid, + hotkey.clone() + )); + + let alpha_amount: u64 = 1_000_000; + SubnetAlphaOut::::insert(netuid, alpha_amount * 2); + + Subtensor::::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &coldkey, + netuid, + alpha_amount, + ); + + assert_eq!(TotalHotkeyAlpha::::get(&hotkey, netuid), alpha_amount); + + #[extrinsic_call] + _( + RawOrigin::Signed(coldkey.clone()), + hotkey.clone(), + alpha_amount, + netuid, + ); + } - // Set our total stake to 1000 TAO - Subtensor::::increase_total_stake(1_000_000_000_000); + #[benchmark] + fn burn_alpha() { + let netuid: u16 = 1; + let coldkey: T::AccountId = account("Test", 0, 1); + let hotkey: T::AccountId = account("Alice", 0, 1); - Subtensor::::init_new_network(netuid, tempo); - 
Subtensor::::set_network_registration_allowed( netuid, true ); + Subtensor::::init_new_network(netuid, 1); + SubtokenEnabled::::insert(netuid, true); + Subtensor::::set_network_registration_allowed(netuid, true); + Subtensor::::set_burn(netuid, 1); - Subtensor::::set_max_allowed_uids( netuid, 4096 ); - assert_eq!(Subtensor::::get_max_allowed_uids(netuid), 4096); + let amount_to_be_staked: u64 = 1_000_000_000; + Subtensor::::add_balance_to_coldkey_account(&coldkey, amount_to_be_staked); + assert_ok!(Subtensor::::do_burned_registration( + RawOrigin::Signed(coldkey.clone()).into(), + netuid, + hotkey.clone() + )); + + let alpha_amount: u64 = 1_000_000; + SubnetAlphaOut::::insert(netuid, alpha_amount * 2); + Subtensor::::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &coldkey, + netuid, + alpha_amount, + ); + assert_eq!(TotalHotkeyAlpha::::get(&hotkey, netuid), alpha_amount); + + #[extrinsic_call] + _( + RawOrigin::Signed(coldkey.clone()), + hotkey.clone(), + alpha_amount, + netuid, + ); + } - let coldkey: T::AccountId = account("Test", 0, seed); - let hotkey: T::AccountId = account("Alice", 0, seed); - Subtensor::::set_burn(netuid, 1); + #[benchmark] + fn start_call() { + let netuid: u16 = 1; + let coldkey: T::AccountId = account("Test", 0, 1); + let hotkey: T::AccountId = account("Alice", 0, 1); - let wallet_bal = 1000000u32.into(); - Subtensor::::add_balance_to_coldkey_account(&coldkey.clone(), wallet_bal); + Subtensor::::init_new_network(netuid, 1); + SubtokenEnabled::::insert(netuid, true); + Subtensor::::set_network_registration_allowed(netuid, true); - assert_ok!(Subtensor::::do_burned_registration(RawOrigin::Signed(coldkey.clone()).into(), netuid, hotkey.clone())); + Subtensor::::set_burn(netuid, 1); + let amount_to_be_staked: u64 = 1_000_000; + Subtensor::::add_balance_to_coldkey_account(&coldkey, amount_to_be_staked); + SubnetOwner::::set(netuid, coldkey.clone()); + + assert_ok!(Subtensor::::do_burned_registration( + RawOrigin::Signed(coldkey.clone()).into(), + netuid, + hotkey.clone() + )); + assert_eq!(SubnetOwner::::get(netuid), coldkey.clone()); + assert_eq!(FirstEmissionBlockNumber::::get(netuid), None); + + let current_block: u64 = Subtensor::::get_current_block_as_u64(); + let duration = ::DurationOfStartCall::get(); + let block: BlockNumberFor = (current_block + duration) + .try_into() + .ok() + .expect("can't convert to block number"); + frame_system::Pallet::::set_block_number(block); + + #[extrinsic_call] + _(RawOrigin::Signed(coldkey.clone()), netuid); + } - // Stake 10% of our current total staked TAO - let u64_staked_amt = 100_000_000_000; - Subtensor::::add_balance_to_coldkey_account(&coldkey.clone(), u64_staked_amt); + #[benchmark] + fn adjust_senate() { + let coldkey: T::AccountId = whitelisted_caller(); + let hotkey: T::AccountId = account("Alice", 0, 1); + let root: u16 = Subtensor::::get_root_netuid(); - assert_ok!( Subtensor::::add_stake(RawOrigin::Signed( coldkey.clone() ).into() , hotkey.clone(), netuid, u64_staked_amt)); + Subtensor::::init_new_network(root, 1); + Uids::::insert(root, &hotkey, 0u16); - let amount_unstaked: u64 = u64_staked_amt - 1; - }: remove_stake(RawOrigin::Signed( coldkey.clone() ), hotkey.clone(), netuid, amount_unstaked) + #[extrinsic_call] + _(RawOrigin::Signed(coldkey.clone()), hotkey.clone()); + } - benchmark_serve_axon{ - let caller: T::AccountId = whitelisted_caller::>(); - let caller_origin = ::RuntimeOrigin::from(RawOrigin::Signed(caller.clone())); - let netuid: u16 = 1; - let tempo: u16 = 1; - let modality: u16 = 
0; + #[benchmark] + fn add_stake_limit() { + let netuid: u16 = 1; + let tempo: u16 = 1; + let seed: u32 = 1; - let version: u32 = 2; - let ip: u128 = 1676056785; - let port: u16 = 128; - let ip_type: u8 = 4; - let protocol: u8 = 0; - let placeholder1: u8 = 0; - let placeholder2: u8 = 0; + Subtensor::::init_new_network(netuid, tempo); + SubtokenEnabled::::insert(netuid, true); + Subtensor::::set_burn(netuid, 1); + Subtensor::::set_network_registration_allowed(netuid, true); + Subtensor::::set_max_allowed_uids(netuid, 4096); + + let coldkey: T::AccountId = account("Test", 0, seed); + let hotkey: T::AccountId = account("Alice", 0, seed); + + let amount = 900_000_000_000; + let limit: u64 = 6_000_000_000; + let amount_to_be_staked = 440_000_000_000; + Subtensor::::add_balance_to_coldkey_account(&coldkey.clone(), amount); + + let tao_reserve = 150_000_000_000_u64; + let alpha_in = 100_000_000_000_u64; + SubnetTAO::::insert(netuid, tao_reserve); + SubnetAlphaIn::::insert(netuid, alpha_in); + + assert_ok!(Subtensor::::do_burned_registration( + RawOrigin::Signed(coldkey.clone()).into(), + netuid, + hotkey.clone() + )); + + #[extrinsic_call] + _( + RawOrigin::Signed(coldkey.clone()), + hotkey, + netuid, + amount_to_be_staked, + limit, + false, + ); + } - Subtensor::::init_new_network(netuid, tempo); - Subtensor::::set_max_allowed_uids( netuid, 4096 ); - assert_eq!(Subtensor::::get_max_allowed_uids(netuid), 4096); + #[benchmark] + fn move_stake() { + let coldkey: T::AccountId = whitelisted_caller(); + let origin: T::AccountId = account("A", 0, 1); + let destination: T::AccountId = account("B", 0, 2); + let netuid: u16 = 1; + + SubtokenEnabled::::insert(netuid, true); + Subtensor::::init_new_network(netuid, 1); + + let burn_fee = Subtensor::::get_burn_as_u64(netuid); + let stake_tao: u64 = 1_000_000; + let deposit = burn_fee.saturating_mul(2).saturating_add(stake_tao); + Subtensor::::add_balance_to_coldkey_account(&coldkey, deposit); + + assert_ok!(Subtensor::::burned_register( + RawOrigin::Signed(coldkey.clone()).into(), + netuid, + origin.clone() + )); + + SubnetTAO::::insert(netuid, deposit); + SubnetAlphaIn::::insert(netuid, deposit); + TotalStake::::set(deposit); + + assert_ok!(Subtensor::::add_stake_limit( + RawOrigin::Signed(coldkey.clone()).into(), + origin.clone(), + netuid, + stake_tao, + u64::MAX, + false + )); + + let alpha_to_move: u64 = + Subtensor::::get_stake_for_hotkey_and_coldkey_on_subnet(&origin, &coldkey, netuid); + + Subtensor::::create_account_if_non_existent(&coldkey, &destination); + + #[extrinsic_call] + _( + RawOrigin::Signed(coldkey.clone()), + origin.clone(), + destination.clone(), + netuid, + netuid, + alpha_to_move, + ); + } - Subtensor::::set_burn(netuid, 1); - let amount_to_be_staked = 1000000u32.into(); - Subtensor::::add_balance_to_coldkey_account(&caller.clone(), amount_to_be_staked); + #[benchmark] + fn remove_stake_limit() { + let netuid: u16 = 1; + let tempo: u16 = 1; + let seed: u32 = 1; - assert_ok!(Subtensor::::do_burned_registration(caller_origin.clone(), netuid, caller.clone())); + // Set our total stake to 1000 TAO + Subtensor::::increase_total_stake(1_000_000_000_000); - Subtensor::::set_serving_rate_limit(netuid, 0); + Subtensor::::init_new_network(netuid, tempo); + Subtensor::::set_network_registration_allowed(netuid, true); + SubtokenEnabled::::insert(netuid, true); - }: serve_axon(RawOrigin::Signed( caller.clone() ), netuid, version, ip, port, ip_type, protocol, placeholder1, placeholder2) + Subtensor::::set_max_allowed_uids(netuid, 4096); + 
assert_eq!(Subtensor::::get_max_allowed_uids(netuid), 4096); - benchmark_serve_prometheus { - let caller: T::AccountId = whitelisted_caller::>(); - let caller_origin = ::RuntimeOrigin::from(RawOrigin::Signed(caller.clone())); - let netuid: u16 = 1; - let tempo: u16 = 1; - let modality: u16 = 0; + let coldkey: T::AccountId = account("Test", 0, seed); + let hotkey: T::AccountId = account("Alice", 0, seed); + Subtensor::::set_burn(netuid, 1); - let version: u32 = 2; - let ip: u128 = 1676056785; - let port: u16 = 128; - let ip_type: u8 = 4; + let limit: u64 = 1_000_000_000; + let tao_reserve = 150_000_000_000_u64; + let alpha_in = 100_000_000_000_u64; + SubnetTAO::::insert(netuid, tao_reserve); + SubnetAlphaIn::::insert(netuid, alpha_in); + + let wallet_bal = 1000000u32.into(); + Subtensor::::add_balance_to_coldkey_account(&coldkey.clone(), wallet_bal); + + assert_ok!(Subtensor::::do_burned_registration( + RawOrigin::Signed(coldkey.clone()).into(), + netuid, + hotkey.clone() + )); + + let u64_staked_amt = 100_000_000_000; + Subtensor::::add_balance_to_coldkey_account(&coldkey.clone(), u64_staked_amt); + + assert_ok!(Subtensor::::add_stake( + RawOrigin::Signed(coldkey.clone()).into(), + hotkey.clone(), + netuid, + u64_staked_amt + )); + + let amount_unstaked: u64 = 30_000_000_000; + + #[extrinsic_call] + _( + RawOrigin::Signed(coldkey.clone()), + hotkey.clone(), + netuid, + amount_unstaked, + limit, + false, + ); + } + + #[benchmark] + fn swap_stake_limit() { + let coldkey: T::AccountId = whitelisted_caller::>(); + let hot: T::AccountId = account("A", 0, 1); + let netuid1: u16 = 1; + let netuid2: u16 = 2; + let allow: bool = true; + + SubtokenEnabled::::insert(netuid1, true); + Subtensor::::init_new_network(netuid1, 1); + SubtokenEnabled::::insert(netuid2, true); + Subtensor::::init_new_network(netuid2, 1); + + let tao_reserve = 150_000_000_000_u64; + let alpha_in = 100_000_000_000_u64; + SubnetTAO::::insert(netuid1, tao_reserve); + SubnetAlphaIn::::insert(netuid1, alpha_in); + SubnetTAO::::insert(netuid2, tao_reserve); + + Subtensor::::increase_total_stake(1_000_000_000_000); + + let amount = 900_000_000_000; + let limit_stake: u64 = 6_000_000_000; + let limit_swap: u64 = 1_000_000_000; + let amount_to_be_staked = 440_000_000_000; + let amount_swapped: u64 = 30_000_000_000; + Subtensor::::add_balance_to_coldkey_account(&coldkey.clone(), amount); + + assert_ok!(Subtensor::::burned_register( + RawOrigin::Signed(coldkey.clone()).into(), + netuid1, + hot.clone() + )); + + assert_ok!(Subtensor::::burned_register( + RawOrigin::Signed(coldkey.clone()).into(), + netuid2, + hot.clone() + )); + + assert_ok!(Subtensor::::add_stake_limit( + RawOrigin::Signed(coldkey.clone()).into(), + hot.clone(), + netuid1, + amount_to_be_staked, + limit_stake, + allow + )); + + #[extrinsic_call] + _( + RawOrigin::Signed(coldkey.clone()), + hot.clone(), + netuid1, + netuid2, + amount_swapped, + limit_swap, + allow, + ); + } + + #[benchmark] + fn transfer_stake() { + let coldkey: T::AccountId = whitelisted_caller(); + let dest: T::AccountId = account("B", 0, 2); + let hot: T::AccountId = account("A", 0, 1); + let netuid: u16 = 1; + + SubtokenEnabled::::insert(netuid, true); + Subtensor::::init_new_network(netuid, 1); + + let reg_fee = Subtensor::::get_burn_as_u64(netuid); + let stake_tao: u64 = 1_000_000; + let deposit = reg_fee.saturating_mul(2).saturating_add(stake_tao); + Subtensor::::add_balance_to_coldkey_account(&coldkey, deposit); + + assert_ok!(Subtensor::::burned_register( + 
RawOrigin::Signed(coldkey.clone()).into(), + netuid, + hot.clone() + )); + + SubnetTAO::::insert(netuid, deposit); + SubnetAlphaIn::::insert(netuid, deposit); + TotalStake::::set(deposit); + + assert_ok!(Subtensor::::add_stake_limit( + RawOrigin::Signed(coldkey.clone()).into(), + hot.clone(), + netuid, + stake_tao, + u64::MAX, + false + )); + + let alpha_to_transfer: u64 = + Subtensor::::get_stake_for_hotkey_and_coldkey_on_subnet(&hot, &coldkey, netuid); + + Subtensor::::create_account_if_non_existent(&dest, &hot); + + #[extrinsic_call] + _( + RawOrigin::Signed(coldkey.clone()), + dest.clone(), + hot.clone(), + netuid, + netuid, + alpha_to_transfer, + ); + } + + #[benchmark] + fn swap_stake() { + let coldkey: T::AccountId = whitelisted_caller(); + let hot: T::AccountId = account("A", 0, 9); + let netuid1: u16 = 1; + let netuid2: u16 = 2; + + SubtokenEnabled::::insert(netuid1, true); + Subtensor::::init_new_network(netuid1, 1); + SubtokenEnabled::::insert(netuid2, true); + Subtensor::::init_new_network(netuid2, 1); + + let reg_fee = Subtensor::::get_burn_as_u64(netuid1); + let stake_tao: u64 = 1_000_000; + let deposit = reg_fee.saturating_mul(2).saturating_add(stake_tao); + Subtensor::::add_balance_to_coldkey_account(&coldkey, deposit); + + assert_ok!(Subtensor::::burned_register( + RawOrigin::Signed(coldkey.clone()).into(), + netuid1, + hot.clone() + )); + + SubnetTAO::::insert(netuid1, deposit); + SubnetAlphaIn::::insert(netuid1, deposit); + SubnetTAO::::insert(netuid2, deposit); + SubnetAlphaIn::::insert(netuid2, deposit); + TotalStake::::set(deposit); + + assert_ok!(Subtensor::::add_stake_limit( + RawOrigin::Signed(coldkey.clone()).into(), + hot.clone(), + netuid1, + stake_tao, + u64::MAX, + false + )); + + let alpha_to_swap: u64 = + Subtensor::::get_stake_for_hotkey_and_coldkey_on_subnet(&hot, &coldkey, netuid1); + + #[extrinsic_call] + _( + RawOrigin::Signed(coldkey.clone()), + hot.clone(), + netuid1, + netuid2, + alpha_to_swap, + ); + } + + #[benchmark] + fn batch_commit_weights() { + let hotkey: T::AccountId = whitelisted_caller(); + let netuid: u16 = 1; + let count: usize = 3; + let mut netuids: Vec> = Vec::new(); + let mut hashes: Vec = Vec::new(); + + Subtensor::::init_new_network(netuid, 1); + Subtensor::::set_network_pow_registration_allowed(netuid, true); + SubtokenEnabled::::insert(netuid, true); + + let reg_fee = Subtensor::::get_burn_as_u64(netuid); + Subtensor::::add_balance_to_coldkey_account(&hotkey, reg_fee.saturating_mul(2)); + + assert_ok!(Subtensor::::burned_register( + RawOrigin::Signed(hotkey.clone()).into(), + netuid, + hotkey.clone() + )); + + Subtensor::::set_validator_permit_for_uid(netuid, 0, true); + Subtensor::::set_commit_reveal_weights_enabled(netuid, true); + + for i in 0..count { + netuids.push(Compact(netuid)); + hashes.push(H256::repeat_byte(i as u8)); + } + + #[extrinsic_call] + _( + RawOrigin::Signed(hotkey.clone()), + netuids.clone(), + hashes.clone(), + ); + } - Subtensor::::init_new_network(netuid, tempo); - Subtensor::::set_max_allowed_uids( netuid, 4096 ); - assert_eq!(Subtensor::::get_max_allowed_uids(netuid), 4096); + #[benchmark] + fn batch_set_weights() { + let hotkey: T::AccountId = whitelisted_caller(); + let netuid: u16 = 1; + let version: u64 = 1; + let entries: Vec<(Compact, Compact)> = vec![(Compact(0u16), Compact(0u16))]; + let netuids: Vec> = vec![Compact(netuid)]; + let weights: Vec, Compact)>> = vec![entries.clone()]; + let keys: Vec> = vec![Compact(version)]; + + Subtensor::::init_new_network(netuid, 1); + 
Subtensor::::set_network_registration_allowed(netuid, true); + SubtokenEnabled::::insert(netuid, true); + + let reg_fee = Subtensor::::get_burn_as_u64(netuid); + Subtensor::::add_balance_to_coldkey_account(&hotkey, reg_fee.saturating_mul(2)); + + assert_ok!(Subtensor::::burned_register( + RawOrigin::Signed(hotkey.clone()).into(), + netuid, + hotkey.clone() + )); + + #[extrinsic_call] + _( + RawOrigin::Signed(hotkey.clone()), + netuids.clone(), + weights.clone(), + keys.clone(), + ); + } - Subtensor::::set_burn(netuid, 1); - let amount_to_be_staked = 1000000u32.into(); - Subtensor::::add_balance_to_coldkey_account(&caller.clone(), amount_to_be_staked); + #[benchmark] + fn commit_crv3_weights() { + let hotkey: T::AccountId = whitelisted_caller(); + let netuid: u16 = 1; + let vec_commit: Vec = vec![0; MAX_CRV3_COMMIT_SIZE_BYTES as usize]; + let commit: BoundedVec<_, _> = vec_commit.try_into().unwrap(); + let round: u64 = 0; + + Subtensor::::init_new_network(netuid, 1); + Subtensor::::set_network_pow_registration_allowed(netuid, true); + SubtokenEnabled::::insert(netuid, true); + + let reg_fee = Subtensor::::get_burn_as_u64(netuid); + Subtensor::::add_balance_to_coldkey_account(&hotkey, reg_fee.saturating_mul(2)); + + assert_ok!(Subtensor::::burned_register( + RawOrigin::Signed(hotkey.clone()).into(), + netuid, + hotkey.clone() + )); + + Subtensor::::set_commit_reveal_weights_enabled(netuid, true); + + #[extrinsic_call] + _( + RawOrigin::Signed(hotkey.clone()), + netuid, + commit.clone(), + round, + ); + } - assert_ok!(Subtensor::::do_burned_registration(caller_origin.clone(), netuid, caller.clone())); - Subtensor::::set_serving_rate_limit(netuid, 0); + #[benchmark] + fn decrease_take() { + let coldkey: T::AccountId = whitelisted_caller(); + let hotkey: T::AccountId = account("Alice", 0, 1); + let take: u16 = 100; - }: serve_prometheus(RawOrigin::Signed( caller.clone() ), netuid, version, ip, port, ip_type) + Delegates::::insert(&hotkey, 200u16); + Owner::::insert(&hotkey, &coldkey); - /* - benchmark_sudo_register { - let caller: T::AccountId = whitelisted_caller::>(); - let caller_origin = ::RuntimeOrigin::from(RawOrigin::Signed(caller.clone())); - let netuid: u16 = 1; - let tempo: u16 = 0; - let modality: u16 = 0; - let stake: u64 = 10; - let balance: u64 = 1000000000; + #[extrinsic_call] + _(RawOrigin::Signed(coldkey.clone()), hotkey.clone(), take); + } - Subtensor::::init_new_network(netuid, tempo); - Subtensor::::set_max_allowed_uids( netuid, 4096 ); - assert_eq!(Subtensor::::get_max_allowed_uids(netuid), 4096); + #[benchmark] + fn increase_take() { + let coldkey: T::AccountId = whitelisted_caller(); + let hotkey: T::AccountId = account("Alice", 0, 2); + let take: u16 = 150; - let seed : u32 = 1; - let block_number: u64 = Subtensor::::get_current_block_as_u64(); - let hotkey: T::AccountId = account("Alice", 0, seed); - let coldkey: T::AccountId = account("Test", 0, seed); + Delegates::::insert(&hotkey, 100u16); + Owner::::insert(&hotkey, &coldkey); - let amount_to_be_staked = balance.into(); - Subtensor::::add_balance_to_coldkey_account(&coldkey.clone(), amount_to_be_staked); + #[extrinsic_call] + _(RawOrigin::Signed(coldkey.clone()), hotkey.clone(), take); + } - }: sudo_register(RawOrigin::>::Root, netuid, hotkey, coldkey, stake, balance) - */ - benchmark_burned_register { - let netuid: u16 = 1; - let seed : u32 = 1; - let hotkey: T::AccountId = account("Alice", 0, seed); - let coldkey: T::AccountId = account("Test", 0, seed); - let modality: u16 = 0; - let tempo: u16 = 1; + #[benchmark] 
+ fn register_network_with_identity() { + let coldkey: T::AccountId = whitelisted_caller(); + let hotkey: T::AccountId = account("Alice", 0, 1); + let identity: Option = None; + + Subtensor::::set_network_registration_allowed(1, true); + Subtensor::::set_network_rate_limit(1); + let amount: u64 = 9_999_999_999_999; + Subtensor::::add_balance_to_coldkey_account(&coldkey, amount); + + #[extrinsic_call] + _( + RawOrigin::Signed(coldkey.clone()), + hotkey.clone(), + identity.clone(), + ); + } - Subtensor::::init_new_network(netuid, tempo); - Subtensor::::set_burn(netuid, 1); - - let amount_to_be_staked = 1000000u32.into(); - Subtensor::::add_balance_to_coldkey_account(&coldkey.clone(), amount_to_be_staked); - - }: burned_register(RawOrigin::Signed( coldkey.clone() ), netuid, hotkey) - - - benchmark_root_register { - let netuid: u16 = 1; - let version_key: u64 = 1; - let tempo: u16 = 1; - let seed : u32 = 1; - - Subtensor::::init_new_network(netuid, tempo); - - Subtensor::::set_burn(netuid, 1); - Subtensor::::set_network_registration_allowed( netuid, true); - - Subtensor::::set_max_allowed_uids( netuid, 4096 ); - assert_eq!(Subtensor::::get_max_allowed_uids(netuid), 4096); - - let coldkey: T::AccountId = account("Test", 0, seed); - let hotkey: T::AccountId = account("Alice", 0, seed); - - let amount: u64 = 1; - let amount_to_be_staked = 100_000_000_000_000u64; - Subtensor::::add_balance_to_coldkey_account(&coldkey.clone(), amount_to_be_staked); - - assert_ok!(Subtensor::::do_burned_registration(RawOrigin::Signed(coldkey.clone()).into(), netuid, hotkey.clone())); - }: root_register(RawOrigin::Signed(coldkey), hotkey) - - benchmark_register_network { - let seed : u32 = 1; - - let coldkey: T::AccountId = account("Test", 0, seed); - let hotkey: T::AccountId = account("TestHotkey", 0, seed); - - Subtensor::::set_network_rate_limit(1); - - let amount: u64 = 1; - let amount_to_be_staked = 100_000_000_000_000u64; - Subtensor::::add_balance_to_coldkey_account(&coldkey.clone(), amount_to_be_staked); - }: register_network(RawOrigin::Signed(coldkey), hotkey.clone()) - - benchmark_dissolve_network { - let seed : u32 = 1; - - let coldkey: T::AccountId = account("Test", 0, seed); - let hotkey: T::AccountId = account("TestHotkey", 0, seed); - - Subtensor::::set_network_rate_limit(0); - - let amount: u64 = 1; - let amount_to_be_staked = 100_000_000_000_000u64; - Subtensor::::add_balance_to_coldkey_account(&coldkey.clone(), amount_to_be_staked); - assert_ok!(Subtensor::::register_network(RawOrigin::Signed(coldkey.clone()).into(), hotkey.clone())); - }: dissolve_network(RawOrigin::Root, coldkey.clone(), 1) - - - // swap_hotkey { - // let seed: u32 = 1; - // let coldkey: T::AccountId = account("Alice", 0, seed); - // let old_hotkey: T::AccountId = account("Bob", 0, seed); - // let new_hotkey: T::AccountId = account("Charlie", 0, seed); - - // let netuid = 1u16; - // Subtensor::::init_new_network(netuid, 100); - // Subtensor::::set_min_burn(netuid, 1); - // Subtensor::::set_max_burn(netuid, 1); - // Subtensor::::set_target_registrations_per_interval(netuid, 256); - // Subtensor::::set_max_registrations_per_block(netuid, 256); - - // Subtensor::::add_balance_to_coldkey_account(&coldkey.clone(), 10_000_000_000u64); - // assert_ok!(Subtensor::::burned_register(RawOrigin::Signed(coldkey.clone()).into(), netuid, old_hotkey.clone())); - // assert_ok!(Subtensor::::become_delegate(RawOrigin::Signed(coldkey.clone()).into(), old_hotkey.clone())); - - // let max_uids = Subtensor::::get_max_allowed_uids(netuid) as u32; - // 
for i in 0..max_uids - 1 { - // let coldkey: T::AccountId = account("Axon", 0, i); - // let hotkey: T::AccountId = account("Hotkey", 0, i); - - // Subtensor::::add_balance_to_coldkey_account(&coldkey.clone(), 10_000_000_000u64); - // assert_ok!(Subtensor::::burned_register(RawOrigin::Signed(coldkey.clone()).into(), netuid, hotkey)); - // assert_ok!(Subtensor::::add_stake(RawOrigin::Signed(coldkey).into(), old_hotkey.clone(), 1_000_000_000)); - // } - // }: _(RawOrigin::Signed(coldkey), old_hotkey, new_hotkey) - - commit_weights { - let tempo: u16 = 1; - let netuid: u16 = 1; - let version_key: u64 = 0; - let uids: Vec = vec![0]; - let weight_values: Vec = vec![10]; - let hotkey: T::AccountId = account("hot", 0, 1); - let coldkey: T::AccountId = account("cold", 0, 2); - let start_nonce = 300000; - - let commit_hash: H256 = BlakeTwo256::hash_of(&( - hotkey.clone(), - netuid, - uids.clone(), - weight_values.clone(), - version_key, - )); - - Subtensor::::init_new_network(netuid, tempo); - - let block_number: u64 = Subtensor::::get_current_block_as_u64(); - let (nonce, work): (u64, Vec) = Subtensor::::create_work_for_block_number( - netuid, - block_number, - start_nonce, - &hotkey, - ); - let result = Subtensor::::register( - ::RuntimeOrigin::from(RawOrigin::Signed(hotkey.clone())), - netuid, - block_number, - nonce, - work, - hotkey.clone(), - coldkey, - ); - Subtensor::::set_validator_permit_for_uid(netuid, 0, true); - -}: commit_weights(RawOrigin::Signed(hotkey.clone()), netuid, commit_hash) - -reveal_weights { - let tempo: u16 = 0; - let netuid: u16 = 1; - let version_key: u64 = 0; - let uids: Vec = vec![0]; - let weight_values: Vec = vec![10]; - let salt: Vec = vec![8]; - let hotkey: T::AccountId = account("hot", 0, 1); - let coldkey: T::AccountId = account("cold", 1, 2); - - Subtensor::::init_new_network(netuid, tempo); - Subtensor::::set_network_registration_allowed(netuid, true); - Subtensor::::set_network_pow_registration_allowed(netuid, true); - - let block_number: u64 = Subtensor::::get_current_block_as_u64(); - let (nonce, work): (u64, Vec) = Subtensor::::create_work_for_block_number( - netuid, - block_number, - 3, - &hotkey, - ); - - let _ = Subtensor::::register( - ::RuntimeOrigin::from(RawOrigin::Signed(hotkey.clone())), - netuid, - block_number, - nonce, - work.clone(), - hotkey.clone(), - coldkey.clone(), - ); - - Subtensor::::set_validator_permit_for_uid(netuid, 0, true); - - let commit_hash: H256 = BlakeTwo256::hash_of(&( - hotkey.clone(), - netuid, - uids.clone(), - weight_values.clone(), - salt.clone(), - version_key, - )); - let _ = Subtensor::::commit_weights(::RuntimeOrigin::from(RawOrigin::Signed(hotkey.clone())), netuid, commit_hash); - - }: reveal_weights(RawOrigin::Signed(hotkey.clone()), netuid, uids, weight_values, salt, version_key) - - schedule_swap_coldkey { - let old_coldkey: T::AccountId = account("old_cold", 0, 1); - let new_coldkey: T::AccountId = account("new_cold", 1, 2); - }: schedule_swap_coldkey(RawOrigin::Signed(old_coldkey.clone()), new_coldkey.clone()) - - schedule_dissolve_network { - let coldkey: T::AccountId = account("coldkey", 0, 1); - let netuid = 1; - }: schedule_dissolve_network(RawOrigin::Signed(coldkey.clone()), netuid) - benchmark_sudo_set_tx_childkey_take_rate_limit { - // We don't need to set up any initial state for this benchmark - // as it's a simple setter function that only requires root origin - let new_rate_limit: u64 = 100; -}: sudo_set_tx_childkey_take_rate_limit(RawOrigin::Root, new_rate_limit) - - benchmark_set_childkey_take { 
- // Setup - let netuid: u16 = 1; - let tempo: u16 = 1; - let seed: u32 = 1; - let coldkey: T::AccountId = account("Cold", 0, seed); - let hotkey: T::AccountId = account("Hot", 0, seed); - let take: u16 = 1000; // 10% in basis points - - // Initialize the network - Subtensor::::init_new_network(netuid, tempo); - - // Register the hotkey - Subtensor::::set_burn(netuid, 1); - let amount_to_be_staked = 1_000_000u32.into(); - Subtensor::::add_balance_to_coldkey_account(&coldkey, amount_to_be_staked); - assert_ok!(Subtensor::::do_burned_registration(RawOrigin::Signed(coldkey.clone()).into(), netuid, hotkey.clone())); -}: set_childkey_take(RawOrigin::Signed(coldkey), hotkey, netuid, take) - - swap_coldkey { - // Set up initial state - let old_coldkey: T::AccountId = account("old_coldkey", 0, 0); - let new_coldkey: T::AccountId = account("new_coldkey", 0, 0); - let hotkey1: T::AccountId = account("hotkey1", 0, 0); - let netuid = 1u16; - let stake_amount1 = 1000u64; - let stake_amount2 = 2000u64; - let swap_cost = Subtensor::::get_key_swap_cost(); - let free_balance_old = 12345u64 + swap_cost; - let tempo: u16 = 1; - - // Setup initial state - Subtensor::::init_new_network(netuid, tempo); - Subtensor::::set_network_registration_allowed(netuid, true); - Subtensor::::set_network_pow_registration_allowed(netuid, true); - - let block_number: u64 = Subtensor::::get_current_block_as_u64(); - let (nonce, work): (u64, Vec) = Subtensor::::create_work_for_block_number( - netuid, - block_number, - 3, - &hotkey1, - ); - - let _ = Subtensor::::register( - ::RuntimeOrigin::from(RawOrigin::Signed(old_coldkey.clone())), - netuid, - block_number, - nonce, - work.clone(), - hotkey1.clone(), - old_coldkey.clone(), - ); - - // Add balance to old coldkey - Subtensor::::add_balance_to_coldkey_account( - &old_coldkey, - stake_amount1 + stake_amount2 + free_balance_old, - ); - - // Insert an Identity - let name: Vec = b"The fourth Coolest Identity".to_vec(); - let identity: ChainIdentity = ChainIdentity { - name: name.clone(), - url: vec![], - image: vec![], - discord: vec![], - description: vec![], - additional: vec![], - }; - - Identities::::insert(&old_coldkey, identity); - - // Benchmark setup complete, now execute the extrinsic -}: swap_coldkey(RawOrigin::Root, old_coldkey.clone(), new_coldkey.clone(), swap_cost) - -batch_reveal_weights { - let tempo: u16 = 0; - let netuid: u16 = 1; - let num_commits: usize = 10; - - let hotkey: T::AccountId = account("hot", 0, 1); - let coldkey: T::AccountId = account("cold", 0, 2); - - Subtensor::::init_new_network(netuid, tempo); - Subtensor::::set_network_registration_allowed(netuid, true); - Subtensor::::set_network_pow_registration_allowed(netuid, true); - Subtensor::::set_commit_reveal_weights_enabled(netuid, true); - Subtensor::::set_weights_set_rate_limit(netuid, 0); // Disable rate limiting for benchmarking - - let block_number: u64 = Subtensor::::get_current_block_as_u64(); - let (nonce, work): (u64, Vec) = Subtensor::::create_work_for_block_number( - netuid, - block_number, - 3, - &hotkey, - ); - - let origin = T::RuntimeOrigin::from(RawOrigin::Signed(hotkey.clone())); - assert_ok!(Subtensor::::register( - origin.clone(), - netuid, - block_number, - nonce, - work.clone(), - hotkey.clone(), - coldkey.clone(), - )); - - let uid: u16 = 0; - - Subtensor::::set_validator_permit_for_uid(netuid, uid, true); - - let mut uids_list = Vec::new(); - let mut values_list = Vec::new(); - let mut salts_list = Vec::new(); - let mut version_keys = Vec::new(); - - for i in 0..num_commits { 
- let uids: Vec = vec![uid]; - let values: Vec = vec![i as u16]; - let salt: Vec = vec![i as u16]; - let version_key_i: u64 = i as u64; - - let commit_hash: H256 = BlakeTwo256::hash_of(&( - hotkey.clone(), - netuid, - uids.clone(), - values.clone(), - salt.clone(), - version_key_i, - )); - - assert_ok!(Subtensor::::commit_weights( - T::RuntimeOrigin::from(RawOrigin::Signed(hotkey.clone())), - netuid, - commit_hash, - )); - - uids_list.push(uids); - values_list.push(values); - salts_list.push(salt); - version_keys.push(version_key_i); - } -}: batch_reveal_weights( - RawOrigin::Signed(hotkey.clone()), - netuid, - uids_list, - values_list, - salts_list, - version_keys -) - -benchmark_recycle_alpha { - let caller: T::AccountId = whitelisted_caller::>(); - let caller_origin = ::RuntimeOrigin::from(RawOrigin::Signed(caller.clone())); - let netuid: u16 = 1; - let tempo: u16 = 1; - let seed: u32 = 1; - - // Set up coldkey and hotkey - let coldkey: T::AccountId = account("Test", 0, seed); - let hotkey: T::AccountId = account("Alice", 0, seed); - - // Initialize network - Subtensor::::init_new_network(netuid, tempo); - Subtensor::::set_network_registration_allowed(netuid, true); - - // Register the neuron - Subtensor::::set_burn(netuid, 1); - let amount_to_be_staked = 1000000u32.into(); - Subtensor::::add_balance_to_coldkey_account(&coldkey.clone(), amount_to_be_staked); - - assert_ok!(Subtensor::::do_burned_registration(RawOrigin::Signed(coldkey.clone()).into(), netuid, hotkey.clone())); - - // Add alpha to the hotkey - let alpha_amount: u64 = 1000000; - TotalHotkeyAlpha::::insert(&hotkey, netuid, alpha_amount); - SubnetAlphaOut::::insert(netuid, alpha_amount * 2); - - // Verify the alpha has been added - assert_eq!(TotalHotkeyAlpha::::get(&hotkey, netuid), alpha_amount); - -}: recycle_alpha(RawOrigin::Signed(coldkey), hotkey, alpha_amount, netuid) - -benchmark_burn_alpha { - let caller: T::AccountId = whitelisted_caller::>(); - let caller_origin = ::RuntimeOrigin::from(RawOrigin::Signed(caller.clone())); - let netuid = 1; - let tempo = 1; - let seed = 1; - - // Set up coldkey and hotkey - let coldkey: T::AccountId = account("Test", 0, seed); - let hotkey: T::AccountId = account("Alice", 0, seed); - - // Initialize network - Subtensor::::init_new_network(netuid, tempo); - Subtensor::::set_network_registration_allowed(netuid, true); - - // Register the neuron - Subtensor::::set_burn(netuid, 1); - let amount_to_be_staked = 1000000u32.into(); - Subtensor::::add_balance_to_coldkey_account(&coldkey.clone(), amount_to_be_staked); - - assert_ok!(Subtensor::::do_burned_registration(RawOrigin::Signed(coldkey.clone()).into(), netuid, hotkey.clone())); - - // Add alpha to the hotkey - let alpha_amount: u64 = 1000000; - TotalHotkeyAlpha::::insert(&hotkey, netuid, alpha_amount); - SubnetAlphaOut::::insert(netuid, alpha_amount * 2); - - // Verify the alpha has been added - assert_eq!(TotalHotkeyAlpha::::get(&hotkey, netuid), alpha_amount); - -}: burn_alpha(RawOrigin::Signed(coldkey), hotkey, alpha_amount, netuid) - - -benchmark_start_call { - let caller: T::AccountId = whitelisted_caller::>(); - let caller_origin = ::RuntimeOrigin::from(RawOrigin::Signed(caller.clone())); - let netuid: u16 = 1; - let tempo: u16 = 1; - let seed: u32 = 1; - - // Set up coldkey and hotkey - let coldkey: T::AccountId = account("Test", 0, seed); - let hotkey: T::AccountId = account("Alice", 0, seed); - - // Initialize network - Subtensor::::init_new_network(netuid, tempo); - Subtensor::::set_network_registration_allowed(netuid, 
true); - - // Register the neuron - Subtensor::::set_burn(netuid, 1); - let amount_to_be_staked = 1000000u32.into(); - Subtensor::::add_balance_to_coldkey_account(&coldkey.clone(), amount_to_be_staked); - - assert_ok!(Subtensor::::do_burned_registration(RawOrigin::Signed(coldkey.clone()).into(), netuid, hotkey.clone())); - assert_eq!(SubnetOwner::::get(netuid), coldkey.clone()); - assert_eq!(FirstEmissionBlockNumber::::get(netuid), None); - let current_block: u64 = Subtensor::::get_current_block_as_u64(); - let duration = ::DurationOfStartCall::get(); - let block: BlockNumberFor = (current_block + duration).try_into().ok().expect("can't convert to block number"); - frame_system::Pallet::::set_block_number(block); - -}: start_call(RawOrigin::Signed(coldkey), netuid) + #[benchmark] + fn serve_axon_tls() { + let caller: T::AccountId = whitelisted_caller(); + let netuid: u16 = 1; + let version: u32 = 1; + let ip: u128 = 0xC0A8_0001; + let port: u16 = 30333; + let ip_type: u8 = 4; + let proto: u8 = 0; + let p1: u8 = 0; + let p2: u8 = 0; + let cert: Vec = vec![]; + + Subtensor::::init_new_network(netuid, 1); + Subtensor::::set_network_registration_allowed(netuid, true); + SubtokenEnabled::::insert(netuid, true); + + let reg_fee = Subtensor::::get_burn_as_u64(netuid); + let deposit: u64 = reg_fee.saturating_mul(2); + Subtensor::::add_balance_to_coldkey_account(&caller, deposit); + + assert_ok!(Subtensor::::burned_register( + RawOrigin::Signed(caller.clone()).into(), + netuid, + caller.clone() + )); + + #[extrinsic_call] + _( + RawOrigin::Signed(caller.clone()), + netuid, + version, + ip, + port, + ip_type, + proto, + p1, + p2, + cert.clone(), + ); + } + #[benchmark] + fn set_identity() { + let coldkey: T::AccountId = whitelisted_caller(); + let hotkey: T::AccountId = account("Alice", 0, 5); + let name = b"n".to_vec(); + let url = vec![]; + let repo = vec![]; + let img = vec![]; + let disc = vec![]; + let descr = vec![]; + let add = vec![]; + + Subtensor::::create_account_if_non_existent(&coldkey, &hotkey); + Subtensor::::init_new_network(1, 1); + let deposit: u64 = 1_000_000_000u64.saturating_mul(2); + Subtensor::::add_balance_to_coldkey_account(&coldkey, deposit); + SubtokenEnabled::::insert(1, true); + + assert_ok!(Subtensor::::burned_register( + RawOrigin::Signed(coldkey.clone()).into(), + 1, + hotkey.clone() + )); + + #[extrinsic_call] + _( + RawOrigin::Signed(coldkey.clone()), + name.clone(), + url.clone(), + repo.clone(), + img.clone(), + disc.clone(), + descr.clone(), + add.clone(), + ); + } + + #[benchmark] + fn set_subnet_identity() { + let coldkey: T::AccountId = whitelisted_caller(); + let netuid: u16 = 1; + let name = b"n".to_vec(); + let repo = vec![]; + let contact = vec![]; + let url = vec![]; + let disc = vec![]; + let descr = vec![]; + let add = vec![]; + + SubnetOwner::::insert(netuid, coldkey.clone()); + SubtokenEnabled::::insert(netuid, true); + + #[extrinsic_call] + _( + RawOrigin::Signed(coldkey.clone()), + netuid, + name.clone(), + repo.clone(), + contact.clone(), + url.clone(), + disc.clone(), + descr.clone(), + add.clone(), + ); + } + + #[benchmark] + fn set_tao_weights() { + let netuid: u16 = 1; + let hotkey: T::AccountId = account("A", 0, 6); + let dests = vec![0u16]; + let weights = vec![0u16]; + let version: u64 = 1; + + Subtensor::::init_new_network(netuid, 1); + + #[extrinsic_call] + _( + RawOrigin::None, + netuid, + hotkey.clone(), + dests.clone(), + weights.clone(), + version, + ); + } + + #[benchmark] + fn swap_hotkey() { + let coldkey: T::AccountId = 
whitelisted_caller();
+        let old: T::AccountId = account("A", 0, 7);
+        let new: T::AccountId = account("B", 0, 8);
+        Owner::<T>::insert(&old, &coldkey);
+        let cost: u64 = Subtensor::<T>::get_key_swap_cost();
+        Subtensor::<T>::add_balance_to_coldkey_account(&coldkey, cost);
+
+        #[extrinsic_call]
+        _(RawOrigin::Signed(coldkey.clone()), old.clone(), new.clone());
+    }
+
+    #[benchmark]
+    fn try_associate_hotkey() {
+        let coldkey: T::AccountId = whitelisted_caller();
+        let hot: T::AccountId = account("A", 0, 1);
+
+        #[extrinsic_call]
+        _(RawOrigin::Signed(coldkey.clone()), hot.clone());
+    }
+
+    #[benchmark]
+    fn unstake_all() {
+        let coldkey: T::AccountId = whitelisted_caller();
+        let hotkey: T::AccountId = account("A", 0, 14);
+        Subtensor::<T>::create_account_if_non_existent(&coldkey, &hotkey);
+
+        #[extrinsic_call]
+        _(RawOrigin::Signed(coldkey.clone()), hotkey.clone());
+    }
+
+    #[benchmark]
+    fn unstake_all_alpha() {
+        let coldkey: T::AccountId = whitelisted_caller();
+        let hotkey: T::AccountId = account("A", 0, 15);
+        Subtensor::<T>::create_account_if_non_existent(&coldkey, &hotkey);
+
+        #[extrinsic_call]
+        _(RawOrigin::Signed(coldkey.clone()), hotkey.clone());
+    }
 }
diff --git a/pallets/subtensor/src/coinbase/root.rs b/pallets/subtensor/src/coinbase/root.rs
index b2633fa381..1f3a91b339 100644
--- a/pallets/subtensor/src/coinbase/root.rs
+++ b/pallets/subtensor/src/coinbase/root.rs
@@ -665,4 +665,10 @@ impl<T: Config> Pallet<T> {
         let halved_interval: I64F64 = interval.saturating_mul(halving);
         halved_interval.saturating_to_num::<u64>()
     }
+    pub fn get_rate_limited_last_block(rate_limit_key: &RateLimitKey) -> u64 {
+        LastRateLimitedBlock::<T>::get(rate_limit_key)
+    }
+    pub fn set_rate_limited_last_block(rate_limit_key: &RateLimitKey, block: u64) {
+        LastRateLimitedBlock::<T>::set(rate_limit_key, block);
+    }
 }
diff --git a/pallets/subtensor/src/epoch/math.rs b/pallets/subtensor/src/epoch/math.rs
index b4f23ced83..11930bf26e 100644
--- a/pallets/subtensor/src/epoch/math.rs
+++ b/pallets/subtensor/src/epoch/math.rs
@@ -55,6 +55,11 @@ pub fn u16_proportion_to_fixed(x: u16) -> I32F32 {
     I32F32::saturating_from_num(x).safe_div(I32F32::saturating_from_num(u16::MAX))
 }
 
+#[allow(dead_code)]
+pub fn fixed_to_fixed_u16_proportion(x: I32F32) -> I32F32 {
+    x.safe_div(I32F32::saturating_from_num(u16::MAX))
+}
+
 #[allow(dead_code)]
 pub fn fixed_proportion_to_u16(x: I32F32) -> u16 {
     fixed_to_u16(x.saturating_mul(I32F32::saturating_from_num(u16::MAX)))
@@ -80,11 +85,6 @@ pub fn vec_fixed64_to_u64(vec: Vec<I64F64>) -> Vec<u64> {
     vec.into_iter().map(fixed64_to_u64).collect()
 }
 
-#[allow(dead_code)]
-pub fn vec_u16_proportions_to_fixed(vec: Vec<u16>) -> Vec<I32F32> {
-    vec.into_iter().map(u16_proportion_to_fixed).collect()
-}
-
 #[allow(dead_code)]
 pub fn vec_fixed_proportions_to_u16(vec: Vec<I32F32>) -> Vec<u16> {
     vec.into_iter().map(fixed_proportion_to_u16).collect()
@@ -246,6 +246,22 @@ pub fn is_topk(vector: &[I32F32], k: usize) -> Vec<bool> {
     result
 }
 
+// Returns a bool vector where an item is true if the vector item is in topk values and is non-zero.
+#[allow(dead_code, clippy::indexing_slicing)]
+pub fn is_topk_nonzero(vector: &[I32F32], k: usize) -> Vec<bool> {
+    let n: usize = vector.len();
+    let mut result: Vec<bool> = vector.iter().map(|&elem| elem != I32F32::from(0)).collect();
+    if n < k {
+        return result;
+    }
+    let mut idxs: Vec<usize> = (0..n).collect();
+    idxs.sort_by_key(|&idx| &vector[idx]); // ascending stable sort
+    for &idx in idxs.iter().take(n.saturating_sub(k)) {
+        result[idx] = false;
+    }
+    result
+}
+
 // Returns a normalized (sum to 1 except 0) copy of the input vector.
 #[allow(dead_code)]
 pub fn normalize(x: &[I32F32]) -> Vec<I32F32> {
@@ -1207,6 +1223,48 @@ pub fn interpolate_sparse(
     result
 }
 
+// Element-wise product of two vectors.
+#[allow(dead_code)]
+pub fn vec_mul(a: &[I32F32], b: &[I32F32]) -> Vec<I32F32> {
+    a.iter()
+        .zip(b.iter())
+        .map(|(x, y)| x.checked_mul(*y).unwrap_or_default())
+        .collect()
+}
+
+// Element-wise product of matrix and vector
+pub fn mat_vec_mul(matrix: &[Vec<I32F32>], vector: &[I32F32]) -> Vec<Vec<I32F32>> {
+    let Some(first_row) = matrix.first() else {
+        return vec![vec![]];
+    };
+    if first_row.is_empty() {
+        return vec![vec![]];
+    }
+    matrix.iter().map(|row| vec_mul(row, vector)).collect()
+}
+
+// Element-wise product of matrix and vector
+#[allow(dead_code)]
+pub fn mat_vec_mul_sparse(
+    matrix: &[Vec<(u16, I32F32)>],
+    vector: &[I32F32],
+) -> Vec<Vec<(u16, I32F32)>> {
+    let mut result: Vec<Vec<(u16, I32F32)>> = vec![vec![]; matrix.len()];
+    for (i, matrix_row) in matrix.iter().enumerate() {
+        for (j, value) in matrix_row.iter() {
+            if let Some(vector_value) = vector.get(*j as usize) {
+                let new_value = value.saturating_mul(*vector_value);
+                if new_value != I32F32::saturating_from_num(0.0) {
+                    if let Some(result_row) = result.get_mut(i) {
+                        result_row.push((*j, new_value));
+                    }
+                }
+            }
+        }
+    }
+    result
+}
+
 // Element-wise product of two matrices.
 #[allow(dead_code)]
 pub fn hadamard(mat1: &[Vec<I32F32>], mat2: &[Vec<I32F32>]) -> Vec<Vec<I32F32>> {
@@ -1259,6 +1317,20 @@ pub fn hadamard_sparse(
     result
 }
 
+/// Clamp the input value between high and low.
+/// Note: assumes high > low
+pub fn clamp_value(value: I32F32, low: I32F32, high: I32F32) -> I32F32 {
+    // First, clamp the value to ensure it does not exceed the upper bound (high).
+    // If the value is greater than 'high', it will be set to 'high'.
+    // otherwise it remains unchanged.
+    value
+        .min(I32F32::from_num(high))
+        // Next, clamp the value to ensure it does not go below the lower bound (_low).
+        // If the value (after the first clamping) is less than 'low', it will be set to 'low'.
+        // otherwise it remains unchanged.
+        .max(I32F32::from_num(low))
+}
+
 // Return matrix exponential moving average: `alpha * a_ij + one_minus_alpha * b_ij`.
 // `alpha` is the EMA coefficient, how much to add of the new observation, typically small,
 // higher alpha discounts older observations faster.
@@ -1319,144 +1391,117 @@ pub fn mat_ema_sparse(
     result
 }
 
-// Return sparse matrix only with elements >= threshold of an input sparse matrix.
-#[allow(dead_code)]
-pub fn sparse_threshold(w: &[Vec<(u16, I32F32)>], threshold: I32F32) -> Vec<Vec<(u16, I32F32)>> {
-    w.iter()
-        .map(|row| {
-            row.iter()
-                .filter(|(_, weight)| *weight >= threshold)
-                .copied()
-                .collect()
-        })
-        .collect()
-}
-
 /// Calculates the exponential moving average (EMA) for a sparse matrix using dynamic alpha values.
 #[allow(dead_code)]
-pub fn mat_ema_alpha_vec_sparse(
+pub fn mat_ema_alpha_sparse(
     new: &[Vec<(u16, I32F32)>],
     old: &[Vec<(u16, I32F32)>],
-    alpha: &[I32F32],
+    alpha: &[Vec<I32F32>],
 ) -> Vec<Vec<(u16, I32F32)>> {
-    // Ensure the new and old matrices have the same number of rows.
+    // Ensure dimensions match.
     assert!(new.len() == old.len());
-    let n = new.len(); // Assume square matrix, rows=cols
+    assert!(new.len() == alpha.len());
+
+    // The output vector of rows.
+    let mut result: Vec<Vec<(u16, I32F32)>> = Vec::with_capacity(new.len());
     let zero: I32F32 = I32F32::saturating_from_num(0.0);
-    let mut result: Vec<Vec<(u16, I32F32)>> = vec![vec![]; n];
+    let one = I32F32::saturating_from_num(1.0);
 
     // Iterate over each row of the matrices.
-    for (i, (new_row, old_row)) in new.iter().zip(old).enumerate() {
+    for ((new_row, old_row), alpha_row) in new.iter().zip(old).zip(alpha) {
         // Initialize a row of zeros for the result matrix.
-        let mut row: Vec<I32F32> = vec![zero; n];
+        let mut decayed_values: Vec<I32F32> = vec![zero; alpha_row.len()];
 
-        // Process the new matrix values.
-        for (j, value) in new_row.iter() {
-            // Retrieve the alpha value for the current column.
-            let alpha_val: I32F32 = alpha.get(*j as usize).copied().unwrap_or(zero);
-            // Compute the EMA component for the new value using saturating multiplication.
-            if let Some(row_val) = row.get_mut(*j as usize) {
-                *row_val = alpha_val.saturating_mul(*value);
-            }
-            log::trace!(
-                "new[{}][{}] * alpha[{}] = {} * {} = {}",
-                i,
-                j,
-                j,
-                value,
-                alpha_val,
-                row.get(*j as usize).unwrap_or(&zero)
-            );
-        }
+        let mut result_row: Vec<(u16, I32F32)> = Vec::new();
 
         // Process the old matrix values.
-        for (j, value) in old_row.iter() {
-            // Retrieve the alpha value for the current column.
-            let alpha_val: I32F32 = alpha.get(*j as usize).copied().unwrap_or(zero);
-            // Calculate the complement of the alpha value using saturating subtraction.
-            let one_minus_alpha: I32F32 =
-                I32F32::saturating_from_num(1.0).saturating_sub(alpha_val);
-            // Compute the EMA component for the old value and add it to the row using saturating operations.
-            if let Some(row_val) = row.get_mut(*j as usize) {
-                *row_val = row_val.saturating_add(one_minus_alpha.saturating_mul(*value));
+        for (j, old_val) in old_row.iter() {
+            if let (Some(alpha_val), Some(decayed_val)) = (
+                alpha_row.get(*j as usize),
+                decayed_values.get_mut(*j as usize),
+            ) {
+                // Calculate the complement of the alpha value
+                let one_minus_alpha = one.saturating_sub(*alpha_val);
+                // Bonds_decayed = Bonds * (1 - alpha)
+                *decayed_val = one_minus_alpha.saturating_mul(*old_val);
             }
-            log::trace!(
-                "old[{}][{}] * (1 - alpha[{}]) = {} * {} = {}",
-                i,
-                j,
-                j,
-                value,
-                one_minus_alpha,
-                one_minus_alpha.saturating_mul(*value)
-            );
-        }
+        }
 
-        // Collect the non-zero values into the result matrix.
-        for (j, value) in row.iter().enumerate() {
-            if *value > zero {
-                if let Some(result_row) = result.get_mut(i) {
-                    result_row.push((j as u16, *value));
-                    log::trace!("result[{}][{}] = {}", i, j, value);
+        // Process the new matrix values.
+        for (j, new_val) in new_row.iter() {
+            if let (Some(alpha_val), Some(decayed_val)) =
+                (alpha_row.get(*j as usize), decayed_values.get(*j as usize))
+            {
+                // Each validator can increase bonds by at most clamped_alpha per epoch towards the cap
+                // Validators allocate their purchase across miners based on weights
+                let purchase_increment = alpha_val.saturating_mul(*new_val).max(zero);
+                let result_val = decayed_val.saturating_add(purchase_increment).min(one);
+
+                if result_val > zero {
+                    result_row.push((*j, result_val));
                 }
             }
         }
+        result.push(result_row);
     }
 
     // Return the computed EMA sparse matrix.
     result
 }
 
-/// Return matrix exponential moving average: `alpha_j * a_ij + one_minus_alpha_j * b_ij`.
-/// `alpha_` is the EMA coefficient passed as a vector per column.
+/// Calculates the exponential moving average (EMA) for a dense matrix using dynamic alpha values.
 #[allow(dead_code)]
-pub fn mat_ema_alpha_vec(
-    new: &[Vec<I32F32>],
-    old: &[Vec<I32F32>],
-    alpha: &[I32F32],
+pub fn mat_ema_alpha(
+    new: &[Vec<I32F32>],  // Weights
+    old: &[Vec<I32F32>],  // Bonds
+    alpha: &[Vec<I32F32>],
 ) -> Vec<Vec<I32F32>> {
     // Check if the new matrix is empty or its first row is empty.
     if new.is_empty() || new.first().is_none_or(|row| row.is_empty()) {
         return vec![vec![]; 1];
     }
 
-    // Ensure the dimensions of the new and old matrices match.
+    // Ensure the dimensions of the new, old and alpha matrices match.
     assert!(new.len() == old.len());
-    assert!(new.first().map_or(0, |row| row.len()) == alpha.len());
+    assert!(new.len() == alpha.len());
 
     // Initialize the result matrix with zeros, having the same dimensions as the new matrix.
-    let mut result: Vec<Vec<I32F32>> =
-        vec![
-            vec![I32F32::saturating_from_num(0.0); new.first().map_or(0, |row| row.len())];
-            new.len()
-        ];
+    let zero: I32F32 = I32F32::saturating_from_num(0.0);
+    let one = I32F32::saturating_from_num(1.0);
+
+    let mut result: Vec<Vec<I32F32>> = Vec::with_capacity(new.len());
 
     // Iterate over each row of the matrices.
-    for (i, (new_row, old_row)) in new.iter().zip(old).enumerate() {
-        // Ensure the current row of the new and old matrices have the same length.
+    for ((new_row, old_row), alpha_row) in new.iter().zip(old).zip(alpha) {
         assert!(new_row.len() == old_row.len());
+        assert!(new_row.len() == alpha_row.len());
+        let mut result_row: Vec<I32F32> = Vec::new();
 
         // Iterate over each column of the current row.
-        for (j, &alpha_val) in alpha.iter().enumerate().take(new_row.len()) {
-            // Calculate the complement of the alpha value using saturating subtraction.
-            let one_minus_alpha = I32F32::saturating_from_num(1.0).saturating_sub(alpha_val);
-
+        for j in 0..new_row.len() {
             // Compute the EMA for the current element using saturating operations.
-            if let (Some(new_val), Some(old_val), Some(result_val)) = (
-                new_row.get(j),
-                old_row.get(j),
-                result.get_mut(i).and_then(|row| row.get_mut(j)),
-            ) {
-                *result_val = alpha_val
-                    .saturating_mul(*new_val)
-                    .saturating_add(one_minus_alpha.saturating_mul(*old_val));
+            if let (Some(new_val), Some(old_val), Some(alpha_val)) =
+                (new_row.get(j), old_row.get(j), alpha_row.get(j))
+            {
+                // Calculate the complement of the alpha value
+                let one_minus_alpha = one.saturating_sub(*alpha_val);
+
+                // Bonds_decayed = Bonds * (1 - alpha)
+                let decayed_val = one_minus_alpha.saturating_mul(*old_val);
+
+                // Each validator can increase bonds by at most clamped_alpha per epoch towards the cap
+                // Validators allocate their purchase across miners based on weights
+                let purchase_increment = alpha_val.saturating_mul(*new_val).max(zero);
+                let result_val = decayed_val.saturating_add(purchase_increment).min(one);
+                result_row.push(result_val);
             }
         }
+        result.push(result_row);
     }
 
     // Return the computed EMA matrix.
     result
 }
-
 /// Return the quantile of a vector of I32F32 values.
 pub fn quantile(data: &[I32F32], quantile: f64) -> I32F32 {
     // Clone the input data to avoid modifying the original vector.
diff --git a/pallets/subtensor/src/epoch/run_epoch.rs b/pallets/subtensor/src/epoch/run_epoch.rs
index 62027f9636..c550135123 100644
--- a/pallets/subtensor/src/epoch/run_epoch.rs
+++ b/pallets/subtensor/src/epoch/run_epoch.rs
@@ -12,7 +12,7 @@ impl<T: Config> Pallet<T> {
     pub fn epoch_dense(netuid: u16, rao_emission: u64) -> Vec<(T::AccountId, u64, u64)> {
         // Get subnetwork size.
         let n: u16 = Self::get_subnetwork_n(netuid);
-        log::trace!("n:\n{:?}\n", n);
+        log::trace!("n: {:?}", n);
 
         // ======================
         // == Active & updated ==
@@ -20,7 +20,7 @@
         // Get current block.
         let current_block: u64 = Self::get_current_block_as_u64();
-        log::trace!("current_block:\n{:?}\n", current_block);
+        log::trace!("current_block: {:?}", current_block);
 
         // Get tempo.
let tempo: u64 = Self::get_tempo(netuid).into(); @@ -28,25 +28,25 @@ impl Pallet { // Get activity cutoff. let activity_cutoff: u64 = Self::get_activity_cutoff(netuid) as u64; - log::trace!("activity_cutoff:\n{:?}\n", activity_cutoff); + log::trace!("activity_cutoff: {:?}", activity_cutoff); // Last update vector. let last_update: Vec = Self::get_last_update(netuid); - log::trace!("Last update:\n{:?}\n", &last_update); + log::trace!("Last update: {:?}", &last_update); // Inactive mask. let inactive: Vec = last_update .iter() .map(|updated| updated.saturating_add(activity_cutoff) < current_block) .collect(); - log::trace!("Inactive:\n{:?}\n", inactive.clone()); + log::trace!("Inactive: {:?}", inactive.clone()); // Logical negation of inactive. let active: Vec = inactive.iter().map(|&b| !b).collect(); // Block at registration vector (block when each neuron was most recently registered). let block_at_registration: Vec = Self::get_block_at_registration(netuid); - log::trace!("Block at registration:\n{:?}\n", &block_at_registration); + log::trace!("Block at registration: {:?}", &block_at_registration); // Outdated matrix, outdated_ij=True if i has last updated (weights) after j has last registered. let outdated: Vec> = last_update @@ -58,7 +58,7 @@ impl Pallet { .collect() }) .collect(); - log::trace!("Outdated:\n{:?}\n", &outdated); + log::trace!("Outdated: {:?}", &outdated); // Recently registered matrix, recently_ij=True if last_tempo was *before* j was last registered. // Mask if: the last tempo block happened *before* the registration block @@ -68,7 +68,7 @@ impl Pallet { .iter() .map(|registered| last_tempo <= *registered) .collect(); - log::trace!("Recently registered:\n{:?}\n", &recently_registered); + log::trace!("Recently registered: {:?}", &recently_registered); // =========== // == Stake == @@ -80,11 +80,27 @@ impl Pallet { log::trace!("hotkeys: {:?}", &hotkeys); // Access network stake as normalized vector. - let (mut total_stake, _alpha_stake, _tao_stake): (Vec, Vec, Vec) = + let (total_stake, _alpha_stake, _tao_stake): (Vec, Vec, Vec) = Self::get_stake_weights_for_network(netuid); - inplace_normalize_64(&mut total_stake); - let stake: Vec = vec_fixed64_to_fixed32(total_stake); - log::trace!("S:\n{:?}\n", &stake); + + // Get the minimum stake required. + let min_stake = Self::get_stake_threshold(); + + // Set stake of validators that doesn't meet the staking threshold to 0 as filter. + let mut filtered_stake: Vec = total_stake + .iter() + .map(|&s| { + if fixed64_to_u64(s) < min_stake { + return I64F64::from(0); + } + s + }) + .collect(); + log::debug!("Filtered stake: {:?}", &filtered_stake); + + inplace_normalize_64(&mut filtered_stake); + let stake: Vec = vec_fixed64_to_fixed32(filtered_stake); + log::trace!("S: {:?}", &stake); // ======================= // == Validator permits == @@ -102,7 +118,8 @@ impl Pallet { log::trace!("max_allowed_validators: {:?}", max_allowed_validators); // Get new validator permits. - let new_validator_permits: Vec = is_topk(&stake, max_allowed_validators as usize); + let new_validator_permits: Vec = + is_topk_nonzero(&stake, max_allowed_validators as usize); log::trace!("new_validator_permits: {:?}", new_validator_permits); // ================== @@ -119,7 +136,7 @@ impl Pallet { // Normalize active stake. inplace_normalize(&mut active_stake); - log::trace!("S:\n{:?}\n", &active_stake); + log::trace!("S: {:?}", &active_stake); // ============= // == Weights == @@ -130,7 +147,7 @@ impl Pallet { // Access network weights row unnormalized. 
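The stake pre-processing above zeroes out any stake below the network threshold and only then normalizes, so sub-threshold keys contribute nothing to consensus, and is_topk_nonzero additionally refuses validator permits to zero-stake entries. A simplified sketch of that filtering, with f64 in place of the fixed-point types and illustrative helper names:

/// Zero out stake below `min_stake`, then normalize to sum 1 (sketch).
fn filter_and_normalize_stake(stake: &[u64], min_stake: u64) -> Vec<f64> {
    let filtered: Vec<f64> = stake
        .iter()
        .map(|&s| if s < min_stake { 0.0 } else { s as f64 })
        .collect();
    let total: f64 = filtered.iter().sum();
    if total == 0.0 {
        return filtered;
    }
    filtered.iter().map(|s| s / total).collect()
}

/// Grant permits to the top-k stakers while skipping zero stake
/// (the intent of is_topk_nonzero, sketched).
fn topk_nonzero_permits(stake: &[f64], k: usize) -> Vec<bool> {
    let mut idx: Vec<usize> = (0..stake.len()).collect();
    idx.sort_by(|&a, &b| stake[b].partial_cmp(&stake[a]).unwrap());
    let mut permits = vec![false; stake.len()];
    for &i in idx.iter().take(k) {
        if stake[i] > 0.0 {
            permits[i] = true;
        }
    }
    permits
}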
let mut weights: Vec> = Self::get_weights(netuid); - log::trace!("W:\n{:?}\n", &weights); + log::trace!("W: {:?}", &weights); // Mask weights that are not from permitted validators. inplace_mask_rows(&validator_forbids, &mut weights); @@ -144,15 +161,15 @@ impl Pallet { } inplace_mask_diag(&mut weights); - log::trace!("W (permit+diag):\n{:?}\n", &weights); + log::trace!("W (permit+diag): {:?}", &weights); // Mask outdated weights: remove weights referring to deregistered neurons. inplace_mask_matrix(&outdated, &mut weights); - log::trace!("W (permit+diag+outdate):\n{:?}\n", &weights); + log::trace!("W (permit+diag+outdate): {:?}", &weights); // Normalize remaining weights. inplace_row_normalize(&mut weights); - log::trace!("W (mask+norm):\n{:?}\n", &weights); + log::trace!("W (mask+norm): {:?}", &weights); // ================================ // == Consensus, Validator Trust == @@ -183,7 +200,7 @@ impl Pallet { inplace_normalize(&mut ranks); let incentive: Vec = ranks.clone(); - log::trace!("I:\n{:?}\n", &incentive); + log::trace!("I: {:?}", &incentive); // ========================= // == Bonds and Dividends == @@ -197,26 +214,61 @@ impl Pallet { let weights_for_bonds: Vec> = interpolate(&weights, &clipped_weights, bonds_penalty); - // Access network bonds. - let mut bonds: Vec> = Self::get_bonds(netuid); - // Remove bonds referring to neurons that have registered since last tempo. - inplace_mask_cols(&recently_registered, &mut bonds); // mask recently registered bonds - inplace_col_normalize(&mut bonds); // sum_i b_ij = 1 - log::trace!("B:\n{:?}\n", &bonds); - - // Compute bonds delta column normalized. - let mut bonds_delta: Vec> = row_hadamard(&weights_for_bonds, &active_stake); // ΔB = W◦S - inplace_col_normalize(&mut bonds_delta); // sum_i b_ij = 1 - log::trace!("ΔB:\n{:?}\n", &bonds_delta); - // Compute the Exponential Moving Average (EMA) of bonds. - let mut ema_bonds = Self::compute_ema_bonds(netuid, consensus.clone(), bonds_delta, bonds); - inplace_col_normalize(&mut ema_bonds); // sum_i b_ij = 1 - log::trace!("emaB:\n{:?}\n", &ema_bonds); - - // Compute dividends: d_i = SUM(j) b_ij * inc_j - let mut dividends: Vec = matmul_transpose(&ema_bonds, &incentive); - inplace_normalize(&mut dividends); - log::trace!("D:\n{:?}\n", ÷nds); + let mut dividends: Vec; + let mut ema_bonds: Vec>; + if Yuma3On::::get(netuid) { + // Access network bonds. + let mut bonds: Vec> = Self::get_bonds_fixed_proportion(netuid); + inplace_mask_cols(&recently_registered, &mut bonds); // mask outdated bonds + log::trace!("B: {:?}", &bonds); + + // Compute the Exponential Moving Average (EMA) of bonds. + ema_bonds = Self::compute_bonds(netuid, &weights_for_bonds, &bonds, &consensus); + log::trace!("emaB: {:?}", &ema_bonds); + + // Normalize EMA bonds. + let mut ema_bonds_norm = ema_bonds.clone(); + inplace_col_normalize(&mut ema_bonds_norm); + log::trace!("emaB norm: {:?}", &ema_bonds_norm); + + // # === Dividend Calculation=== + let total_bonds_per_validator: Vec = + row_sum(&mat_vec_mul(&ema_bonds_norm, &incentive)); + log::trace!( + "total_bonds_per_validator: {:?}", + &total_bonds_per_validator + ); + + dividends = vec_mul(&total_bonds_per_validator, &active_stake); + inplace_normalize(&mut dividends); + log::trace!("D: {:?}", ÷nds); + } else { + // original Yuma - liquid alpha disabled + // Access network bonds. + let mut bonds: Vec> = Self::get_bonds(netuid); + // Remove bonds referring to neurons that have registered since last tempo. 
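In the Yuma3 branch above, dividends are no longer matmul_transpose(B, incentive) directly: each validator's total bond payout (row sum of the column-normalized EMA bonds scaled by incentive) is multiplied by its active stake and then normalized. A rough sketch of that dense path, with f64 standing in for I32F32 and hypothetical helper names:

/// Yuma3-style dividends (dense sketch): d_i is proportional to
/// stake_i * sum_j B_ij * incentive_j, where B is the column-normalized EMA bond matrix.
fn yuma3_dividends(
    ema_bonds_norm: &[Vec<f64>],
    incentive: &[f64],
    active_stake: &[f64],
) -> Vec<f64> {
    let mut dividends: Vec<f64> = ema_bonds_norm
        .iter()
        .zip(active_stake)
        .map(|(row, stake)| {
            let payout: f64 = row.iter().zip(incentive).map(|(b, inc)| b * inc).sum();
            payout * stake
        })
        .collect();
    let total: f64 = dividends.iter().sum();
    if total > 0.0 {
        for d in dividends.iter_mut() {
            *d /= total;
        }
    }
    dividends
}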
+ inplace_mask_cols(&recently_registered, &mut bonds); // mask recently registered bonds + inplace_col_normalize(&mut bonds); // sum_i b_ij = 1 + log::trace!("B: {:?}", &bonds); + + // Compute bonds delta column normalized. + let mut bonds_delta: Vec> = row_hadamard(&weights_for_bonds, &active_stake); // ΔB = W◦S + inplace_col_normalize(&mut bonds_delta); // sum_i b_ij = 1 + log::trace!("ΔB: {:?}", &bonds_delta); + + // Compute the Exponential Moving Average (EMA) of bonds. + ema_bonds = Self::compute_ema_bonds_normal(&bonds_delta, &bonds, netuid); + inplace_col_normalize(&mut ema_bonds); // sum_i b_ij = 1 + log::trace!("emaB: {:?}", &ema_bonds); + + // Compute dividends: d_i = SUM(j) b_ij * inc_j + dividends = matmul_transpose(&ema_bonds, &incentive); + inplace_normalize(&mut dividends); + log::trace!("Dividends: {:?}", ÷nds); + + // Column max-upscale EMA bonds for storage: max_i w_ij = 1. + inplace_col_max_upscale(&mut ema_bonds); + } // ================================= // == Emission and Pruning scores == @@ -341,8 +393,6 @@ impl Pallet { ValidatorTrust::::insert(netuid, cloned_validator_trust); ValidatorPermit::::insert(netuid, new_validator_permits.clone()); - // Column max-upscale EMA bonds for storage: max_i w_ij = 1. - inplace_col_max_upscale(&mut ema_bonds); new_validator_permits .iter() .zip(validator_permits) @@ -437,10 +487,26 @@ impl Pallet { log::debug!("hotkeys: {:?}", &hotkeys); // Access network stake as normalized vector. - let (mut total_stake, _alpha_stake, _tao_stake): (Vec, Vec, Vec) = + let (total_stake, _alpha_stake, _tao_stake): (Vec, Vec, Vec) = Self::get_stake_weights_for_network(netuid); - inplace_normalize_64(&mut total_stake); - let stake: Vec = vec_fixed64_to_fixed32(total_stake); + + // Get the minimum stake required. + let min_stake = Self::get_stake_threshold(); + + // Set stake of validators that doesn't meet the staking threshold to 0 as filter. + let mut filtered_stake: Vec = total_stake + .iter() + .map(|&s| { + if fixed64_to_u64(s) < min_stake { + return I64F64::from(0); + } + s + }) + .collect(); + log::debug!("Filtered stake: {:?}", &filtered_stake); + + inplace_normalize_64(&mut filtered_stake); + let stake: Vec = vec_fixed64_to_fixed32(filtered_stake); log::debug!("Normalised Stake: {:?}", &stake); // ======================= @@ -459,7 +525,8 @@ impl Pallet { log::trace!("max_allowed_validators: {:?}", max_allowed_validators); // Get new validator permits. - let new_validator_permits: Vec = is_topk(&stake, max_allowed_validators as usize); + let new_validator_permits: Vec = + is_topk_nonzero(&stake, max_allowed_validators as usize); log::trace!("new_validator_permits: {:?}", new_validator_permits); // ================== @@ -476,7 +543,7 @@ impl Pallet { // Normalize active stake. inplace_normalize(&mut active_stake); - log::debug!("Active Stake:\n{:?}\n", &active_stake); + log::trace!("Active Stake: {:?}", &active_stake); // ============= // == Weights == @@ -545,7 +612,7 @@ impl Pallet { // Compute server trust: ratio of rank after vs. rank before. let trust: Vec = vecdiv(&ranks, &preranks); // range: I32F32(0, 1) - log::trace!("T: {:?}", &trust); + log::trace!("Trust: {:?}", &trust); inplace_normalize(&mut ranks); // range: I32F32(0, 1) let incentive: Vec = ranks.clone(); @@ -563,47 +630,92 @@ impl Pallet { let weights_for_bonds: Vec> = interpolate_sparse(&weights, &clipped_weights, n, bonds_penalty); - // Access network bonds. 
- let mut bonds: Vec> = Self::get_bonds_sparse(netuid); - log::trace!("B: {:?}", &bonds); + let mut dividends: Vec; + let mut ema_bonds: Vec>; + if Yuma3On::::get(netuid) { + // Access network bonds. + let mut bonds = Self::get_bonds_sparse_fixed_proportion(netuid); + log::trace!("Bonds: {:?}", &bonds); + + // Remove bonds referring to neurons that have registered since last tempo. + // Mask if: the last tempo block happened *before* the registration block + // ==> last_tempo <= registered + let last_tempo: u64 = current_block.saturating_sub(tempo); + bonds = scalar_vec_mask_sparse_matrix( + &bonds, + last_tempo, + &block_at_registration, + &|last_tempo, registered| last_tempo <= registered, + ); + log::trace!("Bonds: (mask) {:?}", &bonds); + + // Compute the Exponential Moving Average (EMA) of bonds. + log::trace!("weights_for_bonds: {:?}", &weights_for_bonds); + ema_bonds = Self::compute_bonds_sparse(netuid, &weights_for_bonds, &bonds, &consensus); + log::trace!("emaB: {:?}", &ema_bonds); + + // Normalize EMA bonds. + let mut ema_bonds_norm = ema_bonds.clone(); + inplace_col_normalize_sparse(&mut ema_bonds_norm, n); // sum_i b_ij = 1 + log::trace!("emaB norm: {:?}", &ema_bonds_norm); + + // # === Dividend Calculation=== + let total_bonds_per_validator: Vec = + row_sum_sparse(&mat_vec_mul_sparse(&ema_bonds_norm, &incentive)); + log::trace!( + "total_bonds_per_validator: {:?}", + &total_bonds_per_validator + ); - // Remove bonds referring to neurons that have registered since last tempo. - // Mask if: the last tempo block happened *before* the registration block - // ==> last_tempo <= registered - let last_tempo: u64 = current_block.saturating_sub(tempo); - bonds = scalar_vec_mask_sparse_matrix( - &bonds, - last_tempo, - &block_at_registration, - &|last_tempo, registered| last_tempo <= registered, - ); - log::trace!("B (outdatedmask): {:?}", &bonds); - - // Normalize remaining bonds: sum_i b_ij = 1. - inplace_col_normalize_sparse(&mut bonds, n); - log::trace!("B (mask+norm): {:?}", &bonds); - - // Compute bonds delta column normalized. - let mut bonds_delta: Vec> = - row_hadamard_sparse(&weights_for_bonds, &active_stake); // ΔB = W◦S (outdated W masked) - log::trace!("ΔB: {:?}", &bonds_delta); - - // Normalize bonds delta. - inplace_col_normalize_sparse(&mut bonds_delta, n); // sum_i b_ij = 1 - log::trace!("ΔB (norm): {:?}", &bonds_delta); - - // Compute the Exponential Moving Average (EMA) of bonds. - let mut ema_bonds = - Self::compute_ema_bonds_sparse(netuid, consensus.clone(), bonds_delta, bonds); - // Normalize EMA bonds. - inplace_col_normalize_sparse(&mut ema_bonds, n); // sum_i b_ij = 1 - log::trace!("Exponential Moving Average Bonds: {:?}", &ema_bonds); - - // Compute dividends: d_i = SUM(j) b_ij * inc_j. - // range: I32F32(0, 1) - let mut dividends: Vec = matmul_transpose_sparse(&ema_bonds, &incentive); - inplace_normalize(&mut dividends); - log::trace!("Dividends: {:?}", ÷nds); + dividends = vec_mul(&total_bonds_per_validator, &active_stake); + inplace_normalize(&mut dividends); + log::trace!("Dividends: {:?}", ÷nds); + } else { + // original Yuma - liquid alpha disabled + // Access network bonds. + let mut bonds: Vec> = Self::get_bonds_sparse(netuid); + log::trace!("B: {:?}", &bonds); + + // Remove bonds referring to neurons that have registered since last tempo. 
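In the sparse path, bonds toward neurons registered at or after the last tempo are dropped before the EMA via scalar_vec_mask_sparse_matrix with the predicate last_tempo <= registered. A small sketch of that column mask over sparse rows of (uid, bond) pairs, with illustrative names and f64 values:

/// Drop sparse entries whose column (miner uid) registered at or after `last_tempo` (sketch).
fn mask_recently_registered(
    bonds: &[Vec<(u16, f64)>],
    block_at_registration: &[u64],
    last_tempo: u64,
) -> Vec<Vec<(u16, f64)>> {
    bonds
        .iter()
        .map(|row| {
            row.iter()
                .filter(|(j, _)| {
                    // Keep the bond only if the miner registered strictly before last_tempo.
                    block_at_registration
                        .get(*j as usize)
                        .map_or(false, |&registered| last_tempo > registered)
                })
                .copied()
                .collect()
        })
        .collect()
}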
+ // Mask if: the last tempo block happened *before* the registration block + // ==> last_tempo <= registered + let last_tempo: u64 = current_block.saturating_sub(tempo); + bonds = scalar_vec_mask_sparse_matrix( + &bonds, + last_tempo, + &block_at_registration, + &|last_tempo, registered| last_tempo <= registered, + ); + log::trace!("B (outdatedmask): {:?}", &bonds); + + // Normalize remaining bonds: sum_i b_ij = 1. + inplace_col_normalize_sparse(&mut bonds, n); + log::trace!("B (mask+norm): {:?}", &bonds); + + // Compute bonds delta column normalized. + let mut bonds_delta: Vec> = + row_hadamard_sparse(&weights_for_bonds, &active_stake); // ΔB = W◦S (outdated W masked) + log::trace!("ΔB: {:?}", &bonds_delta); + + // Normalize bonds delta. + inplace_col_normalize_sparse(&mut bonds_delta, n); // sum_i b_ij = 1 + log::trace!("ΔB (norm): {:?}", &bonds_delta); + + // Compute the Exponential Moving Average (EMA) of bonds. + ema_bonds = Self::compute_ema_bonds_normal_sparse(&bonds_delta, &bonds, netuid); + // Normalize EMA bonds. + inplace_col_normalize_sparse(&mut ema_bonds, n); // sum_i b_ij = 1 + log::trace!("Exponential Moving Average Bonds: {:?}", &ema_bonds); + + // Compute dividends: d_i = SUM(j) b_ij * inc_j. + // range: I32F32(0, 1) + dividends = matmul_transpose_sparse(&ema_bonds, &incentive); + inplace_normalize(&mut dividends); + log::trace!("Dividends: {:?}", ÷nds); + + // Column max-upscale EMA bonds for storage: max_i w_ij = 1. + inplace_col_max_upscale_sparse(&mut ema_bonds, n); + } // ================================= // == Emission and Pruning scores == @@ -734,8 +846,6 @@ impl Pallet { ValidatorTrust::::insert(netuid, cloned_validator_trust); ValidatorPermit::::insert(netuid, new_validator_permits.clone()); - // Column max-upscale EMA bonds for storage: max_i w_ij = 1. - inplace_col_max_upscale_sparse(&mut ema_bonds, n); new_validator_permits .iter() .zip(validator_permits) @@ -848,7 +958,7 @@ impl Pallet { bonds .get_mut(uid_i as usize) .expect("uid_i is filtered to be less than n; qed") - .push((uid_j, I32F32::saturating_from_num(bonds_ij))); + .push((uid_j, u16_to_fixed(bonds_ij))); } } bonds @@ -868,186 +978,30 @@ impl Pallet { .expect("uid_i has been filtered to be less than n; qed") .get_mut(uid_j as usize) .expect("uid_j has been filtered to be less than n; qed") = - I32F32::saturating_from_num(bonds_ij); + u16_to_fixed(bonds_ij); } } bonds } - /// Calculate the logistic function parameters 'a' and 'b' based on alpha and consensus values. - /// - /// # Args: - /// * `alpha_high` - The high alpha value. - /// * `alpha_low` - The low alpha value. - /// * `consensus_high` - The high consensus value. - /// * `consensus_low` - The low consensus value. - /// - /// # Returns: - /// A tuple containing the slope 'a' and intercept 'b' for the logistic function. - pub fn calculate_logistic_params( - alpha_high: I32F32, - alpha_low: I32F32, - consensus_high: I32F32, - consensus_low: I32F32, - ) -> (I32F32, I32F32) { - log::trace!("alpha_high: {:?}", alpha_high); - log::trace!("alpha_low: {:?}", alpha_low); - log::trace!("consensus_high: {:?}", consensus_high); - log::trace!("consensus_low: {:?}", consensus_low); - // Check for division by zero - // extra caution to ensure we never divide by zero - if consensus_high <= consensus_low || alpha_low == 0 || alpha_high == 0 { - // Return 0 for both 'a' and 'b' when consensus values are equal - return ( - I32F32::saturating_from_num(0.0), - I32F32::saturating_from_num(0.0), - ); - } - - // Calculate the slope 'a' of the logistic function. 
- // a = (ln((1 / alpha_high - 1)) - ln((1 / alpha_low - 1))) / (consensus_low - consensus_high) - let a = (safe_ln( - (I32F32::saturating_from_num(1.0).safe_div(alpha_high)) - .saturating_sub(I32F32::saturating_from_num(1.0)), - ) - .saturating_sub(safe_ln( - (I32F32::saturating_from_num(1.0).safe_div(alpha_low)) - .saturating_sub(I32F32::saturating_from_num(1.0)), - ))) - .safe_div(consensus_low.saturating_sub(consensus_high)); - log::trace!("a: {:?}", a); - - // Calculate the intercept 'b' of the logistic function. - // b = ln((1 / alpha_low - 1)) + a * consensus_low - let b = safe_ln( - (I32F32::saturating_from_num(1.0).safe_div(alpha_low)) - .saturating_sub(I32F32::saturating_from_num(1.0)), - ) - .saturating_add(a.saturating_mul(consensus_low)); - log::trace!("b: {:?}", b); - - // Return the calculated slope 'a' and intercept 'b'. - (a, b) - } - - /// Compute the alpha values using the logistic function parameters 'a' and 'b'. - /// - /// # Args: - /// * `consensus` - A vector of consensus values. - /// * `a` - The slope of the logistic function. - /// * `b` - The intercept of the logistic function. - /// - /// # Returns: - /// A vector of computed alpha values. - pub fn compute_alpha_values(consensus: &[I32F32], a: I32F32, b: I32F32) -> Vec { - // Compute the alpha values for each consensus value. - let alpha: Vec = consensus - .iter() - .map(|c| { - // Calculate the exponent value for the logistic function. - // exp_val = exp(b - a * c) - let exp_val = safe_exp(b.saturating_sub(a.saturating_mul(*c))); - - // Compute the alpha value using the logistic function formula. - // alpha = 1 / (1 + exp_val) - I32F32::saturating_from_num(1.0) - .safe_div(I32F32::saturating_from_num(1.0).saturating_add(exp_val)) - }) - .collect(); - - // Log the computed alpha values for debugging purposes. - log::trace!("alpha: {:?}", alpha); - - // Return the computed alpha values. - alpha - } - - /// Clamp the alpha values between alpha_high and alpha_low. - /// - /// # Args: - /// * `alpha` - A vector of alpha values. - /// * `alpha_high` - The high alpha value. - /// * `alpha_low` - The low alpha value. - /// - /// # Returns: - /// A vector of clamped alpha values. - pub fn clamp_alpha_values( - alpha: Vec, - alpha_high: I32F32, - alpha_low: I32F32, - ) -> Vec { - let clamped_alpha: Vec = alpha - .iter() - .map(|a| { - // First, clamp the value to ensure it does not exceed the upper bound (alpha_high). - // If 'a' is greater than 'alpha_high', it will be set to 'alpha_high'. - // If 'a' is less than or equal to 'alpha_high', it remains unchanged. - let clamped_a = a - .min(&alpha_high) - // Next, clamp the value to ensure it does not go below the lower bound (alpha_low). - // If the value (after the first clamping) is less than 'alpha_low', it will be set to 'alpha_low'. - // If the value is greater than or equal to 'alpha_low', it remains unchanged. - .max(&alpha_low); - // Return the clamped value. - *clamped_a - }) - .collect(); - log::trace!("alpha_clamped: {:?}", clamped_alpha); - clamped_alpha - } - - /// Compute the Exponential Moving Average (EMA) of bonds using the clamped alpha values for a sparse matrix. - /// - /// # Args: - /// * `bonds_delta` - A vector of bond deltas. - /// * `bonds` - A vector of bonds. - /// * `alpha` - A vector of clamped alpha values. - /// - /// # Returns: - /// A vector of EMA bonds. 
- pub fn compute_ema_bonds_with_liquid_alpha_sparse( - bonds_delta: &[Vec<(u16, I32F32)>], - bonds: &[Vec<(u16, I32F32)>], - alpha: Vec, - ) -> Vec> { - // Compute the Exponential Moving Average (EMA) of bonds using the provided clamped alpha values. - let ema_bonds = mat_ema_alpha_vec_sparse(bonds_delta, bonds, &alpha); - - // Log the computed EMA bonds for debugging purposes. - log::trace!( - "Exponential Moving Average Bonds Liquid Alpha: {:?}", - ema_bonds - ); - - // Return the computed EMA bonds. - ema_bonds + pub fn get_bonds_fixed_proportion(netuid: u16) -> Vec> { + let mut bonds = Self::get_bonds(netuid); + bonds.iter_mut().for_each(|bonds_row| { + bonds_row + .iter_mut() + .for_each(|bond| *bond = fixed_to_fixed_u16_proportion(*bond)); + }); + bonds } - /// Compute the Exponential Moving Average (EMA) of bonds using the clamped alpha values. - /// - /// # Args: - /// * `bonds_delta` - A vector of bond deltas. - /// * `bonds` - A vector of bonds. - /// * `alpha` - A vector of clamped alpha values. - /// - /// # Returns: - /// A vector of EMA bonds. - pub fn compute_ema_bonds_with_liquid_alpha( - bonds_delta: &[Vec], - bonds: &[Vec], - alpha: Vec, - ) -> Vec> { - // Compute the Exponential Moving Average (EMA) of bonds using the provided clamped alpha values. - let ema_bonds = mat_ema_alpha_vec(bonds_delta, bonds, &alpha); - - // Log the computed EMA bonds for debugging purposes. - log::trace!( - "Exponential Moving Average Bonds Liquid Alpha: {:?}", - ema_bonds - ); - - // Return the computed EMA bonds. - ema_bonds + pub fn get_bonds_sparse_fixed_proportion(netuid: u16) -> Vec> { + let mut bonds = Self::get_bonds_sparse(netuid); + bonds.iter_mut().for_each(|bonds_row| { + bonds_row + .iter_mut() + .for_each(|(_, bond)| *bond = fixed_to_fixed_u16_proportion(*bond)); + }); + bonds } /// Compute the Exponential Moving Average (EMA) of bonds using a normal alpha value for a sparse matrix. @@ -1118,93 +1072,63 @@ impl Pallet { ema_bonds } - /// Compute the Exponential Moving Average (EMA) of bonds based on the Liquid Alpha setting for a sparse matrix. + /// Compute the Exponential Moving Average (EMA) of bonds based on the Liquid Alpha setting /// /// # Args: /// * `netuid` - The network ID. - /// * `consensus` - A vector of consensus values. - /// * `bonds_delta` - A vector of bond deltas. + /// * `weights` - A vector of weights. /// * `bonds` - A vector of bonds. + /// * `consensus` - A vector of consensus values. + /// * `active_stake` - A vector of active stake values. /// /// # Returns: /// A vector of EMA bonds. - pub fn compute_ema_bonds_sparse( + pub fn compute_bonds( netuid: u16, - consensus: Vec, - bonds_delta: Vec>, - bonds: Vec>, - ) -> Vec> { + weights: &[Vec], // weights_for_bonds + bonds: &[Vec], + consensus: &[I32F32], + ) -> Vec> { // Check if Liquid Alpha is enabled, consensus is not empty, and contains non-zero values. - // This way we avoid the quantil function panic. if LiquidAlphaOn::::get(netuid) && !consensus.is_empty() && consensus .iter() .any(|&c| c != I32F32::saturating_from_num(0)) { - // Calculate the 75th percentile (high) and 25th percentile (low) of the consensus values. - let consensus_high = quantile(&consensus, 0.75); - let consensus_low = quantile(&consensus, 0.25); - // Further check if the high and low consensus values meet the required conditions. 
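get_bonds_fixed_proportion and get_bonds_sparse_fixed_proportion above re-interpret the stored u16 bonds as proportions before handing them to Yuma3, rather than as raw fixed-point magnitudes. A sketch of that re-scaling under the assumption that the proportion is simply the stored value divided by u16::MAX (f64 in place of I32F32; fixed_to_fixed_u16_proportion itself is not shown in this patch):

/// Interpret a stored u16 bond as a proportion in [0, 1] (assumed u16::MAX scaling).
fn u16_bond_to_proportion(bond: u16) -> f64 {
    bond as f64 / u16::MAX as f64
}

/// Convert a whole sparse bond matrix to proportions (sketch).
fn bonds_to_proportions(bonds: &[Vec<(u16, u16)>]) -> Vec<Vec<(u16, f64)>> {
    bonds
        .iter()
        .map(|row| {
            row.iter()
                .map(|&(j, b)| (j, u16_bond_to_proportion(b)))
                .collect()
        })
        .collect()
}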
- if (consensus_high > consensus_low) || consensus_high != 0 || consensus_low < 0 { - // if (consensus_high > consensus_low) || consensus_high != 0) || consensus_low != 0 { - // if (consensus_high > consensus_low) || consensus_low != 0 { - log::trace!("Using Liquid Alpha"); - - // Get the high and low alpha values for the network. - let (alpha_low, alpha_high): (I32F32, I32F32) = Self::get_alpha_values_32(netuid); - log::trace!("alpha_low: {:?} alpha_high: {:?}", alpha_low, alpha_high); - - // Calculate the logistic function parameters 'a' and 'b' based on alpha and consensus values. - let (a, b) = Self::calculate_logistic_params( - alpha_high, - alpha_low, - consensus_high, - consensus_low, - ); - - // Compute the alpha values using the logistic function parameters. - let alpha = Self::compute_alpha_values(&consensus, a, b); - - // Clamp the alpha values between alpha_high and alpha_low. - let clamped_alpha = Self::clamp_alpha_values(alpha, alpha_high, alpha_low); + // Liquid Alpha is enabled, compute the liquid alphas matrix. + let alphas: Vec> = + Self::compute_liquid_alpha_values(netuid, weights, bonds, consensus); + log::trace!("alphas: {:?}", &alphas); - // Compute the Exponential Moving Average (EMA) of bonds using the clamped alpha values. - Self::compute_ema_bonds_with_liquid_alpha_sparse( - &bonds_delta, - &bonds, - clamped_alpha, - ) - } else { - log::trace!("Using Bonds Moving Average"); - - // Compute the EMA of bonds using a normal alpha value. - Self::compute_ema_bonds_normal_sparse(&bonds_delta, &bonds, netuid) - } + // Compute the Exponential Moving Average (EMA) of bonds using the provided clamped alpha values. + mat_ema_alpha(weights, bonds, &alphas) } else { - log::trace!("Using Bonds Moving Average"); + // Liquid Alpha is disabled, compute the liquid alpha value. + let alpha: I32F32 = Self::compute_disabled_liquid_alpha(netuid); - // Compute the EMA of bonds using a normal alpha value. - Self::compute_ema_bonds_normal_sparse(&bonds_delta, &bonds, netuid) + // Compute the Exponential Moving Average (EMA) of bonds using the calculated alpha value. + mat_ema(weights, bonds, alpha) } } - /// Compute the Exponential Moving Average (EMA) of bonds based on the Liquid Alpha setting. + /// Compute the Exponential Moving Average (EMA) of bonds based on the Liquid Alpha setting for a sparse matrix. /// /// # Args: /// * `netuid` - The network ID. - /// * `consensus` - A vector of consensus values. - /// * `bonds_delta` - A vector of bond deltas. + /// * `weights` - A vector of weights. /// * `bonds` - A vector of bonds. + /// * `consensus` - A vector of consensus values. + /// * `active_stake` - A vector of active stake values. /// /// # Returns: /// A vector of EMA bonds. - pub fn compute_ema_bonds( + pub fn compute_bonds_sparse( netuid: u16, - consensus: Vec, - bonds_delta: Vec>, - bonds: Vec>, - ) -> Vec> { + weights: &[Vec<(u16, I32F32)>], + bonds: &[Vec<(u16, I32F32)>], + consensus: &[I32F32], + ) -> Vec> { // Check if Liquid Alpha is enabled, consensus is not empty, and contains non-zero values. if LiquidAlphaOn::::get(netuid) && !consensus.is_empty() @@ -1212,46 +1136,181 @@ impl Pallet { .iter() .any(|&c| c != I32F32::saturating_from_num(0)) { - // Calculate the 75th percentile (high) and 25th percentile (low) of the consensus values. - let consensus_high = quantile(&consensus, 0.75); - let consensus_low = quantile(&consensus, 0.25); + // Liquid Alpha is enabled, compute the liquid alphas matrix. 
+ let alphas: Vec> = + Self::compute_liquid_alpha_values_sparse(netuid, weights, bonds, consensus); + log::trace!("alphas: {:?}", &alphas); - // Further check if the high and low consensus values meet the required conditions. - if (consensus_high > consensus_low) || consensus_high != 0 || consensus_low < 0 { - log::trace!("Using Liquid Alpha"); + // Compute the Exponential Moving Average (EMA) of bonds using the provided clamped alpha values. + mat_ema_alpha_sparse(weights, bonds, &alphas) + } else { + // Liquid Alpha is disabled, compute the liquid alpha value. + let alpha: I32F32 = Self::compute_disabled_liquid_alpha(netuid); - // Get the high and low alpha values for the network. - let (alpha_low, alpha_high): (I32F32, I32F32) = Self::get_alpha_values_32(netuid); - log::trace!("alpha_low: {:?} alpha_high: {:?}", alpha_low, alpha_high); + // Compute the Exponential Moving Average (EMA) of bonds using the calculated alpha value. + mat_ema_sparse(weights, bonds, alpha) + } + } - // Calculate the logistic function parameters 'a' and 'b' based on alpha and consensus values. - let (a, b) = Self::calculate_logistic_params( - alpha_high, - alpha_low, - consensus_high, - consensus_low, - ); + /// Compute liquid alphas matrix + /// There is a separate alpha param for each validator-miner binding + /// + /// # Args: + /// * `netuid` - The network ID. + /// * `weights` - A vector of weights. + /// * `bonds` - A vector of bonds. + /// * `consensus` - A vector of consensus values. + /// + /// # Returns: + /// A matrix of alphas + pub fn compute_liquid_alpha_values( + netuid: u16, + weights: &[Vec], // current epoch weights + bonds: &[Vec], // previous epoch bonds + consensus: &[I32F32], // previous epoch consensus weights + ) -> Vec> { + assert!(weights.len() == bonds.len()); - // Compute the alpha values using the logistic function parameters. - let alpha = Self::compute_alpha_values(&consensus, a, b); + // Get the high and low alpha values for the network. + let alpha_sigmoid_steepness: I32F32 = Self::get_alpha_sigmoid_steepness(netuid); + let (alpha_low, alpha_high): (I32F32, I32F32) = Self::get_alpha_values_32(netuid); - // Clamp the alpha values between alpha_high and alpha_low. - let clamped_alpha = Self::clamp_alpha_values(alpha, alpha_high, alpha_low); + let mut alphas = Vec::new(); - // Compute the Exponential Moving Average (EMA) of bonds using the clamped alpha values. - Self::compute_ema_bonds_with_liquid_alpha(&bonds_delta, &bonds, clamped_alpha) - } else { - log::trace!("Using Bonds Moving Average"); + for (w_row, b_row) in weights.iter().zip(bonds.iter()) { + let mut row_alphas = Vec::new(); - // Compute the EMA of bonds using a normal alpha value. - Self::compute_ema_bonds_normal(&bonds_delta, &bonds, netuid) + for ((weight, bond), consensus_val) in + w_row.iter().zip(b_row.iter()).zip(consensus.iter()) + { + let alpha = Self::alpha_sigmoid( + *consensus_val, + *weight, + *bond, + alpha_low, + alpha_high, + alpha_sigmoid_steepness, + ); + row_alphas.push(alpha); } - } else { - log::trace!("Using Bonds Moving Average"); + alphas.push(row_alphas); + } + alphas + } + + /// Compute liquid alphas sparse matrix + /// There is a separate alpha param for each validator-miner binding + /// + /// # Args: + /// * `netuid` - The network ID. + /// * `weights` - A vector of weights. + /// * `bonds` - A vector of bonds. + /// * `consensus` - A vector of consensus values. 
+ /// + /// # Returns: + /// A dense matrix of alphas + pub fn compute_liquid_alpha_values_sparse( + netuid: u16, + weights: &[Vec<(u16, I32F32)>], // current epoch weights + bonds: &[Vec<(u16, I32F32)>], // previous epoch bonds + consensus: &[I32F32], // previous epoch consensus weights + ) -> Vec> { + assert!(weights.len() == bonds.len()); + + let alpha_sigmoid_steepness: I32F32 = Self::get_alpha_sigmoid_steepness(netuid); + let (alpha_low, alpha_high): (I32F32, I32F32) = Self::get_alpha_values_32(netuid); + + let mut alphas = Vec::with_capacity(consensus.len()); + let zero = I32F32::from_num(0.0); + + // iterate over rows + for (w_row, b_row) in weights.iter().zip(bonds.iter()) { + let mut row_alphas = Vec::with_capacity(w_row.len()); + let mut w_iter = w_row.iter().peekable(); + let mut b_iter = b_row.iter().peekable(); + for (j_pos, consensus_val) in consensus.iter().enumerate() { + let j = j_pos as u16; + + let mut weight = zero; + while let Some(&&(i, val)) = w_iter.peek() { + if i < j { + w_iter.next(); + } else { + if i == j { + weight = val; + } + break; + } + } - // Compute the EMA of bonds using a normal alpha value. - Self::compute_ema_bonds_normal(&bonds_delta, &bonds, netuid) + let mut bond = zero; + while let Some(&&(i, val)) = b_iter.peek() { + if i < j { + b_iter.next(); + } else { + if i == j { + bond = val; + } + break; + } + } + + let alpha = Self::alpha_sigmoid( + *consensus_val, + weight, + bond, + alpha_low, + alpha_high, + alpha_sigmoid_steepness, + ); + row_alphas.push(alpha); + } + alphas.push(row_alphas); } + alphas + } + + /// Helper function to compute the alpha value using a sigmoid function. + pub fn alpha_sigmoid( + consensus: I32F32, + weight: I32F32, + bond: I32F32, + alpha_low: I32F32, + alpha_high: I32F32, + alpha_sigmoid_steepness: I32F32, + ) -> I32F32 { + let zero = I32F32::from_num(0.0); + let one = I32F32::from_num(1.0); + + let diff_buy = clamp_value(weight.saturating_sub(consensus), zero, one); + let diff_sell = clamp_value(bond.saturating_sub(weight), zero, one); + let combined_diff = if weight >= bond { diff_buy } else { diff_sell }; + + // sigmoid = 1. / (1. + e^(-steepness * (combined_diff - 0.5))) + let sigmoid = one.saturating_div( + one.saturating_add(safe_exp( + I32F32::from_num(-1).saturating_mul( + alpha_sigmoid_steepness + .saturating_mul(combined_diff.saturating_sub(I32F32::from_num(0.5))), + ), + )), + ); + let alpha = + alpha_low.saturating_add(sigmoid.saturating_mul(alpha_high.saturating_sub(alpha_low))); + + clamp_value(alpha, alpha_low, alpha_high) + } + + pub fn compute_disabled_liquid_alpha(netuid: u16) -> I32F32 { + // Retrieve the bonds moving average for the given network ID and scale it down. + let bonds_moving_average: I64F64 = I64F64::from_num(Self::get_bonds_moving_average(netuid)) + .saturating_div(I64F64::from_num(1_000_000)); + + // Calculate the alpha value for the EMA calculation. + // Alpha is derived by subtracting the scaled bonds moving average from 1. 
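alpha_sigmoid above maps each (consensus, weight, bond) triple to a per-pair alpha: the relevant difference is weight - consensus when buying (weight >= bond) and bond - weight when selling, pushed through a logistic of the configured steepness centred at 0.5, and the result is interpolated between alpha_low and alpha_high. A float sketch of the same formula, with std exp standing in for safe_exp:

/// Per-pair liquid alpha via a sigmoid of the weight/consensus/bond differences (sketch).
fn alpha_sigmoid(
    consensus: f64,
    weight: f64,
    bond: f64,
    alpha_low: f64,
    alpha_high: f64,
    steepness: f64,
) -> f64 {
    let diff_buy = (weight - consensus).clamp(0.0, 1.0);
    let diff_sell = (bond - weight).clamp(0.0, 1.0);
    let combined_diff = if weight >= bond { diff_buy } else { diff_sell };
    // sigmoid = 1 / (1 + e^(-steepness * (combined_diff - 0.5)))
    let sigmoid = 1.0 / (1.0 + (-steepness * (combined_diff - 0.5)).exp());
    (alpha_low + sigmoid * (alpha_high - alpha_low)).clamp(alpha_low, alpha_high)
}

// With liquid alpha disabled, a single alpha is derived from the bonds moving average:
// alpha = 1 - bonds_moving_average / 1_000_000 (e.g. 900_000 gives alpha = 0.1).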
+ let alpha: I32F32 = + I32F32::from_num(1).saturating_sub(I32F32::from_num(bonds_moving_average)); + alpha } pub fn do_set_alpha_values( @@ -1294,4 +1353,39 @@ impl Pallet { ); Ok(()) } + + pub fn do_reset_bonds(netuid: u16, account_id: &T::AccountId) -> Result<(), DispatchError> { + // check bonds reset enabled for this subnet + let bonds_reset_enabled: bool = Self::get_bonds_reset(netuid); + if !bonds_reset_enabled { + return Ok(()); + } + + if let Ok(uid) = Self::get_uid_for_net_and_hotkey(netuid, account_id) { + for (i, bonds_vec) in + as IterableStorageDoubleMap>>::iter_prefix( + netuid, + ) + { + Bonds::::insert( + netuid, + i, + bonds_vec + .clone() + .iter() + .filter(|(j, _)| *j != uid) + .collect::>(), + ); + } + log::debug!("Reset bonds for {:?}, netuid {:?}", account_id, netuid); + } else { + log::warn!( + "Uid not found for {:?}, netuid {:?} - skipping bonds reset", + account_id, + netuid + ); + } + + Ok(()) + } } diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index 16a8994ff6..f4f511e74a 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -4,9 +4,9 @@ // Edit this file to define custom logic or remove it if it is not needed. // Learn more about FRAME and the core library of Substrate FRAME pallets: // -pub use pallet::*; use frame_system::{self as system, ensure_signed}; +pub use pallet::*; use frame_support::{ dispatch::{self, DispatchInfo, DispatchResult, DispatchResultWithPostInfo, PostDispatchInfo}, @@ -66,6 +66,7 @@ pub const MAX_CRV3_COMMIT_SIZE_BYTES: u32 = 5000; #[import_section(config::config)] #[frame_support::pallet] pub mod pallet { + use crate::RateLimitKey; use crate::migrations; use frame_support::{ BoundedVec, @@ -268,6 +269,80 @@ pub mod pallet { /// Additional information about the subnet pub additional: Vec, } + + /// Data structure for stake related jobs. + #[derive(Encode, Decode, TypeInfo, Clone, PartialEq, Eq, Debug)] + pub enum StakeJob { + /// Represents a job for "add_stake" operation + AddStake { + /// Hotkey account + hotkey: AccountId, + /// Coldkey account + coldkey: AccountId, + /// Subnet ID + netuid: u16, + /// The amount of stake to be added to the hotkey staking account. + stake_to_be_added: u64, + }, + /// Represents a job for "remove_stake" operation + RemoveStake { + /// Hotkey account + hotkey: AccountId, + /// Coldkey account + coldkey: AccountId, + /// Subnet ID + netuid: u16, + /// Alpha value + alpha_unstaked: u64, + }, + /// Represents a job for "add_stake_limit" operation + AddStakeLimit { + /// Coldkey account + coldkey: AccountId, + /// Hotkey account + hotkey: AccountId, + /// Subnet ID + netuid: u16, + /// The amount of stake to be added to the hotkey staking account. + stake_to_be_added: u64, + /// The limit price expressed in units of RAO per one Alpha. + limit_price: u64, + /// Allows partial execution of the amount. If set to false, this becomes + /// fill or kill type or order. + allow_partial: bool, + }, + /// Represents a job for "remove_stake_limit" operation + RemoveStakeLimit { + /// Coldkey account + coldkey: AccountId, + /// Hotkey account + hotkey: AccountId, + /// Subnet ID + netuid: u16, + /// The amount of stake to be added to the hotkey staking account. + alpha_unstaked: u64, + /// The limit price + limit_price: u64, + /// Allows partial execution of the amount. If set to false, this becomes + /// fill or kill type or order. 
+ allow_partial: bool, + }, + /// Represents a job for "unstake_all" operation + UnstakeAll { + /// Coldkey account + coldkey: AccountId, + /// Hotkey account + hotkey: AccountId, + }, + /// Represents a job for "unstake_all_alpha" operation + UnstakeAllAlpha { + /// Coldkey account + coldkey: AccountId, + /// Hotkey account + hotkey: AccountId, + }, + } + /// ============================ /// ==== Staking + Accounts ==== /// ============================ @@ -566,6 +641,11 @@ pub mod pallet { T::InitialRho::get() } #[pallet::type_value] + /// Default value for alpha sigmoid steepness. + pub fn DefaultAlphaSigmoidSteepness() -> u16 { + T::InitialAlphaSigmoidSteepness::get() + } + #[pallet::type_value] /// Default value for kappa parameter. pub fn DefaultKappa() -> u16 { T::InitialKappa::get() @@ -620,8 +700,13 @@ pub mod pallet { pub fn DefaultBondsPenalty() -> u16 { T::InitialBondsPenalty::get() } + /// Default value for bonds reset - will not reset bonds #[pallet::type_value] + pub fn DefaultBondsResetOn() -> bool { + T::InitialBondsResetOn::get() + } /// Default validator prune length. + #[pallet::type_value] pub fn DefaultValidatorPruneLen() -> u64 { T::InitialValidatorPruneLen::get() } @@ -725,17 +810,27 @@ pub mod pallet { false } #[pallet::type_value] + /// -- ITEM (switches liquid alpha on) + pub fn DefaultYuma3() -> bool { + false + } + #[pallet::type_value] /// (alpha_low: 0.7, alpha_high: 0.9) pub fn DefaultAlphaValues() -> (u16, u16) { (45875, 58982) } - #[pallet::type_value] /// Default value for coldkey swap schedule duration pub fn DefaultColdkeySwapScheduleDuration() -> BlockNumberFor { T::InitialColdkeySwapScheduleDuration::get() } + #[pallet::type_value] + /// Default value for coldkey swap reschedule duration + pub fn DefaultColdkeySwapRescheduleDuration() -> BlockNumberFor { + T::InitialColdkeySwapRescheduleDuration::get() + } + #[pallet::type_value] /// Default value for applying pending items (e.g. childkeys). 
pub fn DefaultPendingCooldown() -> u64 { @@ -800,6 +895,20 @@ pub mod pallet { 360 } + #[pallet::type_value] + /// Default value for coldkey swap scheduled + pub fn DefaultColdkeySwapScheduled() -> (BlockNumberFor, T::AccountId) { + let default_account = T::AccountId::decode(&mut TrailingZeroInput::zeroes()) + .expect("trailing zeroes always produce a valid account ID; qed"); + (BlockNumberFor::::from(0_u32), default_account) + } + + #[pallet::type_value] + /// Default value for setting subnet owner hotkey rate limit + pub fn DefaultSetSNOwnerHotkeyRateLimit() -> u64 { + 50400 + } + #[pallet::storage] pub type MinActivityCutoff = StorageValue<_, u16, ValueQuery, DefaultMinActivityCutoff>; @@ -808,6 +917,10 @@ pub mod pallet { pub type ColdkeySwapScheduleDuration = StorageValue<_, BlockNumberFor, ValueQuery, DefaultColdkeySwapScheduleDuration>; + #[pallet::storage] + pub type ColdkeySwapRescheduleDuration = + StorageValue<_, BlockNumberFor, ValueQuery, DefaultColdkeySwapRescheduleDuration>; + #[pallet::storage] pub type DissolveNetworkScheduleDuration = StorageValue<_, BlockNumberFor, ValueQuery, DefaultDissolveNetworkScheduleDuration>; @@ -816,6 +929,21 @@ pub mod pallet { pub type SenateRequiredStakePercentage = StorageValue<_, u64, ValueQuery, DefaultSenateRequiredStakePercentage>; + #[pallet::storage] + pub type StakeJobs = StorageDoubleMap< + _, + Blake2_128Concat, + BlockNumberFor, // first key: current block number + Twox64Concat, + u64, // second key: unique job ID + StakeJob, + OptionQuery, + >; + + #[pallet::storage] + /// Ensures unique IDs for StakeJobs storage map + pub type NextStakeJobId = StorageValue<_, u64, ValueQuery, DefaultZeroU64>; + /// ============================ /// ==== Staking Variables ==== /// ============================ @@ -939,18 +1067,6 @@ pub mod pallet { ValueQuery, DefaultZeroU64, >; - #[pallet::storage] - /// --- NMAP ( hot, cold, netuid ) --> last_emission_on_hot_cold_net | Returns the last_emission_update_on_hot_cold_net - pub type LastHotkeyColdkeyEmissionOnNetuid = StorageNMap< - _, - ( - NMapKey, // hot - NMapKey, // cold - NMapKey, // subnet - ), - u64, // Stake - ValueQuery, - >; /// ========================== /// ==== Staking Counters ==== @@ -968,8 +1084,6 @@ pub mod pallet { pub type TotalIssuance = StorageValue<_, u64, ValueQuery, DefaultTotalIssuance>; #[pallet::storage] // --- ITEM ( total_stake ) pub type TotalStake = StorageValue<_, u64, ValueQuery>; - #[pallet::storage] // --- ITEM ( dynamic_block ) -- block when dynamic was turned on. - pub type DynamicBlock = StorageValue<_, u64, ValueQuery>; #[pallet::storage] // --- ITEM ( moving_alpha ) -- subnet moving alpha. pub type SubnetMovingAlpha = StorageValue<_, I96F32, ValueQuery, DefaultMovingAlpha>; #[pallet::storage] // --- MAP ( netuid ) --> moving_price | The subnet moving price. @@ -990,12 +1104,6 @@ pub mod pallet { #[pallet::storage] // --- MAP ( netuid ) --> tao_in_emission | Returns the amount of tao emitted into this subent on the last block. pub type SubnetTaoInEmission = StorageMap<_, Identity, u16, u64, ValueQuery, DefaultZeroU64>; - #[pallet::storage] // --- MAP ( netuid ) --> alpha_sell_per_block | Alpha sold per block. 
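StakeJobs above is a double map keyed by the block number and a monotonically increasing job id (NextStakeJobId), so stake operations can be queued and drained per block. A minimal sketch of that queueing discipline using an ordinary BTreeMap rather than the FRAME storage types, with only two of the job variants shown:

use std::collections::BTreeMap;

#[derive(Debug)]
enum StakeJob {
    AddStake { netuid: u16, amount: u64 },
    RemoveStake { netuid: u16, alpha_unstaked: u64 },
}

#[derive(Default)]
struct StakeQueue {
    // (block number, job id) -> job, mirroring the (BlockNumber, u64) double-map keys.
    jobs: BTreeMap<(u64, u64), StakeJob>,
    next_job_id: u64,
}

impl StakeQueue {
    fn enqueue(&mut self, block: u64, job: StakeJob) {
        let id = self.next_job_id;
        self.next_job_id += 1;
        self.jobs.insert((block, id), job);
    }

    /// Drain every job queued for `block`, in insertion order.
    fn drain_block(&mut self, block: u64) -> Vec<StakeJob> {
        let keys: Vec<(u64, u64)> = self
            .jobs
            .range((block, 0)..=(block, u64::MAX))
            .map(|(k, _)| *k)
            .collect();
        keys.into_iter().filter_map(|k| self.jobs.remove(&k)).collect()
    }
}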
- pub type SubnetAlphaEmissionSell = - StorageMap<_, Identity, u16, u64, ValueQuery, DefaultZeroU64>; - #[pallet::storage] // --- MAP ( netuid ) --> total_stake_at_moment_of_subnet_registration - pub type TotalStakeAtDynamic = - StorageMap<_, Identity, u16, u64, ValueQuery, DefaultZeroU64>; #[pallet::storage] // --- MAP ( netuid ) --> alpha_supply_in_pool | Returns the amount of alpha in the pool. pub type SubnetAlphaIn = StorageMap<_, Identity, u16, u64, ValueQuery, DefaultZeroU64>; @@ -1009,9 +1117,15 @@ pub mod pallet { pub type OwnedHotkeys = StorageMap<_, Blake2_128Concat, T::AccountId, Vec, ValueQuery>; - #[pallet::storage] // --- DMAP ( cold ) --> () | Maps coldkey to if a coldkey swap is scheduled. - pub type ColdkeySwapScheduled = - StorageMap<_, Blake2_128Concat, T::AccountId, (), ValueQuery>; + #[pallet::storage] // --- DMAP ( cold ) --> (block_expected, new_coldkey) | Maps coldkey to the block to swap at and new coldkey. + pub type ColdkeySwapScheduled = StorageMap< + _, + Blake2_128Concat, + T::AccountId, + (BlockNumberFor, T::AccountId), + ValueQuery, + DefaultColdkeySwapScheduled, + >; #[pallet::storage] // --- DMAP ( hot, netuid ) --> alpha | Returns the total amount of alpha a hotkey owns. pub type TotalHotkeyAlpha = StorageDoubleMap< @@ -1061,9 +1175,6 @@ pub mod pallet { #[pallet::storage] // --- MAP ( netuid ) --> token_symbol | Returns the token symbol for a subnet. pub type TokenSymbol = StorageMap<_, Identity, u16, Vec, ValueQuery, DefaultUnicodeVecU8>; - #[pallet::storage] // --- MAP ( netuid ) --> subnet_name | Returns the name of the subnet. - pub type SubnetName = - StorageMap<_, Identity, u16, Vec, ValueQuery, DefaultUnicodeVecU8>; /// ============================ /// ==== Global Parameters ===== @@ -1087,10 +1198,6 @@ pub mod pallet { pub type NetworkLastRegistered = StorageValue<_, u64, ValueQuery, DefaultNetworkLastRegistered>; #[pallet::storage] - /// ITEM( network_min_allowed_uids ) - pub type NetworkMinAllowedUids = - StorageValue<_, u16, ValueQuery, DefaultNetworkMinAllowedUids>; - #[pallet::storage] /// ITEM( min_network_lock_cost ) pub type NetworkMinLockCost = StorageValue<_, u64, ValueQuery, DefaultNetworkMinLockCost>; #[pallet::storage] @@ -1114,6 +1221,15 @@ pub mod pallet { pub type WeightsVersionKeyRateLimit = StorageValue<_, u64, ValueQuery, DefaultWeightsVersionKeyRateLimit>; + /// ============================ + /// ==== Rate Limiting ===== + /// ============================ + + #[pallet::storage] + /// --- MAP ( RateLimitKey ) --> Block number in which the last rate limited operation occured + pub type LastRateLimitedBlock = + StorageMap<_, Identity, RateLimitKey, u64, ValueQuery, DefaultZeroU64>; + /// ============================ /// ==== Subnet Locks ===== /// ============================ @@ -1215,12 +1331,13 @@ pub mod pallet { /// --- MAP ( netuid ) --> Rho pub type Rho = StorageMap<_, Identity, u16, u16, ValueQuery, DefaultRho>; #[pallet::storage] + /// --- MAP ( netuid ) --> AlphaSigmoidSteepness + pub type AlphaSigmoidSteepness = + StorageMap<_, Identity, u16, u16, ValueQuery, DefaultAlphaSigmoidSteepness>; + #[pallet::storage] /// --- MAP ( netuid ) --> Kappa pub type Kappa = StorageMap<_, Identity, u16, u16, ValueQuery, DefaultKappa>; #[pallet::storage] - /// --- MAP ( netuid ) --> uid, we use to record uids to prune at next epoch. 
- pub type NeuronsToPruneAtNextEpoch = StorageMap<_, Identity, u16, u16, ValueQuery>; - #[pallet::storage] /// --- MAP ( netuid ) --> registrations_this_interval pub type RegistrationsThisInterval = StorageMap<_, Identity, u16, u16, ValueQuery>; #[pallet::storage] @@ -1271,6 +1388,10 @@ pub mod pallet { /// --- MAP ( netuid ) --> bonds_penalty pub type BondsPenalty = StorageMap<_, Identity, u16, u16, ValueQuery, DefaultBondsPenalty>; + #[pallet::storage] + /// --- MAP ( netuid ) --> bonds_reset + pub type BondsResetOn = + StorageMap<_, Identity, u16, bool, ValueQuery, DefaultBondsResetOn>; /// --- MAP ( netuid ) --> weights_set_rate_limit #[pallet::storage] pub type WeightsSetRateLimit = @@ -1347,9 +1468,15 @@ pub mod pallet { pub type LiquidAlphaOn = StorageMap<_, Blake2_128Concat, u16, bool, ValueQuery, DefaultLiquidAlpha>; #[pallet::storage] + /// --- MAP ( netuid ) --> Whether or not Yuma3 is enabled + pub type Yuma3On = StorageMap<_, Blake2_128Concat, u16, bool, ValueQuery, DefaultYuma3>; + #[pallet::storage] /// MAP ( netuid ) --> (alpha_low, alpha_high) pub type AlphaValues = StorageMap<_, Identity, u16, (u16, u16), ValueQuery, DefaultAlphaValues>; + #[pallet::storage] + /// --- MAP ( netuid ) --> If subtoken trading enabled + pub type SubtokenEnabled = StorageMap<_, Identity, u16, bool, ValueQuery, DefaultFalse>; /// ======================================= /// ==== Subnetwork Consensus Storage ==== @@ -1656,6 +1783,15 @@ pub mod pallet { } true } + + /// Ensure subtoken enalbed + pub fn ensure_subtoken_enabled(subnet: u16) -> DispatchResult { + ensure!( + SubtokenEnabled::::get(subnet), + Error::::SubtokenDisabled + ); + Ok(()) + } } } @@ -1692,6 +1828,7 @@ pub enum CustomTransactionError { ServingRateLimitExceeded, InvalidPort, BadRequest, + ZeroMaxAmount, } impl From for u8 { @@ -1712,6 +1849,7 @@ impl From for u8 { CustomTransactionError::ServingRateLimitExceeded => 12, CustomTransactionError::InvalidPort => 13, CustomTransactionError::BadRequest => 255, + CustomTransactionError::ZeroMaxAmount => 14, } } } @@ -1988,8 +2126,13 @@ where .into(); } - // Calcaulate the maximum amount that can be executed with price limit - let max_amount = Pallet::::get_max_amount_add(*netuid, *limit_price); + // Calculate the maximum amount that can be executed with price limit + let Ok(max_amount) = Pallet::::get_max_amount_add(*netuid, *limit_price) else { + return InvalidTransaction::Custom( + CustomTransactionError::ZeroMaxAmount.into(), + ) + .into(); + }; // Fully validate the user input Self::result_to_validity( @@ -2029,8 +2172,14 @@ where limit_price, allow_partial, }) => { - // Calcaulate the maximum amount that can be executed with price limit - let max_amount = Pallet::::get_max_amount_remove(*netuid, *limit_price); + // Calculate the maximum amount that can be executed with price limit + let Ok(max_amount) = Pallet::::get_max_amount_remove(*netuid, *limit_price) + else { + return InvalidTransaction::Custom( + CustomTransactionError::ZeroMaxAmount.into(), + ) + .into(); + }; // Fully validate the user input Self::result_to_validity( @@ -2045,6 +2194,112 @@ where Self::get_priority_staking(who, hotkey, *amount_unstaked), ) } + // Some(Call::add_stake_aggregate { + // hotkey, + // netuid, + // amount_staked, + // }) => { + // if ColdkeySwapScheduled::::contains_key(who) { + // return InvalidTransaction::Custom( + // CustomTransactionError::ColdkeyInSwapSchedule.into(), + // ) + // .into(); + // } + // // Fully validate the user input + // Self::result_to_validity( + // 
Pallet::::validate_add_stake( + // who, + // hotkey, + // *netuid, + // *amount_staked, + // *amount_staked, + // false, + // ), + // Self::get_priority_staking(who, hotkey, *amount_staked), + // ) + // } + // Some(Call::add_stake_limit_aggregate { + // hotkey, + // netuid, + // amount_staked, + // limit_price, + // allow_partial, + // }) => { + // if ColdkeySwapScheduled::::contains_key(who) { + // return InvalidTransaction::Custom( + // CustomTransactionError::ColdkeyInSwapSchedule.into(), + // ) + // .into(); + // } + // + // // Calculate the maximum amount that can be executed with price limit + // let Ok(max_amount) = Pallet::::get_max_amount_add(*netuid, *limit_price) else { + // return InvalidTransaction::Custom( + // CustomTransactionError::ZeroMaxAmount.into(), + // ) + // .into(); + // }; + // + // // Fully validate the user input + // Self::result_to_validity( + // Pallet::::validate_add_stake( + // who, + // hotkey, + // *netuid, + // *amount_staked, + // max_amount, + // *allow_partial, + // ), + // Self::get_priority_staking(who, hotkey, *amount_staked), + // ) + // } + // Some(Call::remove_stake_aggregate { + // hotkey, + // netuid, + // amount_unstaked, + // }) => { + // // Fully validate the user input + // Self::result_to_validity( + // Pallet::::validate_remove_stake( + // who, + // hotkey, + // *netuid, + // *amount_unstaked, + // *amount_unstaked, + // false, + // ), + // Self::get_priority_staking(who, hotkey, *amount_unstaked), + // ) + // } + // Some(Call::remove_stake_limit_aggregate { + // hotkey, + // netuid, + // amount_unstaked, + // limit_price, + // allow_partial, + // }) => { + // // Calculate the maximum amount that can be executed with price limit + // let Ok(max_amount) = Pallet::::get_max_amount_remove(*netuid, *limit_price) + // else { + // return InvalidTransaction::Custom( + // CustomTransactionError::ZeroMaxAmount.into(), + // ) + // .into(); + // }; + // + // // Fully validate the user input + // Self::result_to_validity( + // Pallet::::validate_remove_stake( + // who, + // hotkey, + // *netuid, + // *amount_unstaked, + // max_amount, + // *allow_partial, + // ), + // Self::get_priority_staking(who, hotkey, *amount_unstaked), + // ) + // } Some(Call::unstake_all { hotkey }) => { // Fully validate the user input Self::result_to_validity( @@ -2167,11 +2422,16 @@ where } // Get the max amount possible to exchange - let max_amount = Pallet::::get_max_amount_move( + let Ok(max_amount) = Pallet::::get_max_amount_move( *origin_netuid, *destination_netuid, *limit_price, - ); + ) else { + return InvalidTransaction::Custom( + CustomTransactionError::ZeroMaxAmount.into(), + ) + .into(); + }; // Fully validate the user input Self::result_to_validity( @@ -2446,3 +2706,11 @@ impl CollectiveInterface for () { Ok(true) } } + +/// Enum that defines types of rate limited operations for +/// storing last block when this operation occured +#[derive(Encode, Decode, Clone, PartialEq, Eq, Debug, TypeInfo)] +pub enum RateLimitKey { + // The setting sn owner hotkey operation is rate limited per netuid + SetSNOwnerHotkey(u16), +} diff --git a/pallets/subtensor/src/macros/config.rs b/pallets/subtensor/src/macros/config.rs index cf4d97b65b..4377d9f016 100644 --- a/pallets/subtensor/src/macros/config.rs +++ b/pallets/subtensor/src/macros/config.rs @@ -96,12 +96,18 @@ mod config { /// Initial bonds penalty. #[pallet::constant] type InitialBondsPenalty: Get; + /// Initial bonds reset. 
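LastRateLimitedBlock, keyed by the RateLimitKey enum above, generalizes per-operation rate limiting: an operation is allowed only if enough blocks have passed since the stored last block, after which the entry is refreshed (the SN-owner-hotkey case defaults to 50_400 blocks). A self-contained sketch of that pattern over a plain map, not the pallet's storage API:

use std::collections::HashMap;

#[derive(Clone, PartialEq, Eq, Hash, Debug)]
enum RateLimitKey {
    SetSNOwnerHotkey(u16), // rate limited per netuid
}

/// Returns true (and records the block) if the operation is outside its rate limit window.
fn try_rate_limited_op(
    last_block: &mut HashMap<RateLimitKey, u64>,
    key: RateLimitKey,
    current_block: u64,
    limit: u64,
) -> bool {
    let last = last_block.get(&key).copied().unwrap_or(0);
    if current_block.saturating_sub(last) < limit {
        return false; // still inside the rate limit window
    }
    last_block.insert(key, current_block);
    true
}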
+ #[pallet::constant] + type InitialBondsResetOn: Get; /// Initial target registrations per interval. #[pallet::constant] type InitialTargetRegistrationsPerInterval: Get; /// Rho constant. #[pallet::constant] type InitialRho: Get; + /// AlphaSigmoidSteepness constant. + #[pallet::constant] + type InitialAlphaSigmoidSteepness: Get; /// Kappa constant. #[pallet::constant] type InitialKappa: Get; @@ -195,12 +201,17 @@ mod config { /// A flag to indicate if Liquid Alpha is enabled. #[pallet::constant] type LiquidAlphaOn: Get; + /// A flag to indicate if Yuma3 is enabled. + type Yuma3On: Get; // /// Initial hotkey emission tempo. // #[pallet::constant] // type InitialHotkeyEmissionTempo: Get; /// Coldkey swap schedule duartion. #[pallet::constant] type InitialColdkeySwapScheduleDuration: Get>; + /// Coldkey swap reschedule duration. + #[pallet::constant] + type InitialColdkeySwapRescheduleDuration: Get>; /// Dissolve network schedule duration #[pallet::constant] type InitialDissolveNetworkScheduleDuration: Get>; diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index 4ea03c957b..650fb50451 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -77,8 +77,8 @@ mod dispatches { /// * 'MaxWeightExceeded': /// - Attempting to set weights with max value exceeding limit. #[pallet::call_index(0)] - #[pallet::weight((Weight::from_parts(22_060_000_000, 0) - .saturating_add(T::DbWeight::get().reads(4106)) + #[pallet::weight((Weight::from_parts(20_730_000_000, 0) + .saturating_add(T::DbWeight::get().reads(4111)) .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] pub fn set_weights( origin: OriginFor, @@ -120,8 +120,8 @@ mod dispatches { /// - On failure for each failed item in the batch. /// #[pallet::call_index(80)] - #[pallet::weight((Weight::from_parts(22_060_000_000, 0) - .saturating_add(T::DbWeight::get().reads(4106)) + #[pallet::weight((Weight::from_parts(105_100_000, 0) + .saturating_add(T::DbWeight::get().reads(14)) .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] pub fn batch_set_weights( origin: OriginFor, @@ -152,9 +152,9 @@ mod dispatches { /// - Attempting to commit when the user has more than the allowed limit of unrevealed commits. /// #[pallet::call_index(96)] - #[pallet::weight((Weight::from_parts(46_000_000, 0) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)), DispatchClass::Normal, Pays::No))] + #[pallet::weight((Weight::from_parts(72_300_000, 0) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] pub fn commit_weights( origin: T::RuntimeOrigin, netuid: u16, @@ -186,8 +186,8 @@ mod dispatches { /// - On failure for each failed item in the batch. /// #[pallet::call_index(100)] - #[pallet::weight((Weight::from_parts(46_000_000, 0) - .saturating_add(T::DbWeight::get().reads(1)) + #[pallet::weight((Weight::from_parts(89_380_000, 0) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] pub fn batch_commit_weights( origin: OriginFor, @@ -235,9 +235,9 @@ mod dispatches { /// - The revealed hash does not match any committed hash. 
/// #[pallet::call_index(97)] - #[pallet::weight((Weight::from_parts(103_000_000, 0) - .saturating_add(T::DbWeight::get().reads(11)) - .saturating_add(T::DbWeight::get().writes(3)), DispatchClass::Normal, Pays::No))] + #[pallet::weight((Weight::from_parts(122_000_000, 0) + .saturating_add(T::DbWeight::get().reads(16)) + .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] pub fn reveal_weights( origin: T::RuntimeOrigin, netuid: u16, @@ -279,9 +279,9 @@ mod dispatches { /// - Attempting to commit when the user has more than the allowed limit of unrevealed commits. /// #[pallet::call_index(99)] - #[pallet::weight((Weight::from_parts(46_000_000, 0) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)), DispatchClass::Normal, Pays::No))] + #[pallet::weight((Weight::from_parts(73_720_000, 0) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] pub fn commit_crv3_weights( origin: T::RuntimeOrigin, netuid: u16, @@ -331,9 +331,9 @@ mod dispatches { /// * `InvalidInputLengths`: /// - The input vectors are of mismatched lengths. #[pallet::call_index(98)] - #[pallet::weight((Weight::from_parts(367_612_000, 0) - .saturating_add(T::DbWeight::get().reads(14)) - .saturating_add(T::DbWeight::get().writes(3)), DispatchClass::Normal, Pays::No))] + #[pallet::weight((Weight::from_parts(420_500_000, 0) + .saturating_add(T::DbWeight::get().reads(16)) + .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] pub fn batch_reveal_weights( origin: T::RuntimeOrigin, netuid: u16, @@ -413,9 +413,9 @@ mod dispatches { /// - Attempting to set weights with max value exceeding limit. /// #[pallet::call_index(8)] - #[pallet::weight((Weight::from_parts(10_151_000_000, 0) - .saturating_add(T::DbWeight::get().reads(4104)) - .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] + #[pallet::weight((Weight::from_parts(4_068_000, 0) + .saturating_add(T::DbWeight::get().reads(0)) + .saturating_add(T::DbWeight::get().writes(0)), DispatchClass::Normal, Pays::No))] pub fn set_tao_weights( _origin: OriginFor, _netuid: u16, @@ -454,9 +454,9 @@ mod dispatches { /// - The hotkey we are delegating is not owned by the calling coldket. /// #[pallet::call_index(1)] - #[pallet::weight((Weight::from_parts(79_000_000, 0) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(3)), DispatchClass::Normal, Pays::No))] + #[pallet::weight((Weight::from_parts(4_709_000, 0) + .saturating_add(T::DbWeight::get().reads(0)) + .saturating_add(T::DbWeight::get().writes(0)), DispatchClass::Normal, Pays::No))] pub fn become_delegate(_origin: OriginFor, _hotkey: T::AccountId) -> DispatchResult { // DEPRECATED // Self::do_become_delegate(origin, hotkey, Self::get_default_delegate_take()) @@ -498,7 +498,9 @@ mod dispatches { /// - The delegate is setting a take which is not lower than the previous. /// #[pallet::call_index(65)] - #[pallet::weight((0, DispatchClass::Normal, Pays::No))] + #[pallet::weight((Weight::from_parts(37_380_000, 0) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] pub fn decrease_take( origin: OriginFor, hotkey: T::AccountId, @@ -538,7 +540,9 @@ mod dispatches { /// - The delegate is setting a take which is not greater than the previous. 
/// #[pallet::call_index(66)] - #[pallet::weight((0, DispatchClass::Normal, Pays::No))] + #[pallet::weight((Weight::from_parts(44_630_000, 0) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] pub fn increase_take( origin: OriginFor, hotkey: T::AccountId, @@ -560,6 +564,9 @@ mod dispatches { /// * 'hotkey' (T::AccountId): /// - The associated hotkey account. /// + /// * 'netuid' (u16): + /// - Subnetwork UID + /// /// * 'amount_staked' (u64): /// - The amount of stake to be added to the hotkey staking account. /// @@ -578,9 +585,9 @@ mod dispatches { /// - Errors stemming from transaction pallet. /// #[pallet::call_index(2)] - #[pallet::weight((Weight::from_parts(124_000_000, 0) - .saturating_add(T::DbWeight::get().reads(10)) - .saturating_add(T::DbWeight::get().writes(7)), DispatchClass::Normal, Pays::No))] + #[pallet::weight((Weight::from_parts(151_000_000, 0) + .saturating_add(T::DbWeight::get().reads(14)) + .saturating_add(T::DbWeight::get().writes(10)), DispatchClass::Normal, Pays::No))] pub fn add_stake( origin: OriginFor, hotkey: T::AccountId, @@ -601,6 +608,9 @@ mod dispatches { /// * 'hotkey' (T::AccountId): /// - The associated hotkey account. /// + /// * 'netuid' (u16): + /// - Subnetwork UID + /// /// * 'amount_unstaked' (u64): /// - The amount of stake to be added to the hotkey staking account. /// @@ -619,10 +629,9 @@ mod dispatches { /// - Thrown if there is not enough stake on the hotkey to withdwraw this amount. /// #[pallet::call_index(3)] - #[pallet::weight((Weight::from_parts(111_000_000, 0) - .saturating_add(Weight::from_parts(0, 43991)) - .saturating_add(T::DbWeight::get().reads(10)) - .saturating_add(T::DbWeight::get().writes(7)), DispatchClass::Normal, Pays::No))] + #[pallet::weight((Weight::from_parts(196_800_000, 0) + .saturating_add(T::DbWeight::get().reads(19)) + .saturating_add(T::DbWeight::get().writes(10)), DispatchClass::Normal, Pays::No))] pub fn remove_stake( origin: OriginFor, hotkey: T::AccountId, @@ -684,7 +693,7 @@ mod dispatches { /// - Attempting to set prometheus information withing the rate limit min. /// #[pallet::call_index(4)] - #[pallet::weight((Weight::from_parts(46_000_000, 0) + #[pallet::weight((Weight::from_parts(35_670_000, 0) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)), DispatchClass::Normal, Pays::No))] pub fn serve_axon( @@ -768,7 +777,7 @@ mod dispatches { /// - Attempting to set prometheus information withing the rate limit min. /// #[pallet::call_index(40)] - #[pallet::weight((Weight::from_parts(46_000_000, 0) + #[pallet::weight((Weight::from_parts(33_890_000, 0) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)), DispatchClass::Normal, Pays::No))] pub fn serve_axon_tls( @@ -818,7 +827,7 @@ mod dispatches { /// - The ip type v4 or v6. /// #[pallet::call_index(5)] - #[pallet::weight((Weight::from_parts(45_000_000, 0) + #[pallet::weight((Weight::from_parts(31_170_000, 0) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)), DispatchClass::Normal, Pays::No))] pub fn serve_prometheus( @@ -880,9 +889,9 @@ mod dispatches { /// - The seal is incorrect. 
/// #[pallet::call_index(6)] - #[pallet::weight((Weight::from_parts(192_000_000, 0) - .saturating_add(T::DbWeight::get().reads(24)) - .saturating_add(T::DbWeight::get().writes(22)), DispatchClass::Normal, Pays::No))] + #[pallet::weight((Weight::from_parts(216_200_000, 0) + .saturating_add(T::DbWeight::get().reads(26)) + .saturating_add(T::DbWeight::get().writes(23)), DispatchClass::Normal, Pays::No))] pub fn register( origin: OriginFor, netuid: u16, @@ -897,7 +906,7 @@ mod dispatches { /// Register the hotkey to root network #[pallet::call_index(62)] - #[pallet::weight((Weight::from_parts(164_000_000, 0) + #[pallet::weight((Weight::from_parts(145_500_000, 0) .saturating_add(T::DbWeight::get().reads(23)) .saturating_add(T::DbWeight::get().writes(20)), DispatchClass::Normal, Pays::No))] pub fn root_register(origin: OriginFor, hotkey: T::AccountId) -> DispatchResult { @@ -906,18 +915,18 @@ mod dispatches { /// Attempt to adjust the senate membership to include a hotkey #[pallet::call_index(63)] - #[pallet::weight((Weight::from_parts(0, 0) - .saturating_add(T::DbWeight::get().reads(0)) - .saturating_add(T::DbWeight::get().writes(0)), DispatchClass::Normal, Pays::Yes))] + #[pallet::weight((Weight::from_parts(68_100_000, 0) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(4)), DispatchClass::Normal, Pays::Yes))] pub fn adjust_senate(origin: OriginFor, hotkey: T::AccountId) -> DispatchResult { Self::do_adjust_senate(origin, hotkey) } /// User register a new subnetwork via burning token #[pallet::call_index(7)] - #[pallet::weight((Weight::from_parts(177_000_000, 0) - .saturating_add(T::DbWeight::get().reads(26)) - .saturating_add(T::DbWeight::get().writes(24)), DispatchClass::Normal, Pays::No))] + #[pallet::weight((Weight::from_parts(219_400_000, 0) + .saturating_add(T::DbWeight::get().reads(33)) + .saturating_add(T::DbWeight::get().writes(29)), DispatchClass::Normal, Pays::No))] pub fn burned_register( origin: OriginFor, netuid: u16, @@ -928,9 +937,9 @@ mod dispatches { /// The extrinsic for user to change its hotkey #[pallet::call_index(70)] - #[pallet::weight((Weight::from_parts(1_940_000_000, 0) - .saturating_add(T::DbWeight::get().reads(272)) - .saturating_add(T::DbWeight::get().writes(527)), DispatchClass::Operational, Pays::No))] + #[pallet::weight((Weight::from_parts(240_600_000, 0) + .saturating_add(T::DbWeight::get().reads(31)) + .saturating_add(T::DbWeight::get().writes(23)), DispatchClass::Operational, Pays::No))] pub fn swap_hotkey( origin: OriginFor, hotkey: T::AccountId, @@ -955,10 +964,9 @@ mod dispatches { /// /// Weight is calculated based on the number of database reads and writes. 
#[pallet::call_index(71)] - #[pallet::weight((Weight::from_parts(127_713_000, 0) - .saturating_add(Weight::from_parts(0, 11645)) - .saturating_add(T::DbWeight::get().reads(18)) - .saturating_add(T::DbWeight::get().writes(12)), DispatchClass::Operational, Pays::No))] + #[pallet::weight((Weight::from_parts(179_500_000, 0) + .saturating_add(T::DbWeight::get().reads(14)) + .saturating_add(T::DbWeight::get().writes(9)), DispatchClass::Operational, Pays::No))] pub fn swap_coldkey( origin: OriginFor, old_coldkey: T::AccountId, @@ -1003,8 +1011,8 @@ mod dispatches { /// #[pallet::call_index(75)] #[pallet::weight(( - Weight::from_parts(34_000, 0) - .saturating_add(T::DbWeight::get().reads(4)) + Weight::from_parts(49_470_000, 0) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::Yes @@ -1036,7 +1044,8 @@ mod dispatches { /// #[pallet::call_index(69)] #[pallet::weight(( - Weight::from_parts(6_000, 0) + Weight::from_parts(6_873_000, 0) + .saturating_add(T::DbWeight::get().reads(0)) .saturating_add(T::DbWeight::get().writes(1)), DispatchClass::Operational, Pays::No @@ -1064,6 +1073,7 @@ mod dispatches { #[pallet::call_index(76)] #[pallet::weight(( Weight::from_parts(6_000, 0) + .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)), DispatchClass::Operational, Pays::No @@ -1088,6 +1098,7 @@ mod dispatches { #[pallet::call_index(77)] #[pallet::weight(( Weight::from_parts(6_000, 0) + .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)), DispatchClass::Operational, Pays::No @@ -1119,7 +1130,7 @@ mod dispatches { /// ## Complexity /// - O(1). #[pallet::call_index(51)] - #[pallet::weight((Weight::from_parts(0, 0), DispatchClass::Operational, Pays::No))] + #[pallet::weight((Weight::from_parts(111_100_000, 0), DispatchClass::Operational, Pays::No))] pub fn sudo( origin: OriginFor, call: Box, @@ -1166,8 +1177,7 @@ mod dispatches { /// User vote on a proposal #[pallet::call_index(55)] - #[pallet::weight((Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + #[pallet::weight((Weight::from_parts(111_100_000, 0) .saturating_add(T::DbWeight::get().reads(0)) .saturating_add(T::DbWeight::get().writes(0)), DispatchClass::Operational))] pub fn vote( @@ -1182,9 +1192,9 @@ mod dispatches { /// User register a new subnetwork #[pallet::call_index(59)] - #[pallet::weight((Weight::from_parts(157_000_000, 0) - .saturating_add(T::DbWeight::get().reads(16)) - .saturating_add(T::DbWeight::get().writes(30)), DispatchClass::Operational, Pays::No))] + #[pallet::weight((Weight::from_parts(260_500_000, 0) + .saturating_add(T::DbWeight::get().reads(33)) + .saturating_add(T::DbWeight::get().writes(51)), DispatchClass::Operational, Pays::No))] pub fn register_network(origin: OriginFor, hotkey: T::AccountId) -> DispatchResult { Self::do_register_network(origin, &hotkey, 1, None) } @@ -1316,18 +1326,23 @@ mod dispatches { /// - Consider adding checks to prevent scheduling too far into the future. 
/// TODO: Benchmark this call #[pallet::call_index(73)] - #[pallet::weight((Weight::from_parts(119_000_000, 0) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(31)), DispatchClass::Operational, Pays::Yes))] + #[pallet::weight((Weight::from_parts(44_520_000, 0) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Operational, Pays::Yes))] pub fn schedule_swap_coldkey( origin: OriginFor, new_coldkey: T::AccountId, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; - ensure!( - !ColdkeySwapScheduled::::contains_key(&who), - Error::::SwapAlreadyScheduled - ); + let current_block = >::block_number(); + + // If the coldkey has a scheduled swap, check if we can reschedule it + if ColdkeySwapScheduled::::contains_key(&who) { + let (scheduled_block, _scheduled_coldkey) = ColdkeySwapScheduled::::get(&who); + let reschedule_duration = ColdkeySwapRescheduleDuration::::get(); + let redo_when = scheduled_block.saturating_add(reschedule_duration); + ensure!(redo_when <= current_block, Error::::SwapAlreadyScheduled); + } // Calculate the swap cost and ensure sufficient balance let swap_cost = Self::get_key_swap_cost(); @@ -1358,7 +1373,7 @@ mod dispatches { ) .map_err(|_| Error::::FailedToSchedule)?; - ColdkeySwapScheduled::::insert(&who, ()); + ColdkeySwapScheduled::::insert(&who, (when, new_coldkey.clone())); // Emit the SwapScheduled event Self::deposit_event(Event::ColdkeySwapScheduled { old_coldkey: who.clone(), @@ -1449,8 +1464,8 @@ mod dispatches { /// - The ip type v4 or v6. /// #[pallet::call_index(68)] - #[pallet::weight((Weight::from_parts(45_000_000, 0) - .saturating_add(T::DbWeight::get().reads(4)) + #[pallet::weight((Weight::from_parts(32_340_000, 0) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)), DispatchClass::Normal, Pays::Yes))] pub fn set_identity( origin: OriginFor, @@ -1491,8 +1506,8 @@ mod dispatches { /// * `subnet_contact` (Vec): /// - The contact information for the subnet. 
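The schedule_swap_coldkey change above replaces the flat "already scheduled" rejection with a reschedule window: an existing entry only blocks a new schedule until scheduled_block + ColdkeySwapRescheduleDuration has elapsed. A small standalone sketch of that gate (the concrete block numbers are illustrative, not from the pallet):

// Mirrors the ensure!(redo_when <= current_block, SwapAlreadyScheduled) check above.
fn can_reschedule(scheduled_block: u64, reschedule_duration: u64, current_block: u64) -> bool {
    scheduled_block.saturating_add(reschedule_duration) <= current_block
}

// Example: a swap scheduled at block 100 with a 50-block reschedule duration can only be
// replaced from block 150 onward; earlier calls still fail with SwapAlreadyScheduled.
// can_reschedule(100, 50, 120) == false
// can_reschedule(100, 50, 150) == true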
#[pallet::call_index(78)] - #[pallet::weight((Weight::from_parts(45_000_000, 0) - .saturating_add(T::DbWeight::get().reads(4)) + #[pallet::weight((Weight::from_parts(23_080_000, 0) + .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)), DispatchClass::Normal, Pays::Yes))] pub fn set_subnet_identity( origin: OriginFor, @@ -1520,9 +1535,9 @@ mod dispatches { /// User register a new subnetwork #[pallet::call_index(79)] - #[pallet::weight((Weight::from_parts(157_000_000, 0) - .saturating_add(T::DbWeight::get().reads(16)) - .saturating_add(T::DbWeight::get().writes(30)), DispatchClass::Operational, Pays::No))] + #[pallet::weight((Weight::from_parts(239_700_000, 0) + .saturating_add(T::DbWeight::get().reads(32)) + .saturating_add(T::DbWeight::get().writes(50)), DispatchClass::Operational, Pays::No))] pub fn register_network_with_identity( origin: OriginFor, hotkey: T::AccountId, @@ -1557,7 +1572,9 @@ mod dispatches { /// * `TxRateLimitExceeded`: /// - Thrown if key has hit transaction rate limit #[pallet::call_index(83)] - #[pallet::weight((Weight::from_parts(3_000_000, 0).saturating_add(T::DbWeight::get().writes(1)), DispatchClass::Operational, Pays::No))] + #[pallet::weight((Weight::from_parts(36_200_000, 0) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(0)), DispatchClass::Operational, Pays::No))] pub fn unstake_all(origin: OriginFor, hotkey: T::AccountId) -> DispatchResult { Self::do_unstake_all(origin, hotkey) } @@ -1588,7 +1605,9 @@ mod dispatches { /// * `TxRateLimitExceeded`: /// - Thrown if key has hit transaction rate limit #[pallet::call_index(84)] - #[pallet::weight((Weight::from_parts(3_000_000, 0).saturating_add(T::DbWeight::get().writes(1)), DispatchClass::Operational, Pays::No))] + #[pallet::weight((Weight::from_parts(68_730_000, 0) + .saturating_add(T::DbWeight::get().reads(12)) + .saturating_add(T::DbWeight::get().writes(6)), DispatchClass::Operational, Pays::No))] pub fn unstake_all_alpha(origin: OriginFor, hotkey: T::AccountId) -> DispatchResult { Self::do_unstake_all_alpha(origin, hotkey) } @@ -1615,7 +1634,9 @@ mod dispatches { /// - The alpha stake amount to move. /// #[pallet::call_index(85)] - #[pallet::weight((Weight::from_parts(3_000_000, 0).saturating_add(T::DbWeight::get().writes(1)), DispatchClass::Operational, Pays::No))] + #[pallet::weight((Weight::from_parts(196_600_000, 0) + .saturating_add(T::DbWeight::get().reads(17)) + .saturating_add(T::DbWeight::get().writes(13)), DispatchClass::Operational, Pays::No))] pub fn move_stake( origin: T::RuntimeOrigin, origin_hotkey: T::AccountId, @@ -1656,7 +1677,9 @@ mod dispatches { /// # Events /// May emit a `StakeTransferred` event on success. #[pallet::call_index(86)] - #[pallet::weight((Weight::from_parts(3_000_000, 0).saturating_add(T::DbWeight::get().writes(1)), DispatchClass::Operational, Pays::No))] + #[pallet::weight((Weight::from_parts(207_300_000, 0) + .saturating_add(T::DbWeight::get().reads(16)) + .saturating_add(T::DbWeight::get().writes(13)), DispatchClass::Operational, Pays::No))] pub fn transfer_stake( origin: T::RuntimeOrigin, destination_coldkey: T::AccountId, @@ -1696,7 +1719,9 @@ mod dispatches { /// May emit a `StakeSwapped` event on success. 
#[pallet::call_index(87)] #[pallet::weight(( - Weight::from_parts(3_000_000, 0).saturating_add(T::DbWeight::get().writes(1)), + Weight::from_parts(221_600_000, 0) + .saturating_add(T::DbWeight::get().reads(25)) + .saturating_add(T::DbWeight::get().writes(16)), DispatchClass::Operational, Pays::No ))] @@ -1731,6 +1756,9 @@ mod dispatches { /// * 'hotkey' (T::AccountId): /// - The associated hotkey account. /// + /// * 'netuid' (u16): + /// - Subnetwork UID + /// /// * 'amount_staked' (u64): /// - The amount of stake to be added to the hotkey staking account. /// @@ -1756,9 +1784,9 @@ mod dispatches { /// - Errors stemming from transaction pallet. /// #[pallet::call_index(88)] - #[pallet::weight((Weight::from_parts(124_000_000, 0) - .saturating_add(T::DbWeight::get().reads(10)) - .saturating_add(T::DbWeight::get().writes(7)), DispatchClass::Normal, Pays::No))] + #[pallet::weight((Weight::from_parts(159_200_000, 0) + .saturating_add(T::DbWeight::get().reads(13)) + .saturating_add(T::DbWeight::get().writes(10)), DispatchClass::Normal, Pays::No))] pub fn add_stake_limit( origin: OriginFor, hotkey: T::AccountId, @@ -1792,6 +1820,9 @@ mod dispatches { /// * 'hotkey' (T::AccountId): /// - The associated hotkey account. /// + /// * 'netuid' (u16): + /// - Subnetwork UID + /// /// * 'amount_unstaked' (u64): /// - The amount of stake to be added to the hotkey staking account. /// @@ -1817,10 +1848,9 @@ mod dispatches { /// - Thrown if there is not enough stake on the hotkey to withdwraw this amount. /// #[pallet::call_index(89)] - #[pallet::weight((Weight::from_parts(111_000_000, 0) - .saturating_add(Weight::from_parts(0, 43991)) - .saturating_add(T::DbWeight::get().reads(10)) - .saturating_add(T::DbWeight::get().writes(7)), DispatchClass::Normal, Pays::No))] + #[pallet::weight((Weight::from_parts(192_600_000, 0) + .saturating_add(T::DbWeight::get().reads(18)) + .saturating_add(T::DbWeight::get().writes(10)), DispatchClass::Normal, Pays::No))] pub fn remove_stake_limit( origin: OriginFor, hotkey: T::AccountId, @@ -1862,7 +1892,9 @@ mod dispatches { /// May emit a `StakeSwapped` event on success. #[pallet::call_index(90)] #[pallet::weight(( - Weight::from_parts(3_000_000, 0).saturating_add(T::DbWeight::get().writes(1)), + Weight::from_parts(232_000_000, 0) + .saturating_add(T::DbWeight::get().reads(25)) + .saturating_add(T::DbWeight::get().writes(16)), DispatchClass::Operational, Pays::No ))] @@ -1896,7 +1928,7 @@ mod dispatches { /// Will charge based on the weight even if the hotkey is already associated with a coldkey. #[pallet::call_index(91)] #[pallet::weight(( - Weight::from_parts(3_000_000, 0).saturating_add(T::DbWeight::get().reads_writes(3, 3)), + Weight::from_parts(32_560_000, 0).saturating_add(T::DbWeight::get().reads_writes(3, 3)), DispatchClass::Operational, Pays::Yes ))] @@ -1921,7 +1953,7 @@ mod dispatches { /// Emits a `FirstEmissionBlockNumberSet` event on success. #[pallet::call_index(92)] #[pallet::weight(( - Weight::from_parts(3_000_000, 0).saturating_add(T::DbWeight::get().reads_writes(6, 1)), + Weight::from_parts(35_770_000, 0).saturating_add(T::DbWeight::get().reads_writes(4, 2)), DispatchClass::Operational, Pays::Yes ))] @@ -1961,7 +1993,7 @@ mod dispatches { #[pallet::weight(( Weight::from_parts(3_000_000, 0).saturating_add(T::DbWeight::get().reads_writes(2, 1)), DispatchClass::Operational, - Pays::Yes + Pays::No ))] pub fn associate_evm_key( origin: T::RuntimeOrigin, @@ -1986,7 +2018,7 @@ mod dispatches { /// Emits a `TokensRecycled` event on success. 
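Several of the annotations above use the reads_writes shorthand instead of chaining reads and writes; the two spellings produce the same Weight. A tiny sketch with FRAME's RuntimeDbWeight, where the per-operation costs are illustrative rather than taken from this runtime:

use frame_support::weights::RuntimeDbWeight;

fn main() {
    // Illustrative per-operation costs; real runtimes take these from benchmarks.
    let db = RuntimeDbWeight { read: 25_000_000, write: 100_000_000 };
    assert_eq!(db.reads_writes(7, 4), db.reads(7).saturating_add(db.writes(4)));
}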
#[pallet::call_index(101)] #[pallet::weight(( - Weight::from_parts(3_000_000, 0).saturating_add(T::DbWeight::get().reads_writes(3, 2)), + Weight::from_parts(101_000_000, 0).saturating_add(T::DbWeight::get().reads_writes(7, 4)), DispatchClass::Operational, Pays::Yes ))] @@ -2011,7 +2043,7 @@ mod dispatches { /// Emits a `TokensBurned` event on success. #[pallet::call_index(102)] #[pallet::weight(( - Weight::from_parts(2_000_000, 0).saturating_add(T::DbWeight::get().reads_writes(2, 1)), + Weight::from_parts(98_010_000, 0).saturating_add(T::DbWeight::get().reads_writes(7, 3)), DispatchClass::Operational, Pays::Yes ))] @@ -2023,5 +2055,289 @@ mod dispatches { ) -> DispatchResult { Self::do_burn_alpha(origin, hotkey, amount, netuid) } + + // /// --- Adds stake to a hotkey on a subnet with a price limit. + // /// This extrinsic allows to specify the limit price for alpha token + // /// at which or better (lower) the staking should execute. + // /// + // /// In case if slippage occurs and the price shall move beyond the limit + // /// price, the staking order may execute only partially or not execute + // /// at all. + // /// + // /// The operation will be delayed. + // /// + // /// # Args: + // /// * 'origin': (Origin): + // /// - The signature of the caller's coldkey. + // /// + // /// * 'hotkey' (T::AccountId): + // /// - The associated hotkey account. + // /// + // /// * 'netuid' (u16): + // /// - Subnetwork UID + // /// + // /// * 'amount_staked' (u64): + // /// - The amount of stake to be added to the hotkey staking account. + // /// + // /// # Event: + // /// * StakeAdded; + // /// - On the successfully adding stake to a global account. + // /// + // /// # Raises: + // /// * 'NotEnoughBalanceToStake': + // /// - Not enough balance on the coldkey to add onto the global account. + // /// + // /// * 'NonAssociatedColdKey': + // /// - The calling coldkey is not associated with this hotkey. + // /// + // /// * 'BalanceWithdrawalError': + // /// - Errors stemming from transaction pallet. + // /// + // #[pallet::call_index(103)] + // #[pallet::weight((Weight::from_parts(162_000_000, 5127) + // .saturating_add(T::DbWeight::get().reads(15_u64)) + // .saturating_add(T::DbWeight::get().writes(12_u64)), DispatchClass::Normal, Pays::No))] + // pub fn add_stake_aggregate( + // origin: OriginFor, + // hotkey: T::AccountId, + // netuid: u16, + // amount_staked: u64, + // ) -> DispatchResult { + // Self::do_add_stake_aggregate(origin, hotkey, netuid, amount_staked) + // } + + // /// --- Removes stake from a hotkey on a subnet with a price limit. + // /// This extrinsic allows to specify the limit price for alpha token + // /// at which or better (higher) the staking should execute. + // /// + // /// In case if slippage occurs and the price shall move beyond the limit + // /// price, the staking order may execute only partially or not execute + // /// at all. + // /// + // /// The operation will be delayed. + // /// + // /// # Args: + // /// * 'origin': (Origin): + // /// - The signature of the caller's coldkey. + // /// + // /// * 'hotkey' (T::AccountId): + // /// - The associated hotkey account. + // /// + // /// * 'netuid' (u16): + // /// - Subnetwork UID + // /// + // /// * 'amount_unstaked' (u64): + // /// - The amount of stake to be added to the hotkey staking account. + // /// + // /// # Event: + // /// * StakeRemoved; + // /// - On the successfully removing stake from the hotkey account. 
+ // /// + // /// # Raises: + // /// * 'NotRegistered': + // /// - Thrown if the account we are attempting to unstake from is non existent. + // /// + // /// * 'NonAssociatedColdKey': + // /// - Thrown if the coldkey does not own the hotkey we are unstaking from. + // /// + // /// * 'NotEnoughStakeToWithdraw': + // /// - Thrown if there is not enough stake on the hotkey to withdwraw this amount. + // /// + // #[pallet::call_index(104)] + // #[pallet::weight((Weight::from_parts(213_300_000, 10163) + // .saturating_add(T::DbWeight::get().reads(20_u64)) + // .saturating_add(T::DbWeight::get().writes(12_u64)), DispatchClass::Normal, Pays::No))] + // pub fn remove_stake_aggregate( + // origin: OriginFor, + // hotkey: T::AccountId, + // netuid: u16, + // amount_unstaked: u64, + // ) -> DispatchResult { + // Self::do_remove_stake_aggregate(origin, hotkey, netuid, amount_unstaked) + // } + + // /// --- Adds stake to a hotkey on a subnet with a price limit. + // /// This extrinsic allows to specify the limit price for alpha token + // /// at which or better (lower) the staking should execute. + // /// + // /// In case if slippage occurs and the price shall move beyond the limit + // /// price, the staking order may execute only partially or not execute + // /// at all. + // /// + // /// The operation will be delayed. + // /// + // /// # Args: + // /// * 'origin': (Origin): + // /// - The signature of the caller's coldkey. + // /// + // /// * 'hotkey' (T::AccountId): + // /// - The associated hotkey account. + // /// + // /// * 'netuid' (u16): + // /// - Subnetwork UID + // /// + // /// * 'amount_staked' (u64): + // /// - The amount of stake to be added to the hotkey staking account. + // /// + // /// * 'limit_price' (u64): + // /// - The limit price expressed in units of RAO per one Alpha. + // /// + // /// * 'allow_partial' (bool): + // /// - Allows partial execution of the amount. If set to false, this becomes + // /// fill or kill type or order. + // /// + // /// # Event: + // /// * StakeAdded; + // /// - On the successfully adding stake to a global account. + // /// + // /// # Raises: + // /// * 'NotEnoughBalanceToStake': + // /// - Not enough balance on the coldkey to add onto the global account. + // /// + // /// * 'NonAssociatedColdKey': + // /// - The calling coldkey is not associated with this hotkey. + // /// + // /// * 'BalanceWithdrawalError': + // /// - Errors stemming from transaction pallet. + // /// + // #[pallet::call_index(105)] + // #[pallet::weight((Weight::from_parts(169_200_000, 5127) + // .saturating_add(T::DbWeight::get().reads(14_u64)) + // .saturating_add(T::DbWeight::get().writes(12_u64)), DispatchClass::Normal, Pays::No))] + // pub fn add_stake_limit_aggregate( + // origin: OriginFor, + // hotkey: T::AccountId, + // netuid: u16, + // amount_staked: u64, + // limit_price: u64, + // allow_partial: bool, + // ) -> DispatchResult { + // Self::do_add_stake_limit_aggregate( + // origin, + // hotkey, + // netuid, + // amount_staked, + // limit_price, + // allow_partial, + // ) + // } + + // /// --- Removes stake from a hotkey on a subnet with a price limit. + // /// This extrinsic allows to specify the limit price for alpha token + // /// at which or better (higher) the staking should execute. + // /// + // /// In case if slippage occurs and the price shall move beyond the limit + // /// price, the staking order may execute only partially or not execute + // /// at all. + // /// + // /// The operation will be delayed. 
+ // /// + // /// # Args: + // /// * 'origin': (Origin): + // /// - The signature of the caller's coldkey. + // /// + // /// * 'hotkey' (T::AccountId): + // /// - The associated hotkey account. + // /// + // /// * 'netuid' (u16): + // /// - Subnetwork UID + // /// + // /// * 'amount_unstaked' (u64): + // /// - The amount of stake to be added to the hotkey staking account. + // /// + // /// * 'limit_price' (u64): + // /// - The limit price expressed in units of RAO per one Alpha. + // /// + // /// * 'allow_partial' (bool): + // /// - Allows partial execution of the amount. If set to false, this becomes + // /// fill or kill type or order. + // /// + // /// # Event: + // /// * StakeRemoved; + // /// - On the successfully removing stake from the hotkey account. + // /// + // /// # Raises: + // /// * 'NotRegistered': + // /// - Thrown if the account we are attempting to unstake from is non existent. + // /// + // /// * 'NonAssociatedColdKey': + // /// - Thrown if the coldkey does not own the hotkey we are unstaking from. + // /// + // /// * 'NotEnoughStakeToWithdraw': + // /// - Thrown if there is not enough stake on the hotkey to withdwraw this amount. + // /// + // #[pallet::call_index(106)] + // #[pallet::weight((Weight::from_parts(211_700_000, 10163) + // .saturating_add(T::DbWeight::get().reads(19_u64)) + // .saturating_add(T::DbWeight::get().writes(12_u64)), DispatchClass::Normal, Pays::No))] + // pub fn remove_stake_limit_aggregate( + // origin: OriginFor, + // hotkey: T::AccountId, + // netuid: u16, + // amount_unstaked: u64, + // limit_price: u64, + // allow_partial: bool, + // ) -> DispatchResult { + // Self::do_remove_stake_limit_aggregate( + // origin, + // hotkey, + // netuid, + // amount_unstaked, + // limit_price, + // allow_partial, + // ) + // } + + // /// ---- The implementation for the extrinsic unstake_all_aggregate: Removes all stake from a hotkey account across all subnets and adds it onto a coldkey. + // /// + // /// The operation will be delayed. + // /// + // /// # Args: + // /// * `origin` - (::Origin): + // /// - The signature of the caller's coldkey. + // /// + // /// * `hotkey` (T::AccountId): + // /// - The associated hotkey account. + // /// + // /// # Event: + // /// * StakeRemoved; + // /// - On the successfully removing stake from the hotkey account. + // /// + // /// # Raises: + // /// * `NotRegistered`: + // /// - Thrown if the account we are attempting to unstake from is non existent. + // /// + // /// * `NonAssociatedColdKey`: + // /// - Thrown if the coldkey does not own the hotkey we are unstaking from. + // /// + // /// * `NotEnoughStakeToWithdraw`: + // /// - Thrown if there is not enough stake on the hotkey to withdraw this amount. + // /// + // /// * `TxRateLimitExceeded`: + // /// - Thrown if key has hit transaction rate limit + // #[pallet::call_index(107)] + // #[pallet::weight((Weight::from_parts(3_000_000, 0).saturating_add(T::DbWeight::get().writes(1)), DispatchClass::Operational, Pays::No))] + // pub fn unstake_all_aggregate(origin: OriginFor, hotkey: T::AccountId) -> DispatchResult { + // Self::do_unstake_all_aggregate(origin, hotkey) + // } + + // /// ---- The implementation for the extrinsic unstake_all_alpha_aggregate: Removes all stake from a hotkey account across all subnets and adds it onto a coldkey. + // /// + // /// The operation will be delayed. + // /// + // /// # Args: + // /// * `origin` - (::Origin): + // /// - The signature of the caller's coldkey. 
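The commented-out limit variants above describe the same order semantics as the live add_stake_limit / remove_stake_limit calls: a limit price expressed in RAO per one alpha, and an allow_partial flag that turns the order into fill-or-kill when false. A hypothetical sketch of that fill decision (the helper name and quantities are illustrative, not pallet code):

// Returns how much of the requested amount may execute, given how much can fill
// without the alpha price crossing the caller's limit.
fn fill_amount(requested: u64, fillable_within_limit: u64, allow_partial: bool) -> Option<u64> {
    if fillable_within_limit >= requested {
        Some(requested)                  // full fill at or better than the limit price
    } else if allow_partial {
        Some(fillable_within_limit)      // partial execution is allowed
    } else {
        None                             // fill-or-kill: reject the whole order
    }
}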
+ // /// + // /// * `hotkey` (T::AccountId): + // /// - The associated hotkey account. + // #[pallet::call_index(108)] + // #[pallet::weight((Weight::from_parts(3_000_000, 0).saturating_add(T::DbWeight::get().writes(1)), DispatchClass::Operational, Pays::No))] + // pub fn unstake_all_alpha_aggregate( + // origin: OriginFor, + // hotkey: T::AccountId, + // ) -> DispatchResult { + // Self::do_unstake_all_alpha_aggregate(origin, hotkey) + // } } } diff --git a/pallets/subtensor/src/macros/errors.rs b/pallets/subtensor/src/macros/errors.rs index 052b15d5e6..2a8e5bc346 100644 --- a/pallets/subtensor/src/macros/errors.rs +++ b/pallets/subtensor/src/macros/errors.rs @@ -4,6 +4,7 @@ use frame_support::pallet_macros::pallet_section; /// This can later be imported into the pallet using [`import_section`]. #[pallet_section] mod errors { + #[derive(PartialEq)] #[pallet::error] pub enum Error { /// The subnet does not exist. @@ -207,5 +208,11 @@ mod errors { UnableToRecoverPublicKey, /// Recovered public key is invalid. InvalidRecoveredPublicKey, + /// SubToken disabled now + SubtokenDisabled, + /// Zero max stake amount + ZeroMaxStakeAmount, + /// Invalid netuid duplication + SameNetuid, } } diff --git a/pallets/subtensor/src/macros/events.rs b/pallets/subtensor/src/macros/events.rs index 8c2e863d0e..9849a517ee 100644 --- a/pallets/subtensor/src/macros/events.rs +++ b/pallets/subtensor/src/macros/events.rs @@ -17,6 +17,30 @@ mod events { StakeAdded(T::AccountId, T::AccountId, u64, u64, u16, u64), /// stake has been removed from the hotkey staking account onto the coldkey account. StakeRemoved(T::AccountId, T::AccountId, u64, u64, u16, u64), + /// stake has been transferred from the coldkey account onto the hotkey staking account (at the end of the block) + AggregatedStakeAdded(T::AccountId, T::AccountId, u16, u64), + /// adding aggregated stake has failed + FailedToAddAggregatedStake(T::AccountId, T::AccountId, u16, u64), + /// limited stake has been transferred from the coldkey account onto the hotkey staking account (at the end of the block) + AggregatedLimitedStakeAdded(T::AccountId, T::AccountId, u16, u64, u64, bool), + /// adding limited aggregated stake has failed + FailedToAddAggregatedLimitedStake(T::AccountId, T::AccountId, u16, u64, u64, bool), + /// stake has been removed from the hotkey staking account into the coldkey account (at the end of the block). + AggregatedStakeRemoved(T::AccountId, T::AccountId, u16, u64), + /// removing aggregated stake has failed + FailedToRemoveAggregatedStake(T::AccountId, T::AccountId, u16, u64), + /// aggregated limited stake has been removed from the hotkey staking account into the coldkey account (at the end of the block). + AggregatedLimitedStakeRemoved(T::AccountId, T::AccountId, u16, u64, u64, bool), + /// removing limited aggregated stake has failed + FailedToRemoveAggregatedLimitedStake(T::AccountId, T::AccountId, u16, u64, u64, bool), + /// aggregated unstake_all operation has succeeded + AggregatedUnstakeAllSucceeded(T::AccountId, T::AccountId), + /// aggregated unstake_all operation has failed + AggregatedUnstakeAllFailed(T::AccountId, T::AccountId), + /// aggregated unstake_all_alpha operation has succeeded + AggregatedUnstakeAllAlphaSucceeded(T::AccountId, T::AccountId), + /// aggregated unstake_all_alpha operation has failed + AggregatedUnstakeAllAlphaFailed(T::AccountId, T::AccountId), /// stake has been moved from origin (hotkey, subnet ID) to destination (hotkey, subnet ID) of this amount (in TAO). 
StakeMoved(T::AccountId, T::AccountId, u16, T::AccountId, u16, u64), /// a caller successfully sets their weights on a subnetwork. @@ -43,6 +67,8 @@ mod events { ActivityCutoffSet(u16, u16), /// Rho value is set. RhoSet(u16, u16), + /// steepness of the sigmoid used to compute alpha values. + AlphaSigmoidSteepnessSet(u16, u16), /// Kappa is set for a subnet. KappaSet(u16, u16), /// minimum allowed weight is set for a subnet. @@ -59,6 +85,8 @@ mod events { BondsMovingAverageSet(u16, u64), /// bonds penalty is set for a subnet. BondsPenaltySet(u16, u16), + /// bonds reset is set for a subnet. + BondsResetOnSet(u16, bool), /// setting the max number of allowed validators on a subnet. MaxAllowedValidatorsSet(u16, u16), /// the axon server information is added to the network. diff --git a/pallets/subtensor/src/macros/hooks.rs b/pallets/subtensor/src/macros/hooks.rs index 49fc4ccfe5..a0e2fc6e72 100644 --- a/pallets/subtensor/src/macros/hooks.rs +++ b/pallets/subtensor/src/macros/hooks.rs @@ -34,6 +34,15 @@ mod hooks { } } + // ---- Called on the finalization of this pallet. The code weight must be taken into account prior to the execution of this macro. + // + // # Args: + // * 'n': (BlockNumberFor): + // - The number of the block we are finalizing. + fn on_finalize(_block_number: BlockNumberFor) { + // Self::do_on_finalize(block_number); + } + fn on_runtime_upgrade() -> frame_support::weights::Weight { // --- Migrate storage let mut weight = frame_support::weights::Weight::from_parts(0, 0); @@ -87,7 +96,26 @@ mod hooks { // Remove all zero value entries in TotalHotkeyAlpha .saturating_add(migrations::migrate_remove_zero_total_hotkey_alpha::migrate_remove_zero_total_hotkey_alpha::()) // Wipe existing items to prevent bad decoding for new type - .saturating_add(migrations::migrate_upgrade_revealed_commitments::migrate_upgrade_revealed_commitments::()); + .saturating_add(migrations::migrate_upgrade_revealed_commitments::migrate_upgrade_revealed_commitments::()) + // Set subtoken enabled for all existed subnets + .saturating_add(migrations::migrate_set_subtoken_enabled::migrate_set_subtoken_enabled::()) + // Remove all entries in TotalHotkeyColdkeyStakesThisInterval + .saturating_add(migrations::migrate_remove_total_hotkey_coldkey_stakes_this_interval::migrate_remove_total_hotkey_coldkey_stakes_this_interval::()) + // Wipe the deprecated RateLimit storage item in the commitments pallet + .saturating_add(migrations::migrate_remove_commitments_rate_limit::migrate_remove_commitments_rate_limit::()); + + weight + // Remove all entries in orphaned storage items + .saturating_add( + migrations::migrate_orphaned_storage_items::migrate_orphaned_storage_items::( + ), + ) + // Reset bonds moving average + .saturating_add(migrations::migrate_reset_bonds_moving_average::migrate_reset_bonds_moving_average::()) + // Reset max burn + .saturating_add(migrations::migrate_reset_max_burn::migrate_reset_max_burn::()) + // Migrate ColdkeySwapScheduled structure to new format + .saturating_add(migrations::migrate_coldkey_swap_scheduled::migrate_coldkey_swap_scheduled::()); weight } diff --git a/pallets/subtensor/src/migrations/migrate_coldkey_swap_scheduled.rs b/pallets/subtensor/src/migrations/migrate_coldkey_swap_scheduled.rs new file mode 100644 index 0000000000..e15f468ddc --- /dev/null +++ b/pallets/subtensor/src/migrations/migrate_coldkey_swap_scheduled.rs @@ -0,0 +1,78 @@ +use super::*; +use crate::AccountIdOf; +use frame_support::{ + pallet_prelude::{Blake2_128Concat, ValueQuery}, + storage_alias, + traits::Get, 
+ weights::Weight, +}; +pub use frame_system::pallet_prelude::BlockNumberFor; +use scale_info::prelude::string::String; +/// Module containing deprecated storage format for LoadedEmission +pub mod deprecated_coldkey_swap_scheduled_format { + use super::*; + + #[storage_alias] + pub(super) type ColdkeySwapScheduled = + StorageMap, Blake2_128Concat, AccountIdOf, (), ValueQuery>; +} + +/// Migrate the ColdkeySwapScheduled map to the new storage format +pub fn migrate_coldkey_swap_scheduled() -> Weight { + use deprecated_coldkey_swap_scheduled_format as old; + + let migration_name = b"migrate_coldkey_swap_scheduled".to_vec(); + let mut weight = T::DbWeight::get().reads(1); + + if HasMigrationRun::::get(&migration_name) { + log::info!( + "Migration '{:?}' has already run. Skipping.", + migration_name + ); + return weight; + } + + log::info!( + "Running migration '{}'", + String::from_utf8_lossy(&migration_name) + ); + + // ------------------------------ + // Step 1: Migrate ColdkeySwapScheduled map + // ------------------------------ + + let curr_keys: Vec> = old::ColdkeySwapScheduled::::iter_keys().collect(); + + // Remove any undecodable entries + for coldkey in &curr_keys { + weight.saturating_accrue(T::DbWeight::get().reads(1)); + if old::ColdkeySwapScheduled::::try_get(coldkey).is_err() { + old::ColdkeySwapScheduled::::remove(coldkey); + log::warn!( + "Was unable to decode old coldkey_swap_scheduled for coldkey {:?}", + &coldkey + ); + } + } + + let default_value = DefaultColdkeySwapScheduled::::get(); + ColdkeySwapScheduled::::translate::<(), _>(|_coldkey: AccountIdOf, _: ()| { + Some((default_value.0, default_value.1.clone())) + }); + // write once for each item in the map, no matter remove or translate + weight.saturating_accrue(T::DbWeight::get().writes(curr_keys.len() as u64)); + + // ------------------------------ + // Step 2: Mark Migration as Completed + // ------------------------------ + + HasMigrationRun::::insert(&migration_name, true); + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + + log::info!( + "Migration '{:?}' completed successfully.", + String::from_utf8_lossy(&migration_name) + ); + + weight +} diff --git a/pallets/subtensor/src/migrations/migrate_orphaned_storage_items.rs b/pallets/subtensor/src/migrations/migrate_orphaned_storage_items.rs new file mode 100644 index 0000000000..db00a4c440 --- /dev/null +++ b/pallets/subtensor/src/migrations/migrate_orphaned_storage_items.rs @@ -0,0 +1,68 @@ +use super::*; +use frame_support::weights::Weight; + +pub fn migrate_orphaned_storage_items() -> Weight { + remove_last_hotkey_coldkey_emission_on_netuid::() + .saturating_add(remove_subnet_alpha_emission_sell::()) + .saturating_add(remove_neurons_to_prune_at_next_epoch::()) + .saturating_add(remove_total_stake_at_dynamic::()) + .saturating_add(remove_subnet_name::()) + .saturating_add(remove_network_min_allowed_uids::()) + .saturating_add(remove_dynamic_block::()) +} + +pub(crate) fn remove_last_hotkey_coldkey_emission_on_netuid() -> Weight { + let migration_name = "migrate_remove_last_hotkey_coldkey_emission_on_netuid"; + let pallet_name = "SubtensorModule"; + let storage_name = "LastHotkeyColdkeyEmissionOnNetuid"; + + migrate_storage::(migration_name, pallet_name, storage_name) +} + +pub(crate) fn remove_subnet_alpha_emission_sell() -> Weight { + let migration_name = "migrate_remove_subnet_alpha_emission_sell"; + let pallet_name = "SubtensorModule"; + let storage_name = "SubnetAlphaEmissionSell"; + + migrate_storage::(migration_name, pallet_name, storage_name) 
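The ColdkeySwapScheduled migration above, like the other migrations added in this PR, follows a one-shot convention: check HasMigrationRun for the migration's name, do the storage work while accruing weight, then record the name so later runtime upgrades skip it. A compressed sketch of that skeleton, using the storage items named in the diff (the migration name itself is illustrative):

// Skeleton of the guard-and-mark pattern shared by the new migrations.
pub fn migrate_example<T: Config>() -> Weight {
    let migration_name = b"migrate_example".to_vec();   // illustrative name
    let mut weight = T::DbWeight::get().reads(1);        // the HasMigrationRun read

    if HasMigrationRun::<T>::get(&migration_name) {
        return weight;                                    // already applied: cheap no-op
    }

    // ... perform the storage changes here, adding their reads/writes to `weight` ...

    HasMigrationRun::<T>::insert(&migration_name, true);  // mark as done
    weight.saturating_add(T::DbWeight::get().writes(1))
}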
+} + +pub(crate) fn remove_neurons_to_prune_at_next_epoch() -> Weight { + let migration_name = "migrate_remove_neurons_to_prune_at_next_epoch"; + let pallet_name = "SubtensorModule"; + let storage_name = "NeuronsToPruneAtNextEpoch"; + + migrate_storage::(migration_name, pallet_name, storage_name) +} + +pub(crate) fn remove_total_stake_at_dynamic() -> Weight { + let migration_name = "migrate_remove_total_stake_at_dynamic"; + let pallet_name = "SubtensorModule"; + let storage_name = "TotalStakeAtDynamic"; + + migrate_storage::(migration_name, pallet_name, storage_name) +} + +pub(crate) fn remove_subnet_name() -> Weight { + let migration_name = "migrate_remove_subnet_name"; + let pallet_name = "SubtensorModule"; + let storage_name = "SubnetName"; + + migrate_storage::(migration_name, pallet_name, storage_name) +} + +pub(crate) fn remove_network_min_allowed_uids() -> Weight { + let migration_name = "migrate_remove_network_min_allowed_uids"; + let pallet_name = "SubtensorModule"; + let storage_name = "NetworkMinAllowedUids"; + + migrate_storage::(migration_name, pallet_name, storage_name) +} + +pub(crate) fn remove_dynamic_block() -> Weight { + let migration_name = "migrate_remove_dynamic_block"; + let pallet_name = "SubtensorModule"; + let storage_name = "DynamicBlock"; + + migrate_storage::(migration_name, pallet_name, storage_name) +} diff --git a/pallets/subtensor/src/migrations/migrate_rao.rs b/pallets/subtensor/src/migrations/migrate_rao.rs index 623faa1661..975ab55c11 100644 --- a/pallets/subtensor/src/migrations/migrate_rao.rs +++ b/pallets/subtensor/src/migrations/migrate_rao.rs @@ -30,9 +30,6 @@ pub fn migrate_rao() -> Weight { .collect(); weight = weight.saturating_add(T::DbWeight::get().reads_writes(netuids.len() as u64, 0)); - // Set the Dynamic block. - DynamicBlock::::set(Pallet::::get_current_block_as_u64()); - // Migrate all TAO to root. // This migration has already run, leaving this only for reference for now, since this is a recent migration // Stake::::iter().for_each(|(hotkey, coldkey, stake)| { @@ -106,7 +103,6 @@ pub fn migrate_rao() -> Weight { // Set the token symbol for this subnet using Self instead of Pallet:: TokenSymbol::::insert(netuid, Pallet::::get_symbol_for_subnet(*netuid)); - TotalStakeAtDynamic::::insert(netuid, 0); if let Ok(owner_coldkey) = SubnetOwner::::try_get(netuid) { // Set Owner as the coldkey. diff --git a/pallets/subtensor/src/migrations/migrate_remove_commitments_rate_limit.rs b/pallets/subtensor/src/migrations/migrate_remove_commitments_rate_limit.rs new file mode 100644 index 0000000000..b32d4edc9f --- /dev/null +++ b/pallets/subtensor/src/migrations/migrate_remove_commitments_rate_limit.rs @@ -0,0 +1,54 @@ +use super::*; +use crate::HasMigrationRun; +use frame_support::{traits::Get, weights::Weight}; +use scale_info::prelude::string::String; +use sp_io::{KillStorageResult, hashing::twox_128, storage::clear_prefix}; + +pub fn migrate_remove_commitments_rate_limit() -> Weight { + let migration_name = b"migrate_remove_commitments_rate_limit".to_vec(); + let mut weight = T::DbWeight::get().reads(1); + if HasMigrationRun::::get(&migration_name) { + log::info!( + "Migration '{:?}' has already run. 
Skipping.", + migration_name + ); + return weight; + } + + log::info!( + "Running migration '{}'", + String::from_utf8_lossy(&migration_name) + ); + + // ------------------------------------------------------------- + // Step 1: Remove all entries under the `RateLimit` storage key + // ------------------------------------------------------------- + let mut rate_limit_prefix = Vec::new(); + rate_limit_prefix.extend_from_slice(&twox_128("Commitments".as_bytes())); + rate_limit_prefix.extend_from_slice(&twox_128("RateLimit".as_bytes())); + + let removal_result = clear_prefix(&rate_limit_prefix, Some(u32::MAX)); + let removed_entries = match removal_result { + KillStorageResult::AllRemoved(removed) => removed as u64, + KillStorageResult::SomeRemaining(removed) => { + log::warn!("Failed to remove some `RateLimit` entries."); + removed as u64 + } + }; + + weight = weight.saturating_add(T::DbWeight::get().writes(removed_entries)); + log::info!("Removed {} entries from `RateLimit`.", removed_entries); + + // ------------------------------------------------------------- + // Step 2: Mark this migration as completed + // ------------------------------------------------------------- + HasMigrationRun::::insert(&migration_name, true); + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + + log::info!( + "Migration '{:?}' completed successfully.", + String::from_utf8_lossy(&migration_name) + ); + + weight +} diff --git a/pallets/subtensor/src/migrations/migrate_remove_total_hotkey_coldkey_stakes_this_interval.rs b/pallets/subtensor/src/migrations/migrate_remove_total_hotkey_coldkey_stakes_this_interval.rs new file mode 100644 index 0000000000..16782a5d8c --- /dev/null +++ b/pallets/subtensor/src/migrations/migrate_remove_total_hotkey_coldkey_stakes_this_interval.rs @@ -0,0 +1,51 @@ +use super::*; +use crate::HasMigrationRun; +use frame_support::{traits::Get, weights::Weight}; +use sp_io::{KillStorageResult, hashing::twox_128, storage::clear_prefix}; + +pub fn migrate_remove_total_hotkey_coldkey_stakes_this_interval() -> Weight { + let migration_name = "migrate_remove_total_hotkey_coldkey_stakes_this_interval"; + let migration_name_bytes = migration_name.as_bytes().to_vec(); + + let mut weight = T::DbWeight::get().reads(1); + if HasMigrationRun::::get(&migration_name_bytes) { + log::info!( + "Migration '{:?}' has already run. Skipping.", + migration_name + ); + return weight; + } + + log::info!("Running migration '{}'", migration_name); + + let pallet_name = twox_128(b"SubtensorModule"); + let storage_name = twox_128(b"TotalHotkeyColdkeyStakesThisInterval"); + let prefix = [pallet_name, storage_name].concat(); + + // Remove all entries. + let removed_entries_count = match clear_prefix(&prefix, Some(u32::MAX)) { + KillStorageResult::AllRemoved(removed) => { + log::info!("Removed all entries from {:?}.", storage_name); + + // Mark migration as completed + HasMigrationRun::::insert(&migration_name_bytes, true); + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + + removed as u64 + } + KillStorageResult::SomeRemaining(removed) => { + log::info!("Failed to remove all entries from {:?}", storage_name); + removed as u64 + } + }; + + weight = weight.saturating_add(T::DbWeight::get().writes(removed_entries_count as u64)); + + log::info!( + "Migration '{:?}' completed successfully. 
{:?} entries removed.", + migration_name, + removed_entries_count + ); + + weight +} diff --git a/pallets/subtensor/src/migrations/migrate_reset_bonds_moving_average.rs b/pallets/subtensor/src/migrations/migrate_reset_bonds_moving_average.rs new file mode 100644 index 0000000000..5bb442af18 --- /dev/null +++ b/pallets/subtensor/src/migrations/migrate_reset_bonds_moving_average.rs @@ -0,0 +1,59 @@ +use super::*; +use frame_support::{traits::Get, weights::Weight}; +use log; +use scale_info::prelude::string::String; + +pub fn migrate_reset_bonds_moving_average() -> Weight { + let migration_name = b"migrate_reset_bonds_moving_average".to_vec(); + let mut weight = T::DbWeight::get().reads(1); + + // ------------------------------ + // Step 0: Check if already run + // ------------------------------ + if HasMigrationRun::::get(&migration_name) { + log::info!( + "Migration '{:?}' has already run. Skipping.", + migration_name + ); + return weight; + } + + log::info!( + "Running migration '{}'", + String::from_utf8_lossy(&migration_name) + ); + + // ------------------------------ + // Step 1: Reset all subnet's BondsMovingAverage to 975000 if the value exceeds 975000 + // ------------------------------ + + let mut reset_entries_count = 0u64; + + for netuid in BondsMovingAverage::::iter_keys() { + BondsMovingAverage::::mutate(netuid, |average| { + *average = (*average).min(975000); + }); + reset_entries_count = reset_entries_count.saturating_add(1); + } + + weight = weight + .saturating_add(T::DbWeight::get().reads_writes(reset_entries_count, reset_entries_count)); + + log::info!( + "Reset {} subnets from BondsMovingAverage.", + reset_entries_count + ); + + // ------------------------------ + // Step 2: Mark Migration as Completed + // ------------------------------ + HasMigrationRun::::insert(&migration_name, true); + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + + log::info!( + "Migration '{:?}' completed successfully.", + String::from_utf8_lossy(&migration_name) + ); + + weight +} diff --git a/pallets/subtensor/src/migrations/migrate_reset_max_burn.rs b/pallets/subtensor/src/migrations/migrate_reset_max_burn.rs new file mode 100644 index 0000000000..a2662f7de3 --- /dev/null +++ b/pallets/subtensor/src/migrations/migrate_reset_max_burn.rs @@ -0,0 +1,56 @@ +use super::*; +use frame_support::{traits::Get, weights::Weight}; +use log; +use scale_info::prelude::string::String; + +pub fn migrate_reset_max_burn() -> Weight { + let migration_name = b"migrate_reset_max_burn".to_vec(); + let mut weight = T::DbWeight::get().reads(1); + + // ------------------------------ + // Step 0: Check if already run + // ------------------------------ + if HasMigrationRun::::get(&migration_name) { + log::info!( + "Migration '{:?}' has already run. 
Skipping.", + migration_name + ); + return weight; + } + + log::info!( + "Running migration '{}'", + String::from_utf8_lossy(&migration_name) + ); + + // ------------------------------ + // Step 1: Reset all subnet's MaxBurn to 100 TAO + // ------------------------------ + + let mut reset_entries_count = 0u64; + + for netuid in MaxBurn::::iter_keys() { + MaxBurn::::mutate(netuid, |max| { + *max = 100_000_000_000; + }); + reset_entries_count = reset_entries_count.saturating_add(1); + } + + weight = weight + .saturating_add(T::DbWeight::get().reads_writes(reset_entries_count, reset_entries_count)); + + log::info!("Reset {} subnets from MaxBurn.", reset_entries_count); + + // ------------------------------ + // Step 2: Mark Migration as Completed + // ------------------------------ + HasMigrationRun::::insert(&migration_name, true); + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + + log::info!( + "Migration '{:?}' completed successfully.", + String::from_utf8_lossy(&migration_name) + ); + + weight +} diff --git a/pallets/subtensor/src/migrations/migrate_set_subtoken_enabled.rs b/pallets/subtensor/src/migrations/migrate_set_subtoken_enabled.rs new file mode 100644 index 0000000000..da4386c4b8 --- /dev/null +++ b/pallets/subtensor/src/migrations/migrate_set_subtoken_enabled.rs @@ -0,0 +1,54 @@ +use super::*; +use crate::HasMigrationRun; +use frame_support::{traits::Get, weights::Weight}; +use scale_info::prelude::string::String; + +pub fn migrate_set_subtoken_enabled() -> Weight { + let migration_name = b"migrate_set_subtoken_enabled".to_vec(); + + let mut weight = T::DbWeight::get().reads(1); + if HasMigrationRun::::get(&migration_name) { + log::info!( + "Migration '{:?}' has already run. Skipping.", + String::from_utf8_lossy(&migration_name) + ); + return weight; + } + + log::info!( + "Running migration '{:?}'", + String::from_utf8_lossy(&migration_name) + ); + + // ------------------------------ + // Step 1: Set the subnet token enabled for all subnets except new subnet + // ------------------------------ + let netuids = Pallet::::get_all_subnet_netuids(); + for netuid in netuids.iter() { + if *netuid != 0 { + // set it as true if start call executed and value exists for first emission block number + SubtokenEnabled::::insert( + netuid, + FirstEmissionBlockNumber::::get(netuid).is_some(), + ); + } else { + SubtokenEnabled::::insert(netuid, true); + } + } + + // ------------------------------ + // Step 2: Mark Migration as Completed + // ------------------------------ + + HasMigrationRun::::insert(&migration_name, true); + + weight = + weight.saturating_add(T::DbWeight::get().writes((netuids.len() as u64).saturating_add(1))); + + log::info!( + "Migration '{:?}' completed successfully.", + String::from_utf8_lossy(&migration_name) + ); + + weight +} diff --git a/pallets/subtensor/src/migrations/mod.rs b/pallets/subtensor/src/migrations/mod.rs index 23fb3cde1f..5c6347034f 100644 --- a/pallets/subtensor/src/migrations/mod.rs +++ b/pallets/subtensor/src/migrations/mod.rs @@ -1,5 +1,10 @@ use super::*; +use frame_support::pallet_prelude::Weight; +use sp_io::KillStorageResult; +use sp_io::hashing::twox_128; +use sp_io::storage::clear_prefix; pub mod migrate_chain_identity; +pub mod migrate_coldkey_swap_scheduled; pub mod migrate_commit_reveal_v2; pub mod migrate_create_root_network; pub mod migrate_delete_subnet_21; @@ -7,14 +12,20 @@ pub mod migrate_delete_subnet_3; pub mod migrate_fix_is_network_member; pub mod migrate_identities_v2; pub mod migrate_init_total_issuance; +pub mod 
migrate_orphaned_storage_items; pub mod migrate_populate_owned_hotkeys; pub mod migrate_rao; +pub mod migrate_remove_commitments_rate_limit; pub mod migrate_remove_stake_map; +pub mod migrate_remove_total_hotkey_coldkey_stakes_this_interval; pub mod migrate_remove_unused_maps_and_values; pub mod migrate_remove_zero_total_hotkey_alpha; +pub mod migrate_reset_bonds_moving_average; +pub mod migrate_reset_max_burn; pub mod migrate_set_first_emission_block_number; pub mod migrate_set_min_burn; pub mod migrate_set_min_difficulty; +pub mod migrate_set_subtoken_enabled; pub mod migrate_stake_threshold; pub mod migrate_subnet_volume; pub mod migrate_to_v1_separate_emission; @@ -22,3 +33,53 @@ pub mod migrate_to_v2_fixed_total_stake; pub mod migrate_total_issuance; pub mod migrate_transfer_ownership_to_foundation; pub mod migrate_upgrade_revealed_commitments; + +pub(crate) fn migrate_storage( + migration_name: &'static str, + pallet_name: &'static str, + storage_name: &'static str, +) -> Weight { + let migration_name_bytes = migration_name.as_bytes().to_vec(); + + let mut weight = T::DbWeight::get().reads(1); + if HasMigrationRun::::get(&migration_name_bytes) { + log::info!( + "Migration '{:?}' has already run. Skipping.", + migration_name + ); + return weight; + } + + log::info!("Running migration '{}'", migration_name); + + let pallet_name = twox_128(pallet_name.as_bytes()); + let storage_name = twox_128(storage_name.as_bytes()); + let prefix = [pallet_name, storage_name].concat(); + + // Remove all entries. + let removed_entries_count = match clear_prefix(&prefix, Some(u32::MAX)) { + KillStorageResult::AllRemoved(removed) => { + log::info!("Removed all entries from {:?}.", storage_name); + + // Mark migration as completed + HasMigrationRun::::insert(&migration_name_bytes, true); + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + + removed as u64 + } + KillStorageResult::SomeRemaining(removed) => { + log::info!("Failed to remove all entries from {:?}", storage_name); + removed as u64 + } + }; + + weight = weight.saturating_add(T::DbWeight::get().writes(removed_entries_count as u64)); + + log::info!( + "Migration '{:?}' completed successfully. 
{:?} entries removed.", + migration_name, + removed_entries_count + ); + + weight +} diff --git a/pallets/subtensor/src/rpc_info/metagraph.rs b/pallets/subtensor/src/rpc_info/metagraph.rs index fe6ab0ed90..04325fef4d 100644 --- a/pallets/subtensor/src/rpc_info/metagraph.rs +++ b/pallets/subtensor/src/rpc_info/metagraph.rs @@ -213,7 +213,7 @@ where { pub fn merge_value(&mut self, other: &Self, metagraph_index: usize) { match SelectiveMetagraphIndex::from_index(metagraph_index) { - // Name and symbol + Some(SelectiveMetagraphIndex::Netuid) => self.netuid = other.netuid, Some(SelectiveMetagraphIndex::Name) => self.name = other.name.clone(), Some(SelectiveMetagraphIndex::Symbol) => self.symbol = other.symbol.clone(), Some(SelectiveMetagraphIndex::Identity) => self.identity = other.identity.clone(), @@ -450,6 +450,7 @@ where } pub enum SelectiveMetagraphIndex { + Netuid, Name, Symbol, Identity, @@ -526,77 +527,78 @@ pub enum SelectiveMetagraphIndex { impl SelectiveMetagraphIndex { fn from_index(index: usize) -> Option { match index { - 0 => Some(SelectiveMetagraphIndex::Name), - 1 => Some(SelectiveMetagraphIndex::Symbol), - 2 => Some(SelectiveMetagraphIndex::Identity), - 3 => Some(SelectiveMetagraphIndex::NetworkRegisteredAt), - 4 => Some(SelectiveMetagraphIndex::OwnerHotkey), - 5 => Some(SelectiveMetagraphIndex::OwnerColdkey), - 6 => Some(SelectiveMetagraphIndex::Block), - 7 => Some(SelectiveMetagraphIndex::Tempo), - 8 => Some(SelectiveMetagraphIndex::LastStep), - 9 => Some(SelectiveMetagraphIndex::BlocksSinceLastStep), - 10 => Some(SelectiveMetagraphIndex::SubnetEmission), - 11 => Some(SelectiveMetagraphIndex::AlphaIn), - 12 => Some(SelectiveMetagraphIndex::AlphaOut), - 13 => Some(SelectiveMetagraphIndex::TaoIn), - 14 => Some(SelectiveMetagraphIndex::AlphaOutEmission), - 15 => Some(SelectiveMetagraphIndex::AlphaInEmission), - 16 => Some(SelectiveMetagraphIndex::TaoInEmission), - 17 => Some(SelectiveMetagraphIndex::PendingAlphaEmission), - 18 => Some(SelectiveMetagraphIndex::PendingRootEmission), - 19 => Some(SelectiveMetagraphIndex::SubnetVolume), - 20 => Some(SelectiveMetagraphIndex::MovingPrice), - 21 => Some(SelectiveMetagraphIndex::Rho), - 22 => Some(SelectiveMetagraphIndex::Kappa), - 23 => Some(SelectiveMetagraphIndex::MinAllowedWeights), - 24 => Some(SelectiveMetagraphIndex::MaxWeightsLimit), - 25 => Some(SelectiveMetagraphIndex::WeightsVersion), - 26 => Some(SelectiveMetagraphIndex::WeightsRateLimit), - 27 => Some(SelectiveMetagraphIndex::ActivityCutoff), - 28 => Some(SelectiveMetagraphIndex::MaxValidators), - 29 => Some(SelectiveMetagraphIndex::NumUids), - 30 => Some(SelectiveMetagraphIndex::MaxUids), - 31 => Some(SelectiveMetagraphIndex::Burn), - 32 => Some(SelectiveMetagraphIndex::Difficulty), - 33 => Some(SelectiveMetagraphIndex::RegistrationAllowed), - 34 => Some(SelectiveMetagraphIndex::PowRegistrationAllowed), - 35 => Some(SelectiveMetagraphIndex::ImmunityPeriod), - 36 => Some(SelectiveMetagraphIndex::MinDifficulty), - 37 => Some(SelectiveMetagraphIndex::MaxDifficulty), - 38 => Some(SelectiveMetagraphIndex::MinBurn), - 39 => Some(SelectiveMetagraphIndex::MaxBurn), - 40 => Some(SelectiveMetagraphIndex::AdjustmentAlpha), - 41 => Some(SelectiveMetagraphIndex::AdjustmentInterval), - 42 => Some(SelectiveMetagraphIndex::TargetRegsPerInterval), - 43 => Some(SelectiveMetagraphIndex::MaxRegsPerBlock), - 44 => Some(SelectiveMetagraphIndex::ServingRateLimit), - 45 => Some(SelectiveMetagraphIndex::CommitRevealWeightsEnabled), - 46 => Some(SelectiveMetagraphIndex::CommitRevealPeriod), - 47 
=> Some(SelectiveMetagraphIndex::LiquidAlphaEnabled), - 48 => Some(SelectiveMetagraphIndex::AlphaHigh), - 49 => Some(SelectiveMetagraphIndex::AlphaLow), - 50 => Some(SelectiveMetagraphIndex::BondsMovingAvg), - 51 => Some(SelectiveMetagraphIndex::Hotkeys), - 52 => Some(SelectiveMetagraphIndex::Coldkeys), - 53 => Some(SelectiveMetagraphIndex::Identities), - 54 => Some(SelectiveMetagraphIndex::Axons), - 55 => Some(SelectiveMetagraphIndex::Active), - 56 => Some(SelectiveMetagraphIndex::ValidatorPermit), - 57 => Some(SelectiveMetagraphIndex::PruningScore), - 58 => Some(SelectiveMetagraphIndex::LastUpdate), - 59 => Some(SelectiveMetagraphIndex::Emission), - 60 => Some(SelectiveMetagraphIndex::Dividends), - 61 => Some(SelectiveMetagraphIndex::Incentives), - 62 => Some(SelectiveMetagraphIndex::Consensus), - 63 => Some(SelectiveMetagraphIndex::Trust), - 64 => Some(SelectiveMetagraphIndex::Rank), - 65 => Some(SelectiveMetagraphIndex::BlockAtRegistration), - 66 => Some(SelectiveMetagraphIndex::AlphaStake), - 67 => Some(SelectiveMetagraphIndex::TaoStake), - 68 => Some(SelectiveMetagraphIndex::TotalStake), - 69 => Some(SelectiveMetagraphIndex::TaoDividendsPerHotkey), - 70 => Some(SelectiveMetagraphIndex::AlphaDividendsPerHotkey), + 0 => Some(SelectiveMetagraphIndex::Netuid), + 1 => Some(SelectiveMetagraphIndex::Name), + 2 => Some(SelectiveMetagraphIndex::Symbol), + 3 => Some(SelectiveMetagraphIndex::Identity), + 4 => Some(SelectiveMetagraphIndex::NetworkRegisteredAt), + 5 => Some(SelectiveMetagraphIndex::OwnerHotkey), + 6 => Some(SelectiveMetagraphIndex::OwnerColdkey), + 7 => Some(SelectiveMetagraphIndex::Block), + 8 => Some(SelectiveMetagraphIndex::Tempo), + 9 => Some(SelectiveMetagraphIndex::LastStep), + 10 => Some(SelectiveMetagraphIndex::BlocksSinceLastStep), + 11 => Some(SelectiveMetagraphIndex::SubnetEmission), + 12 => Some(SelectiveMetagraphIndex::AlphaIn), + 13 => Some(SelectiveMetagraphIndex::AlphaOut), + 14 => Some(SelectiveMetagraphIndex::TaoIn), + 15 => Some(SelectiveMetagraphIndex::AlphaOutEmission), + 16 => Some(SelectiveMetagraphIndex::AlphaInEmission), + 17 => Some(SelectiveMetagraphIndex::TaoInEmission), + 18 => Some(SelectiveMetagraphIndex::PendingAlphaEmission), + 19 => Some(SelectiveMetagraphIndex::PendingRootEmission), + 20 => Some(SelectiveMetagraphIndex::SubnetVolume), + 21 => Some(SelectiveMetagraphIndex::MovingPrice), + 22 => Some(SelectiveMetagraphIndex::Rho), + 23 => Some(SelectiveMetagraphIndex::Kappa), + 24 => Some(SelectiveMetagraphIndex::MinAllowedWeights), + 25 => Some(SelectiveMetagraphIndex::MaxWeightsLimit), + 26 => Some(SelectiveMetagraphIndex::WeightsVersion), + 27 => Some(SelectiveMetagraphIndex::WeightsRateLimit), + 28 => Some(SelectiveMetagraphIndex::ActivityCutoff), + 29 => Some(SelectiveMetagraphIndex::MaxValidators), + 30 => Some(SelectiveMetagraphIndex::NumUids), + 31 => Some(SelectiveMetagraphIndex::MaxUids), + 32 => Some(SelectiveMetagraphIndex::Burn), + 33 => Some(SelectiveMetagraphIndex::Difficulty), + 34 => Some(SelectiveMetagraphIndex::RegistrationAllowed), + 35 => Some(SelectiveMetagraphIndex::PowRegistrationAllowed), + 36 => Some(SelectiveMetagraphIndex::ImmunityPeriod), + 37 => Some(SelectiveMetagraphIndex::MinDifficulty), + 38 => Some(SelectiveMetagraphIndex::MaxDifficulty), + 39 => Some(SelectiveMetagraphIndex::MinBurn), + 40 => Some(SelectiveMetagraphIndex::MaxBurn), + 41 => Some(SelectiveMetagraphIndex::AdjustmentAlpha), + 42 => Some(SelectiveMetagraphIndex::AdjustmentInterval), + 43 => Some(SelectiveMetagraphIndex::TargetRegsPerInterval), + 44 
=> Some(SelectiveMetagraphIndex::MaxRegsPerBlock), + 45 => Some(SelectiveMetagraphIndex::ServingRateLimit), + 46 => Some(SelectiveMetagraphIndex::CommitRevealWeightsEnabled), + 47 => Some(SelectiveMetagraphIndex::CommitRevealPeriod), + 48 => Some(SelectiveMetagraphIndex::LiquidAlphaEnabled), + 49 => Some(SelectiveMetagraphIndex::AlphaHigh), + 50 => Some(SelectiveMetagraphIndex::AlphaLow), + 51 => Some(SelectiveMetagraphIndex::BondsMovingAvg), + 52 => Some(SelectiveMetagraphIndex::Hotkeys), + 53 => Some(SelectiveMetagraphIndex::Coldkeys), + 54 => Some(SelectiveMetagraphIndex::Identities), + 55 => Some(SelectiveMetagraphIndex::Axons), + 56 => Some(SelectiveMetagraphIndex::Active), + 57 => Some(SelectiveMetagraphIndex::ValidatorPermit), + 58 => Some(SelectiveMetagraphIndex::PruningScore), + 59 => Some(SelectiveMetagraphIndex::LastUpdate), + 60 => Some(SelectiveMetagraphIndex::Emission), + 61 => Some(SelectiveMetagraphIndex::Dividends), + 62 => Some(SelectiveMetagraphIndex::Incentives), + 63 => Some(SelectiveMetagraphIndex::Consensus), + 64 => Some(SelectiveMetagraphIndex::Trust), + 65 => Some(SelectiveMetagraphIndex::Rank), + 66 => Some(SelectiveMetagraphIndex::BlockAtRegistration), + 67 => Some(SelectiveMetagraphIndex::AlphaStake), + 68 => Some(SelectiveMetagraphIndex::TaoStake), + 69 => Some(SelectiveMetagraphIndex::TotalStake), + 70 => Some(SelectiveMetagraphIndex::TaoDividendsPerHotkey), + 71 => Some(SelectiveMetagraphIndex::AlphaDividendsPerHotkey), _ => None, } } @@ -724,7 +726,7 @@ impl Pallet { coldkeys, // coldkey per UID axons, // Axon information per UID. identities, - active: Active::::get(netuid), // Avtive per UID + active: Active::::get(netuid), // Active per UID validator_permit: ValidatorPermit::::get(netuid), // Val permit per UID pruning_score: PruningScores::::get(netuid) .into_iter() @@ -808,6 +810,10 @@ impl Pallet { ) -> SelectiveMetagraph { match SelectiveMetagraphIndex::from_index(metagraph_index as usize) { // Name and symbol + Some(SelectiveMetagraphIndex::Netuid) => SelectiveMetagraph { + netuid: netuid.into(), + ..Default::default() + }, Some(SelectiveMetagraphIndex::Name) => SelectiveMetagraph { netuid: netuid.into(), name: Some( @@ -1166,7 +1172,7 @@ impl Pallet { }, Some(SelectiveMetagraphIndex::ValidatorPermit) => SelectiveMetagraph { netuid: netuid.into(), - active: Some(ValidatorPermit::::get(netuid)), + validator_permit: Some(ValidatorPermit::::get(netuid)), ..Default::default() }, @@ -1451,11 +1457,11 @@ fn test_selective_metagraph() { metagraph.merge_value(&metagraph_name, wrong_index); assert!(metagraph.name.is_none()); - let name_index: usize = 0; + let name_index: usize = 1; metagraph.merge_value(&metagraph_name, name_index); assert!(metagraph.name.is_some()); - let alph_low_index: usize = 49; + let alph_low_index: usize = 50; let metagraph_alpha_low = SelectiveMetagraph:: { netuid: 0_u16.into(), alpha_low: Some(0_u16.into()), diff --git a/pallets/subtensor/src/staking/add_stake.rs b/pallets/subtensor/src/staking/add_stake.rs index e599262cef..8886806cd9 100644 --- a/pallets/subtensor/src/staking/add_stake.rs +++ b/pallets/subtensor/src/staking/add_stake.rs @@ -11,6 +11,9 @@ impl Pallet { /// * 'hotkey' (T::AccountId): /// - The associated hotkey account. /// + /// * 'netuid' (u16): + /// - Subnetwork UID + /// /// * 'stake_to_be_added' (u64): /// - The amount of stake to be added to the hotkey staking account. /// @@ -47,6 +50,8 @@ impl Pallet { stake_to_be_added ); + Self::ensure_subtoken_enabled(netuid)?; + // 2. 
Validate user input Self::validate_add_stake( &coldkey, @@ -76,6 +81,154 @@ impl Pallet { Ok(()) } + /// ---- The implementation for the extrinsic add_stake_aggregate: Adds stake to a hotkey account. + /// The operation will be delayed until the end of the block. + /// # Args: + /// * 'origin': (RuntimeOrigin): + /// - The signature of the caller's coldkey. + /// + /// * 'hotkey' (T::AccountId): + /// - The associated hotkey account. + /// + /// * 'netuid' (u16): + /// - Subnetwork UID + /// + /// * 'stake_to_be_added' (u64): + /// - The amount of stake to be added to the hotkey staking account. + /// + /// # Event: + /// * StakeAdded; + /// - On the successfully adding stake to a global account. + /// + /// # Raises: + /// * 'NotEnoughBalanceToStake': + /// - Not enough balance on the coldkey to add onto the global account. + /// + /// * 'NonAssociatedColdKey': + /// - The calling coldkey is not associated with this hotkey. + /// + /// * 'BalanceWithdrawalError': + /// - Errors stemming from transaction pallet. + /// + /// * 'TxRateLimitExceeded': + /// - Thrown if key has hit transaction rate limit + /// + pub fn do_add_stake_aggregate( + origin: T::RuntimeOrigin, + hotkey: T::AccountId, + netuid: u16, + stake_to_be_added: u64, + ) -> dispatch::DispatchResult { + // We check that the transaction is signed by the caller and retrieve the T::AccountId coldkey information. + let coldkey = ensure_signed(origin)?; + + // Consider the weight from on_finalize + if cfg!(feature = "runtime-benchmarks") && !cfg!(test) { + Self::do_add_stake( + crate::dispatch::RawOrigin::Signed(coldkey.clone()).into(), + hotkey.clone(), + netuid, + stake_to_be_added, + )?; + } + + // Save the staking job for the on_finalize + let stake_job = StakeJob::AddStake { + hotkey, + coldkey, + netuid, + stake_to_be_added, + }; + + let stake_job_id = NextStakeJobId::::get(); + let current_blocknumber = >::block_number(); + + StakeJobs::::insert(current_blocknumber, stake_job_id, stake_job); + NextStakeJobId::::set(stake_job_id.saturating_add(1)); + + Ok(()) + } + + /// ---- The implementation for the extrinsic add_stake_limit_aggregate: Adds stake to a hotkey + /// account on a subnet with price limit. The operation will be delayed until the end of the + /// block. + /// + /// # Args: + /// * 'origin': (RuntimeOrigin): + /// - The signature of the caller's coldkey. + /// + /// * 'hotkey' (T::AccountId): + /// - The associated hotkey account. + /// + /// * 'netuid' (u16): + /// - Subnetwork UID + /// + /// * 'stake_to_be_added' (u64): + /// - The amount of stake to be added to the hotkey staking account. + /// + /// * 'limit_price' (u64): + /// - The limit price expressed in units of RAO per one Alpha. + /// + /// * 'allow_partial' (bool): + /// - Allows partial execution of the amount. If set to false, this becomes + /// fill or kill type or order. + /// + /// # Event: + /// * StakeAdded; + /// - On the successfully adding stake to a global account. + /// + /// # Raises: + /// * 'NotEnoughBalanceToStake': + /// - Not enough balance on the coldkey to add onto the global account. + /// + /// * 'NonAssociatedColdKey': + /// - The calling coldkey is not associated with this hotkey. + /// + /// * 'BalanceWithdrawalError': + /// - Errors stemming from transaction pallet. 
+ /// + /// * 'TxRateLimitExceeded': + /// - Thrown if key has hit transaction rate limit + /// + pub fn do_add_stake_limit_aggregate( + origin: T::RuntimeOrigin, + hotkey: T::AccountId, + netuid: u16, + stake_to_be_added: u64, + limit_price: u64, + allow_partial: bool, + ) -> dispatch::DispatchResult { + let coldkey = ensure_signed(origin)?; + + if cfg!(feature = "runtime-benchmarks") && !cfg!(test) { + Self::do_add_stake_limit( + crate::dispatch::RawOrigin::Signed(coldkey.clone()).into(), + hotkey.clone(), + netuid, + stake_to_be_added, + limit_price, + allow_partial, + )?; + } + + let stake_job = StakeJob::AddStakeLimit { + hotkey, + coldkey, + netuid, + stake_to_be_added, + limit_price, + allow_partial, + }; + + let stake_job_id = NextStakeJobId::::get(); + let current_blocknumber = >::block_number(); + + StakeJobs::::insert(current_blocknumber, stake_job_id, stake_job); + NextStakeJobId::::set(stake_job_id.saturating_add(1)); + + Ok(()) + } + /// ---- The implementation for the extrinsic add_stake_limit: Adds stake to a hotkey /// account on a subnet with price limit. /// @@ -86,6 +239,9 @@ impl Pallet { /// * 'hotkey' (T::AccountId): /// - The associated hotkey account. /// + /// * 'netuid' (u16): + /// - Subnetwork UID + /// /// * 'stake_to_be_added' (u64): /// - The amount of stake to be added to the hotkey staking account. /// @@ -131,8 +287,8 @@ impl Pallet { stake_to_be_added ); - // 2. Calcaulate the maximum amount that can be executed with price limit - let max_amount = Self::get_max_amount_add(netuid, limit_price); + // 2. Calculate the maximum amount that can be executed with price limit + let max_amount = Self::get_max_amount_add(netuid, limit_price)?; let mut possible_stake = stake_to_be_added; if possible_stake > max_amount { possible_stake = max_amount; @@ -173,29 +329,29 @@ impl Pallet { } // Returns the maximum amount of RAO that can be executed with price limit - pub fn get_max_amount_add(netuid: u16, limit_price: u64) -> u64 { + pub fn get_max_amount_add(netuid: u16, limit_price: u64) -> Result> { // Corner case: root and stao // There's no slippage for root or stable subnets, so if limit price is 1e9 rao or // higher, then max_amount equals u64::MAX, otherwise it is 0. if (netuid == Self::get_root_netuid()) || (SubnetMechanism::::get(netuid)) == 0 { if limit_price >= 1_000_000_000 { - return u64::MAX; + return Ok(u64::MAX); } else { - return 0; + return Err(Error::ZeroMaxStakeAmount); } } // Corner case: SubnetAlphaIn is zero. Staking can't happen, so max amount is zero. let alpha_in = SubnetAlphaIn::::get(netuid); if alpha_in == 0 { - return 0; + return Err(Error::ZeroMaxStakeAmount); } let alpha_in_u128 = alpha_in as u128; // Corner case: SubnetTAO is zero. Staking can't happen, so max amount is zero. 
let tao_reserve = SubnetTAO::::get(netuid); if tao_reserve == 0 { - return 0; + return Err(Error::ZeroMaxStakeAmount); } let tao_reserve_u128 = tao_reserve as u128; @@ -208,7 +364,7 @@ impl Pallet { .saturating_mul(tao)) || (limit_price == 0u64) { - return 0; + return Err(Error::ZeroMaxStakeAmount); } // Main case: return limit_price * SubnetAlphaIn - SubnetTAO @@ -219,10 +375,15 @@ impl Pallet { .checked_div(tao) .unwrap_or(0) .saturating_sub(tao_reserve_u128); + + if result == 0 { + return Err(Error::ZeroMaxStakeAmount); + } + if result < u64::MAX as u128 { - result as u64 + Ok(result as u64) } else { - u64::MAX + Ok(u64::MAX) } } } diff --git a/pallets/subtensor/src/staking/move_stake.rs b/pallets/subtensor/src/staking/move_stake.rs index 4198d29efc..ef576f3607 100644 --- a/pallets/subtensor/src/staking/move_stake.rs +++ b/pallets/subtensor/src/staking/move_stake.rs @@ -311,7 +311,7 @@ impl Pallet { ) -> Result> { // Calculate the maximum amount that can be executed let max_amount = if let Some(limit_price) = maybe_limit_price { - Self::get_max_amount_move(origin_netuid, destination_netuid, limit_price) + Self::get_max_amount_move(origin_netuid, destination_netuid, limit_price)? } else { alpha_amount }; @@ -401,7 +401,7 @@ impl Pallet { origin_netuid: u16, destination_netuid: u16, limit_price: u64, - ) -> u64 { + ) -> Result> { let tao: U64F64 = U64F64::saturating_from_num(1_000_000_000); // Corner case: both subnet IDs are root or stao @@ -413,9 +413,9 @@ impl Pallet { || (SubnetMechanism::::get(destination_netuid)) == 0) { if limit_price > tao.saturating_to_num::() { - return 0; + return Err(Error::ZeroMaxStakeAmount); } else { - return u64::MAX; + return Ok(u64::MAX); } } @@ -426,7 +426,7 @@ impl Pallet { && ((SubnetMechanism::::get(destination_netuid)) == 1) { if limit_price == 0 { - return u64::MAX; + return Ok(u64::MAX); } else { // The destination price is reverted because the limit_price is origin_price / destination_price let destination_subnet_price = tao @@ -450,7 +450,7 @@ impl Pallet { let subnet_tao_1 = SubnetTAO::::get(origin_netuid); let subnet_tao_2 = SubnetTAO::::get(destination_netuid); if (subnet_tao_1 == 0) || (subnet_tao_2 == 0) { - return 0; + return Err(Error::ZeroMaxStakeAmount); } let subnet_tao_1_float: U64F64 = U64F64::saturating_from_num(subnet_tao_1); let subnet_tao_2_float: U64F64 = U64F64::saturating_from_num(subnet_tao_2); @@ -459,7 +459,7 @@ impl Pallet { let alpha_in_1 = SubnetAlphaIn::::get(origin_netuid); let alpha_in_2 = SubnetAlphaIn::::get(destination_netuid); if (alpha_in_1 == 0) || (alpha_in_2 == 0) { - return 0; + return Err(Error::ZeroMaxStakeAmount); } let alpha_in_1_float: U64F64 = U64F64::saturating_from_num(alpha_in_1); let alpha_in_2_float: U64F64 = U64F64::saturating_from_num(alpha_in_2); @@ -474,12 +474,12 @@ impl Pallet { let current_price = Self::get_alpha_price(origin_netuid) .safe_div(Self::get_alpha_price(destination_netuid)); if limit_price_float > current_price { - return 0; + return Err(Error::ZeroMaxStakeAmount); } // Corner case: limit_price is zero if limit_price == 0 { - return u64::MAX; + return Ok(u64::MAX); } // Main case @@ -491,10 +491,16 @@ impl Pallet { let t1_over_sum: U64F64 = subnet_tao_1_float.safe_div(tao_sum); let t2_over_sum: U64F64 = subnet_tao_2_float.safe_div(tao_sum); - alpha_in_2_float + let final_result = alpha_in_2_float .saturating_mul(t1_over_sum) .safe_div(limit_price_float) .saturating_sub(alpha_in_1_float.saturating_mul(t2_over_sum)) - .saturating_to_num::() + .saturating_to_num::(); + + if 
final_result != 0 { + Ok(final_result) + } else { + Err(Error::ZeroMaxStakeAmount) + } } } diff --git a/pallets/subtensor/src/staking/recycle_alpha.rs b/pallets/subtensor/src/staking/recycle_alpha.rs index b5e6762e6a..ef3cea2b29 100644 --- a/pallets/subtensor/src/staking/recycle_alpha.rs +++ b/pallets/subtensor/src/staking/recycle_alpha.rs @@ -32,6 +32,8 @@ impl Pallet { Error::::CannotBurnOrRecycleOnRootSubnet ); + Self::ensure_subtoken_enabled(netuid)?; + // Ensure that the hotkey account exists this is only possible through registration. ensure!( Self::hotkey_account_exists(&hotkey), @@ -99,6 +101,8 @@ impl Pallet { Error::::CannotBurnOrRecycleOnRootSubnet ); + Self::ensure_subtoken_enabled(netuid)?; + // Ensure that the hotkey account exists this is only possible through registration. ensure!( Self::hotkey_account_exists(&hotkey), diff --git a/pallets/subtensor/src/staking/remove_stake.rs b/pallets/subtensor/src/staking/remove_stake.rs index 842e9d0b9d..378ea8001b 100644 --- a/pallets/subtensor/src/staking/remove_stake.rs +++ b/pallets/subtensor/src/staking/remove_stake.rs @@ -11,6 +11,9 @@ impl Pallet { /// * 'hotkey' (T::AccountId): /// - The associated hotkey account. /// + /// * 'netuid' (u16): + /// - Subnetwork UID + /// /// * 'stake_to_be_added' (u64): /// - The amount of stake to be added to the hotkey staking account. /// @@ -47,6 +50,8 @@ impl Pallet { alpha_unstaked ); + Self::ensure_subtoken_enabled(netuid)?; + // 2. Validate the user input Self::validate_remove_stake( &coldkey, @@ -85,6 +90,74 @@ impl Pallet { Ok(()) } + /// ---- The implementation for the extrinsic remove_stake_aggregate: Removes stake from a hotkey account and adds it onto a coldkey. + /// The operation will be delayed until the end of the block. + /// # Args: + /// * 'origin': (RuntimeOrigin): + /// - The signature of the caller's coldkey. + /// + /// * 'hotkey' (T::AccountId): + /// - The associated hotkey account. + /// + /// * 'netuid' (u16): + /// - Subnetwork UID + /// + /// * 'stake_to_be_added' (u64): + /// - The amount of stake to be added to the hotkey staking account. + /// + /// # Event: + /// * StakeRemoved; + /// - On the successfully removing stake from the hotkey account. + /// + /// # Raises: + /// * 'NotRegistered': + /// - Thrown if the account we are attempting to unstake from is non existent. + /// + /// * 'NonAssociatedColdKey': + /// - Thrown if the coldkey does not own the hotkey we are unstaking from. + /// + /// * 'NotEnoughStakeToWithdraw': + /// - Thrown if there is not enough stake on the hotkey to withdwraw this amount. + /// + /// * 'TxRateLimitExceeded': + /// - Thrown if key has hit transaction rate limit + /// + pub fn do_remove_stake_aggregate( + origin: T::RuntimeOrigin, + hotkey: T::AccountId, + netuid: u16, + alpha_unstaked: u64, + ) -> dispatch::DispatchResult { + // We check the transaction is signed by the caller and retrieve the T::AccountId coldkey information. 
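Editor's note on the pattern used by the `*_aggregate` staking entry points added in this PR: after checking the signed origin they do not act immediately, but push a `StakeJob` into the `StakeJobs` double map, keyed by the current block number and a monotonically increasing `NextStakeJobId`, so the work can be executed at the end of the block. A minimal, self-contained sketch of that enqueue-and-drain shape, using a plain `BTreeMap` and a simplified job type as stand-ins for the pallet storage (both are assumptions for illustration, not the pallet's actual types):

use std::collections::BTreeMap;

// Hypothetical, simplified stand-ins for StakeJob / StakeJobs / NextStakeJobId.
#[derive(Debug)]
enum Job {
    AddStake { coldkey: u8, amount: u64 },
    RemoveStake { coldkey: u8, amount: u64 },
}

struct Queue {
    next_id: u64,
    jobs: BTreeMap<(u64, u64), Job>, // keyed by (block_number, job_id)
}

impl Queue {
    // Mirrors the enqueue step of do_add_stake_aggregate and friends.
    fn enqueue(&mut self, block: u64, job: Job) {
        let id = self.next_id;
        self.jobs.insert((block, id), job);
        self.next_id = self.next_id.saturating_add(1);
    }

    // Mirrors the drain-and-execute step performed for a past block at the end of a block.
    fn drain_block(&mut self, block: u64) -> Vec<Job> {
        let keys: Vec<_> = self
            .jobs
            .range((block, 0)..=(block, u64::MAX))
            .map(|(k, _)| *k)
            .collect();
        keys.into_iter().filter_map(|k| self.jobs.remove(&k)).collect()
    }
}

fn main() {
    let mut q = Queue { next_id: 0, jobs: BTreeMap::new() };
    q.enqueue(10, Job::AddStake { coldkey: 1, amount: 5 });
    q.enqueue(10, Job::RemoveStake { coldkey: 2, amount: 3 });
    // With a one-block delay, the jobs queued during block 10 run at the end of block 11.
    for job in q.drain_block(10) {
        println!("executing {job:?}");
    }
}

Keying on (block number, job id) lets the end-of-block processing drain exactly the jobs queued for a given block; the commented-out `do_on_finalize` sketch later in stake_utils.rs additionally re-orders the drained jobs by coldkey before executing them.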
+ let coldkey = ensure_signed(origin)?; + + // Consider the weight from on_finalize + if cfg!(feature = "runtime-benchmarks") && !cfg!(test) { + Self::do_remove_stake( + crate::dispatch::RawOrigin::Signed(coldkey.clone()).into(), + hotkey.clone(), + netuid, + alpha_unstaked, + )?; + } + + // Save the staking job for the on_finalize + let stake_job = StakeJob::RemoveStake { + hotkey, + coldkey, + netuid, + alpha_unstaked, + }; + + let stake_job_id = NextStakeJobId::::get(); + let current_blocknumber = >::block_number(); + + StakeJobs::::insert(current_blocknumber, stake_job_id, stake_job); + NextStakeJobId::::set(stake_job_id.saturating_add(1)); + + Ok(()) + } + /// ---- The implementation for the extrinsic unstake_all: Removes all stake from a hotkey account across all subnets and adds it onto a coldkey. /// /// # Args: @@ -131,6 +204,9 @@ impl Pallet { // 4. Iterate through all subnets and remove stake. for netuid in netuids.into_iter() { + if !SubtokenEnabled::::get(netuid) { + continue; + } // Ensure that the hotkey has enough stake to withdraw. let alpha_unstaked = Self::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, netuid); @@ -174,6 +250,41 @@ impl Pallet { Ok(()) } + /// ---- The implementation for the extrinsic unstake_all_aggregate: Removes all stake from a hotkey account across all subnets and adds it onto a coldkey. + /// + /// # Args: + /// * 'origin': (RuntimeOrigin): + /// - The signature of the caller's coldkey. + /// + /// * 'hotkey' (T::AccountId): + /// - The associated hotkey account. + pub fn do_unstake_all_aggregate( + origin: T::RuntimeOrigin, + hotkey: T::AccountId, + ) -> dispatch::DispatchResult { + // We check the transaction is signed by the caller and retrieve the T::AccountId coldkey information. + let coldkey = ensure_signed(origin)?; + + // Consider the weight from on_finalize + if cfg!(feature = "runtime-benchmarks") && !cfg!(test) { + Self::do_unstake_all( + crate::dispatch::RawOrigin::Signed(coldkey.clone()).into(), + hotkey.clone(), + )?; + } + + // Save the unstake_all job for the on_finalize + let stake_job = StakeJob::UnstakeAll { hotkey, coldkey }; + + let stake_job_id = NextStakeJobId::::get(); + let current_blocknumber = >::block_number(); + + StakeJobs::::insert(current_blocknumber, stake_job_id, stake_job); + NextStakeJobId::::set(stake_job_id.saturating_add(1)); + + Ok(()) + } + /// ---- The implementation for the extrinsic unstake_all: Removes all stake from a hotkey account across all subnets and adds it onto a coldkey. /// /// # Args: @@ -221,6 +332,9 @@ impl Pallet { // 4. Iterate through all subnets and remove stake. let mut total_tao_unstaked: u64 = 0; for netuid in netuids.into_iter() { + if !SubtokenEnabled::::get(netuid) { + continue; + } // If not Root network. if netuid != Self::get_root_netuid() { // Ensure that the hotkey has enough stake to withdraw. @@ -276,6 +390,41 @@ impl Pallet { Ok(()) } + /// ---- The implementation for the extrinsic unstake_all_alpha_aggregate: Removes all stake from a hotkey account across all subnets and adds it onto a coldkey. + /// + /// # Args: + /// * 'origin': (RuntimeOrigin): + /// - The signature of the caller's coldkey. + /// + /// * 'hotkey' (T::AccountId): + /// - The associated hotkey account. + pub fn do_unstake_all_alpha_aggregate( + origin: T::RuntimeOrigin, + hotkey: T::AccountId, + ) -> dispatch::DispatchResult { + // We check the transaction is signed by the caller and retrieve the T::AccountId coldkey information. 
+ let coldkey = ensure_signed(origin)?; + + // Consider the weight from on_finalize + if cfg!(feature = "runtime-benchmarks") && !cfg!(test) { + Self::do_unstake_all_alpha( + crate::dispatch::RawOrigin::Signed(coldkey.clone()).into(), + hotkey.clone(), + )?; + } + + // Save the unstake_all_alpha job for the on_finalize + let stake_job = StakeJob::UnstakeAllAlpha { hotkey, coldkey }; + + let stake_job_id = NextStakeJobId::::get(); + let current_blocknumber = >::block_number(); + + StakeJobs::::insert(current_blocknumber, stake_job_id, stake_job); + NextStakeJobId::::set(stake_job_id.saturating_add(1)); + + Ok(()) + } + /// ---- The implementation for the extrinsic remove_stake_limit: Removes stake from /// a hotkey on a subnet with a price limit. /// @@ -290,6 +439,9 @@ impl Pallet { /// * 'hotkey' (T::AccountId): /// - The associated hotkey account. /// + /// * 'netuid' (u16): + /// - Subnetwork UID + /// /// * 'amount_unstaked' (u64): /// - The amount of stake to be added to the hotkey staking account. /// @@ -332,8 +484,8 @@ impl Pallet { alpha_unstaked ); - // 2. Calcaulate the maximum amount that can be executed with price limit - let max_amount = Self::get_max_amount_remove(netuid, limit_price); + // 2. Calculate the maximum amount that can be executed with price limit + let max_amount = Self::get_max_amount_remove(netuid, limit_price)?; let mut possible_alpha = alpha_unstaked; if possible_alpha > max_amount { possible_alpha = max_amount; @@ -377,37 +529,121 @@ impl Pallet { Ok(()) } + /// ---- The implementation for the extrinsic remove_stake_limit_aggregate: Removes stake from + /// a hotkey on a subnet with a price limit. + /// + /// In case if slippage occurs and the price shall move beyond the limit + /// price, the staking order may execute only partially or not execute + /// at all. + /// + /// The operation will be delayed until the end of the block. + /// + /// # Args: + /// * 'origin': (Origin): + /// - The signature of the caller's coldkey. + /// + /// * 'hotkey' (T::AccountId): + /// - The associated hotkey account. + /// + /// * 'netuid' (u16): + /// - Subnetwork UID + /// + /// * 'amount_unstaked' (u64): + /// - The amount of stake to be added to the hotkey staking account. + /// + /// * 'limit_price' (u64): + /// - The limit price expressed in units of RAO per one Alpha. + /// + /// * 'allow_partial' (bool): + /// - Allows partial execution of the amount. If set to false, this becomes + /// fill or kill type or order. + /// + /// # Event: + /// * StakeRemoved; + /// - On the successfully removing stake from the hotkey account. + /// + /// # Raises: + /// * 'NotRegistered': + /// - Thrown if the account we are attempting to unstake from is non existent. + /// + /// * 'NonAssociatedColdKey': + /// - Thrown if the coldkey does not own the hotkey we are unstaking from. + /// + /// * 'NotEnoughStakeToWithdraw': + /// - Thrown if there is not enough stake on the hotkey to withdwraw this amount. 
+ /// + pub fn do_remove_stake_limit_aggregate( + origin: T::RuntimeOrigin, + hotkey: T::AccountId, + netuid: u16, + alpha_unstaked: u64, + limit_price: u64, + allow_partial: bool, + ) -> dispatch::DispatchResult { + let coldkey = ensure_signed(origin)?; + + // Consider the weight from on_finalize + if cfg!(feature = "runtime-benchmarks") && !cfg!(test) { + Self::do_remove_stake_limit( + crate::dispatch::RawOrigin::Signed(coldkey.clone()).into(), + hotkey.clone(), + netuid, + alpha_unstaked, + limit_price, + allow_partial, + )?; + } + + let stake_job = StakeJob::RemoveStakeLimit { + hotkey, + coldkey, + netuid, + alpha_unstaked, + limit_price, + allow_partial, + }; + + let stake_job_id = NextStakeJobId::::get(); + let current_blocknumber = >::block_number(); + + StakeJobs::::insert(current_blocknumber, stake_job_id, stake_job); + NextStakeJobId::::set(stake_job_id.saturating_add(1)); + + // Done and ok. + Ok(()) + } + // Returns the maximum amount of RAO that can be executed with price limit - pub fn get_max_amount_remove(netuid: u16, limit_price: u64) -> u64 { + pub fn get_max_amount_remove(netuid: u16, limit_price: u64) -> Result> { // Corner case: root and stao // There's no slippage for root or stable subnets, so if limit price is 1e9 rao or - // higher, then max_amount equals u64::MAX, otherwise it is 0. + // lower, then max_amount equals u64::MAX, otherwise it is 0. if (netuid == Self::get_root_netuid()) || (SubnetMechanism::::get(netuid)) == 0 { if limit_price <= 1_000_000_000 { - return u64::MAX; + return Ok(u64::MAX); } else { - return 0; + return Err(Error::ZeroMaxStakeAmount); } } // Corner case: SubnetAlphaIn is zero. Staking can't happen, so max amount is zero. let alpha_in = SubnetAlphaIn::::get(netuid); if alpha_in == 0 { - return 0; + return Err(Error::ZeroMaxStakeAmount); } let alpha_in_u128 = alpha_in as u128; // Corner case: SubnetTAO is zero. Staking can't happen, so max amount is zero. let tao_reserve = SubnetTAO::::get(netuid); if tao_reserve == 0 { - return 0; + return Err(Error::ZeroMaxStakeAmount); } let tao_reserve_u128 = tao_reserve as u128; // Corner case: limit_price == 0 (because there's division by limit price) // => can sell all if limit_price == 0 { - return u64::MAX; + return Ok(u64::MAX); } // Corner case: limit_price >= current_price (price cannot increase with unstaking) @@ -421,7 +657,7 @@ impl Pallet { .checked_div(alpha_in_u128) .unwrap_or(0) { - return 0; + return Err(Error::ZeroMaxStakeAmount); } // Main case: SubnetTAO / limit_price - SubnetAlphaIn @@ -434,9 +670,13 @@ impl Pallet { .saturating_sub(alpha_in_u128); if result < u64::MAX as u128 { - result as u64 + if result == 0 { + return Err(Error::ZeroMaxStakeAmount); + } + + Ok(result as u64) } else { - u64::MAX + Ok(u64::MAX) } } } diff --git a/pallets/subtensor/src/staking/set_children.rs b/pallets/subtensor/src/staking/set_children.rs index 12b6bf852c..2d2ebaec47 100644 --- a/pallets/subtensor/src/staking/set_children.rs +++ b/pallets/subtensor/src/staking/set_children.rs @@ -317,15 +317,19 @@ impl Pallet { Error::::InvalidChildkeyTake ); - // Ensure the hotkey passes the rate limit. - ensure!( - Self::passes_rate_limit_on_subnet( - &TransactionType::SetChildkeyTake, // Set childkey take. - &hotkey, // Specific to a hotkey. - netuid, // Specific to a subnet. 
- ), - Error::::TxChildkeyTakeRateLimitExceeded - ); + let current_take = Self::get_childkey_take(&hotkey, netuid); + // Check the rate limit for increasing childkey take case + if take > current_take { + // Ensure the hotkey passes the rate limit. + ensure!( + Self::passes_rate_limit_on_subnet( + &TransactionType::SetChildkeyTake, // Set childkey take. + &hotkey, // Specific to a hotkey. + netuid, // Specific to a subnet. + ), + Error::::TxChildkeyTakeRateLimitExceeded + ); + } // Set last transaction block let current_block = Self::get_current_block_as_u64(); @@ -363,12 +367,12 @@ impl Pallet { /// If no specific take value has been set, it returns the default childkey take. /// /// # Arguments: - /// * `hotkey` (&T::AccountId): - /// - The hotkey for which to retrieve the childkey take. + /// * `hotkey` (&T::AccountId): The hotkey for which to retrieve the childkey take. /// /// # Returns: - /// * `u16` - The childkey take value. This is a percentage represented as a value between 0 and 10000, - /// where 10000 represents 100%. + /// * `u16` + /// - The childkey take value. This is a percentage represented as a value between 0 + /// and 10000, where 10000 represents 100%. pub fn get_childkey_take(hotkey: &T::AccountId, netuid: u16) -> u16 { ChildkeyTake::::get(hotkey, netuid) } diff --git a/pallets/subtensor/src/staking/stake_utils.rs b/pallets/subtensor/src/staking/stake_utils.rs index ee8326b6c1..058622177e 100644 --- a/pallets/subtensor/src/staking/stake_utils.rs +++ b/pallets/subtensor/src/staking/stake_utils.rs @@ -1,6 +1,8 @@ use super::*; +//use frame_system::pallet_prelude::BlockNumberFor; use safe_math::*; use share_pool::{SharePool, SharePoolDataOperations}; +//use sp_runtime::Saturating; use sp_std::ops::Neg; use substrate_fixed::types::{I64F64, I96F32, U64F64, U96F32, U110F18}; @@ -57,6 +59,7 @@ impl Pallet { U96F32::saturating_from_num(SubnetMovingPrice::::get(netuid)) } } + pub fn update_moving_price(netuid: u16) { let blocks_since_start_call = U96F32::saturating_from_num({ // We expect FirstEmissionBlockNumber to be set earlier, and we take the block when @@ -68,6 +71,10 @@ impl Pallet { Self::get_current_block_as_u64().saturating_sub(start_call_block) }); + // Use halving time hyperparameter. The meaning of this parameter can be best explained under + // the assumption of a constant price and SubnetMovingAlpha == 0.5: It is how many blocks it + // will take in order for the distance between current EMA of price and current price to shorten + // by half. let halving_time = EMAPriceHalvingBlocks::::get(netuid); let current_ma_unsigned = U96F32::saturating_from_num(SubnetMovingAlpha::::get()); let alpha: U96F32 = current_ma_unsigned.saturating_mul(blocks_since_start_call.safe_div( @@ -238,8 +245,8 @@ impl Pallet { /// * `netuid` - Network unique identifier specifying the subnet context. /// /// # Returns - /// * `u64` - The total inherited alpha for the hotkey on the subnet after considering the stakes - /// allocated to children and inherited from parents. + /// * `u64`: The total inherited alpha for the hotkey on the subnet after considering the + /// stakes allocated to children and inherited from parents. /// /// # Note /// This function uses saturating arithmetic to prevent overflows. 
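The halving-time comment in `update_moving_price` above can be made concrete with a small numerical sketch. The expression inside the `safe_div(...)` call is cut off by the hunk boundary here, so the way `alpha` is derived below, scaling `SubnetMovingAlpha` by `blocks_since_start / (blocks_since_start + halving_time)`, is an assumption for illustration only, and plain `f64` stands in for the pallet's `U96F32` fixed-point arithmetic:

fn main() {
    // Illustrative approximation of update_moving_price(); not the pallet code itself.
    let moving_alpha = 0.5_f64; // assumed SubnetMovingAlpha
    let halving_time = 1_000.0_f64; // assumed EMAPriceHalvingBlocks for this subnet
    let price = 2.0_f64; // constant current price, per the comment's assumption
    let mut moving_price = 1.0_f64; // starting EMA of the price

    for block in 1_u32..=4_000 {
        let t = f64::from(block); // blocks since the subnet's start call
        // Assumed shape of the truncated formula: alpha ramps up toward moving_alpha.
        let alpha = moving_alpha * (t / (t + halving_time));
        moving_price = alpha * price + (1.0 - alpha) * moving_price;
        if block % 1_000 == 0 {
            println!("block {block}: ema = {moving_price:.4}, gap = {:.4}", price - moving_price);
        }
    }
}

The printed gap simply shows the moving price converging toward the constant current price; the pallet performs one such update per block, in fixed point, inside `update_moving_price`.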
@@ -564,6 +571,14 @@ impl Pallet { netuid: u16, amount: u64, ) -> u64 { + if amount > 0 { + let mut staking_hotkeys = StakingHotkeys::::get(coldkey); + if !staking_hotkeys.contains(hotkey) { + staking_hotkeys.push(hotkey.clone()); + StakingHotkeys::::insert(coldkey, staking_hotkeys.clone()); + } + } + let mut alpha_share_pool = Self::get_alpha_share_pool(hotkey.clone(), netuid); // We expect to add a positive amount here. let actual_alpha = alpha_share_pool.update_value_for_one(coldkey, amount as i64); @@ -841,16 +856,9 @@ impl Pallet { actual_alpha = Self::increase_stake_for_hotkey_and_coldkey_on_subnet( hotkey, coldkey, netuid, alpha, ); - - // Step 4: Update the list of hotkeys staking for this coldkey - let mut staking_hotkeys = StakingHotkeys::::get(coldkey); - if !staking_hotkeys.contains(hotkey) { - staking_hotkeys.push(hotkey.clone()); - StakingHotkeys::::insert(coldkey, staking_hotkeys.clone()); - } } - // Step 5. Increase Tao reserves by the fee amount. + // Step 4. Increase Tao reserves by the fee amount. SubnetTAO::::mutate(netuid, |total| { *total = total.saturating_add(actual_fee); }); @@ -859,7 +867,7 @@ impl Pallet { }); LastColdkeyHotkeyStakeBlock::::insert(coldkey, hotkey, Self::get_current_block_as_u64()); - // Step 6. Deposit and log the staking event. + // Step 5. Deposit and log the staking event. Self::deposit_event(Event::StakeAdded( coldkey.clone(), hotkey.clone(), @@ -878,7 +886,7 @@ impl Pallet { actual_fee ); - // Step 7: Return the amount of alpha staked + // Step 6: Return the amount of alpha staked actual_alpha } @@ -1028,7 +1036,7 @@ impl Pallet { /// pub fn validate_stake_transition( origin_coldkey: &T::AccountId, - _destination_coldkey: &T::AccountId, + destination_coldkey: &T::AccountId, origin_hotkey: &T::AccountId, destination_hotkey: &T::AccountId, origin_netuid: u16, @@ -1038,6 +1046,11 @@ impl Pallet { maybe_allow_partial: Option, check_transfer_toggle: bool, ) -> Result<(), Error> { + // Ensure stake transition is actually happening + if origin_coldkey == destination_coldkey && origin_hotkey == destination_hotkey { + ensure!(origin_netuid != destination_netuid, Error::::SameNetuid); + } + // Ensure that both subnets exist. ensure!( Self::if_subnet_exist(origin_netuid), @@ -1050,6 +1063,16 @@ impl Pallet { ); } + ensure!( + SubtokenEnabled::::get(origin_netuid), + Error::::SubtokenDisabled + ); + + ensure!( + SubtokenEnabled::::get(destination_netuid), + Error::::SubtokenDisabled + ); + // Ensure that the origin hotkey account exists ensure!( Self::hotkey_account_exists(origin_hotkey), @@ -1182,6 +1205,368 @@ impl Pallet { None => DefaultStakingFee::::get(), } } + + // // Process staking job for on_finalize() hook. + // pub(crate) fn do_on_finalize(current_block_number: BlockNumberFor) { + // // We delay job execution + // const DELAY_IN_BLOCKS: u32 = 1u32; + // let actual_block_with_delay = current_block_number.saturating_sub(DELAY_IN_BLOCKS.into()); + // + // let stake_jobs = StakeJobs::::drain_prefix(actual_block_with_delay).collect::>(); + // + // // Sort jobs by job type + // let mut add_stake = vec![]; + // let mut remove_stake = vec![]; + // let mut add_stake_limit = vec![]; + // let mut remove_stake_limit = vec![]; + // let mut unstake_all = vec![]; + // let mut unstake_all_aplha = vec![]; + // + // for (_, job) in stake_jobs.into_iter() { + // match &job { + // StakeJob::AddStake { .. } => add_stake.push(job), + // StakeJob::RemoveStake { .. } => remove_stake.push(job), + // StakeJob::AddStakeLimit { .. 
} => add_stake_limit.push(job), + // StakeJob::RemoveStakeLimit { .. } => remove_stake_limit.push(job), + // StakeJob::UnstakeAll { .. } => unstake_all.push(job), + // StakeJob::UnstakeAllAlpha { .. } => unstake_all_aplha.push(job), + // } + // } + // // Reorder jobs based on the previous block hash + // let previous_block_hash = >::parent_hash(); + // let hash_bytes = previous_block_hash.as_ref(); + // let first_byte = hash_bytes.first().expect("hash operation is infallible"); + // // Extract the first bit + // let altered_order = (first_byte & 0b10000000) != 0; + // + // // Ascending sort by coldkey + // remove_stake_limit.sort_by(|a, b| match (a, b) { + // ( + // StakeJob::RemoveStakeLimit { coldkey: a_key, .. }, + // StakeJob::RemoveStakeLimit { coldkey: b_key, .. }, + // ) => { + // let direct_order = a_key.cmp(b_key); // ascending + // + // if altered_order { + // direct_order.reverse() + // } else { + // direct_order + // } + // } + // _ => sp_std::cmp::Ordering::Equal, // unreachable + // }); + // + // remove_stake.sort_by(|a, b| match (a, b) { + // ( + // StakeJob::RemoveStake { coldkey: a_key, .. }, + // StakeJob::RemoveStake { coldkey: b_key, .. }, + // ) => { + // let direct_order = a_key.cmp(b_key); // ascending + // + // if altered_order { + // direct_order.reverse() + // } else { + // direct_order + // } + // } + // _ => sp_std::cmp::Ordering::Equal, // unreachable + // }); + // + // unstake_all.sort_by(|a, b| match (a, b) { + // ( + // StakeJob::UnstakeAll { coldkey: a_key, .. }, + // StakeJob::UnstakeAll { coldkey: b_key, .. }, + // ) => { + // let direct_order = a_key.cmp(b_key); // ascending + // + // if altered_order { + // direct_order.reverse() + // } else { + // direct_order + // } + // } + // _ => sp_std::cmp::Ordering::Equal, // unreachable + // }); + // + // unstake_all_aplha.sort_by(|a, b| match (a, b) { + // ( + // StakeJob::UnstakeAllAlpha { coldkey: a_key, .. }, + // StakeJob::UnstakeAllAlpha { coldkey: b_key, .. }, + // ) => { + // let direct_order = a_key.cmp(b_key); // ascending + // + // if altered_order { + // direct_order.reverse() + // } else { + // direct_order + // } + // } + // _ => sp_std::cmp::Ordering::Equal, // unreachable + // }); + // + // // Descending sort by coldkey + // add_stake_limit.sort_by(|a, b| match (a, b) { + // ( + // StakeJob::AddStakeLimit { coldkey: a_key, .. }, + // StakeJob::AddStakeLimit { coldkey: b_key, .. }, + // ) => { + // let direct_order = b_key.cmp(a_key); // descending + // + // if altered_order { + // direct_order.reverse() + // } else { + // direct_order + // } + // } + // _ => sp_std::cmp::Ordering::Equal, // unreachable + // }); + // + // add_stake.sort_by(|a, b| match (a, b) { + // ( + // StakeJob::AddStake { coldkey: a_key, .. }, + // StakeJob::AddStake { coldkey: b_key, .. 
}, + // ) => { + // let direct_order = b_key.cmp(a_key); // descending + // + // if altered_order { + // direct_order.reverse() + // } else { + // direct_order + // } + // } + // _ => sp_std::cmp::Ordering::Equal, // unreachable + // }); + // + // // direct job order + // let mut job_batches = vec![ + // remove_stake_limit, + // remove_stake, + // unstake_all, + // unstake_all_aplha, + // add_stake_limit, + // add_stake, + // ]; + // if altered_order { + // job_batches.reverse(); + // } + // + // for jobs in job_batches.into_iter() { + // for job in jobs.into_iter() { + // match job { + // StakeJob::RemoveStakeLimit { + // hotkey, + // coldkey, + // netuid, + // alpha_unstaked, + // limit_price, + // allow_partial, + // } => { + // let result = Self::do_remove_stake_limit( + // dispatch::RawOrigin::Signed(coldkey.clone()).into(), + // hotkey.clone(), + // netuid, + // alpha_unstaked, + // limit_price, + // allow_partial, + // ); + // + // if let Err(err) = result { + // log::debug!( + // "Failed to remove aggregated limited stake: {:?}, {:?}, {:?}, {:?}, {:?}, {:?}, {:?}", + // coldkey, + // hotkey, + // netuid, + // alpha_unstaked, + // limit_price, + // allow_partial, + // err + // ); + // Self::deposit_event(Event::FailedToRemoveAggregatedLimitedStake( + // coldkey, + // hotkey, + // netuid, + // alpha_unstaked, + // limit_price, + // allow_partial, + // )); + // } else { + // Self::deposit_event(Event::AggregatedLimitedStakeRemoved( + // coldkey, + // hotkey, + // netuid, + // alpha_unstaked, + // limit_price, + // allow_partial, + // )); + // } + // } + // StakeJob::RemoveStake { + // coldkey, + // hotkey, + // netuid, + // alpha_unstaked, + // } => { + // let result = Self::do_remove_stake( + // dispatch::RawOrigin::Signed(coldkey.clone()).into(), + // hotkey.clone(), + // netuid, + // alpha_unstaked, + // ); + // + // if let Err(err) = result { + // log::debug!( + // "Failed to remove aggregated stake: {:?}, {:?}, {:?}, {:?}, {:?}", + // coldkey, + // hotkey, + // netuid, + // alpha_unstaked, + // err + // ); + // Self::deposit_event(Event::FailedToRemoveAggregatedStake( + // coldkey, + // hotkey, + // netuid, + // alpha_unstaked, + // )); + // } else { + // Self::deposit_event(Event::AggregatedStakeRemoved( + // coldkey, + // hotkey, + // netuid, + // alpha_unstaked, + // )); + // } + // } + // StakeJob::UnstakeAll { hotkey, coldkey } => { + // let result = Self::do_unstake_all( + // dispatch::RawOrigin::Signed(coldkey.clone()).into(), + // hotkey.clone(), + // ); + // + // if let Err(err) = result { + // log::debug!( + // "Failed to unstake all: {:?}, {:?}, {:?}", + // coldkey, + // hotkey, + // err + // ); + // Self::deposit_event(Event::AggregatedUnstakeAllFailed(coldkey, hotkey)); + // } else { + // Self::deposit_event(Event::AggregatedUnstakeAllSucceeded( + // coldkey, hotkey, + // )); + // } + // } + // StakeJob::UnstakeAllAlpha { hotkey, coldkey } => { + // let result = Self::do_unstake_all_alpha( + // dispatch::RawOrigin::Signed(coldkey.clone()).into(), + // hotkey.clone(), + // ); + // + // if let Err(err) = result { + // log::debug!( + // "Failed to unstake all alpha: {:?}, {:?}, {:?}", + // coldkey, + // hotkey, + // err + // ); + // Self::deposit_event(Event::AggregatedUnstakeAllAlphaFailed( + // coldkey, hotkey, + // )); + // } else { + // Self::deposit_event(Event::AggregatedUnstakeAllAlphaSucceeded( + // coldkey, hotkey, + // )); + // } + // } + // StakeJob::AddStakeLimit { + // hotkey, + // coldkey, + // netuid, + // stake_to_be_added, + // limit_price, + // 
allow_partial, + // } => { + // let result = Self::do_add_stake_limit( + // dispatch::RawOrigin::Signed(coldkey.clone()).into(), + // hotkey.clone(), + // netuid, + // stake_to_be_added, + // limit_price, + // allow_partial, + // ); + // + // if let Err(err) = result { + // log::debug!( + // "Failed to add aggregated limited stake: {:?}, {:?}, {:?}, {:?}, {:?}, {:?}, {:?}", + // coldkey, + // hotkey, + // netuid, + // stake_to_be_added, + // limit_price, + // allow_partial, + // err + // ); + // Self::deposit_event(Event::FailedToAddAggregatedLimitedStake( + // coldkey, + // hotkey, + // netuid, + // stake_to_be_added, + // limit_price, + // allow_partial, + // )); + // } else { + // Self::deposit_event(Event::AggregatedLimitedStakeAdded( + // coldkey, + // hotkey, + // netuid, + // stake_to_be_added, + // limit_price, + // allow_partial, + // )); + // } + // } + // StakeJob::AddStake { + // hotkey, + // coldkey, + // netuid, + // stake_to_be_added, + // } => { + // let result = Self::do_add_stake( + // dispatch::RawOrigin::Signed(coldkey.clone()).into(), + // hotkey.clone(), + // netuid, + // stake_to_be_added, + // ); + // + // if let Err(err) = result { + // log::debug!( + // "Failed to add aggregated stake: {:?}, {:?}, {:?}, {:?}, {:?}", + // coldkey, + // hotkey, + // netuid, + // stake_to_be_added, + // err + // ); + // Self::deposit_event(Event::FailedToAddAggregatedStake( + // coldkey, + // hotkey, + // netuid, + // stake_to_be_added, + // )); + // } else { + // Self::deposit_event(Event::AggregatedStakeAdded( + // coldkey, + // hotkey, + // netuid, + // stake_to_be_added, + // )); + // } + // } + // } + // } + // } + // } } /////////////////////////////////////////// diff --git a/pallets/subtensor/src/subnets/subnet.rs b/pallets/subtensor/src/subnets/subnet.rs index e4721c03f5..b122bfa049 100644 --- a/pallets/subtensor/src/subnets/subnet.rs +++ b/pallets/subtensor/src/subnets/subnet.rs @@ -211,7 +211,6 @@ impl Pallet { SubnetAlphaIn::::insert(netuid_to_register, pool_initial_tao); SubnetOwner::::insert(netuid_to_register, coldkey.clone()); SubnetOwnerHotkey::::insert(netuid_to_register, hotkey.clone()); - TotalStakeAtDynamic::::insert(netuid_to_register, TotalStake::::get()); if actual_tao_lock_amount_less_pool_tao > 0 { Self::burn_tokens(actual_tao_lock_amount_less_pool_tao); @@ -363,6 +362,7 @@ impl Pallet { let next_block_number = current_block_number.saturating_add(1); FirstEmissionBlockNumber::::insert(netuid, next_block_number); + SubtokenEnabled::::insert(netuid, true); Self::deposit_event(Event::FirstEmissionBlockNumberSet( netuid, next_block_number, @@ -370,6 +370,73 @@ impl Pallet { Ok(()) } + /// Sets or updates the hotkey account associated with the owner of a specific subnet. + /// + /// This function allows either the root origin or the current subnet owner to set or update + /// the hotkey for a given subnet. The subnet must already exist. To prevent abuse, the call is + /// rate-limited to once per configured interval (default: one week) per subnet. + /// + /// # Parameters + /// - `origin`: The dispatch origin of the call. Must be either root or the current owner of the subnet. + /// - `netuid`: The unique identifier of the subnet whose owner hotkey is being set. + /// - `hotkey`: The new hotkey account to associate with the subnet owner. + /// + /// # Returns + /// - `DispatchResult`: Returns `Ok(())` if the hotkey was successfully set, or an appropriate error otherwise. 
+ /// + /// # Errors + /// - `Error::SubnetNotExists`: If the specified subnet does not exist. + /// - `Error::TxRateLimitExceeded`: If the function is called more frequently than the allowed rate limit. + /// + /// # Access Control + /// Only callable by: + /// - Root origin, or + /// - The coldkey account that owns the subnet. + /// + /// # Storage + /// - Updates [`SubnetOwnerHotkey`] for the given `netuid`. + /// - Reads and updates [`LastRateLimitedBlock`] for rate-limiting. + /// - Reads [`DefaultSetSNOwnerHotkeyRateLimit`] to determine the interval between allowed updates. + /// + /// # Rate Limiting + /// This function is rate-limited to one call per subnet per interval (e.g., one week). + pub fn do_set_sn_owner_hotkey( + origin: T::RuntimeOrigin, + netuid: u16, + hotkey: &T::AccountId, + ) -> DispatchResult { + // Ensure the caller is either root or subnet owner. + Self::ensure_subnet_owner_or_root(origin, netuid)?; + + // Ensure that the subnet exists. + ensure!(Self::if_subnet_exist(netuid), Error::::SubnetNotExists); + + // Rate limit: 1 call per week + ensure!( + Self::passes_rate_limit_on_subnet( + &TransactionType::SetSNOwnerHotkey, + hotkey, // ignored + netuid, // Specific to a subnet. + ), + Error::::TxRateLimitExceeded + ); + + // Set last transaction block + let current_block = Self::get_current_block_as_u64(); + Self::set_last_transaction_block_on_subnet( + hotkey, + netuid, + &TransactionType::SetSNOwnerHotkey, + current_block, + ); + + // Insert/update the hotkey + SubnetOwnerHotkey::::insert(netuid, hotkey); + + // Return success. + Ok(()) + } + pub fn is_valid_subnet_for_emission(netuid: u16) -> bool { FirstEmissionBlockNumber::::get(netuid).is_some() } diff --git a/pallets/subtensor/src/subnets/symbols.rs b/pallets/subtensor/src/subnets/symbols.rs index e68b08e8b5..1aae9c3a0c 100644 --- a/pallets/subtensor/src/subnets/symbols.rs +++ b/pallets/subtensor/src/subnets/symbols.rs @@ -3,522 +3,458 @@ use super::*; /// Returns the Unicode symbol as a Vec for a given netuid. 
impl Pallet { pub fn get_name_for_subnet(netuid: u16) -> Vec { - if SubnetName::::contains_key(netuid) { - SubnetName::::get(netuid) - } else { - match netuid { - 0 => b"root".to_vec(), // Τ (Upper case Tau) - 1 => b"apex".to_vec(), // α (Alpha) - 2 => b"omron".to_vec(), // β (Beta) - 3 => b"templar".to_vec(), // γ (Gamma) - 4 => b"targon".to_vec(), // δ (Delta) - 5 => b"kaito".to_vec(), // ε (Epsilon) - 6 => b"infinite".to_vec(), // ζ (Zeta) - 7 => b"subvortex".to_vec(), // η (Eta) - 8 => b"ptn".to_vec(), // θ (Theta) - 9 => b"pretrain".to_vec(), // ι (Iota) - 10 => b"sturdy".to_vec(), // κ (Kappa) - 11 => b"dippy".to_vec(), // λ (Lambda) - 12 => b"horde".to_vec(), // μ (Mu) - 13 => b"dataverse".to_vec(), // ν (Nu) - 14 => b"palaidn".to_vec(), // ξ (Xi) - 15 => b"deval".to_vec(), // ο (Omicron) - 16 => b"bitads".to_vec(), // π (Pi) - 17 => b"3gen".to_vec(), // ρ (Rho) - 18 => b"cortex".to_vec(), // σ (Sigma) - 19 => b"inference".to_vec(), // t (Tau) - 20 => b"bitagent".to_vec(), // υ (Upsilon) - 21 => b"any-any".to_vec(), // φ (Phi) - 22 => b"meta".to_vec(), // χ (Chi) - 23 => b"social".to_vec(), // ψ (Psi) - 24 => b"omega".to_vec(), // ω (Omega) - 25 => b"protein".to_vec(), // א (Aleph) - 26 => b"alchemy".to_vec(), // ב (Bet) - 27 => b"compute".to_vec(), // ג (Gimel) - 28 => b"oracle".to_vec(), // ד (Dalet) - 29 => b"coldint".to_vec(), // ה (He) - 30 => b"bet".to_vec(), // ו (Vav) - 31 => b"naschain".to_vec(), // ז (Zayin) - 32 => b"itsai".to_vec(), // ח (Het) - 33 => b"ready".to_vec(), // ט (Tet) - 34 => b"mind".to_vec(), // י (Yod) - 35 => b"logic".to_vec(), // ך (Final Kaf) - 36 => b"automata".to_vec(), // כ (Kaf) - 37 => b"tuning".to_vec(), // ל (Lamed) - 38 => b"distributed".to_vec(), // ם (Final Mem) - 39 => b"edge".to_vec(), // מ (Mem) - 40 => b"chunk".to_vec(), // ן (Final Nun) - 41 => b"sportsensor".to_vec(), // נ (Nun) - 42 => b"masa".to_vec(), // ס (Samekh) - 43 => b"graphite".to_vec(), // ע (Ayin) - 44 => b"score".to_vec(), // ף (Final Pe) - 45 => b"gen42".to_vec(), // פ (Pe) - 46 => b"neural".to_vec(), // ץ (Final Tsadi) - 47 => b"condense".to_vec(), // צ (Tsadi) - 48 => b"nextplace".to_vec(), // ק (Qof) - 49 => b"automl".to_vec(), // ר (Resh) - 50 => b"audio".to_vec(), // ש (Shin) - 51 => b"celium".to_vec(), // ת (Tav) - 52 => b"dojo".to_vec(), // ا (Alif) - 53 => b"frontier".to_vec(), // ب (Ba) - 54 => b"safescan".to_vec(), // ت (Ta) - 55 => b"unknown".to_vec(), // ث (Tha) - 56 => b"gradients".to_vec(), // ج (Jim) - 57 => b"gaia".to_vec(), // ح (Ha) - 58 => b"dippy-speach".to_vec(), // خ (Kha) - 59 => b"agent-arena".to_vec(), // د (Dal) - 60 => b"unknown".to_vec(), // ذ (Dhal) - 61 => b"red team".to_vec(), // ر (Ra) - 62 => b"agentao".to_vec(), // ز (Zay) - 63 => b"lean-in".to_vec(), // س (Sin) - 64 => b"chutes".to_vec(), // ش (Shin) - 65 => b"sad".to_vec(), - 66 => b"dad".to_vec(), - 67 => b"ta".to_vec(), - 68 => b"dha".to_vec(), - 69 => b"ain".to_vec(), - 70 => b"ghayn".to_vec(), - 71 => b"fa".to_vec(), - 72 => b"qaf".to_vec(), - 73 => b"kaf".to_vec(), - 74 => b"lam".to_vec(), - 75 => b"mim".to_vec(), - 76 => b"nun".to_vec(), - 77 => b"ha".to_vec(), - 78 => b"waw".to_vec(), - 79 => b"ya".to_vec(), - 80 => b"alef".to_vec(), - 81 => b"fehu".to_vec(), - 82 => b"uruz".to_vec(), - 83 => b"thurisaz".to_vec(), - 84 => b"ansuz".to_vec(), - 85 => b"raidho".to_vec(), - 86 => b"kaunan".to_vec(), - 87 => b"cyr_yeru".to_vec(), - 88 => b"algiz".to_vec(), - 89 => b"berkanan".to_vec(), - 90 => b"ogham".to_vec(), - 91 => b"beith".to_vec(), - 92 => b"luis".to_vec(), - 93 => 
b"fearn".to_vec(), - 94 => b"sail".to_vec(), - 95 => b"nion".to_vec(), - 96 => b"forfeda".to_vec(), - 97 => b"ani".to_vec(), - 98 => b"bani".to_vec(), - 99 => b"gani".to_vec(), - 100 => b"doni".to_vec(), - 101 => b"eni".to_vec(), - 102 => b"vini".to_vec(), - 103 => b"ayp".to_vec(), - 104 => b"ben".to_vec(), - 105 => b"gim".to_vec(), - 106 => b"da".to_vec(), - 107 => b"ech".to_vec(), - 108 => b"za".to_vec(), - 109 => b"armeni".to_vec(), - 110 => b"grave".to_vec(), - 111 => b"io".to_vec(), - 112 => b"dje".to_vec(), - 113 => b"gje".to_vec(), - 114 => b"ie".to_vec(), - 115 => b"dze".to_vec(), - 116 => b"hard_sign".to_vec(), - 117 => b"alfa".to_vec(), - 118 => b"alfas".to_vec(), - 119 => b"vida".to_vec(), // Ⲃ (Vida, 119) - 120 => b"vida_small".to_vec(), // ⲃ (Small Vida, 120) - 121 => b"gamma".to_vec(), // Ⲅ (Gamma, 121) - 122 => b"gamma_small".to_vec(), // ⲅ (Small Gamma, 122) - 123 => b"brahmi_a".to_vec(), // 𑀀 (A, 123) - 124 => b"brahmi_aa".to_vec(), // 𑀁 (Aa, 124) - 125 => b"brahmi_i".to_vec(), // 𑀂 (I, 125) - 126 => b"brahmi_ii".to_vec(), // 𑀃 (Ii, 126) - 127 => b"brahmi_u".to_vec(), // 𑀅 (U, 127) - 128 => b"la".to_vec(), - 129 => b"va".to_vec(), - 130 => b"sha".to_vec(), - 131 => b"ssa".to_vec(), - 132 => b"sa".to_vec(), - 133 => b"ha".to_vec(), - 134 => b"glagolitic_az".to_vec(), // Ⰰ (Az, 134) - 135 => b"glagolitic_buky".to_vec(), // Ⰱ (Buky, 135) - 136 => b"glagolitic_vede".to_vec(), // Ⰲ (Vede, 136) - 137 => b"glagolitic_glagoli".to_vec(), // Ⰳ (Glagoli, 137) - 138 => b"glagolitic_dobro".to_vec(), // Ⰴ (Dobro, 138) - 139 => b"glagolitic_yest".to_vec(), // Ⰵ (Yest, 139) - 140 => b"glagolitic_zhivete".to_vec(), // Ⰶ (Zhivete, 140) - 141 => b"glagolitic_zemlja".to_vec(), // Ⰷ (Zemlja, 141) - 142 => b"glagolitic_izhe".to_vec(), // Ⰸ (Izhe, 142) - 143 => b"glagolitic_initial_izhe".to_vec(), // Ⰹ (Initial Izhe, 143) - 144 => b"glagolitic_i".to_vec(), // Ⰺ (I, 144) - 145 => b"glagolitic_djerv".to_vec(), // Ⰻ (Djerv, 145) - 146 => b"glagolitic_kako".to_vec(), // Ⰼ (Kako, 146) - 147 => b"glagolitic_ljudije".to_vec(), // Ⰽ (Ljudije, 147) - 148 => b"glagolitic_myse".to_vec(), // Ⰾ (Myse, 148) - 149 => b"glagolitic_nash".to_vec(), // Ⰿ (Nash, 149) - 150 => b"glagolitic_on".to_vec(), // Ⱀ (On, 150) - 151 => b"glagolitic_pokoj".to_vec(), // Ⱁ (Pokoj, 151) - 152 => b"glagolitic_rtsy".to_vec(), // Ⱂ (Rtsy, 152) - 153 => b"glagolitic_slovo".to_vec(), // Ⱃ (Slovo, 153) - 154 => b"glagolitic_tvrido".to_vec(), // Ⱄ (Tvrido, 154) - 155 => b"glagolitic_uku".to_vec(), // Ⱅ (Uku, 155) - 156 => b"glagolitic_fert".to_vec(), // Ⱆ (Fert, 156) - 157 => b"glagolitic_xrivi".to_vec(), // Ⱇ (Xrivi, 157) - 158 => b"glagolitic_ot".to_vec(), // Ⱈ (Ot, 158) - 159 => b"glagolitic_cy".to_vec(), // Ⱉ (Cy, 159) - 160 => b"glagolitic_shcha".to_vec(), // Ⱊ (Shcha, 160) - 161 => b"glagolitic_er".to_vec(), // Ⱋ (Er, 161) - 162 => b"glagolitic_yeru".to_vec(), // Ⱌ (Yeru, 162) - 163 => b"glagolitic_small_yer".to_vec(), // Ⱍ (Small Yer, 163) - 164 => b"glagolitic_yo".to_vec(), // Ⱎ (Yo, 164) - 165 => b"glagolitic_yu".to_vec(), // Ⱏ (Yu, 165) - 166 => b"glagolitic_ja".to_vec(), // Ⱐ (Ja, 166) - 167 => b"thai_ko_kai".to_vec(), // ก (Ko Kai, 167) - 168 => b"thai_kho_khai".to_vec(), // ข (Kho Khai, 168) - 169 => b"thai_kho_khuat".to_vec(), // ฃ (Kho Khuat, 169) - 170 => b"thai_kho_khon".to_vec(), // ค (Kho Khon, 170) - 171 => b"thai_kho_rakhang".to_vec(), // ฅ (Kho Rakhang, 171) - 172 => b"thai_kho_khwai".to_vec(), // ฆ (Kho Khwai, 172) - 173 => b"thai_ngo_ngu".to_vec(), // ง (Ngo Ngu, 173) - 174 => b"thai_cho_chan".to_vec(), // จ 
(Cho Chan, 174) - 175 => b"thai_cho_ching".to_vec(), // ฉ (Cho Ching, 175) - 176 => b"thai_cho_chang".to_vec(), // ช (Cho Chang, 176) - 177 => b"thai_so_so".to_vec(), // ซ (So So, 177) - 178 => b"thai_cho_choe".to_vec(), // ฌ (Cho Choe, 178) - 179 => b"thai_yo_ying".to_vec(), // ญ (Yo Ying, 179) - 180 => b"thai_do_chada".to_vec(), // ฎ (Do Chada, 180) - 181 => b"thai_to_patak".to_vec(), // ฏ (To Patak, 181) - 182 => b"thai_tho_than".to_vec(), // ฐ (Tho Than, 182) - 183 => b"thai_tho_nangmontho".to_vec(), // ฑ (Tho Nangmontho, 183) - 184 => b"thai_tho_phuthao".to_vec(), // ฒ (Tho Phuthao, 184) - 185 => b"thai_no_nen".to_vec(), // ณ (No Nen, 185) - 186 => b"thai_do_dek".to_vec(), // ด (Do Dek, 186) - 187 => b"thai_to_tao".to_vec(), // ต (To Tao, 187) - 188 => b"thai_tho_thung".to_vec(), // ถ (Tho Thung, 188) - 189 => b"thai_tho_thahan".to_vec(), // ท (Tho Thahan, 189) - 190 => b"thai_tho_thong".to_vec(), // ธ (Tho Thong, 190) - 191 => b"thai_no_nu".to_vec(), // น (No Nu, 191) - 192 => b"thai_bo_baimai".to_vec(), // บ (Bo Baimai, 192) - 193 => b"thai_po_pla".to_vec(), // ป (Po Pla, 193) - 194 => b"thai_pho_phung".to_vec(), // ผ (Pho Phung, 194) - 195 => b"thai_fo_fa".to_vec(), // ฝ (Fo Fa, 195) - 196 => b"thai_pho_phan".to_vec(), // พ (Pho Phan, 196) - 197 => b"thai_fo_fan".to_vec(), // ฟ (Fo Fan, 197) - 198 => b"thai_pho_samphao".to_vec(), // ภ (Pho Samphao, 198) - 199 => b"thai_mo_ma".to_vec(), // ม (Mo Ma, 199) - 200 => b"thai_yo_yak".to_vec(), // ย (Yo Yak, 200) - 201 => b"thai_ro_rua".to_vec(), // ร (Ro Rua, 201) - 202 => b"thai_lo_ling".to_vec(), // ล (Lo Ling, 202) - 203 => b"thai_wo_waen".to_vec(), // ว (Wo Waen, 203) - 204 => b"thai_so_sala".to_vec(), // ศ (So Sala, 204) - 205 => b"thai_so_rusi".to_vec(), // ษ (So Rusi, 205) - 206 => b"thai_so_sua".to_vec(), // ส (So Sua, 206) - 207 => b"thai_ho_hip".to_vec(), // ห (Ho Hip, 207) - 208 => b"thai_lo_chula".to_vec(), // ฬ (Lo Chula, 208) - 209 => b"thai_o_ang".to_vec(), // อ (O Ang, 209) - 210 => b"thai_ho_nokhuk".to_vec(), // ฮ (Ho Nokhuk, 210) - 211 => b"hangul_giyeok".to_vec(), // ㄱ (Giyeok, 211) - 212 => b"hangul_nieun".to_vec(), // ㄴ (Nieun, 212) - 213 => b"hangul_digeut".to_vec(), // ㄷ (Digeut, 213) - 214 => b"hangul_rieul".to_vec(), // ㄹ (Rieul, 214) - 215 => b"hangul_mieum".to_vec(), // ㅁ (Mieum, 215) - 216 => b"hangul_bieup".to_vec(), // ㅂ (Bieup, 216) - 217 => b"hangul_siot".to_vec(), // ㅅ (Siot, 217) - 218 => b"hangul_ieung".to_vec(), // ㅇ (Ieung, 218) - 219 => b"hangul_jieut".to_vec(), // ㅈ (Jieut, 219) - 220 => b"hangul_chieut".to_vec(), // ㅊ (Chieut, 220) - 221 => b"hangul_kieuk".to_vec(), // ㅋ (Kieuk, 221) - 222 => b"hangul_tieut".to_vec(), // ㅌ (Tieut, 222) - 223 => b"hangul_pieup".to_vec(), // ㅍ (Pieup, 223) - 224 => b"hangul_hieut".to_vec(), // ㅎ (Hieut, 224) - 225 => b"hangul_a".to_vec(), // ㅏ (A, 225) - 226 => b"hangul_ae".to_vec(), // ㅐ (Ae, 226) - 227 => b"hangul_ya".to_vec(), // ㅑ (Ya, 227) - 228 => b"hangul_yae".to_vec(), // ㅒ (Yae, 228) - 229 => b"hangul_eo".to_vec(), // ㅓ (Eo, 229) - 230 => b"hangul_e".to_vec(), // ㅔ (E, 230) - 231 => b"hangul_yeo".to_vec(), // ㅕ (Yeo, 231) - 232 => b"hangul_ye".to_vec(), // ㅖ (Ye, 232) - 233 => b"hangul_o".to_vec(), // ㅗ (O, 233) - 234 => b"hangul_wa".to_vec(), // ㅘ (Wa, 234) - 235 => b"hangul_wae".to_vec(), // ㅙ (Wae, 235) - 236 => b"hangul_oe".to_vec(), // ㅚ (Oe, 236) - 237 => b"hangul_yo".to_vec(), // ㅛ (Yo, 237) - 238 => b"hangul_u".to_vec(), // ㅜ (U, 238) - 239 => b"hangul_weo".to_vec(), // ㅝ (Weo, 239) - 240 => b"hangul_we".to_vec(), // ㅞ (We, 240) - 241 => 
b"hangul_wi".to_vec(), // ㅟ (Wi, 241) - 242 => b"hangul_yu".to_vec(), // ㅠ (Yu, 242) - 243 => b"hangul_eu".to_vec(), // ㅡ (Eu, 243) - 244 => b"hangul_ui".to_vec(), // ㅢ (Ui, 244) - 245 => b"hangul_i".to_vec(), // ㅣ (I, 245) - 246 => b"ethiopic_glottal_a".to_vec(), // አ (Glottal A, 246) - 247 => b"ethiopic_glottal_u".to_vec(), // ኡ (Glottal U, 247) - 248 => b"ethiopic_glottal_i".to_vec(), // ኢ (Glottal I, 248) - 249 => b"ethiopic_glottal_aa".to_vec(), // ኣ (Glottal Aa, 249) - 250 => b"ethiopic_glottal_e".to_vec(), // ኤ (Glottal E, 250) - 251 => b"ethiopic_glottal_ie".to_vec(), // እ (Glottal Ie, 251) - 252 => b"ethiopic_glottal_o".to_vec(), // ኦ (Glottal O, 252) - 253 => b"ethiopic_glottal_wa".to_vec(), // ኧ (Glottal Wa, 253) - 254 => b"ethiopic_wa".to_vec(), // ወ (Wa, 254) - 255 => b"ethiopic_wu".to_vec(), // ዉ (Wu, 255) - 256 => b"ethiopic_wi".to_vec(), // ዊ (Wi, 256) - 257 => b"ethiopic_waa".to_vec(), // ዋ (Waa, 257) - 258 => b"ethiopic_we".to_vec(), // ዌ (We, 258) - 259 => b"ethiopic_wye".to_vec(), // ው (Wye, 259) - 260 => b"ethiopic_wo".to_vec(), // ዎ (Wo, 260) - 261 => b"ethiopic_ko".to_vec(), // ኰ (Ko, 261) - 262 => b"ethiopic_ku".to_vec(), // ኱ (Ku, 262) - 263 => b"ethiopic_ki".to_vec(), // ኲ (Ki, 263) - 264 => b"ethiopic_kua".to_vec(), // ኳ (Kua, 264) - 265 => b"ethiopic_ke".to_vec(), // ኴ (Ke, 265) - 266 => b"ethiopic_kwe".to_vec(), // ኵ (Kwe, 266) - 267 => b"ethiopic_ko_alt".to_vec(), // ኶ (Ko, 267) - 268 => b"ethiopic_go".to_vec(), // ጐ (Go, 268) - 269 => b"ethiopic_gu".to_vec(), // ጑ (Gu, 269) - 270 => b"ethiopic_gi".to_vec(), // ጒ (Gi, 270) - 271 => b"ethiopic_gua".to_vec(), // መ (Gua, 271) - 272 => b"ethiopic_ge".to_vec(), // ጔ (Ge, 272) - 273 => b"ethiopic_gwe".to_vec(), // ጕ (Gwe, 273) - 274 => b"ethiopic_go_alt".to_vec(), // ጖ (Go, 274) - 275 => b"devanagari_a".to_vec(), // अ (A, 275) - 276 => b"devanagari_aa".to_vec(), // आ (Aa, 276) - 277 => b"devanagari_i".to_vec(), // इ (I, 277) - 278 => b"devanagari_ii".to_vec(), // ई (Ii, 278) - 279 => b"devanagari_u".to_vec(), // उ (U, 279) - 280 => b"devanagari_uu".to_vec(), // ऊ (Uu, 280) - 281 => b"devanagari_r".to_vec(), // ऋ (R, 281) - 282 => b"devanagari_e".to_vec(), // ए (E, 282) - 283 => b"devanagari_ai".to_vec(), // ऐ (Ai, 283) - 284 => b"devanagari_o".to_vec(), // ओ (O, 284) - 285 => b"devanagari_au".to_vec(), // औ (Au, 285) - 286 => b"devanagari_ka".to_vec(), // क (Ka, 286) - 287 => b"devanagari_kha".to_vec(), // ख (Kha, 287) - 288 => b"devanagari_ga".to_vec(), // ग (Ga, 288) - 289 => b"devanagari_gha".to_vec(), // घ (Gha, 289) - 290 => b"devanagari_nga".to_vec(), // ङ (Nga, 290) - 291 => b"devanagari_cha".to_vec(), // च (Cha, 291) - 292 => b"devanagari_chha".to_vec(), // छ (Chha, 292) - 293 => b"devanagari_ja".to_vec(), // ज (Ja, 293) - 294 => b"devanagari_jha".to_vec(), // झ (Jha, 294) - 295 => b"devanagari_nya".to_vec(), // ञ (Nya, 295) - 296 => b"devanagari_ta".to_vec(), // ट (Ta, 296) - 297 => b"devanagari_tha".to_vec(), // ठ (Tha, 297) - 298 => b"devanagari_da".to_vec(), // ड (Da, 298) - 299 => b"devanagari_dha".to_vec(), // ढ (Dha, 299) - 300 => b"devanagari_na".to_vec(), // ण (Na, 300) - 301 => b"devanagari_ta_alt".to_vec(), // त (Ta, 301) - 302 => b"devanagari_tha_alt".to_vec(), // थ (Tha, 302) - 303 => b"devanagari_da_alt".to_vec(), // द (Da, 303) - 304 => b"devanagari_dha_alt".to_vec(), // ध (Dha, 304) - 305 => b"devanagari_na_alt".to_vec(), // न (Na, 305) - 306 => b"devanagari_pa".to_vec(), // प (Pa, 306) - 307 => b"devanagari_pha".to_vec(), // फ (Pha, 307) - 308 => b"devanagari_ba".to_vec(), // ब (Ba, 308) - 
309 => b"devanagari_bha".to_vec(), // भ (Bha, 309) - 310 => b"devanagari_ma".to_vec(), // म (Ma, 310) - 311 => b"devanagari_ya".to_vec(), // य (Ya, 311) - 312 => b"devanagari_ra".to_vec(), // र (Ra, 312) - 313 => b"devanagari_la".to_vec(), // ल (La, 313) - 314 => b"devanagari_va".to_vec(), // व (Va, 314) - 315 => b"devanagari_sha".to_vec(), // श (Sha, 315) - 316 => b"devanagari_ssa".to_vec(), // ष (Ssa, 316) - 317 => b"devanagari_sa".to_vec(), // स (Sa, 317) - 318 => b"devanagari_ha".to_vec(), // ह (Ha, 318) - 319 => b"katakana_a".to_vec(), // ア (A, 319) - 320 => b"kana_i".to_vec(), - 321 => b"kana_u".to_vec(), - 322 => b"kana_e".to_vec(), - 323 => b"kana_o".to_vec(), - 324 => b"kana_a".to_vec(), - 325 => b"kana_ki".to_vec(), - 326 => b"kana_ku".to_vec(), - 327 => b"kana_ke".to_vec(), - 328 => b"kana_ko".to_vec(), - 329 => b"kana_sa".to_vec(), - 330 => b"kana_shi".to_vec(), - 331 => b"kana_su".to_vec(), - 332 => b"kana_se".to_vec(), - 333 => b"kana_so".to_vec(), - 334 => b"kana_ta".to_vec(), - 335 => b"kana_chi".to_vec(), - 336 => b"kana_tsu".to_vec(), - 337 => b"kana_te".to_vec(), - 338 => b"kana_to".to_vec(), - 339 => b"kana_na".to_vec(), - 340 => b"kana_ni".to_vec(), - 341 => b"kana_nu".to_vec(), - 342 => b"kana_ne".to_vec(), - 343 => b"kana_no".to_vec(), - 344 => b"kana_ha".to_vec(), - 345 => b"kana_hi".to_vec(), - 346 => b"kana_fu".to_vec(), - 347 => b"kana_he".to_vec(), - 348 => b"kana_ho".to_vec(), - 349 => b"kana_ma".to_vec(), - 350 => b"kana_mi".to_vec(), - 351 => b"kana_mu".to_vec(), - 352 => b"kana_me".to_vec(), - 353 => b"kana_mo".to_vec(), - 354 => b"kana_ya".to_vec(), - 355 => b"kana_yu".to_vec(), - 356 => b"kana_yo".to_vec(), - 357 => b"kana_ra".to_vec(), - 358 => b"kana_ri".to_vec(), - 359 => b"kana_ru".to_vec(), - 360 => b"kana_re".to_vec(), - 361 => b"kana_ro".to_vec(), - 362 => b"kana_wa".to_vec(), - 363 => b"kana_wo".to_vec(), - 364 => b"kana_n".to_vec(), - 365 => b"ya".to_vec(), - 366 => b"yab".to_vec(), - 367 => b"yabh".to_vec(), - 368 => b"yag".to_vec(), - 369 => b"yagh".to_vec(), - 370 => b"yaj".to_vec(), - 371 => b"yach".to_vec(), - 372 => b"yad".to_vec(), - 373 => b"yadh".to_vec(), - 374 => b"yadhe".to_vec(), - 375 => b"yaz".to_vec(), - 376 => b"yazh".to_vec(), - 377 => b"yaf".to_vec(), - 378 => b"yak".to_vec(), - 379 => b"yakv".to_vec(), - 380 => b"yaq".to_vec(), - 381 => b"yah".to_vec(), - 382 => b"yahh".to_vec(), - 383 => b"yahl".to_vec(), - 384 => b"yahm".to_vec(), - 385 => b"yayn".to_vec(), - 386 => b"yakh".to_vec(), - 387 => b"yakl".to_vec(), - 388 => b"yahq".to_vec(), - 389 => b"yash".to_vec(), - 390 => b"yi".to_vec(), - 391 => b"yij".to_vec(), - 392 => b"yizh".to_vec(), - 393 => b"yink".to_vec(), - 394 => b"yal".to_vec(), - 395 => b"yam".to_vec(), - 396 => b"yan".to_vec(), - 397 => b"yang".to_vec(), - 398 => b"yany".to_vec(), - 399 => b"yap".to_vec(), - 400 => b"yu".to_vec(), - 401 => b"a".to_vec(), - 402 => b"aa".to_vec(), - 403 => b"i".to_vec(), - 404 => b"ii".to_vec(), - 405 => b"u".to_vec(), - 406 => b"uu".to_vec(), - 407 => b"r".to_vec(), - 408 => b"rr".to_vec(), - 409 => b"l".to_vec(), - 410 => b"ll".to_vec(), - 411 => b"e".to_vec(), - 412 => b"ee".to_vec(), - 413 => b"ai".to_vec(), - 414 => b"o".to_vec(), - 415 => b"oo".to_vec(), - 416 => b"au".to_vec(), - 417 => b"ka".to_vec(), - 418 => b"kha".to_vec(), - 419 => b"ga".to_vec(), - 420 => b"gha".to_vec(), - 421 => b"nga".to_vec(), - 422 => b"cha".to_vec(), - 423 => b"chha".to_vec(), - 424 => b"ja".to_vec(), - 425 => b"jha".to_vec(), - 426 => b"nya".to_vec(), - 427 => b"ta".to_vec(), - 428 => 
b"tha".to_vec(), - 429 => b"da".to_vec(), - 430 => b"dha".to_vec(), - 431 => b"na".to_vec(), - 432 => b"pa".to_vec(), - 433 => b"pha".to_vec(), - 434 => b"ba".to_vec(), - 435 => b"bha".to_vec(), - 436 => b"ma".to_vec(), - 437 => b"ya".to_vec(), - 438 => b"ra".to_vec(), - _ => b"unknown".to_vec(), - } - // match netuid { - // // Greek Alphabet (Lowercase) - // 0 => b"root".to_vec(), // Τ (Upper case Tau) - // 1 => b"apex".to_vec(), // α (Alpha) - // 2 => b"omron".to_vec(), // β (Beta) - // 3 => b"templar".to_vec(), // γ (Gamma) - // 4 => b"targon".to_vec(), // δ (Delta) - // 5 => b"kaito".to_vec(), // ε (Epsilon) - // 6 => b"infinite".to_vec(), // ζ (Zeta) - // 7 => b"subvortex".to_vec(), // η (Eta) - // 8 => b"ptn".to_vec(), // θ (Theta) - // 9 => b"pretrain".to_vec(), // ι (Iota) - // 10 => b"sturdy".to_vec(), // κ (Kappa) - // 11 => b"dippy".to_vec(), // λ (Lambda) - // 12 => b"horde".to_vec(), // μ (Mu) - // 13 => b"dataverse".to_vec(), // ν (Nu) - // 14 => b"palaidn".to_vec(), // ξ (Xi) - // 15 => b"deval".to_vec(), // ο (Omicron) - // 16 => b"bitads".to_vec(), // π (Pi) - // 17 => b"3gen".to_vec(), // ρ (Rho) - // 18 => b"cortex".to_vec(), // σ (Sigma) - // 19 => b"inference".to_vec(), // t (Tau) - // 20 => b"bitagent".to_vec(), // υ (Upsilon) - // 21 => b"any-any".to_vec(), // φ (Phi) - // 22 => b"meta".to_vec(), // χ (Chi) - // 23 => b"social".to_vec(), // ψ (Psi) - // 24 => b"omega".to_vec(), // ω (Omega) - // 25 => b"protein".to_vec(), // א (Aleph) - // 26 => b"alchemy".to_vec(), // ב (Bet) - // 27 => b"compute".to_vec(), // ג (Gimel) - // 28 => b"oracle".to_vec(), // ד (Dalet) - // 29 => b"coldint".to_vec(), // ה (He) - // 30 => b"bet".to_vec(), // ו (Vav) - // 31 => b"naschain".to_vec(), // ז (Zayin) - // 32 => b"itsai".to_vec(), // ח (Het) - // 33 => b"ready".to_vec(), // ט (Tet) - // 34 => b"mind".to_vec(), // י (Yod) - // 35 => b"logic".to_vec(), // ך (Final Kaf) - // 36 => b"automata".to_vec(), // כ (Kaf) - // 37 => b"tuning".to_vec(), // ל (Lamed) - // 38 => b"distributed".to_vec(), // ם (Final Mem) - // 39 => b"edge".to_vec(), // מ (Mem) - // 40 => b"chunk".to_vec(), // ן (Final Nun) - // 41 => b"sportsensor".to_vec(), // נ (Nun) - // 42 => b"masa".to_vec(), // ס (Samekh) - // 43 => b"graphite".to_vec(), // ע (Ayin) - // 44 => b"score".to_vec(), // ף (Final Pe) - // 45 => b"gen42".to_vec(), // פ (Pe) - // 46 => b"neural".to_vec(), // ץ (Final Tsadi) - // 47 => b"condense".to_vec(), // צ (Tsadi) - // 48 => b"nextplace".to_vec(), // ק (Qof) - // 49 => b"automl".to_vec(), // ר (Resh) - // 50 => b"audio".to_vec(), // ש (Shin) - // 51 => b"celium".to_vec(), // ת (Tav) - // 52 => b"dojo".to_vec(), // ا (Alif) - // 53 => b"frontier".to_vec(), // ب (Ba) - // 54 => b"safescan".to_vec(), // ت (Ta) - // 55 => b"unknown".to_vec(), // ث (Tha) - // 56 => b"gradients".to_vec(), // ج (Jim) - // 57 => b"gaia".to_vec(), // ح (Ha) - // 58 => b"dippy-speach".to_vec(), // خ (Kha) - // 59 => b"agent-arena".to_vec(), // د (Dal) - // 60 => b"unknown".to_vec(), // ذ (Dhal) - // 61 => b"red team".to_vec(), // ر (Ra) - // 62 => b"agentao".to_vec(), // ز (Zay) - // 63 => b"lean-in".to_vec(), // س (Sin) - // 64 => b"chutes".to_vec(), // ش (Shin) - // // Default case - // _ => b"unknown".to_vec(), // unknown subnet. 
- // } - } + SubnetIdentitiesV2::::try_get(netuid) + .and_then(|identity| { + if !identity.subnet_name.is_empty() { + Ok(identity.subnet_name) + } else { + Err(()) + } + }) + .unwrap_or_else(|_| { + match netuid { + 0 => b"root".to_vec(), // Τ (Upper case Tau) + 1 => b"apex".to_vec(), // α (Alpha) + 2 => b"omron".to_vec(), // β (Beta) + 3 => b"templar".to_vec(), // γ (Gamma) + 4 => b"targon".to_vec(), // δ (Delta) + 5 => b"kaito".to_vec(), // ε (Epsilon) + 6 => b"infinite".to_vec(), // ζ (Zeta) + 7 => b"subvortex".to_vec(), // η (Eta) + 8 => b"ptn".to_vec(), // θ (Theta) + 9 => b"pretrain".to_vec(), // ι (Iota) + 10 => b"sturdy".to_vec(), // κ (Kappa) + 11 => b"dippy".to_vec(), // λ (Lambda) + 12 => b"horde".to_vec(), // μ (Mu) + 13 => b"dataverse".to_vec(), // ν (Nu) + 14 => b"palaidn".to_vec(), // ξ (Xi) + 15 => b"deval".to_vec(), // ο (Omicron) + 16 => b"bitads".to_vec(), // π (Pi) + 17 => b"3gen".to_vec(), // ρ (Rho) + 18 => b"cortex".to_vec(), // σ (Sigma) + 19 => b"inference".to_vec(), // t (Tau) + 20 => b"bitagent".to_vec(), // υ (Upsilon) + 21 => b"any-any".to_vec(), // φ (Phi) + 22 => b"meta".to_vec(), // χ (Chi) + 23 => b"social".to_vec(), // ψ (Psi) + 24 => b"omega".to_vec(), // ω (Omega) + 25 => b"protein".to_vec(), // א (Aleph) + 26 => b"alchemy".to_vec(), // ב (Bet) + 27 => b"compute".to_vec(), // ג (Gimel) + 28 => b"oracle".to_vec(), // ד (Dalet) + 29 => b"coldint".to_vec(), // ה (He) + 30 => b"bet".to_vec(), // ו (Vav) + 31 => b"naschain".to_vec(), // ז (Zayin) + 32 => b"itsai".to_vec(), // ח (Het) + 33 => b"ready".to_vec(), // ט (Tet) + 34 => b"mind".to_vec(), // י (Yod) + 35 => b"logic".to_vec(), // ך (Final Kaf) + 36 => b"automata".to_vec(), // כ (Kaf) + 37 => b"tuning".to_vec(), // ל (Lamed) + 38 => b"distributed".to_vec(), // ם (Final Mem) + 39 => b"edge".to_vec(), // מ (Mem) + 40 => b"chunk".to_vec(), // ן (Final Nun) + 41 => b"sportsensor".to_vec(), // נ (Nun) + 42 => b"masa".to_vec(), // ס (Samekh) + 43 => b"graphite".to_vec(), // ע (Ayin) + 44 => b"score".to_vec(), // ף (Final Pe) + 45 => b"gen42".to_vec(), // פ (Pe) + 46 => b"neural".to_vec(), // ץ (Final Tsadi) + 47 => b"condense".to_vec(), // צ (Tsadi) + 48 => b"nextplace".to_vec(), // ק (Qof) + 49 => b"automl".to_vec(), // ר (Resh) + 50 => b"audio".to_vec(), // ש (Shin) + 51 => b"celium".to_vec(), // ת (Tav) + 52 => b"dojo".to_vec(), // ا (Alif) + 53 => b"frontier".to_vec(), // ب (Ba) + 54 => b"safescan".to_vec(), // ت (Ta) + 55 => b"unknown".to_vec(), // ث (Tha) + 56 => b"gradients".to_vec(), // ج (Jim) + 57 => b"gaia".to_vec(), // ح (Ha) + 58 => b"dippy-speach".to_vec(), // خ (Kha) + 59 => b"agent-arena".to_vec(), // د (Dal) + 60 => b"unknown".to_vec(), // ذ (Dhal) + 61 => b"red team".to_vec(), // ر (Ra) + 62 => b"agentao".to_vec(), // ز (Zay) + 63 => b"lean-in".to_vec(), // س (Sin) + 64 => b"chutes".to_vec(), // ش (Shin) + 65 => b"sad".to_vec(), + 66 => b"dad".to_vec(), + 67 => b"ta".to_vec(), + 68 => b"dha".to_vec(), + 69 => b"ain".to_vec(), + 70 => b"ghayn".to_vec(), + 71 => b"fa".to_vec(), + 72 => b"qaf".to_vec(), + 73 => b"kaf".to_vec(), + 74 => b"lam".to_vec(), + 75 => b"mim".to_vec(), + 76 => b"nun".to_vec(), + 77 => b"ha".to_vec(), + 78 => b"waw".to_vec(), + 79 => b"ya".to_vec(), + 80 => b"alef".to_vec(), + 81 => b"fehu".to_vec(), + 82 => b"uruz".to_vec(), + 83 => b"thurisaz".to_vec(), + 84 => b"ansuz".to_vec(), + 85 => b"raidho".to_vec(), + 86 => b"kaunan".to_vec(), + 87 => b"cyr_yeru".to_vec(), + 88 => b"algiz".to_vec(), + 89 => b"berkanan".to_vec(), + 90 => b"ogham".to_vec(), + 91 => 
b"beith".to_vec(), + 92 => b"luis".to_vec(), + 93 => b"fearn".to_vec(), + 94 => b"sail".to_vec(), + 95 => b"nion".to_vec(), + 96 => b"forfeda".to_vec(), + 97 => b"ani".to_vec(), + 98 => b"bani".to_vec(), + 99 => b"gani".to_vec(), + 100 => b"doni".to_vec(), + 101 => b"eni".to_vec(), + 102 => b"vini".to_vec(), + 103 => b"ayp".to_vec(), + 104 => b"ben".to_vec(), + 105 => b"gim".to_vec(), + 106 => b"da".to_vec(), + 107 => b"ech".to_vec(), + 108 => b"za".to_vec(), + 109 => b"armeni".to_vec(), + 110 => b"grave".to_vec(), + 111 => b"io".to_vec(), + 112 => b"dje".to_vec(), + 113 => b"gje".to_vec(), + 114 => b"ie".to_vec(), + 115 => b"dze".to_vec(), + 116 => b"hard_sign".to_vec(), + 117 => b"alfa".to_vec(), + 118 => b"alfas".to_vec(), + 119 => b"vida".to_vec(), // Ⲃ (Vida, 119) + 120 => b"vida_small".to_vec(), // ⲃ (Small Vida, 120) + 121 => b"gamma".to_vec(), // Ⲅ (Gamma, 121) + 122 => b"gamma_small".to_vec(), // ⲅ (Small Gamma, 122) + 123 => b"brahmi_a".to_vec(), // 𑀀 (A, 123) + 124 => b"brahmi_aa".to_vec(), // 𑀁 (Aa, 124) + 125 => b"brahmi_i".to_vec(), // 𑀂 (I, 125) + 126 => b"brahmi_ii".to_vec(), // 𑀃 (Ii, 126) + 127 => b"brahmi_u".to_vec(), // 𑀅 (U, 127) + 128 => b"la".to_vec(), + 129 => b"va".to_vec(), + 130 => b"sha".to_vec(), + 131 => b"ssa".to_vec(), + 132 => b"sa".to_vec(), + 133 => b"ha".to_vec(), + 134 => b"glagolitic_az".to_vec(), // Ⰰ (Az, 134) + 135 => b"glagolitic_buky".to_vec(), // Ⰱ (Buky, 135) + 136 => b"glagolitic_vede".to_vec(), // Ⰲ (Vede, 136) + 137 => b"glagolitic_glagoli".to_vec(), // Ⰳ (Glagoli, 137) + 138 => b"glagolitic_dobro".to_vec(), // Ⰴ (Dobro, 138) + 139 => b"glagolitic_yest".to_vec(), // Ⰵ (Yest, 139) + 140 => b"glagolitic_zhivete".to_vec(), // Ⰶ (Zhivete, 140) + 141 => b"glagolitic_zemlja".to_vec(), // Ⰷ (Zemlja, 141) + 142 => b"glagolitic_izhe".to_vec(), // Ⰸ (Izhe, 142) + 143 => b"glagolitic_initial_izhe".to_vec(), // Ⰹ (Initial Izhe, 143) + 144 => b"glagolitic_i".to_vec(), // Ⰺ (I, 144) + 145 => b"glagolitic_djerv".to_vec(), // Ⰻ (Djerv, 145) + 146 => b"glagolitic_kako".to_vec(), // Ⰼ (Kako, 146) + 147 => b"glagolitic_ljudije".to_vec(), // Ⰽ (Ljudije, 147) + 148 => b"glagolitic_myse".to_vec(), // Ⰾ (Myse, 148) + 149 => b"glagolitic_nash".to_vec(), // Ⰿ (Nash, 149) + 150 => b"glagolitic_on".to_vec(), // Ⱀ (On, 150) + 151 => b"glagolitic_pokoj".to_vec(), // Ⱁ (Pokoj, 151) + 152 => b"glagolitic_rtsy".to_vec(), // Ⱂ (Rtsy, 152) + 153 => b"glagolitic_slovo".to_vec(), // Ⱃ (Slovo, 153) + 154 => b"glagolitic_tvrido".to_vec(), // Ⱄ (Tvrido, 154) + 155 => b"glagolitic_uku".to_vec(), // Ⱅ (Uku, 155) + 156 => b"glagolitic_fert".to_vec(), // Ⱆ (Fert, 156) + 157 => b"glagolitic_xrivi".to_vec(), // Ⱇ (Xrivi, 157) + 158 => b"glagolitic_ot".to_vec(), // Ⱈ (Ot, 158) + 159 => b"glagolitic_cy".to_vec(), // Ⱉ (Cy, 159) + 160 => b"glagolitic_shcha".to_vec(), // Ⱊ (Shcha, 160) + 161 => b"glagolitic_er".to_vec(), // Ⱋ (Er, 161) + 162 => b"glagolitic_yeru".to_vec(), // Ⱌ (Yeru, 162) + 163 => b"glagolitic_small_yer".to_vec(), // Ⱍ (Small Yer, 163) + 164 => b"glagolitic_yo".to_vec(), // Ⱎ (Yo, 164) + 165 => b"glagolitic_yu".to_vec(), // Ⱏ (Yu, 165) + 166 => b"glagolitic_ja".to_vec(), // Ⱐ (Ja, 166) + 167 => b"thai_ko_kai".to_vec(), // ก (Ko Kai, 167) + 168 => b"thai_kho_khai".to_vec(), // ข (Kho Khai, 168) + 169 => b"thai_kho_khuat".to_vec(), // ฃ (Kho Khuat, 169) + 170 => b"thai_kho_khon".to_vec(), // ค (Kho Khon, 170) + 171 => b"thai_kho_rakhang".to_vec(), // ฅ (Kho Rakhang, 171) + 172 => b"thai_kho_khwai".to_vec(), // ฆ (Kho Khwai, 172) + 173 => b"thai_ngo_ngu".to_vec(), // ง (Ngo 
Ngu, 173) + 174 => b"thai_cho_chan".to_vec(), // จ (Cho Chan, 174) + 175 => b"thai_cho_ching".to_vec(), // ฉ (Cho Ching, 175) + 176 => b"thai_cho_chang".to_vec(), // ช (Cho Chang, 176) + 177 => b"thai_so_so".to_vec(), // ซ (So So, 177) + 178 => b"thai_cho_choe".to_vec(), // ฌ (Cho Choe, 178) + 179 => b"thai_yo_ying".to_vec(), // ญ (Yo Ying, 179) + 180 => b"thai_do_chada".to_vec(), // ฎ (Do Chada, 180) + 181 => b"thai_to_patak".to_vec(), // ฏ (To Patak, 181) + 182 => b"thai_tho_than".to_vec(), // ฐ (Tho Than, 182) + 183 => b"thai_tho_nangmontho".to_vec(), // ฑ (Tho Nangmontho, 183) + 184 => b"thai_tho_phuthao".to_vec(), // ฒ (Tho Phuthao, 184) + 185 => b"thai_no_nen".to_vec(), // ณ (No Nen, 185) + 186 => b"thai_do_dek".to_vec(), // ด (Do Dek, 186) + 187 => b"thai_to_tao".to_vec(), // ต (To Tao, 187) + 188 => b"thai_tho_thung".to_vec(), // ถ (Tho Thung, 188) + 189 => b"thai_tho_thahan".to_vec(), // ท (Tho Thahan, 189) + 190 => b"thai_tho_thong".to_vec(), // ธ (Tho Thong, 190) + 191 => b"thai_no_nu".to_vec(), // น (No Nu, 191) + 192 => b"thai_bo_baimai".to_vec(), // บ (Bo Baimai, 192) + 193 => b"thai_po_pla".to_vec(), // ป (Po Pla, 193) + 194 => b"thai_pho_phung".to_vec(), // ผ (Pho Phung, 194) + 195 => b"thai_fo_fa".to_vec(), // ฝ (Fo Fa, 195) + 196 => b"thai_pho_phan".to_vec(), // พ (Pho Phan, 196) + 197 => b"thai_fo_fan".to_vec(), // ฟ (Fo Fan, 197) + 198 => b"thai_pho_samphao".to_vec(), // ภ (Pho Samphao, 198) + 199 => b"thai_mo_ma".to_vec(), // ม (Mo Ma, 199) + 200 => b"thai_yo_yak".to_vec(), // ย (Yo Yak, 200) + 201 => b"thai_ro_rua".to_vec(), // ร (Ro Rua, 201) + 202 => b"thai_lo_ling".to_vec(), // ล (Lo Ling, 202) + 203 => b"thai_wo_waen".to_vec(), // ว (Wo Waen, 203) + 204 => b"thai_so_sala".to_vec(), // ศ (So Sala, 204) + 205 => b"thai_so_rusi".to_vec(), // ษ (So Rusi, 205) + 206 => b"thai_so_sua".to_vec(), // ส (So Sua, 206) + 207 => b"thai_ho_hip".to_vec(), // ห (Ho Hip, 207) + 208 => b"thai_lo_chula".to_vec(), // ฬ (Lo Chula, 208) + 209 => b"thai_o_ang".to_vec(), // อ (O Ang, 209) + 210 => b"thai_ho_nokhuk".to_vec(), // ฮ (Ho Nokhuk, 210) + 211 => b"hangul_giyeok".to_vec(), // ㄱ (Giyeok, 211) + 212 => b"hangul_nieun".to_vec(), // ㄴ (Nieun, 212) + 213 => b"hangul_digeut".to_vec(), // ㄷ (Digeut, 213) + 214 => b"hangul_rieul".to_vec(), // ㄹ (Rieul, 214) + 215 => b"hangul_mieum".to_vec(), // ㅁ (Mieum, 215) + 216 => b"hangul_bieup".to_vec(), // ㅂ (Bieup, 216) + 217 => b"hangul_siot".to_vec(), // ㅅ (Siot, 217) + 218 => b"hangul_ieung".to_vec(), // ㅇ (Ieung, 218) + 219 => b"hangul_jieut".to_vec(), // ㅈ (Jieut, 219) + 220 => b"hangul_chieut".to_vec(), // ㅊ (Chieut, 220) + 221 => b"hangul_kieuk".to_vec(), // ㅋ (Kieuk, 221) + 222 => b"hangul_tieut".to_vec(), // ㅌ (Tieut, 222) + 223 => b"hangul_pieup".to_vec(), // ㅍ (Pieup, 223) + 224 => b"hangul_hieut".to_vec(), // ㅎ (Hieut, 224) + 225 => b"hangul_a".to_vec(), // ㅏ (A, 225) + 226 => b"hangul_ae".to_vec(), // ㅐ (Ae, 226) + 227 => b"hangul_ya".to_vec(), // ㅑ (Ya, 227) + 228 => b"hangul_yae".to_vec(), // ㅒ (Yae, 228) + 229 => b"hangul_eo".to_vec(), // ㅓ (Eo, 229) + 230 => b"hangul_e".to_vec(), // ㅔ (E, 230) + 231 => b"hangul_yeo".to_vec(), // ㅕ (Yeo, 231) + 232 => b"hangul_ye".to_vec(), // ㅖ (Ye, 232) + 233 => b"hangul_o".to_vec(), // ㅗ (O, 233) + 234 => b"hangul_wa".to_vec(), // ㅘ (Wa, 234) + 235 => b"hangul_wae".to_vec(), // ㅙ (Wae, 235) + 236 => b"hangul_oe".to_vec(), // ㅚ (Oe, 236) + 237 => b"hangul_yo".to_vec(), // ㅛ (Yo, 237) + 238 => b"hangul_u".to_vec(), // ㅜ (U, 238) + 239 => b"hangul_weo".to_vec(), // ㅝ (Weo, 239) + 240 => 
b"hangul_we".to_vec(), // ㅞ (We, 240) + 241 => b"hangul_wi".to_vec(), // ㅟ (Wi, 241) + 242 => b"hangul_yu".to_vec(), // ㅠ (Yu, 242) + 243 => b"hangul_eu".to_vec(), // ㅡ (Eu, 243) + 244 => b"hangul_ui".to_vec(), // ㅢ (Ui, 244) + 245 => b"hangul_i".to_vec(), // ㅣ (I, 245) + 246 => b"ethiopic_glottal_a".to_vec(), // አ (Glottal A, 246) + 247 => b"ethiopic_glottal_u".to_vec(), // ኡ (Glottal U, 247) + 248 => b"ethiopic_glottal_i".to_vec(), // ኢ (Glottal I, 248) + 249 => b"ethiopic_glottal_aa".to_vec(), // ኣ (Glottal Aa, 249) + 250 => b"ethiopic_glottal_e".to_vec(), // ኤ (Glottal E, 250) + 251 => b"ethiopic_glottal_ie".to_vec(), // እ (Glottal Ie, 251) + 252 => b"ethiopic_glottal_o".to_vec(), // ኦ (Glottal O, 252) + 253 => b"ethiopic_glottal_wa".to_vec(), // ኧ (Glottal Wa, 253) + 254 => b"ethiopic_wa".to_vec(), // ወ (Wa, 254) + 255 => b"ethiopic_wu".to_vec(), // ዉ (Wu, 255) + 256 => b"ethiopic_wi".to_vec(), // ዊ (Wi, 256) + 257 => b"ethiopic_waa".to_vec(), // ዋ (Waa, 257) + 258 => b"ethiopic_we".to_vec(), // ዌ (We, 258) + 259 => b"ethiopic_wye".to_vec(), // ው (Wye, 259) + 260 => b"ethiopic_wo".to_vec(), // ዎ (Wo, 260) + 261 => b"ethiopic_ko".to_vec(), // ኰ (Ko, 261) + 262 => b"ethiopic_ku".to_vec(), // ኱ (Ku, 262) + 263 => b"ethiopic_ki".to_vec(), // ኲ (Ki, 263) + 264 => b"ethiopic_kua".to_vec(), // ኳ (Kua, 264) + 265 => b"ethiopic_ke".to_vec(), // ኴ (Ke, 265) + 266 => b"ethiopic_kwe".to_vec(), // ኵ (Kwe, 266) + 267 => b"ethiopic_ko_alt".to_vec(), // ኶ (Ko, 267) + 268 => b"ethiopic_go".to_vec(), // ጐ (Go, 268) + 269 => b"ethiopic_gu".to_vec(), // ጑ (Gu, 269) + 270 => b"ethiopic_gi".to_vec(), // ጒ (Gi, 270) + 271 => b"ethiopic_gua".to_vec(), // መ (Gua, 271) + 272 => b"ethiopic_ge".to_vec(), // ጔ (Ge, 272) + 273 => b"ethiopic_gwe".to_vec(), // ጕ (Gwe, 273) + 274 => b"ethiopic_go_alt".to_vec(), // ጖ (Go, 274) + 275 => b"devanagari_a".to_vec(), // अ (A, 275) + 276 => b"devanagari_aa".to_vec(), // आ (Aa, 276) + 277 => b"devanagari_i".to_vec(), // इ (I, 277) + 278 => b"devanagari_ii".to_vec(), // ई (Ii, 278) + 279 => b"devanagari_u".to_vec(), // उ (U, 279) + 280 => b"devanagari_uu".to_vec(), // ऊ (Uu, 280) + 281 => b"devanagari_r".to_vec(), // ऋ (R, 281) + 282 => b"devanagari_e".to_vec(), // ए (E, 282) + 283 => b"devanagari_ai".to_vec(), // ऐ (Ai, 283) + 284 => b"devanagari_o".to_vec(), // ओ (O, 284) + 285 => b"devanagari_au".to_vec(), // औ (Au, 285) + 286 => b"devanagari_ka".to_vec(), // क (Ka, 286) + 287 => b"devanagari_kha".to_vec(), // ख (Kha, 287) + 288 => b"devanagari_ga".to_vec(), // ग (Ga, 288) + 289 => b"devanagari_gha".to_vec(), // घ (Gha, 289) + 290 => b"devanagari_nga".to_vec(), // ङ (Nga, 290) + 291 => b"devanagari_cha".to_vec(), // च (Cha, 291) + 292 => b"devanagari_chha".to_vec(), // छ (Chha, 292) + 293 => b"devanagari_ja".to_vec(), // ज (Ja, 293) + 294 => b"devanagari_jha".to_vec(), // झ (Jha, 294) + 295 => b"devanagari_nya".to_vec(), // ञ (Nya, 295) + 296 => b"devanagari_ta".to_vec(), // ट (Ta, 296) + 297 => b"devanagari_tha".to_vec(), // ठ (Tha, 297) + 298 => b"devanagari_da".to_vec(), // ड (Da, 298) + 299 => b"devanagari_dha".to_vec(), // ढ (Dha, 299) + 300 => b"devanagari_na".to_vec(), // ण (Na, 300) + 301 => b"devanagari_ta_alt".to_vec(), // त (Ta, 301) + 302 => b"devanagari_tha_alt".to_vec(), // थ (Tha, 302) + 303 => b"devanagari_da_alt".to_vec(), // द (Da, 303) + 304 => b"devanagari_dha_alt".to_vec(), // ध (Dha, 304) + 305 => b"devanagari_na_alt".to_vec(), // न (Na, 305) + 306 => b"devanagari_pa".to_vec(), // प (Pa, 306) + 307 => b"devanagari_pha".to_vec(), // फ (Pha, 307) + 308 
=> b"devanagari_ba".to_vec(), // ब (Ba, 308) + 309 => b"devanagari_bha".to_vec(), // भ (Bha, 309) + 310 => b"devanagari_ma".to_vec(), // म (Ma, 310) + 311 => b"devanagari_ya".to_vec(), // य (Ya, 311) + 312 => b"devanagari_ra".to_vec(), // र (Ra, 312) + 313 => b"devanagari_la".to_vec(), // ल (La, 313) + 314 => b"devanagari_va".to_vec(), // व (Va, 314) + 315 => b"devanagari_sha".to_vec(), // श (Sha, 315) + 316 => b"devanagari_ssa".to_vec(), // ष (Ssa, 316) + 317 => b"devanagari_sa".to_vec(), // स (Sa, 317) + 318 => b"devanagari_ha".to_vec(), // ह (Ha, 318) + 319 => b"katakana_a".to_vec(), // ア (A, 319) + 320 => b"kana_i".to_vec(), + 321 => b"kana_u".to_vec(), + 322 => b"kana_e".to_vec(), + 323 => b"kana_o".to_vec(), + 324 => b"kana_a".to_vec(), + 325 => b"kana_ki".to_vec(), + 326 => b"kana_ku".to_vec(), + 327 => b"kana_ke".to_vec(), + 328 => b"kana_ko".to_vec(), + 329 => b"kana_sa".to_vec(), + 330 => b"kana_shi".to_vec(), + 331 => b"kana_su".to_vec(), + 332 => b"kana_se".to_vec(), + 333 => b"kana_so".to_vec(), + 334 => b"kana_ta".to_vec(), + 335 => b"kana_chi".to_vec(), + 336 => b"kana_tsu".to_vec(), + 337 => b"kana_te".to_vec(), + 338 => b"kana_to".to_vec(), + 339 => b"kana_na".to_vec(), + 340 => b"kana_ni".to_vec(), + 341 => b"kana_nu".to_vec(), + 342 => b"kana_ne".to_vec(), + 343 => b"kana_no".to_vec(), + 344 => b"kana_ha".to_vec(), + 345 => b"kana_hi".to_vec(), + 346 => b"kana_fu".to_vec(), + 347 => b"kana_he".to_vec(), + 348 => b"kana_ho".to_vec(), + 349 => b"kana_ma".to_vec(), + 350 => b"kana_mi".to_vec(), + 351 => b"kana_mu".to_vec(), + 352 => b"kana_me".to_vec(), + 353 => b"kana_mo".to_vec(), + 354 => b"kana_ya".to_vec(), + 355 => b"kana_yu".to_vec(), + 356 => b"kana_yo".to_vec(), + 357 => b"kana_ra".to_vec(), + 358 => b"kana_ri".to_vec(), + 359 => b"kana_ru".to_vec(), + 360 => b"kana_re".to_vec(), + 361 => b"kana_ro".to_vec(), + 362 => b"kana_wa".to_vec(), + 363 => b"kana_wo".to_vec(), + 364 => b"kana_n".to_vec(), + 365 => b"ya".to_vec(), + 366 => b"yab".to_vec(), + 367 => b"yabh".to_vec(), + 368 => b"yag".to_vec(), + 369 => b"yagh".to_vec(), + 370 => b"yaj".to_vec(), + 371 => b"yach".to_vec(), + 372 => b"yad".to_vec(), + 373 => b"yadh".to_vec(), + 374 => b"yadhe".to_vec(), + 375 => b"yaz".to_vec(), + 376 => b"yazh".to_vec(), + 377 => b"yaf".to_vec(), + 378 => b"yak".to_vec(), + 379 => b"yakv".to_vec(), + 380 => b"yaq".to_vec(), + 381 => b"yah".to_vec(), + 382 => b"yahh".to_vec(), + 383 => b"yahl".to_vec(), + 384 => b"yahm".to_vec(), + 385 => b"yayn".to_vec(), + 386 => b"yakh".to_vec(), + 387 => b"yakl".to_vec(), + 388 => b"yahq".to_vec(), + 389 => b"yash".to_vec(), + 390 => b"yi".to_vec(), + 391 => b"yij".to_vec(), + 392 => b"yizh".to_vec(), + 393 => b"yink".to_vec(), + 394 => b"yal".to_vec(), + 395 => b"yam".to_vec(), + 396 => b"yan".to_vec(), + 397 => b"yang".to_vec(), + 398 => b"yany".to_vec(), + 399 => b"yap".to_vec(), + 400 => b"yu".to_vec(), + 401 => b"a".to_vec(), + 402 => b"aa".to_vec(), + 403 => b"i".to_vec(), + 404 => b"ii".to_vec(), + 405 => b"u".to_vec(), + 406 => b"uu".to_vec(), + 407 => b"r".to_vec(), + 408 => b"rr".to_vec(), + 409 => b"l".to_vec(), + 410 => b"ll".to_vec(), + 411 => b"e".to_vec(), + 412 => b"ee".to_vec(), + 413 => b"ai".to_vec(), + 414 => b"o".to_vec(), + 415 => b"oo".to_vec(), + 416 => b"au".to_vec(), + 417 => b"ka".to_vec(), + 418 => b"kha".to_vec(), + 419 => b"ga".to_vec(), + 420 => b"gha".to_vec(), + 421 => b"nga".to_vec(), + 422 => b"cha".to_vec(), + 423 => b"chha".to_vec(), + 424 => b"ja".to_vec(), + 425 => b"jha".to_vec(), + 426 => 
b"nya".to_vec(), + 427 => b"ta".to_vec(), + 428 => b"tha".to_vec(), + 429 => b"da".to_vec(), + 430 => b"dha".to_vec(), + 431 => b"na".to_vec(), + 432 => b"pa".to_vec(), + 433 => b"pha".to_vec(), + 434 => b"ba".to_vec(), + 435 => b"bha".to_vec(), + 436 => b"ma".to_vec(), + 437 => b"ya".to_vec(), + 438 => b"ra".to_vec(), + _ => b"unknown".to_vec(), + } + }) } pub fn get_symbol_for_subnet(netuid: u16) -> Vec { diff --git a/pallets/subtensor/src/subnets/weights.rs b/pallets/subtensor/src/subnets/weights.rs index 06a6e4be03..ec6c9949bc 100644 --- a/pallets/subtensor/src/subnets/weights.rs +++ b/pallets/subtensor/src/subnets/weights.rs @@ -810,7 +810,7 @@ impl Pallet { /// /// * 'weights' ( Vec, Compact)>> ): /// - Tuples of (uid, value) of the weights to be set on the chain, - /// one Vec for each netuid in the batch. + /// one Vec for each netuid in the batch. /// /// * 'version_keys' ( Vec> ): /// - The network version key, one u64 for each netuid in the batch. diff --git a/pallets/subtensor/src/tests/children.rs b/pallets/subtensor/src/tests/children.rs index 34c7b5459b..1a9016f13f 100644 --- a/pallets/subtensor/src/tests/children.rs +++ b/pallets/subtensor/src/tests/children.rs @@ -3988,3 +3988,109 @@ fn test_pending_cooldown_one_day() { assert_eq!(pending_children.1, curr_block + expected_cooldown); }); } + +#[test] +fn test_do_set_childkey_take_success() { + new_test_ext(1).execute_with(|| { + // Setup + let coldkey = U256::from(1); + let hotkey = U256::from(2); + let netuid: u16 = 1; + let take = 5000; + + // Add network and register hotkey + add_network(netuid, 13, 0); + register_ok_neuron(netuid, hotkey, coldkey, 0); + + // Set childkey take + assert_ok!(SubtensorModule::do_set_childkey_take( + coldkey, hotkey, netuid, take + )); + + // Verify the take was set correctly + assert_eq!(SubtensorModule::get_childkey_take(&hotkey, netuid), take); + let tx_type: u16 = TransactionType::SetChildkeyTake.into(); + assert_eq!( + TransactionKeyLastBlock::::get((hotkey, netuid, tx_type,)), + System::block_number() + ); + }); +} + +#[test] +fn test_do_set_childkey_take_non_associated_coldkey() { + new_test_ext(1).execute_with(|| { + // Setup + let coldkey = U256::from(1); + let hotkey = U256::from(2); + let hotkey2 = U256::from(3); + let netuid: u16 = 1; + let take = 5000; + + // Add network and register hotkey + add_network(netuid, 13, 0); + register_ok_neuron(netuid, hotkey, coldkey, 0); + + // Set childkey take + assert_noop!( + SubtensorModule::do_set_childkey_take(coldkey, hotkey2, netuid, take), + Error::::NonAssociatedColdKey + ); + }); +} + +#[test] +fn test_do_set_childkey_take_invalid_take_value() { + new_test_ext(1).execute_with(|| { + // Setup + let coldkey = U256::from(1); + let hotkey = U256::from(2); + let netuid: u16 = 1; + let take = SubtensorModule::get_max_childkey_take() + 1; + + // Add network and register hotkey + add_network(netuid, 13, 0); + register_ok_neuron(netuid, hotkey, coldkey, 0); + + // Set childkey take + assert_noop!( + SubtensorModule::do_set_childkey_take(coldkey, hotkey, netuid, take), + Error::::InvalidChildkeyTake + ); + }); +} + +#[test] +fn test_do_set_childkey_take_rate_limit_exceeded() { + new_test_ext(1).execute_with(|| { + // Setup + let coldkey = U256::from(1); + let hotkey = U256::from(2); + let netuid: u16 = 1; + let initial_take = 3000; + let higher_take = 5000; + let lower_take = 1000; + + add_network(netuid, 13, 0); + register_ok_neuron(netuid, hotkey, coldkey, 0); + + // Set initial childkey take + assert_ok!(SubtensorModule::do_set_childkey_take( 
+ coldkey, + hotkey, + netuid, + initial_take + )); + + // Try to increase the take value, should hit rate limit + assert_noop!( + SubtensorModule::do_set_childkey_take(coldkey, hotkey, netuid, higher_take), + Error::<Test>::TxChildkeyTakeRateLimitExceeded + ); + + // lower take value should be ok + assert_ok!(SubtensorModule::do_set_childkey_take( + coldkey, hotkey, netuid, lower_take + )); + }); +} diff --git a/pallets/subtensor/src/tests/consensus.rs b/pallets/subtensor/src/tests/consensus.rs new file mode 100644 index 0000000000..e1db49203e --- /dev/null +++ b/pallets/subtensor/src/tests/consensus.rs @@ -0,0 +1,547 @@ +#![allow( + clippy::arithmetic_side_effects, + clippy::indexing_slicing, + clippy::unwrap_used +)] + +use super::mock::*; +use crate::*; + +use frame_support::assert_ok; +use rand::{Rng, SeedableRng, distributions::Uniform, rngs::StdRng, seq::SliceRandom, thread_rng}; +use sp_core::U256; +use std::time::Instant; +use substrate_fixed::transcendental::{PI, cos, ln, sqrt}; +use substrate_fixed::types::{I32F32, I64F64}; + +pub fn fixed(val: f32) -> I32F32 { + I32F32::from_num(val) +} + +pub fn fixed_to_u16(x: I32F32) -> u16 { + x.to_num::<u16>() +} + +pub fn fixed_proportion_to_u16(x: I32F32) -> u16 { + fixed_to_u16(x * I32F32::from_num(u16::MAX)) +} + +// Normalizes (sum to 1 except 0) the input vector directly in-place. +#[allow(dead_code)] +pub fn inplace_normalize(x: &mut [I32F32]) { + let x_sum: I32F32 = x.iter().sum(); + if x_sum == I32F32::from_num(0.0_f32) { + return; + } + for i in x.iter_mut() { + *i /= x_sum; + } +} + +// Inplace normalize the passed positive integer weights so that they sum to u16 max value. +fn normalize_weights(mut weights: Vec<u16>) -> Vec<u16> { + let sum: u64 = weights.iter().map(|x| *x as u64).sum(); + if sum == 0 { + return weights; + } + weights.iter_mut().for_each(|x| { + *x = (*x as u64 * u16::MAX as u64 / sum) as u16; + }); + weights +} + +// Return as usize an I32F32 ratio of a usize input, avoiding the 0% and 100% extremes. +fn non_extreme_fixed_ratio(ratio: I32F32, total: usize) -> usize { + if total == 0 { + return total; + } + let mut subset: usize = (ratio * I32F32::from_num(total)).to_num::<usize>(); + if subset == 0 { + subset = 1; + } else if subset == total { + subset = total - 1; + } + subset +} + +// Box-Muller Transform converting two uniform random samples to a normal random sample. +fn normal(size: usize, rng: &mut StdRng, dist: &Uniform<u16>) -> Vec<I32F32> { + let max: I32F32 = I32F32::from_num(u16::MAX); + let two: I32F32 = I32F32::from_num(2); + let eps: I32F32 = I32F32::from_num(0.000001); + let pi: I32F32 = I32F32::from_num(PI); + + let uniform_u16: Vec<u16> = (0..(2 * size)).map(|_| rng.sample(dist)).collect(); + let uniform: Vec<I32F32> = uniform_u16 + .iter() + .map(|&x| I32F32::from_num(x) / max) + .collect(); + let mut normal: Vec<I32F32> = vec![I32F32::from_num(0); size]; + + for i in 0..size { + let u1: I32F32 = uniform[i] + eps; + let u2: I32F32 = uniform[i + size] + eps; + normal[i] = sqrt::<I32F32, I32F32>(-two * ln::<I32F32, I32F32>(u1).expect("")).expect("") + * cos(two * pi * u2); + } + normal +} + +// Returns validators and servers uids with either blockwise, regular, or random interleaving.
+fn distribute_nodes( + validators_n: usize, + network_n: usize, + interleave: usize, +) -> (Vec<u16>, Vec<u16>) { + let mut validators: Vec<u16> = vec![]; + let mut servers: Vec<u16> = vec![]; + + if interleave == 0 { + // blockwise [validator_block, server_block] + validators = (0..validators_n as u16).collect(); + servers = (validators_n as u16..network_n as u16).collect(); + } else if interleave == 1 { + // regular interleaving [val, srv, srv, ..., srv, val, srv, srv, ..., srv, val, srv, ..., srv] + (validators, servers) = (0..network_n as u16) + .collect::<Vec<u16>>() + .iter() + .partition(|&i| *i as usize % (network_n / validators_n) == 0); + } else if interleave == 2 { + // random interleaving + let mut permuted_uids: Vec<u16> = (0..network_n as u16).collect(); + permuted_uids.shuffle(&mut thread_rng()); + validators = permuted_uids[0..validators_n].into(); + servers = permuted_uids[validators_n..network_n].into(); + } + + (validators, servers) +} + +#[allow(dead_code)] +fn uid_stats(netuid: u16, uid: u16) { + log::info!( + "stake: {:?}", + SubtensorModule::get_total_stake_for_hotkey(&(U256::from(uid))) + ); + log::info!("rank: {:?}", SubtensorModule::get_rank_for_uid(netuid, uid)); + log::info!( + "trust: {:?}", + SubtensorModule::get_trust_for_uid(netuid, uid) + ); + log::info!( + "consensus: {:?}", + SubtensorModule::get_consensus_for_uid(netuid, uid) + ); + log::info!( + "incentive: {:?}", + SubtensorModule::get_incentive_for_uid(netuid, uid) + ); + log::info!( + "dividend: {:?}", + SubtensorModule::get_dividends_for_uid(netuid, uid) + ); + log::info!( + "emission: {:?}", + SubtensorModule::get_emission_for_uid(netuid, uid) + ); +} + +#[allow(clippy::too_many_arguments)] +fn init_run_epochs( + netuid: u16, + n: u16, + validators: &[u16], + servers: &[u16], + epochs: u16, + stake_per_validator: u64, + server_self: bool, + input_stake: &[u64], + use_input_stake: bool, + input_weights: &[Vec<(u16, u16)>], + use_input_weights: bool, + random_weights: bool, + random_seed: u64, + sparse: bool, + bonds_penalty: u16, +) { + // === Create the network + add_network(netuid, u16::MAX - 1, 0); // set higher tempo to avoid built-in epoch, then manual epoch instead + + // === Set bonds penalty + SubtensorModule::set_bonds_penalty(netuid, bonds_penalty); + + // === Register uids + SubtensorModule::set_max_allowed_uids(netuid, n); + for key in 0..n { + let stake = if use_input_stake { + input_stake[key as usize] + } else if validators.contains(&key) { + stake_per_validator + } else { + // only validators receive stake + 0 + }; + + // let stake: u64 = 1; // alternative test: all nodes receive stake, should be same outcome, except stake + SubtensorModule::add_balance_to_coldkey_account(&(U256::from(key)), stake); + SubtensorModule::append_neuron(netuid, &(U256::from(key)), 0); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &U256::from(key), + &U256::from(key), + netuid, + stake, + ); + } + assert_eq!(SubtensorModule::get_subnetwork_n(netuid), n); + + // === Issue validator permits + SubtensorModule::set_max_allowed_validators(netuid, validators.len() as u16); + assert_eq!( + SubtensorModule::get_max_allowed_validators(netuid), + validators.len() as u16 + ); + SubtensorModule::epoch(netuid, 1_000_000_000); // run first epoch to set allowed validators + run_to_block(1); // run to next block to ensure weights are set on nodes after their registration block + + // === Set weights + let mut rng = StdRng::seed_from_u64(random_seed); // constant seed so weights over multiple runs are equal + let range =
Uniform::new(0, u16::MAX); + let mut weights: Vec = vec![u16::MAX / n; servers.len()]; + for uid in validators { + if random_weights { + weights = (0..servers.len()).map(|_| rng.sample(range)).collect(); + weights = normalize_weights(weights); + // assert_eq!(weights.iter().map(|x| *x as u64).sum::(), u16::MAX as u64); // normalized weight sum not always u16::MAX + } + if use_input_weights { + let sparse_weights = input_weights[*uid as usize].clone(); + weights = sparse_weights.iter().map(|(_, w)| *w).collect(); + let srvs: Vec = sparse_weights.iter().map(|(s, _)| *s).collect(); + assert_ok!(SubtensorModule::set_weights( + RuntimeOrigin::signed(U256::from(*uid as u64)), + netuid, + srvs, + weights.clone(), + 0 + )); + } else { + assert_ok!(SubtensorModule::set_weights( + RuntimeOrigin::signed(U256::from(*uid as u64)), + netuid, + servers.to_vec(), + weights.clone(), + 0 + )); + } + } + if server_self { + for uid in servers { + assert_ok!(SubtensorModule::set_weights( + RuntimeOrigin::signed(U256::from(*uid as u64)), + netuid, + vec![*uid], + vec![u16::MAX], + 0 + )); // server self-weight + } + } + + // === Run the epochs. + log::info!("Start {epochs} epoch(s)"); + let start = Instant::now(); + for _ in 0..epochs { + if sparse { + SubtensorModule::epoch(netuid, 1_000_000_000); + } else { + SubtensorModule::epoch_dense(netuid, 1_000_000_000); + } + } + let duration = start.elapsed(); + log::info!( + "Time elapsed in (sparse={sparse}) epoch() is: {:?}", + duration + ); + + // let bonds = SubtensorModule::get_bonds( netuid ); + // for (uid, node) in [ (validators[0], "validator"), (servers[0], "server") ] { + // log::info!("\n{node}" ); + // uid_stats(netuid, uid); + // log::info!("bonds: {:?} (on validator), {:?} (on server)", bonds[uid as usize][0], bonds[uid as usize][servers[0] as usize]); + // } +} + +// Generate a random graph that is split into a major and minor set, each setting specific weight on itself and the complement on the other. 
+fn split_graph( + major_stake: I32F32, + major_weight: I32F32, + minor_weight: I32F32, + weight_stddev: I32F32, + validators_n: usize, + network_n: usize, + interleave: usize, +) -> ( + Vec<u16>, + Vec<u16>, + Vec<u16>, + Vec<u16>, + Vec<u16>, + Vec<u16>, + Vec<u64>, + Vec<Vec<(u16, u16)>>, + I32F32, +) { + let servers_n: usize = network_n - validators_n; + let major_servers_n: usize = non_extreme_fixed_ratio(major_stake, servers_n); + let major_validators_n: usize = non_extreme_fixed_ratio(major_stake, validators_n); + + let (validators, servers) = distribute_nodes(validators_n, network_n, interleave); + let major_validators: Vec<u16> = (0..major_validators_n).map(|i| validators[i]).collect(); + let minor_validators: Vec<u16> = (major_validators_n..validators_n) + .map(|i| validators[i]) + .collect(); + let major_servers: Vec<u16> = (0..major_servers_n).map(|i| servers[i]).collect(); + let minor_servers: Vec<u16> = (major_servers_n..servers_n).map(|i| servers[i]).collect(); + + let zero: I32F32 = I32F32::from_num(0); + let one: I32F32 = I32F32::from_num(1); + let stddev: I32F32 = I32F32::from_num(0.3); + let total_stake: I64F64 = I64F64::from_num(21_000_000_000_000_000_u64); + let mut rng = StdRng::seed_from_u64(0); // constant seed so weights over multiple runs are equal + let dist = Uniform::new(0, u16::MAX); + + let mut stake: Vec<u64> = vec![0; network_n]; + let mut stake_fixed: Vec<I32F32> = vec![zero; network_n]; + for (ratio, vals) in [ + (major_stake, &major_validators), + (one - major_stake, &minor_validators), + ] { + let mut sample: Vec<I32F32> = normal(vals.len(), &mut rng, &dist) + .iter() + .map(|x: &I32F32| { + let v: I32F32 = (stddev * x) + one; + if v < zero { zero } else { v } + }) + .collect(); + inplace_normalize(&mut sample); + for (i, &val) in vals.iter().enumerate() { + stake[val as usize] = + (I64F64::from_num(ratio) * I64F64::from_num(sample[i]) * total_stake) + .to_num::<u64>(); + stake_fixed[val as usize] = + I32F32::from_num(I64F64::from_num(ratio) * I64F64::from_num(sample[i])); + } + } + + let mut weights: Vec<Vec<(u16, u16)>> = vec![vec![]; network_n]; + let mut weights_fixed: Vec<Vec<I32F32>> = vec![vec![zero; network_n]; network_n]; + for (first, second, vals) in [ + (major_weight, one - major_weight, &major_validators), + (one - minor_weight, minor_weight, &minor_validators), + ] { + for &val in vals { + for (weight, srvs) in [(first, &major_servers), (second, &minor_servers)] { + let mut sample: Vec<I32F32> = normal(srvs.len(), &mut rng, &dist) + .iter() + .map(|x: &I32F32| { + let v: I32F32 = (weight_stddev * x) + one; + if v < zero { zero } else { v } + }) + .collect(); + inplace_normalize(&mut sample); + + for (i, &srv) in srvs.iter().enumerate() { + weights[val as usize].push((srv, fixed_proportion_to_u16(weight * sample[i]))); + weights_fixed[val as usize][srv as usize] = weight * sample[i]; + } + } + inplace_normalize(&mut weights_fixed[val as usize]); + } + } + + inplace_normalize(&mut stake_fixed); + + // Calculate stake-weighted mean per server + let mut weight_mean: Vec<I32F32> = vec![zero; network_n]; + for val in 0..network_n { + if stake_fixed[val] > zero { + for (srv, weight_mean_row) in weight_mean.iter_mut().enumerate().take(network_n) { + *weight_mean_row += stake_fixed[val] * weights_fixed[val][srv]; + } + } + } + + // Calculate stake-weighted absolute standard deviation + let mut weight_dev: Vec<I32F32> = vec![zero; network_n]; + for val in 0..network_n { + if stake_fixed[val] > zero { + for srv in 0..network_n { + weight_dev[srv] += + stake_fixed[val] * (weight_mean[srv] - weights_fixed[val][srv]).abs(); + } + } + } + + // Calculate rank-weighted mean of weight_dev + let
avg_weight_dev: I32F32 = + weight_dev.iter().sum::() / weight_mean.iter().sum::(); + + ( + validators, + servers, + major_validators, + minor_validators, + major_servers, + minor_servers, + stake, + weights, + avg_weight_dev, + ) +} + +// Test consensus guarantees with an epoch on a graph with 4096 nodes, of which the first 128 are validators, the graph is split into a major and minor set, each setting specific weight on itself and the complement on the other. Asserts that the major emission ratio >= major stake ratio. +// #[test] +// fn test_consensus_guarantees() { +// let netuid: u16 = 0; +// let network_n: u16 = 512; +// let validators_n: u16 = 64; +// let epochs: u16 = 1; +// let interleave = 2; +// log::info!("test_consensus_guarantees ({network_n:?}, {validators_n:?} validators)"); +// for (major_stake, major_weight, minor_weight, weight_stddev) in [ +// (0.51, 1., 1., 0.001), +// (0.51, 0.03, 0., 0.001), +// (0.51, 0.51, 0.49, 0.001), +// (0.51, 0.51, 1., 0.001), +// (0.51, 0.61, 0.8, 0.1), +// (0.6, 0.67, 0.65, 0.2), +// (0.6, 0.74, 0.77, 0.4), +// (0.6, 0.76, 0.8, 0.4), +// (0.6, 0.76, 1., 0.4), +// (0.6, 0.92, 1., 0.4), +// (0.6, 0.94, 1., 0.4), +// (0.65, 0.78, 0.85, 0.6), +// (0.7, 0.81, 0.85, 0.8), +// (0.7, 0.83, 0.85, 1.), +// ] { +// let ( +// validators, +// servers, +// major_validators, +// minor_validators, +// major_servers, +// minor_servers, +// stake, +// weights, +// _avg_weight_dev, +// ) = split_graph( +// fixed(major_stake), +// fixed(major_weight), +// fixed(minor_weight), +// fixed(weight_stddev), +// validators_n as usize, +// network_n as usize, +// interleave as usize, +// ); + +// new_test_ext(1).execute_with(|| { +// init_run_epochs( +// netuid, +// network_n, +// &validators, +// &servers, +// epochs, +// 1, +// true, +// &stake, +// true, +// &weights, +// true, +// false, +// 0, +// false, +// ); + +// let mut major_emission: I64F64 = I64F64::from_num(0); +// let mut minor_emission: I64F64 = I64F64::from_num(0); +// for set in [major_validators, major_servers] { +// for uid in set { +// major_emission += +// I64F64::from_num(SubtensorModule::get_emission_for_uid(netuid, uid)); +// } +// } +// for set in [minor_validators, minor_servers] { +// for uid in set { +// minor_emission += +// I64F64::from_num(SubtensorModule::get_emission_for_uid(netuid, uid)); +// } +// } +// let major_ratio: I32F32 = +// I32F32::from_num(major_emission / (major_emission + minor_emission)); +// assert!(major_stake <= major_ratio); +// }); +// } +// } + +// Map the retention graph for consensus guarantees with an single epoch on a graph with 512 nodes, of which the first 64 are validators, the graph is split into a major and minor set, each setting specific weight on itself and the complement on the other. +#[test] +#[ignore] // Not an automated test! 
+fn map_consensus_guarantees() { + let netuid: u16 = 1; + let network_n: u16 = 512; + let validators_n: u16 = 64; + let epochs: u16 = 1; + let interleave = 0; + let weight_stddev: I32F32 = fixed(0.4); + let bonds_penalty: u16 = + (std::env::args().nth(2).unwrap().parse::().unwrap() * f32::from(u16::MAX - 1)) as u16; + println!("["); + for _major_stake in [0.51, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 0.99] { + let major_stake: I32F32 = I32F32::from_num(_major_stake); + for _major_weight in 0..51 { + let major_weight: I32F32 = I32F32::from_num(50 - _major_weight) / I32F32::from_num(50); + for _minor_weight in 0..51 { + let minor_weight: I32F32 = + I32F32::from_num(50 - _minor_weight) / I32F32::from_num(50); + let ( + validators, + servers, + major_validators, + minor_validators, + major_servers, + minor_servers, + stake, + weights, + avg_weight_dev, + ) = split_graph( + major_stake, + major_weight, + minor_weight, + weight_stddev, + validators_n as usize, + network_n as usize, + interleave as usize, + ); + + new_test_ext(1).execute_with(|| { + init_run_epochs(netuid, network_n, &validators, &servers, epochs, 1, true, &stake, true, &weights, true, false, 0, true, bonds_penalty); + + let mut major_emission: I64F64 = I64F64::from_num(0); + let mut minor_emission: I64F64 = I64F64::from_num(0); + for set in [major_validators, major_servers] { + for uid in set { + major_emission += I64F64::from_num(SubtensorModule::get_emission_for_uid( netuid, uid )); + } + } + for set in [minor_validators, minor_servers] { + for uid in set { + minor_emission += I64F64::from_num(SubtensorModule::get_emission_for_uid( netuid, uid )); + } + } + let major_ratio: I32F32 = I32F32::from_num(major_emission / (major_emission + minor_emission)); + println!("[{major_stake}, {major_weight:.2}, {minor_weight:.2}, {avg_weight_dev:.3}, {major_ratio:.3}], "); + }); + } + } + } + println!("]"); +} diff --git a/pallets/subtensor/src/tests/epoch.rs b/pallets/subtensor/src/tests/epoch.rs index aaaf93e086..2557709912 100644 --- a/pallets/subtensor/src/tests/epoch.rs +++ b/pallets/subtensor/src/tests/epoch.rs @@ -5,7 +5,7 @@ )] use super::mock::*; -use crate::epoch::math::safe_exp; +use crate::epoch::math::{fixed, u16_proportion_to_fixed}; use crate::*; use approx::assert_abs_diff_eq; @@ -983,7 +983,7 @@ fn test_512_graph_random_weights() { // }); // } -// Test bonds exponential moving average over a sequence of epochs. 
+// Test bonds exponential moving average over a sequence of epochs - no liquid alpha #[test] fn test_bonds() { new_test_ext(1).execute_with(|| { @@ -1287,223 +1287,6 @@ fn test_bonds() { }); } -// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --package pallet-subtensor --lib -- tests::epoch::test_512_graph_random_weights --exact --show-output --nocapture -#[test] -fn test_bonds_with_liquid_alpha() { - new_test_ext(1).execute_with(|| { - let sparse: bool = true; - let n: u16 = 8; - let netuid: u16 = 1; - let tempo: u16 = 1; - let max_stake: u64 = 4; - let stakes: Vec = vec![1, 2, 3, 4, 0, 0, 0, 0]; - let block_number = System::block_number(); - add_network(netuid, tempo, 0); - SubtensorModule::set_max_allowed_uids(netuid, n); - SubtensorModule::set_max_registrations_per_block(netuid, n); - SubtensorModule::set_target_registrations_per_interval(netuid, n); - SubtensorModule::set_weights_set_rate_limit(netuid, 0); - SubtensorModule::set_min_allowed_weights(netuid, 1); - SubtensorModule::set_max_weight_limit(netuid, u16::MAX); - - // Register validators and servers - for key in 0..n as u64 { - SubtensorModule::add_balance_to_coldkey_account(&U256::from(key), max_stake); - let (nonce, work): (u64, Vec) = SubtensorModule::create_work_for_block_number( - netuid, - block_number, - key * 1_000_000, - &U256::from(key), - ); - assert_ok!(SubtensorModule::register( - RuntimeOrigin::signed(U256::from(key)), - netuid, - block_number, - nonce, - work, - U256::from(key), - U256::from(key) - )); - SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( - &U256::from(key), - &U256::from(key), - netuid, - stakes[key as usize], - ); - } - - // Initilize with first epoch - SubtensorModule::epoch(netuid, 1_000_000_000); - next_block_no_epoch(netuid); - - // Set weights - for uid in 0..(n / 2) { - SubtensorModule::set_validator_permit_for_uid(netuid, uid, true); - assert_ok!(SubtensorModule::set_weights( - RuntimeOrigin::signed(U256::from(uid)), - netuid, - ((n / 2)..n).collect(), - vec![u16::MAX / 4, u16::MAX / 2, (u16::MAX / 4) * 3, u16::MAX], - 0 - )); - } - - // Enable Liquid Alpha - SubtensorModule::set_liquid_alpha_enabled(netuid, true); - // Run epoch with Liquid Alpha - if sparse { - SubtensorModule::epoch(netuid, 1_000_000_000); - } else { - SubtensorModule::epoch_dense(netuid, 1_000_000_000); - } - - // Check bonds and emissions - let bonds = SubtensorModule::get_bonds(netuid); - - /* n: 8 - current_block: 2; activity_cutoff: 5000; - Last update: [1, 1, 1, 1, 0, 0, 0, 0] - activity_cutoff: 5000 - Last update: [2, 2, 2, 2, 1, 1, 1, 1] - Inactive: [false, false, false, false, false, false, false, false] - Block at registration: [1, 1, 1, 1, 1, 1, 1, 1] - hotkeys: [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7)] - Stake: [1, 2, 3, 4, 0, 0, 0, 0] - Normalised Stake: [0.0999999999, 0.2, 0.2999999998, 0.4, 0, 0, 0, 0] - validator_permits: [true, true, true, true, true, true, true, true] - max_allowed_validators: 8 - new_validator_permits: [true, true, true, true, true, true, true, true] - Active Stake: [0.0999999999, 0.2, 0.2999999998, 0.4, 0, 0, 0, 0] - Weights: [[(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [], [], [], []] - Weights (permit): [[(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [(4, 16383), (5, 32767), (6, 49149), (7, 
65535)], [], [], [], []] - Weights (permit+diag): [[(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [], [], [], []] - Weights (permit+diag+outdate): [[(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [], [], [], []] - Weights (mask+norm): [[(4, 0.0999975584), (5, 0.2000012207), (6, 0.2999926754), (7, 0.400008545)], [(4, 0.0999975584), (5, 0.2000012207), (6, 0.2999926754), (7, 0.400008545)], [(4, 0.0999975584), (5, 0.2000012207), (6, 0.2999926754), (7, 0.400008545)], [(4, 0.0999975584), (5, 0.2000012207), (6, 0.2999926754), (7, 0.400008545)], [], [], [], []] - Ranks (before): [0, 0, 0, 0, 0.099997558, 0.2000012202, 0.2999926745, 0.4000085443] - Consensus: [0, 0, 0, 0, 0.0999975584, 0.2000012207, 0.2999926754, 0.400008545] - Weights: [[(4, 0.0999975584), (5, 0.2000012207), (6, 0.2999926754), (7, 0.400008545)], [(4, 0.0999975584), (5, 0.2000012207), (6, 0.2999926754), (7, 0.400008545)], [(4, 0.0999975584), (5, 0.2000012207), (6, 0.2999926754), (7, 0.400008545)], [(4, 0.0999975584), (5, 0.2000012207), (6, 0.2999926754), (7, 0.400008545)], [], [], [], []] - Validator Trust: [0.9999999995, 0.9999999995, 0.9999999995, 0.9999999995, 0, 0, 0, 0] - Ranks (after): [0, 0, 0, 0, 0.099997558, 0.2000012202, 0.2999926745, 0.4000085443] - T: [0, 0, 0, 0, 1, 1, 1, 1] - Incentive (=Rank): [0, 0, 0, 0, 0.0999975582, 0.2000012207, 0, 0.0999975582, 0.2000012207, 0.2999926752, 0.4000085455] - B: [[], [], [], [], [], [], [], []] - B (outdatedmask): [[], [], [], [], [], [], [], []] - B (mask+norm): [[], [], [], [], [], [], [], []] - ΔB: [[(4, 0.0099997558), (5, 0.020000122), (6, 0.0299992673), (7, 0.0400008543)], [(4, 0.0199995115), (5, 0.040000244), (6, 0.0599985349), (7, 0.0800017088)], [(4, 0.0299992673), (5, 0.060000366), (6, 0.0899978024), (7, 0.1200025633)], [(4, 0.0399990233), (5, 0.080000488), (6, 0.11999707), (7, 0.1600034179)], [], [], [], []] - ΔB (norm): [[(4, 0.0999999996), (5, 0.0999999999), (6, 0.0999999994), (7, 0.0999999996)], [(4, 0.1999999995), (5, 0.2), (6, 0.1999999997), (7, 0.1999999997)], [(4, 0.299999999), (5, 0.2999999998), (6, 0.3), (7, 0.3)], [(4, 0.4000000013), (5, 0.4), (6, 0.4000000004), (7, 0.4000000001)], [], [], [], []] - Exponential Moving Average Bonds Liquid Alpha: [[(4, 0.0499983232), (5, 0.0899999999), (6, 0.0899999994), (7, 0.0899999996)], [(4, 0.0999966469), (5, 0.18), (6, 0.1799999997), (7, 0.1799999997)], [(4, 0.1499949703), (5, 0.2699999998), (6, 0.2699999998), (7, 0.2699999998)], [(4, 0.199993295), (5, 0.3599999999), (6, 0.36), (7, 0.3599999999)], [], [], [], []] - Exponential Moving Average Bonds: [[(4, 0.0999999992), (5, 0.0999999999), (6, 0.0999999994), (7, 0.0999999996)], [(4, 0.1999999995), (5, 0.2), (6, 0.1999999997), (7, 0.1999999997)], [(4, 0.2999999993), (5, 0.2999999998), (6, 0.3), (7, 0.3)], [(4, 0.4000000015), (5, 0.4), (6, 0.4000000004), (7, 0.4000000001)], [], [], [], []] - Dividends: [0.0999999994, 0.1999999997, 0.3, 0.4000000006, 0, 0, 0, 0] - Normalized Server Emission: [0, 0, 0, 0, 0.049998779, 0.1000006103, 0.1499963375, 0.2000042726] - Server Emission: [0, 0, 0, 0, 49998779, 100000610, 149996337, 200004272] - Normalized Validator Emission: [0.0499999996, 0.0999999999, 0.15, 0.2000000002, 0, 0, 0, 0] - Validator Emission: [49999999, 99999999, 149999999, 
200000000, 0, 0, 0, 0] - Normalized Combined Emission: [0.0499999996, 0.0999999999, 0.15, 0.2000000002, 0.049998779, 0.1000006103, 0.1499963375, 0.2000042726] - Combined Emission: [49999999, 99999999, 149999999, 200000000, 49998779, 100000610, 149996337, 200004272] - Pruning Scores: [0.0499999996, 0.0999999999, 0.15, 0.2000000002, 0.049998779, 0.1000006103, 0.1499963375, 0.2000042726] - */ - - // Expected bonds calculations - // For uid 0: - // Initial weights: [0.25, 0.5, 0.75, 1.0] - // Active stake: [1, 2, 3, 4] - // ΔB = W◦S = [0.25*1, 0.5*2, 0.75*3, 1.0*4] = [0.25, 1.0, 2.25, 4.0] - // Normalize ΔB: [0.25/7.5, 1.0/7.5, 2.25/7.5, 4.0/7.5] = [0.0333, 0.1333, 0.3, 0.5333] - // Final bonds for netuid: [16383, 32767, 49151, 65535] - - assert_eq!(bonds[0][4], 16383); // Note: Calculated as explained above - assert_eq!(bonds[1][4], 32767); // Note: Calculated as explained above - assert_eq!(bonds[2][4], 49151); // Note: Calculated as explained above - assert_eq!(bonds[3][4], 65535); // Note: Calculated as explained above - - // === Set self-weight only on val1 - let uid = 0; - assert_ok!(SubtensorModule::set_weights( - RuntimeOrigin::signed(U256::from(uid)), - netuid, - vec![uid], - vec![u16::MAX], - 0 - )); - next_block_no_epoch(netuid); - if sparse { - SubtensorModule::epoch(netuid, 1_000_000_000); - } else { - SubtensorModule::epoch_dense(netuid, 1_000_000_000); - } - - let bonds = SubtensorModule::get_bonds(netuid); - assert_eq!(bonds[0][4], 2862); - assert_eq!(bonds[1][4], 32767); - assert_eq!(bonds[2][4], 49151); - assert_eq!(bonds[3][4], 65535); - - // === Set self-weight only on val2 - let uid = 1; - assert_ok!(SubtensorModule::set_weights( - RuntimeOrigin::signed(U256::from(uid)), - netuid, - vec![uid], - vec![u16::MAX], - 0 - )); - next_block_no_epoch(netuid); - if sparse { - SubtensorModule::epoch(netuid, 1_000_000_000); - } else { - SubtensorModule::epoch_dense(netuid, 1_000_000_000); - } - let bonds = SubtensorModule::get_bonds(netuid); - - /* n: 8 - current_block: 4; activity_cutoff: 5000; - Last update: [2, 3, 2, 2, 1, 1, 1, 1] - Inactive: [false, false, false, false, false, false, false, false] - Block at registration: [1, 1, 1, 1, 1, 1, 1, 1] - hotkeys: [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7)] - Stake: [1, 2, 3, 4, 0, 0, 0, 0] - Normalised Stake: [0.0999999999, 0.2, 0.2999999998, 0.4, 0, 0, 0, 0] - validator_permits: [true, true, true, true, true, true, true, true] - max_allowed_validators: 64 - new_validator_permits: [true, true, true, true, true, true, true, true] - Active Stake: [0.0999999999, 0.2, 0.2999999998, 0.4, 0, 0, 0, 0] - Weights: [[(0, 65535)], [(1, 65535)], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [], [], [], []] - Weights (permit): [[(0, 65535)], [(1, 65535)], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [], [], [], []] - Weights (permit+diag): [[], [], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [], [], [], []] - Weights (permit+diag+outdate): [[], [], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [], [], [], []] - Weights (mask+norm): [[], [], [(4, 0.0999975584), (5, 0.2000012207), (6, 0.2999926754), (7, 0.400008545)], [(4, 0.0999975584), (5, 0.2000012207), (6, 0.2999926754), (7, 0.400008545)], [], [], [], []] - Ranks (before): [0, 0, 0, 0, 0.0699982906, 0.1400008542, 0.2099948723, 0.2800059812] - Consensus: [0, 0, 
0, 0, 0.0999975584, 0.2000012207, 0.2999926754, 0.400008545] - Weights: [[], [], [(4, 0.0999975584), (5, 0.2000012207), (6, 0.2999926754), (7, 0.400008545)], [(4, 0.0999975584), (5, 0.2000012207), (6, 0.2999926754), (7, 0.400008545)], [], [], [], []] - Validator Trust: [0, 0, 0.9999999995, 0.9999999995, 0, 0, 0, 0] - Ranks (after): [0, 0, 0, 0, 0.0699982906, 0.1400008542, 0.2099948723, 0.2800059812] - T: [0, 0, 0, 0, 1, 1, 1, 1] - Incentive (=Rank): [0, 0, 0, 0, 0.0999975582, 0.2000012207, 0.2999926754, 0.4000085455] - B: [[(4, 7760), (5, 1489), (6, 1489), (7, 1489)], [(4, 32767), (5, 32767), (6, 32767), (7, 32767)], [(4, 49151), (5, 49151), (6, 49151), (7, 49151)], [(4, 65535), (5, 65535), (6, 65535), (7, 65535)], [], [], [], []] - B (outdatedmask): [[(4, 7760), (5, 1489), (6, 1489), (7, 1489)], [(4, 32767), (5, 32767), (6, 32767), (7, 32767)], [(4, 49151), (5, 49151), (6, 49151), (7, 49151)], [(4, 65535), (5, 65535), (6, 65535), (7, 65535)], [], [], [], []] - B (mask+norm): [[(4, 0.0499958121), (5, 0.00999718), (6, 0.00999718), (7, 0.00999718)], [(4, 0.211109894), (5, 0.2199983886), (6, 0.2199983886), (7, 0.2199983886)], [(4, 0.3166680625), (5, 0.3300009398), (6, 0.3300009398), (7, 0.3300009398)], [(4, 0.4222262308), (5, 0.4400034912), (6, 0.4400034912), (7, 0.4400034912)], [], [], [], []] - ΔB: [[], [], [(4, 0.0299992673), (5, 0.060000366), (6, 0.0899978024), (7, 0.1200025633)], [(4, 0.0399990233), (5, 0.080000488), (6, 0.11999707), (7, 0.1600034179)], [], [], [], []] - ΔB (norm): [[], [], [(4, 0.428571427), (5, 0.4285714284), (6, 0.4285714284), (7, 0.4285714284)], [(4, 0.5714285728), (5, 0.5714285714), (6, 0.5714285714), (7, 0.5714285714)], [], [], [], []] - Exponential Moving Average Bonds Liquid Alpha: [[(4, 0.024998744), (5, 0.000999718), (6, 0.000999718), (7, 0.000999718)], [(4, 0.105558486), (5, 0.0219998388), (6, 0.0219998388), (7, 0.0219998388)], [(4, 0.3726178685), (5, 0.4187143792), (6, 0.4187143792), (7, 0.4187143792)], [(4, 0.4968249004), (5, 0.5582860631), (6, 0.5582860631), (7, 0.5582860631)], [], [], [], []] - Exponential Moving Average Bonds: [[(4, 0.024998744), (5, 0.000999718), (6, 0.000999718), (7, 0.000999718)], [(4, 0.105558486), (5, 0.0219998388), (6, 0.0219998388), (7, 0.0219998388)], [(4, 0.3726178687), (5, 0.4187143794), (6, 0.4187143794), (7, 0.4187143794)], [(4, 0.4968249009), (5, 0.5582860636), (6, 0.5582860636), (7, 0.5582860636)], [], [], [], []] - Dividends: [0.0033995616, 0.030355499, 0.4141048414, 0.5521400978, 0, 0, 0, 0] - Normalized Server Emission: [0, 0, 0, 0, 0.049998779, 0.1000006103, 0.1499963377, 0.2000042726] - Server Emission: [0, 0, 0, 0, 49998779, 100000610, 149996337, 200004272] - Normalized Validator Emission: [0.0016997808, 0.0151777493, 0.2070524206, 0.2760700488, 0, 0, 0, 0] - Validator Emission: [1699780, 15177749, 207052420, 276070048, 0, 0, 0, 0] - Normalized Combined Emission: [0.0016997808, 0.0151777493, 0.2070524206, 0.2760700488, 0.049998779, 0.1000006103, 0.1499963377, 0.2000042726] - Combined Emission: [1699780, 15177749, 207052420, 276070048, 49998779, 100000610, 149996337, 200004272] - Pruning Scores: [0.0016997808, 0.0151777493, 0.2070524206, 0.2760700488, 0.049998779, 0.1000006103, 0.1499963377, 0.2000042726] - */ - - assert_eq!(bonds[0][4], 435); - assert_eq!(bonds[1][4], 4985); - assert_eq!(bonds[2][4], 49151); - assert_eq!(bonds[3][4], 65535); - }); -} - -// #[test] fn test_set_alpha_disabled() { new_test_ext(1).execute_with(|| { @@ -1995,7 +1778,7 @@ fn test_zero_weights() { S: [1, 0]; S (mask): [1, 0]; S (mask+norm): 
[1, 0]; Block at registration: [0, 0] W: [[], []]; W (diagmask): [[], []]; W (diag+outdatemask): [[], []]; W (mask+norm): [[], []] R: [0, 0]; W (threshold): [[], []]; T: [0, 0]; C: [0.006693358, 0.006693358]; I: [0, 0] - B: [[], []]; B (outdatedmask): [[], []]; B (mask+norm): [[], []]; + B: [[], []]; B (mask+norm): [[], []]; ΔB: [[], []]; ΔB (norm): [[], []]; emaB: [[], []]; D: [0, 0] E: [1000000000, 0]; P: [1, 0] */ for validator in 0..(n / 2) { @@ -2031,7 +1814,7 @@ fn test_zero_weights() { W: [[], [(1, 1)]] W (diagmask): [[], []]; W (diag+outdatemask): [[], []]; W (mask+norm): [[], []] R: [0, 0]; W (threshold): [[], []]; T: [0, 0]; C: [0.006693358, 0.006693358]; I: [0, 0] - B: [[], []]: B (outdatedmask): [[], []]; B (mask+norm): [[], []] + B: [[], []]: B (mask+norm): [[], []] ΔB: [[], []]; ΔB (norm): [[], []]; emaB: [[], []]; D: [0, 0] E: [1000000000, 0]; P: [1, 0] */ for validator in 0..(n / 2) { @@ -2086,7 +1869,7 @@ fn test_zero_weights() { S: [1, 0]; S (mask): [1, 0]; S (mask+norm): [1, 0]; Block at registration: [0, 2]; W: [[(1, 1)], []]; W (diagmask): [[(1, 1)], []]; W (diag+outdatemask): [[], []]; W (mask+norm): [[], []]; R: [0, 0]; W (threshold): [[], []]; T: [0, 0]; C: [0.006693358, 0.006693358]; I: [0, 0]; - B: [[], []]; B (outdatedmask): [[], []]; B (mask+norm): [[], []]; + B: [[], []]; B (mask+norm): [[], []]; ΔB: [[], []]; ΔB (norm): [[], []]; emaB: [[], []]; D: [0, 0]; E: [1000000000, 0]; P: [1, 0] */ for validator in 0..(n / 2) { @@ -2120,7 +1903,7 @@ fn test_zero_weights() { S: [1, 0]; S (mask): [1, 0]; S (mask+norm): [1, 0]; Block at registration: [0, 2]; W: [[(1, 1)], []]; W (diagmask): [[(1, 1)], []]; W (diag+outdatemask): [[(1, 1)], []]; W (mask+norm): [[(1, 1)], []]; R: [0, 1]; W (threshold): [[(1, 1)], []]; T: [0, 1]; C: [0.006693358, 0.9933076561]; I: [0, 1]; - B: [[], []]; B (outdatedmask): [[], []]; B (mask+norm): [[], []]; + B: [[], []]; B (mask+norm): [[], []]; ΔB: [[(1, 1)], []]; ΔB (norm): [[(1, 1)], []]; emaB: [[(1, 1)], []]; D: [1, 0]; emaB (max-upscale): [[(1, 1)], []] E: [500000000, 500000000]; P: [0.5, 0.5] */ for validator in 0..n { @@ -2312,13 +2095,14 @@ fn test_deregistered_miner_bonds() { }); } -// Test that epoch assigns validator permits to highest stake uids, varies uid interleaving and stake values. +// Test that epoch assigns validator permits to highest stake uids that are over the stake threshold, varies uid interleaving and stake values. 
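The updated assertions in this test read the permit expectation directly off stake: a uid is expected to hold a validator permit only when its stake is at or above the threshold configured via set_stake_threshold. A minimal sketch of that expectation, with an illustrative helper name that is not part of the pallet's API:

// Sketch only: mirrors the comparison the permit assertions in this test perform.
// `stakes` is the per-uid stake vector built by the test and `stake_threshold`
// is the value passed to SubtensorModule::set_stake_threshold.
fn expected_validator_permit(stakes: &[u64], uid: usize, stake_threshold: u64) -> bool {
    stakes[uid] >= stake_threshold
}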
#[test] fn test_validator_permits() { let netuid: u16 = 1; let tempo: u16 = u16::MAX - 1; // high tempo to skip automatic epochs in on_initialize, use manual epochs instead for interleave in 0..3 { for (network_n, validators_n) in [(2, 1), (4, 2), (8, 4)] { + let min_stake = validators_n as u64; for assignment in 0..=1 { let (validators, servers) = distribute_nodes(validators_n as usize, network_n, interleave as usize); @@ -2349,6 +2133,7 @@ fn test_validator_permits() { netuid, network_n as u16, ); + SubtensorModule::set_stake_threshold(min_stake); // === Register [validator1, validator2, server1, server2] for key in 0..network_n as u64 { @@ -2390,7 +2175,7 @@ fn test_validator_permits() { SubtensorModule::epoch(netuid, 1_000_000_000); // run first epoch to set allowed validators for validator in &validators { assert_eq!( - correct, + stake[*validator as usize] >= min_stake, SubtensorModule::get_validator_permit_for_uid(netuid, *validator) ); } @@ -2428,7 +2213,7 @@ fn test_validator_permits() { } for server in &servers { assert_eq!( - correct, + (stake[*server as usize] + (2 * network_n as u64)) >= min_stake, SubtensorModule::get_validator_permit_for_uid(netuid, *server) ); } @@ -2438,333 +2223,6 @@ fn test_validator_permits() { } } -#[test] -fn test_compute_alpha_values() { - // Define the consensus values. - let consensus = vec![ - I32F32::from_num(0.1), - I32F32::from_num(0.5), - I32F32::from_num(0.9), - ]; - // Define the logistic function parameters 'a' and 'b'. - let a = I32F32::from_num(1.0); - let b = I32F32::from_num(0.0); - - // Compute the alpha values using the function. - let alpha = SubtensorModule::compute_alpha_values(&consensus, a, b); - - // Ensure the length of the alpha vector matches the consensus vector. - assert_eq!(alpha.len(), consensus.len()); - - // Manually compute the expected alpha values for each consensus value. - // The logistic function is: 1 / (1 + exp(b - a * c)) - // where c is the consensus value. - - // For consensus[0] = 0.1: - // exp_val = exp(0.0 - 1.0 * 0.1) = exp(-0.1) - // alpha[0] = 1 / (1 + exp(-0.1)) ~ 0.9048374180359595 - let exp_val_0 = I32F32::from_num(0.9048374180359595); - let expected_alpha_0 = I32F32::from_num(1.0) / (I32F32::from_num(1.0) + exp_val_0); - - // For consensus[1] = 0.5: - // exp_val = exp(0.0 - 1.0 * 0.5) = exp(-0.5) - // alpha[1] = 1 / (1 + exp(-0.5)) ~ 0.6065306597126334 - let exp_val_1 = I32F32::from_num(0.6065306597126334); - let expected_alpha_1 = I32F32::from_num(1.0) / (I32F32::from_num(1.0) + exp_val_1); - - // For consensus[2] = 0.9: - // exp_val = exp(0.0 - 1.0 * 0.9) = exp(-0.9) - // alpha[2] = 1 / (1 + exp(-0.9)) ~ 0.4065696597405991 - let exp_val_2 = I32F32::from_num(0.4065696597405991); - let expected_alpha_2 = I32F32::from_num(1.0) / (I32F32::from_num(1.0) + exp_val_2); - - // Define an epsilon for approximate equality checks. - let epsilon = I32F32::from_num(1e-6); - - // Assert that the computed alpha values match the expected values within the epsilon. - assert_approx_eq(alpha[0], expected_alpha_0, epsilon); - assert_approx_eq(alpha[1], expected_alpha_1, epsilon); - assert_approx_eq(alpha[2], expected_alpha_2, epsilon); -} - -#[test] -fn test_compute_alpha_values_256_miners() { - // Define the consensus values for 256 miners. - let consensus: Vec = (0..256) - .map(|i| I32F32::from_num(i as f32 / 255.0)) - .collect(); - // Define the logistic function parameters 'a' and 'b'. - let a = I32F32::from_num(1.0); - let b = I32F32::from_num(0.0); - - // Compute the alpha values using the function. 
- let alpha = SubtensorModule::compute_alpha_values(&consensus, a, b); - - // Ensure the length of the alpha vector matches the consensus vector. - assert_eq!(alpha.len(), consensus.len()); - - // Define an epsilon for approximate equality checks. - let epsilon = I32F32::from_num(1e-6); - - for (i, &c) in consensus.iter().enumerate() { - // Use saturating subtraction and multiplication - let exponent = b - (a * c); - - // Use safe_exp instead of exp - let exp_val = safe_exp(exponent); - - // Use saturating addition and division - let expected_alpha = I32F32::from_num(1.0) / (I32F32::from_num(1.0) + exp_val); - - // Assert that the computed alpha values match the expected values within the epsilon. - assert_approx_eq(alpha[i], expected_alpha, epsilon); - } -} - -#[test] -fn test_clamp_alpha_values() { - // Define the alpha values. - let alpha = vec![ - I32F32::from_num(0.1), - I32F32::from_num(0.5), - I32F32::from_num(0.9), - ]; - // Define the high and low clamping values. - let alpha_high = I32F32::from_num(0.8); - let alpha_low = I32F32::from_num(0.2); - - // Compute the clamped alpha values using the function. - let clamped_alpha = SubtensorModule::clamp_alpha_values(alpha.clone(), alpha_high, alpha_low); - - // Ensure the length of the clamped alpha vector matches the original alpha vector. - assert_eq!(clamped_alpha.len(), alpha.len()); - - // Manually compute the expected clamped alpha values for each alpha value. - // The clamping logic is: max(alpha_low, min(alpha_high, a)) - - // For alpha[0] = 0.1: - // clamped_a = max(0.2, min(0.8, 0.1)) = max(0.2, 0.1) = 0.2 - let expected_clamped_alpha_0 = I32F32::from_num(0.2); - - // For alpha[1] = 0.5: - // clamped_a = max(0.2, min(0.8, 0.5)) = max(0.2, 0.5) = 0.5 - let expected_clamped_alpha_1 = I32F32::from_num(0.5); - - // For alpha[2] = 0.9: - // clamped_a = max(0.2, min(0.8, 0.9)) = max(0.2, 0.8) = 0.8 - let expected_clamped_alpha_2 = I32F32::from_num(0.8); - - // Assert that the computed clamped alpha values match the expected values. 
- assert_eq!(clamped_alpha[0], expected_clamped_alpha_0); - assert_eq!(clamped_alpha[1], expected_clamped_alpha_1); - assert_eq!(clamped_alpha[2], expected_clamped_alpha_2); -} - -#[test] -fn test_calculate_logistic_params() { - // Define test inputs - let alpha_high = I32F32::from_num(0.9); - let alpha_low = I32F32::from_num(0.1); - let consensus_high = I32F32::from_num(0.8); - let consensus_low = I32F32::from_num(0.2); - - // Expected values - // a = (ln((1 / alpha_high - 1)) - ln((1 / alpha_low - 1))) / (consensus_low - consensus_high) - // = (ln((1 / 0.9 - 1)) - ln((1 / 0.1 - 1))) / (0.2 - 0.8) - // = (ln(0.1111) - ln(9)) / -0.6 - // = (-2.1972 - 2.1972) / -0.6 - // = -4.3944 / -0.6 - // = 7.324 - let expected_a = I32F32::from_num(7.324); - - // b = ln((1 / alpha_low - 1)) + a * consensus_low - // = ln((1 / 0.1 - 1)) + 7.324 * 0.2 - // = ln(9) + 1.4648 - // = 2.1972 + 1.4648 - // = 3.662 - let expected_b = I32F32::from_num(3.662); - - // Call the function - let (a, b) = SubtensorModule::calculate_logistic_params( - alpha_high, - alpha_low, - consensus_high, - consensus_low, - ); - - // Assert the results - assert!( - (a - expected_a).abs() < I32F32::from_num(0.001), - "Expected a: {:?}, got: {:?}", - expected_a, - a - ); - assert!( - (b - expected_b).abs() < I32F32::from_num(0.001), - "Expected b: {:?}, got: {:?}", - expected_b, - b - ); -} - -#[test] -fn test_calculate_logistic_params_edge_cases() { - // Edge Case 1: Alpha values at their boundaries (0 and 1) - let alpha_high = I32F32::from_num(1.0); - let alpha_low = I32F32::from_num(0.0); - let consensus_high = I32F32::from_num(0.8); - let consensus_low = I32F32::from_num(0.2); - - // Call the function - let (a, b) = SubtensorModule::calculate_logistic_params( - alpha_high, - alpha_low, - consensus_high, - consensus_low, - ); - - // Assert the results - assert_eq!(a, I32F32::from_num(0.0), "Expected a to be 0, got: {:?}", a); - assert_eq!(b, I32F32::from_num(0.0), "Expected b to be 0, got: {:?}", b); - - // Edge Case 2: Consensus values at their boundaries (0 and 1) - let alpha_high = I32F32::from_num(0.9); - let alpha_low = I32F32::from_num(0.1); - let consensus_high = I32F32::from_num(1.0); - let consensus_low = I32F32::from_num(0.0); - - // Call the function - let (a, b) = SubtensorModule::calculate_logistic_params( - alpha_high, - alpha_low, - consensus_high, - consensus_low, - ); - - // Expected values - // a = (ln((1 / 0.9 - 1)) - ln((1 / 0.1 - 1))) / (0.0 - 1.0) - // = (ln(0.1111) - ln(9)) / -1.0 - // = (-2.1972 - 2.1972) / -1.0 - // = -4.3944 / -1.0 - // = 4.3944 - let expected_a = I32F32::from_num(4.3944); - - // b = ln((1 / 0.1 - 1)) + a * 0.0 - // = ln(9) + 0 - // = 2.1972 - let expected_b = I32F32::from_num(2.1972); - - // Assert the results - assert!( - (a - expected_a).abs() < I32F32::from_num(0.001), - "Expected a: {:?}, got: {:?}", - expected_a, - a - ); - assert!( - (b - expected_b).abs() < I32F32::from_num(0.001), - "Expected b: {:?}, got: {:?}", - expected_b, - b - ); - - // Edge Case 3: Alpha values being equal - let alpha_high = I32F32::from_num(0.5); - let alpha_low = I32F32::from_num(0.5); - let consensus_high = I32F32::from_num(0.8); - let consensus_low = I32F32::from_num(0.2); - - // Call the function - let (a, b) = SubtensorModule::calculate_logistic_params( - alpha_high, - alpha_low, - consensus_high, - consensus_low, - ); - - // Assert the results - assert_eq!(a, I32F32::from_num(0.0), "Expected a to be 0, got: {:?}", a); - assert_eq!(b, I32F32::from_num(0.0), "Expected b to be 0, got: {:?}", b); - - // 
Edge Case 4: Consensus values being equal - let alpha_high = I32F32::from_num(0.9); - let alpha_low = I32F32::from_num(0.1); - let consensus_high = I32F32::from_num(0.5); - let consensus_low = I32F32::from_num(0.5); - - // Call the function - let (a, b) = SubtensorModule::calculate_logistic_params( - alpha_high, - alpha_low, - consensus_high, - consensus_low, - ); - - // Assert the results - assert_eq!(a, I32F32::from_num(0.0), "Expected a to be 0, got: {:?}", a); - assert_eq!(b, I32F32::from_num(0.0), "Expected b to be 0, got: {:?}", b); -} - -#[test] -fn test_compute_ema_bonds_with_liquid_alpha_sparse() { - // Define test inputs - let bonds_delta = vec![ - vec![(0, I32F32::from_num(0.1)), (1, I32F32::from_num(0.2))], - vec![(0, I32F32::from_num(0.3)), (1, I32F32::from_num(0.4))], - ]; - let bonds = vec![ - vec![(0, I32F32::from_num(0.5)), (1, I32F32::from_num(0.6))], - vec![(0, I32F32::from_num(0.7)), (1, I32F32::from_num(0.8))], - ]; - let alpha = vec![I32F32::from_num(0.9), I32F32::from_num(0.8)]; - - // Expected values - // EMA calculation for each bond: - // EMA = alpha * bond_delta + (1 - alpha) * bond - // For bond (0, 0): - // EMA = 0.9 * 0.1 + (1 - 0.9) * 0.5 = 0.09 + 0.05 = 0.14 - // For bond (0, 1): - // EMA = 0.8 * 0.2 + (1 - 0.8) * 0.6 = 0.16 + 0.12 = 0.28 - // For bond (1, 0): - // EMA = 0.9 * 0.3 + (1 - 0.9) * 0.7 = 0.27 + 0.07 = 0.34 - // For bond (1, 1): - // EMA = 0.8 * 0.4 + (1 - 0.8) * 0.8 = 0.32 + 0.16 = 0.48 - let expected_ema_bonds = vec![ - vec![(0, I32F32::from_num(0.14)), (1, I32F32::from_num(0.28))], - vec![(0, I32F32::from_num(0.34)), (1, I32F32::from_num(0.48))], - ]; - - // Call the function - let ema_bonds = - SubtensorModule::compute_ema_bonds_with_liquid_alpha_sparse(&bonds_delta, &bonds, alpha); - - // Assert the results with an epsilon for approximate equality - let epsilon = I32F32::from_num(1e-6); - assert_approx_eq_vec_of_vec(&ema_bonds, &expected_ema_bonds, epsilon); -} - -#[test] -fn test_compute_ema_bonds_with_liquid_alpha_sparse_empty() { - // Test with empty inputs - let bonds_delta: Vec> = vec![]; - let bonds: Vec> = vec![]; - let alpha: Vec = vec![]; - - // Expected values: Empty Vec - let expected_ema_bonds: Vec> = vec![]; - - // Call the function - let ema_bonds = - SubtensorModule::compute_ema_bonds_with_liquid_alpha_sparse(&bonds_delta, &bonds, alpha); - - // Assert the results - assert_eq!( - ema_bonds, expected_ema_bonds, - "Expected EMA bonds: {:?}, got: {:?}", - expected_ema_bonds, ema_bonds - ); -} - #[test] fn test_get_set_alpha() { new_test_ext(1).execute_with(|| { @@ -2789,6 +2247,7 @@ fn test_get_set_alpha() { ); assert_ok!(SubtensorModule::register_network(signer.clone(), hotkey)); + SubtokenEnabled::::insert(netuid, true); assert_ok!(SubtensorModule::add_stake( signer.clone(), hotkey, @@ -3170,29 +2629,854 @@ pub fn assert_approx_eq(left: I32F32, right: I32F32, epsilon: I32F32) { } } -/// Helper function to assert approximate equality of two vectors of vectors of tuples. -fn assert_approx_eq_vec_of_vec( - left: &[Vec<(u16, I32F32)>], - right: &[Vec<(u16, I32F32)>], - epsilon: I32F32, -) { - assert_eq!(left.len(), right.len(), "Vectors have different lengths"); - for (left_row, right_row) in left.iter().zip(right.iter()) { - assert_eq!( - left_row.len(), - right_row.len(), - "Rows have different lengths" +// test Yuma 3 scenarios over a sequence of epochs. 
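For reference while reading the Yuma 3 scenario tests that follow: the bond update they exercise is the per-element exponential moving average spelled out in the removed unit test above, ema = alpha * bond_delta + (1 - alpha) * bond, with alpha taken per entry when liquid alpha is enabled. A minimal fixed-point sketch of that update, assuming I32F32 comes from the substrate_fixed crate as elsewhere in these tests; the helper name is illustrative, not the pallet's internal API:

use substrate_fixed::types::I32F32;

// Sketch only: per-element EMA with a per-entry alpha, matching the arithmetic
// worked out in the removed test above, e.g. 0.9 * 0.1 + (1 - 0.9) * 0.5 = 0.14.
fn ema_bond(bond_delta: I32F32, bond: I32F32, alpha: I32F32) -> I32F32 {
    let one = I32F32::from_num(1);
    alpha * bond_delta + (one - alpha) * bond
}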
+fn setup_yuma_3_scenario(netuid: u16, n: u16, sparse: bool, max_stake: u64, stakes: Vec) { + let block_number = System::block_number(); + let tempo: u16 = 1; // high tempo to skip automatic epochs in on_initialize, use manual epochs instead + add_network(netuid, tempo, 0); + + SubtensorModule::set_max_allowed_uids(netuid, n); + assert_eq!(SubtensorModule::get_max_allowed_uids(netuid), n); + SubtensorModule::set_max_registrations_per_block(netuid, n); + SubtensorModule::set_target_registrations_per_interval(netuid, n); + SubtensorModule::set_weights_set_rate_limit(netuid, 0); + SubtensorModule::set_min_allowed_weights(netuid, 1); + SubtensorModule::set_max_weight_limit(netuid, u16::MAX); + SubtensorModule::set_bonds_penalty(netuid, 0); + SubtensorModule::set_alpha_sigmoid_steepness(netuid, 10); + SubtensorModule::set_bonds_moving_average(netuid, 975_000); + + // === Register + for key in 0..n as u64 { + SubtensorModule::add_balance_to_coldkey_account(&U256::from(key), max_stake); + let (nonce, work): (u64, Vec) = SubtensorModule::create_work_for_block_number( + netuid, + block_number, + key * 1_000_000, + &U256::from(key), ); - for ((left_idx, left_val), (right_idx, right_val)) in left_row.iter().zip(right_row.iter()) - { - assert_eq!(left_idx, right_idx, "Indices are different"); - assert!( - (left_val - right_val).abs() < epsilon, - "Values are different: left = {:?}, right = {:?}, epsilon = {:?}", - left_val, - right_val, - epsilon - ); + assert_ok!(SubtensorModule::register( + <::RuntimeOrigin>::signed(U256::from(key)), + netuid, + block_number, + nonce, + work, + U256::from(key), + U256::from(key) + )); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &U256::from(key), + &U256::from(key), + netuid, + stakes[key as usize], + ); + } + assert_eq!(SubtensorModule::get_max_allowed_uids(netuid), n); + assert_eq!(SubtensorModule::get_subnetwork_n(netuid), n); + + // Enable Liquid Alpha + SubtensorModule::set_kappa(netuid, u16::MAX / 2); + SubtensorModule::set_liquid_alpha_enabled(netuid, true); + SubtensorModule::set_alpha_values_32(netuid, I32F32::from_num(0.1), I32F32::from_num(0.3)); + + // Enable Yuma3 + SubtensorModule::set_yuma3_enabled(netuid, true); + + // === Issue validator permits + SubtensorModule::set_max_allowed_validators(netuid, 3); + + // run first epoch to set allowed validators + // run to next block to ensure weights are set on nodes after their registration block + run_epoch(netuid, sparse); +} + +fn run_epoch(netuid: u16, sparse: bool) { + next_block_no_epoch(netuid); + if sparse { + SubtensorModule::epoch(netuid, 1_000_000_000); + } else { + SubtensorModule::epoch_dense(netuid, 1_000_000_000); + } +} + +fn run_epoch_and_check_bonds_dividends( + netuid: u16, + sparse: bool, + target_bonds: &[Vec], + target_dividends: &[f32], +) { + run_epoch(netuid, sparse); + let bonds = SubtensorModule::get_bonds_fixed_proportion(netuid); + let dividends = SubtensorModule::get_dividends(netuid); + + let epsilon = I32F32::from_num(1e-3); + // Check the bonds + for (bond, target_bond) in bonds.iter().zip(target_bonds.iter()) { + // skip the 3 validators + for (b, t) in bond.iter().zip(target_bond.iter().skip(3)) { + assert_approx_eq(*b, fixed(*t), epsilon); } } + // Check the dividends + for (dividend, target_dividend) in dividends.iter().zip(target_dividends.iter()) { + assert_approx_eq( + u16_proportion_to_fixed(*dividend), + fixed(*target_dividend), + epsilon, + ); + } +} + +fn set_yuma_3_weights(netuid: u16, weights: Vec>, indices: Vec) { + for (uid, weight) in 
weights.iter().enumerate() { + assert_ok!(SubtensorModule::set_weights( + RuntimeOrigin::signed(U256::from(uid as u64)), + netuid, + indices.clone(), + weight.to_vec(), + 0 + )); + } +} + +#[test] +fn test_yuma_3_kappa_moves_first() { + for sparse in [true, false].iter() { + new_test_ext(1).execute_with(|| { + let n: u16 = 5; // 3 validators, 2 servers + let netuid: u16 = 1; + let max_stake: u64 = 8; + + // Validator A: kappa / Big validator (0.8) - moves first + // Validator B: Small eager validator (0.1) - moves second + // Validator C: Small lazy validator (0.1) - moves last + let stakes: Vec = vec![8, 1, 1, 0, 0]; + + setup_yuma_3_scenario(netuid, n, *sparse, max_stake, stakes); + let targets_bonds = [ + vec![ + vec![0.1013, 0.0000], + vec![0.1013, 0.0000], + vec![0.1013, 0.0000], + ], + vec![ + vec![0.0908, 0.1013], + vec![0.3697, 0.0000], + vec![0.3697, 0.0000], + ], + vec![ + vec![0.0815, 0.1924], + vec![0.3170, 0.1013], + vec![0.5580, 0.0000], + ], + vec![ + vec![0.0731, 0.2742], + vec![0.2765, 0.1924], + vec![0.4306, 0.1013], + ], + vec![ + vec![0.0656, 0.3478], + vec![0.2435, 0.2742], + vec![0.3589, 0.1924], + ], + vec![ + vec![0.0588, 0.4139], + vec![0.2157, 0.3478], + vec![0.3089, 0.2742], + ], + ]; + + let targets_dividends = [ + vec![0.8000, 0.1000, 0.1000, 0.0000, 0.0000], + vec![1.0000, 0.0000, 0.0000, 0.0000, 0.0000], + vec![0.9382, 0.0618, 0.0000, 0.0000, 0.0000], + vec![0.8819, 0.0773, 0.0407, 0.0000, 0.0000], + vec![0.8564, 0.0844, 0.0592, 0.0000, 0.0000], + vec![0.8418, 0.0884, 0.0697, 0.0000, 0.0000], + ]; + + for (epoch, (target_bonds, target_dividends)) in targets_bonds + .iter() + .zip(targets_dividends.iter()) + .enumerate() + { + match epoch { + 0 => { + // Initially, consensus is achieved by all Validators + set_yuma_3_weights(netuid, vec![vec![u16::MAX, 0]; 3], vec![3, 4]); + } + 1 => { + // Validator A -> Server 2 + // Validator B -> Server 1 + // Validator C -> Server 1 + set_yuma_3_weights( + netuid, + vec![vec![0, u16::MAX], vec![u16::MAX, 0], vec![u16::MAX, 0]], + vec![3, 4], + ); + } + 2 => { + // Validator A -> Server 2 + // Validator B -> Server 2 + // Validator C -> Server 1 + set_yuma_3_weights( + netuid, + vec![vec![0, u16::MAX], vec![0, u16::MAX], vec![u16::MAX, 0]], + vec![3, 4], + ); + } + 3 => { + // Subsequent epochs All validators -> Server 2 + set_yuma_3_weights(netuid, vec![vec![0, u16::MAX]; 3], vec![3, 4]); + } + _ => {} + }; + run_epoch_and_check_bonds_dividends( + netuid, + *sparse, + target_bonds, + target_dividends, + ); + } + }) + } +} + +#[test] +fn test_yuma_3_kappa_moves_second() { + for sparse in [true, false].iter() { + new_test_ext(1).execute_with(|| { + let n: u16 = 5; // 3 validators, 2 servers + let netuid: u16 = 1; + let max_stake: u64 = 8; + + // Validator A: kappa / Big validator (0.8) - moves second + // Validator B: Small eager validator (0.1) - moves first + // Validator C: Small lazy validator (0.1) - moves last + let stakes: Vec = vec![8, 1, 1, 0, 0]; + + setup_yuma_3_scenario(netuid, n, *sparse, max_stake, stakes); + let targets_bonds = [ + vec![ + vec![0.1013, 0.0000], + vec![0.1013, 0.0000], + vec![0.1013, 0.0000], + ], + vec![ + vec![0.1924, 0.0000], + vec![0.0908, 0.2987], + vec![0.1924, 0.0000], + ], + vec![ + vec![0.1715, 0.1013], + vec![0.0815, 0.3697], + vec![0.4336, 0.0000], + ], + vec![ + vec![0.1531, 0.1924], + vec![0.0731, 0.4336], + vec![0.3608, 0.1013], + ], + vec![ + vec![0.1369, 0.2742], + vec![0.0656, 0.4910], + vec![0.3103, 0.1924], + ], + vec![ + vec![0.1225, 0.3478], + vec![0.0588, 0.5426], + 
vec![0.2712, 0.2742], + ], + ]; + let targets_dividends = [ + vec![0.8000, 0.1000, 0.1000, 0.0000, 0.0000], + vec![0.8446, 0.0498, 0.1056, 0.0000, 0.0000], + vec![0.6868, 0.3132, 0.0000, 0.0000, 0.0000], + vec![0.7421, 0.2090, 0.0489, 0.0000, 0.0000], + vec![0.7625, 0.1706, 0.0669, 0.0000, 0.0000], + vec![0.7730, 0.1508, 0.0762, 0.0000, 0.0000], + ]; + + for (epoch, (target_bonds, target_dividends)) in targets_bonds + .iter() + .zip(targets_dividends.iter()) + .enumerate() + { + match epoch { + 0 => { + // Initially, consensus is achieved by all Validators + set_yuma_3_weights(netuid, vec![vec![u16::MAX, 0]; 3], vec![3, 4]); + } + 1 => { + // Validator A -> Server 1 + // Validator B -> Server 2 + // Validator C -> Server 1 + set_yuma_3_weights( + netuid, + vec![vec![u16::MAX, 0], vec![0, u16::MAX], vec![u16::MAX, 0]], + vec![3, 4], + ); + } + 2 => { + // Validator A -> Server 2 + // Validator B -> Server 2 + // Validator C -> Server 1 + set_yuma_3_weights( + netuid, + vec![vec![0, u16::MAX], vec![0, u16::MAX], vec![u16::MAX, 0]], + vec![3, 4], + ); + } + 3 => { + // Subsequent epochs All validators -> Server 2 + set_yuma_3_weights(netuid, vec![vec![0, u16::MAX]; 3], vec![3, 4]); + } + _ => {} + }; + run_epoch_and_check_bonds_dividends( + netuid, + *sparse, + target_bonds, + target_dividends, + ); + } + }) + } +} + +#[test] +fn test_yuma_3_kappa_moves_last() { + for sparse in [true, false].iter() { + new_test_ext(1).execute_with(|| { + let n: u16 = 5; // 3 validators, 2 servers + let netuid: u16 = 1; + let max_stake: u64 = 8; + + // Validator A: kappa / Big validator (0.8) - moves last + // Validator B: Small eager validator (0.1) - moves first + // Validator C: Small lazy validator (0.1) - moves second + let stakes: Vec = vec![8, 1, 1, 0, 0]; + + setup_yuma_3_scenario(netuid, n, *sparse, max_stake, stakes); + let targets_bonds = [ + vec![ + vec![0.1013, 0.0000], + vec![0.1013, 0.0000], + vec![0.1013, 0.0000], + ], + vec![ + vec![0.1924, 0.0000], + vec![0.0908, 0.2987], + vec![0.1924, 0.0000], + ], + vec![ + vec![0.2742, 0.0000], + vec![0.0815, 0.5081], + vec![0.1715, 0.2987], + ], + vec![ + vec![0.2416, 0.1013], + vec![0.0731, 0.5580], + vec![0.1531, 0.3697], + ], + vec![ + vec![0.2141, 0.1924], + vec![0.0656, 0.6028], + vec![0.1369, 0.4336], + ], + vec![ + vec![0.1903, 0.2742], + vec![0.0588, 0.6430], + vec![0.1225, 0.4910], + ], + ]; + let targets_dividends = [ + vec![0.8000, 0.1000, 0.1000, 0.0000, 0.0000], + vec![0.8446, 0.0498, 0.1056, 0.0000, 0.0000], + vec![0.8966, 0.0333, 0.0701, 0.0000, 0.0000], + vec![0.4663, 0.3210, 0.2127, 0.0000, 0.0000], + vec![0.5976, 0.2340, 0.1683, 0.0000, 0.0000], + vec![0.6592, 0.1932, 0.1475, 0.0000, 0.0000], + ]; + + for (epoch, (target_bonds, target_dividends)) in targets_bonds + .iter() + .zip(targets_dividends.iter()) + .enumerate() + { + match epoch { + 0 => { + // Initially, consensus is achieved by all Validators + set_yuma_3_weights(netuid, vec![vec![u16::MAX, 0]; 3], vec![3, 4]); + } + 1 => { + // Validator A -> Server 1 + // Validator B -> Server 2 + // Validator C -> Server 1 + set_yuma_3_weights( + netuid, + vec![vec![u16::MAX, 0], vec![0, u16::MAX], vec![u16::MAX, 0]], + vec![3, 4], + ); + } + 2 => { + // Validator A -> Server 1 + // Validator B -> Server 2 + // Validator C -> Server 2 + set_yuma_3_weights( + netuid, + vec![vec![u16::MAX, 0], vec![0, u16::MAX], vec![0, u16::MAX]], + vec![3, 4], + ); + } + 3 => { + // Subsequent epochs All validators -> Server 2 + set_yuma_3_weights(netuid, vec![vec![0, u16::MAX]; 3], vec![3, 4]); + } + _ => 
{} + }; + run_epoch_and_check_bonds_dividends( + netuid, + *sparse, + target_bonds, + target_dividends, + ); + } + }) + } +} + +#[test] +fn test_yuma_3_one_epoch_switch() { + for sparse in [true, false].iter() { + new_test_ext(1).execute_with(|| { + let n: u16 = 5; // 3 validators, 2 servers + let netuid: u16 = 1; + let max_stake: u64 = 8; + + // Equal stake validators + let stakes: Vec = vec![33, 33, 34, 0, 0]; + + setup_yuma_3_scenario(netuid, n, *sparse, max_stake, stakes); + + let targets_bonds = [ + vec![ + vec![0.1013, 0.0000], + vec![0.1013, 0.0000], + vec![0.1013, 0.0000], + ], + vec![ + vec![0.1924, 0.0000], + vec![0.1924, 0.0000], + vec![0.1924, 0.0000], + ], + vec![ + vec![0.2742, 0.0000], + vec![0.2742, 0.0000], + vec![0.1715, 0.2987], + ], + vec![ + vec![0.3478, 0.0000], + vec![0.3478, 0.0000], + vec![0.2554, 0.2618], + ], + vec![ + vec![0.4139, 0.0000], + vec![0.4139, 0.0000], + vec![0.3309, 0.2312], + ], + vec![ + vec![0.4733, 0.0000], + vec![0.4733, 0.0000], + vec![0.3987, 0.2051], + ], + ]; + let targets_dividends = [ + vec![0.3300, 0.3300, 0.3400, 0.0000, 0.0000], + vec![0.3300, 0.3300, 0.3400, 0.0000, 0.0000], + vec![0.3782, 0.3782, 0.2436, 0.0000, 0.0000], + vec![0.3628, 0.3628, 0.2745, 0.0000, 0.0000], + vec![0.3541, 0.3541, 0.2917, 0.0000, 0.0000], + vec![0.3487, 0.3487, 0.3026, 0.0000, 0.0000], + ]; + + for (epoch, (target_bonds, target_dividends)) in targets_bonds + .iter() + .zip(targets_dividends.iter()) + .enumerate() + { + match epoch { + 2 => { + // Validator A -> Server 1 + // Validator B -> Server 1 + // Validator C -> Server 2 + set_yuma_3_weights( + netuid, + vec![vec![u16::MAX, 0], vec![u16::MAX, 0], vec![0, u16::MAX]], + vec![3, 4], + ); + } + _ => { + // All validators -> Server 1 + set_yuma_3_weights(netuid, vec![vec![u16::MAX, 0]; 3], vec![3, 4]); + } + }; + run_epoch_and_check_bonds_dividends( + netuid, + *sparse, + target_bonds, + target_dividends, + ); + } + }) + } +} + +#[test] +fn test_yuma_3_liquid_alpha_disabled() { + for sparse in [true, false].iter() { + new_test_ext(1).execute_with(|| { + let netuid: u16 = 1; + let n: u16 = 5; // 3 validators, 2 servers + let max_stake: u64 = 8; + + // Equal stake validators + let stakes: Vec = vec![33, 33, 34, 0, 0]; + + setup_yuma_3_scenario(netuid, n, *sparse, max_stake, stakes); + + // disable liquid alpha + SubtensorModule::set_liquid_alpha_enabled(netuid, false); + + let targets_bonds = [ + vec![ + vec![0.0000, 0.0250, 0.0000], + vec![0.0000, 0.0250, 0.0000], + vec![0.0000, 0.0250, 0.0000], + ], + vec![ + vec![0.0000, 0.0494, 0.0000], + vec![0.0000, 0.0494, 0.0000], + vec![0.0000, 0.0494, 0.0000], + ], + vec![ + vec![0.0000, 0.0731, 0.0000], + vec![0.0000, 0.0731, 0.0000], + vec![0.0000, 0.0481, 0.0250], + ], + vec![ + vec![0.0000, 0.0963, 0.0000], + vec![0.0000, 0.0963, 0.0000], + vec![0.0000, 0.0719, 0.0244], + ], + vec![ + vec![0.0000, 0.1189, 0.0000], + vec![0.0000, 0.1189, 0.0000], + vec![0.0000, 0.0951, 0.0238], + ], + vec![ + vec![0.0000, 0.1409, 0.0000], + vec![0.0000, 0.1409, 0.0000], + vec![0.0000, 0.1178, 0.0232], + ], + ]; + let targets_dividends = [ + vec![0.3300, 0.3300, 0.3400, 0.0000, 0.0000], + vec![0.3300, 0.3300, 0.3400, 0.0000, 0.0000], + vec![0.3734, 0.3734, 0.2532, 0.0000, 0.0000], + vec![0.3611, 0.3611, 0.2779, 0.0000, 0.0000], + vec![0.3541, 0.3541, 0.2919, 0.0000, 0.0000], + vec![0.3495, 0.3495, 0.3009, 0.0000, 0.0000], + ]; + + for (epoch, (target_bonds, target_dividends)) in targets_bonds + .iter() + .zip(targets_dividends.iter()) + .enumerate() + { + match epoch { + 2 => { + 
// Validator A -> Server 1 + // Validator B -> Server 1 + // Validator C -> Server 2 + set_yuma_3_weights( + netuid, + vec![vec![u16::MAX, 0], vec![u16::MAX, 0], vec![0, u16::MAX]], + vec![3, 4], + ); + } + _ => { + // All validators -> Server 1 + set_yuma_3_weights(netuid, vec![vec![u16::MAX, 0]; 3], vec![3, 4]); + } + }; + run_epoch_and_check_bonds_dividends( + netuid, + *sparse, + target_bonds, + target_dividends, + ); + } + }) + } +} + +#[test] +fn test_yuma_3_stable_miner() { + for sparse in [true, false].iter() { + new_test_ext(1).execute_with(|| { + let netuid: u16 = 1; + let n: u16 = 6; // 3 validators, 3 servers + let max_stake: u64 = 8; + + // Validator A: kappa / Big validator (0.8) + // Validator B: Small eager validator (0.1) + // Validator C: Small lazy validator (0.1) + let stakes: Vec = vec![8, 1, 1, 0, 0, 0]; + + setup_yuma_3_scenario(netuid, n, *sparse, max_stake, stakes); + let targets_bonds = [ + vec![ + vec![0.0507, 0.0000, 0.0507], + vec![0.0507, 0.0000, 0.0507], + vec![0.0507, 0.0000, 0.0507], + ], + vec![ + vec![0.0962, 0.0000, 0.0962], + vec![0.0455, 0.1000, 0.0962], + vec![0.0962, 0.0000, 0.0962], + ], + vec![ + vec![0.0863, 0.0507, 0.1371], + vec![0.0408, 0.1405, 0.1371], + vec![0.1770, 0.0000, 0.1371], + ], + vec![ + vec![0.0774, 0.0962, 0.1739], + vec![0.0367, 0.1770, 0.1739], + vec![0.1579, 0.0507, 0.1739], + ], + vec![ + vec![0.0694, 0.1371, 0.2069], + vec![0.0329, 0.2097, 0.2069], + vec![0.1411, 0.0962, 0.2069], + ], + vec![ + vec![0.0623, 0.1739, 0.2366], + vec![0.0296, 0.2391, 0.2366], + vec![0.1263, 0.1371, 0.2366], + ], + ]; + let targets_dividends = [ + vec![0.8000, 0.1000, 0.1000, 0.0000, 0.0000, 0.0000], + vec![0.8226, 0.0745, 0.1028, 0.0000, 0.0000, 0.0000], + vec![0.7750, 0.1685, 0.0565, 0.0000, 0.0000, 0.0000], + vec![0.7864, 0.1372, 0.0764, 0.0000, 0.0000, 0.0000], + vec![0.7912, 0.1241, 0.0847, 0.0000, 0.0000, 0.0000], + vec![0.7937, 0.1173, 0.0890, 0.0000, 0.0000, 0.0000], + ]; + + for (epoch, (target_bonds, target_dividends)) in targets_bonds + .iter() + .zip(targets_dividends.iter()) + .enumerate() + { + match epoch { + 0 => { + // all validators 0.5 for first and third server + set_yuma_3_weights( + netuid, + vec![vec![u16::MAX / 2, 0, u16::MAX / 2]; 3], + vec![3, 4, 5], + ); + } + 1 => { + // one of small validators moves 0.5 to seconds server + set_yuma_3_weights( + netuid, + vec![ + vec![u16::MAX / 2, 0, u16::MAX / 2], + vec![0, u16::MAX / 2, u16::MAX / 2], + vec![u16::MAX / 2, 0, u16::MAX / 2], + ], + vec![3, 4, 5], + ); + } + 2 => { + // big validator follows + set_yuma_3_weights( + netuid, + vec![ + vec![0, u16::MAX / 2, u16::MAX / 2], + vec![0, u16::MAX / 2, u16::MAX / 2], + vec![u16::MAX / 2, 0, u16::MAX / 2], + ], + vec![3, 4, 5], + ); + } + 3 => { + // Subsequent epochs all validators have moves + set_yuma_3_weights( + netuid, + vec![vec![0, u16::MAX / 2, u16::MAX / 2]; 3], + vec![3, 4, 5], + ); + } + _ => {} + }; + run_epoch_and_check_bonds_dividends( + netuid, + *sparse, + target_bonds, + target_dividends, + ); + } + }) + } +} + +#[test] +fn test_yuma_3_bonds_reset() { + new_test_ext(1).execute_with(|| { + let sparse: bool = true; + let n: u16 = 5; // 3 validators, 2 servers + let netuid: u16 = 1; + let max_stake: u64 = 8; + + // "Case 8 - big vali moves late, then late" + // Big dishonest lazy vali. (0.8) + // Small eager-eager vali. (0.1) + // Small eager-eager vali 2. 
(0.1) + let stakes: Vec = vec![8, 1, 1, 0, 0]; + + setup_yuma_3_scenario(netuid, n, sparse, max_stake, stakes); + SubtensorModule::set_bonds_reset(netuid, true); + + // target bonds and dividends for specific epoch + let targets_dividends: std::collections::HashMap<_, _> = [ + (0, vec![0.8000, 0.1000, 0.1000, 0.0000, 0.0000]), + (1, vec![0.8944, 0.0528, 0.0528, 0.0000, 0.0000]), + (2, vec![0.5230, 0.2385, 0.2385, 0.0000, 0.0000]), + (19, vec![0.7919, 0.1040, 0.1040, 0.0000, 0.0000]), + (20, vec![0.7928, 0.1036, 0.1036, 0.0000, 0.0000]), + (21, vec![0.8467, 0.0766, 0.0766, 0.0000, 0.0000]), + (40, vec![0.7928, 0.1036, 0.1036, 0.0000, 0.0000]), + ] + .into_iter() + .collect(); + let targets_bonds: std::collections::HashMap<_, _> = [ + ( + 0, + vec![ + vec![0.1013, 0.0000], + vec![0.1013, 0.0000], + vec![0.1013, 0.0000], + ], + ), + ( + 1, + vec![ + vec![0.1924, 0.0000], + vec![0.0908, 0.2987], + vec![0.0908, 0.2987], + ], + ), + ( + 2, + vec![ + vec![0.1715, 0.1013], + vec![0.0815, 0.3697], + vec![0.0815, 0.3697], + ], + ), + ( + 19, + vec![ + vec![0.0269, 0.8539], + vec![0.0131, 0.8975], + vec![0.0131, 0.8975], + ], + ), + ( + 20, + vec![ + vec![0.0000, 0.8687], + vec![0.0000, 0.9079], + vec![0.0000, 0.9079], + ], + ), + ( + 21, + vec![ + vec![0.0000, 0.8820], + vec![0.2987, 0.6386], + vec![0.2987, 0.6386], + ], + ), + ( + 40, + vec![ + vec![0.8687, 0.0578], + vec![0.9079, 0.0523], + vec![0.9079, 0.0523], + ], + ), + ] + .into_iter() + .collect(); + + for epoch in 0..=40 { + match epoch { + 0 => { + // All validators -> Server 1 + set_yuma_3_weights(netuid, vec![vec![u16::MAX, 0]; 3], vec![3, 4]); + } + 1 => { + // validators B, C switch + // Validator A -> Server 1 + // Validator B -> Server 2 + // Validator C -> Server 2 + set_yuma_3_weights( + netuid, + vec![vec![u16::MAX, 0], vec![0, u16::MAX], vec![0, u16::MAX]], + vec![3, 4], + ); + } + (2..=20) => { + // validator A copies weights + // All validators -> Server 2 + set_yuma_3_weights(netuid, vec![vec![0, u16::MAX]; 3], vec![3, 4]); + if epoch == 20 { + let hotkey = SubtensorModule::get_hotkey_for_net_and_uid(netuid, 3) + .expect("Hotkey not found"); + let _ = SubtensorModule::do_reset_bonds(netuid, &hotkey); + } + } + 21 => { + // validators B, C switch back + // Validator A -> Server 2 + // Validator B -> Server 1 + // Validator C -> Server 1 + set_yuma_3_weights( + netuid, + vec![vec![0, u16::MAX], vec![u16::MAX, 0], vec![u16::MAX, 0]], + vec![3, 4], + ); + } + _ => { + // validator A copies weights + // All validators -> Server 1 + set_yuma_3_weights(netuid, vec![vec![u16::MAX, 0]; 3], vec![3, 4]); + } + }; + + if let Some((target_dividend, target_bond)) = + targets_dividends.get(&epoch).zip(targets_bonds.get(&epoch)) + { + run_epoch_and_check_bonds_dividends(netuid, sparse, target_bond, target_dividend); + } else { + run_epoch(netuid, sparse); + } + } + }) } diff --git a/pallets/subtensor/src/tests/evm.rs b/pallets/subtensor/src/tests/evm.rs index bdd55c1961..fd0ea51061 100644 --- a/pallets/subtensor/src/tests/evm.rs +++ b/pallets/subtensor/src/tests/evm.rs @@ -21,6 +21,14 @@ fn public_to_evm_key(pubkey: &ecdsa::Public) -> H160 { H160::from(address) } +fn sign_evm_message>(pair: &ecdsa::Pair, message: M) -> ecdsa::Signature { + let hash = SubtensorModule::hash_message_eip191(message); + let mut sig = pair.sign_prehashed(&hash); + // Adjust the v value to either 27 or 28 + sig.0[64] += 27; + sig +} + #[test] fn test_associate_evm_key_success() { new_test_ext(1).execute_with(|| { @@ -47,8 +55,7 @@ fn test_associate_evm_key_success() 
{ let mut message = [0u8; 64]; message[..32].copy_from_slice(hotkey_bytes.as_ref()); message[32..].copy_from_slice(hashed_block_number.as_ref()); - let hashed_message = keccak_256(message.as_ref()); - let signature = pair.sign_prehashed(&hashed_message); + let signature = sign_evm_message(&pair, message); assert_ok!(SubtensorModule::associate_evm_key( RuntimeOrigin::signed(coldkey), @@ -94,11 +101,8 @@ fn test_associate_evm_key_different_block_number_success() { let hashed_block_number = keccak_256(block_number.encode().as_ref()); let hotkey_bytes = hotkey.encode(); - let mut message = [0u8; 64]; - message[..32].copy_from_slice(hotkey_bytes.as_ref()); - message[32..].copy_from_slice(hashed_block_number.as_ref()); - let hashed_message = keccak_256(message.as_ref()); - let signature = pair.sign_prehashed(&hashed_message); + let message = [hotkey_bytes.as_ref(), hashed_block_number.as_ref()].concat(); + let signature = sign_evm_message(&pair, message); assert_ok!(SubtensorModule::associate_evm_key( RuntimeOrigin::signed(coldkey), @@ -141,11 +145,8 @@ fn test_associate_evm_key_coldkey_does_not_own_hotkey() { let hashed_block_number = keccak_256(block_number.encode().as_ref()); let hotkey_bytes = hotkey.encode(); - let mut message = [0u8; 64]; - message[..32].copy_from_slice(hotkey_bytes.as_ref()); - message[32..].copy_from_slice(hashed_block_number.as_ref()); - let hashed_message = keccak_256(message.as_ref()); - let signature = pair.sign_prehashed(&hashed_message); + let message = [hotkey_bytes.as_ref(), hashed_block_number.as_ref()].concat(); + let signature = sign_evm_message(&pair, message); assert_err!( SubtensorModule::associate_evm_key( @@ -182,11 +183,8 @@ fn test_associate_evm_key_hotkey_not_registered_in_subnet() { let hashed_block_number = keccak_256(block_number.encode().as_ref()); let hotkey_bytes = hotkey.encode(); - let mut message = [0u8; 64]; - message[..32].copy_from_slice(hotkey_bytes.as_ref()); - message[32..].copy_from_slice(hashed_block_number.as_ref()); - let hashed_message = keccak_256(message.as_ref()); - let signature = pair.sign_prehashed(&hashed_message); + let message = [hotkey_bytes.as_ref(), hashed_block_number.as_ref()].concat(); + let signature = sign_evm_message(&pair, message); assert_err!( SubtensorModule::associate_evm_key( @@ -225,9 +223,7 @@ fn test_associate_evm_key_using_wrong_hash_function() { let hashed_block_number = keccak_256(block_number.encode().as_ref()); let hotkey_bytes = hotkey.encode(); - let mut message = [0u8; 64]; - message[..32].copy_from_slice(hotkey_bytes.as_ref()); - message[32..].copy_from_slice(hashed_block_number.as_ref()); + let message = [hotkey_bytes.as_ref(), hashed_block_number.as_ref()].concat(); let hashed_message = blake2_256(message.as_ref()); let signature = pair.sign_prehashed(&hashed_message); diff --git a/pallets/subtensor/src/tests/math.rs b/pallets/subtensor/src/tests/math.rs index c70da2c9d2..01e02742b7 100644 --- a/pallets/subtensor/src/tests/math.rs +++ b/pallets/subtensor/src/tests/math.rs @@ -1221,42 +1221,59 @@ fn test_math_vec_mask_sparse_matrix() { } #[test] -fn test_math_scalar_vec_mask_sparse_matrix() { - let vector: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9.]; - let target: Vec = vec![0., 2., 3., 0., 5., 6., 0., 8., 9.]; - let mat = vec_to_sparse_mat_fixed(&vector, 3, false); - let scalar: u64 = 1; - let masking_vector: Vec = vec![1, 4, 7]; - let result = scalar_vec_mask_sparse_matrix(&mat, scalar, &masking_vector, &|a, b| a == b); - assert_sparse_mat_compare( - &result, - 
&vec_to_sparse_mat_fixed(&target, 3, false), - I32F32::from_num(0), - ); +fn test_math_vec_mul() { + let vector: Vec = vec_to_fixed(&[1., 2., 3., 4.]); + let target: Vec = vec_to_fixed(&[1., 4., 9., 16.]); + let result = vec_mul(&vector, &vector); + assert_vec_compare(&result, &target, I32F32::from_num(0)); + let vector_empty: Vec = vec_to_fixed(&[]); + let result = vec_mul(&vector_empty, &vector); + let target: Vec = vec![]; + assert_vec_compare(&result, &target, I32F32::from_num(0)); + let vector_zero: Vec = vec_to_fixed(&[0., 0., 0., 0., 0., 0., 0., 0.]); + let result = vec_mul(&vector_zero, &vector); + let target: Vec = vec![I32F32::from_num(0); 4]; + assert_vec_compare(&result, &target, I32F32::from_num(0)); +} - let vector: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9.]; - let target: Vec = vec![1., 2., 0., 4., 5., 0., 7., 8., 0.]; - let mat = vec_to_sparse_mat_fixed(&vector, 3, false); - let scalar: u64 = 5; - let masking_vector: Vec = vec![1, 4, 7]; - let result = scalar_vec_mask_sparse_matrix(&mat, scalar, &masking_vector, &|a, b| a <= b); - assert_sparse_mat_compare( - &result, - &vec_to_sparse_mat_fixed(&target, 3, false), - I32F32::from_num(0), - ); +#[test] +fn test_math_mat_vec_mul() { + let matrix: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; + let matrix = vec_to_mat_fixed(&matrix, 4, false); + let vector: Vec = vec_to_fixed(&[1., 2., 3.]); + let target: Vec = vec![1., 4., 9., 4., 10., 18., 7., 16., 27., 10., 22., 36.]; + let target = vec_to_mat_fixed(&target, 4, false); + let result = mat_vec_mul(&matrix, &vector); + assert_mat_compare(&result, &target, I32F32::from_num(0)); + let vector_one: Vec = vec_to_fixed(&[1., 0., 0.]); + let target: Vec = vec![1., 0., 0., 4., 0., 0., 7., 0., 0., 10., 0., 0.]; + let target = vec_to_mat_fixed(&target, 4, false); + let result = mat_vec_mul(&matrix, &vector_one); + assert_mat_compare(&result, &target, I32F32::from_num(0)); + let vector_empty: Vec = vec_to_fixed(&[]); + let result = mat_vec_mul(&matrix, &vector_empty); + let target: Vec> = vec![vec![]; 4]; + assert_mat_compare(&result, &target, I32F32::from_num(0)); +} - let vector: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9.]; - let target: Vec = vec![0., 0., 3., 0., 0., 6., 0., 0., 9.]; - let mat = vec_to_sparse_mat_fixed(&vector, 3, false); - let scalar: u64 = 5; - let masking_vector: Vec = vec![1, 4, 7]; - let result = scalar_vec_mask_sparse_matrix(&mat, scalar, &masking_vector, &|a, b| a >= b); - assert_sparse_mat_compare( - &result, - &vec_to_sparse_mat_fixed(&target, 3, false), - I32F32::from_num(0), - ); +#[test] +fn test_math_mat_vec_mul_sparse() { + let matrix: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; + let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); + let vector: Vec = vec_to_fixed(&[1., 2., 3.]); + let target: Vec = vec![1., 4., 9., 4., 10., 18., 7., 16., 27., 10., 22., 36.]; + let target = vec_to_sparse_mat_fixed(&target, 4, false); + let result = mat_vec_mul_sparse(&matrix, &vector); + assert_sparse_mat_compare(&result, &target, I32F32::from_num(0)); + let vector_one: Vec = vec_to_fixed(&[1., 0., 0.]); + let target: Vec = vec![1., 0., 0., 4., 0., 0., 7., 0., 0., 10., 0., 0.]; + let target = vec_to_sparse_mat_fixed(&target, 4, false); + let result = mat_vec_mul_sparse(&matrix, &vector_one); + assert_sparse_mat_compare(&result, &target, I32F32::from_num(0)); + let vector_empty: Vec = vec_to_fixed(&[]); + let result = mat_vec_mul_sparse(&matrix, &vector_empty); + let target = vec![vec![]; 4]; + 
assert_sparse_mat_compare(&result, &target, I32F32::from_num(0)); } #[test] @@ -2133,89 +2150,116 @@ fn test_math_hadamard_sparse() { } #[test] -fn test_math_mat_ema() { - let old: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; - let new: Vec = vec![ - 10., 20., 30., 40., 50., 60., 70., 80., 90., 100., 110., 120., +fn test_math_mat_ema_alpha() { + let old: Vec = vec![ + 0.1, 0.2, 3., 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.10, 0.11, 0.12, ]; + let new: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; let target: Vec = vec![ - 1.9, 3.8, 5.7, 7.6, 9.5, 11.4, 13.3, 15.2, 17.1, 19., 20.9, 22.8, + 0.19, 0.38, 1., 0.436, 0.545, 0.6539, 0.763, 0.8719, 0.981, 1., 1., 1., ]; + let old = vec_to_mat_fixed(&old, 4, false); let new = vec_to_mat_fixed(&new, 4, false); let target = vec_to_mat_fixed(&target, 4, false); - let result = mat_ema(&new, &old, I32F32::from_num(0.1)); - assert_mat_compare(&result, &target, I32F32::from_num(0.000001)); - let old: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; + let alphas = vec_to_mat_fixed(&[0.1; 12], 4, false); + let result = mat_ema_alpha(&new, &old, &alphas); + assert_mat_compare(&result, &target, I32F32::from_num(1e-4)); + let old: Vec = vec![ + 0.1, 0.2, 3., 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.10, 0.11, 0.12, + ]; let new: Vec = vec![ 10., 20., 30., 40., 50., 60., 70., 80., 90., 100., 110., 120., ]; - let target: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; + let target: Vec = vec![ + 0.10, 0.2, 1., 0.0399, 0.05, 0.0599, 0.07, 0.07999, 0.09, 0.1, 0.10999, 0.11999, + ]; let old = vec_to_mat_fixed(&old, 4, false); let new = vec_to_mat_fixed(&new, 4, false); let target = vec_to_mat_fixed(&target, 4, false); - let result = mat_ema(&new, &old, I32F32::from_num(0)); - assert_mat_compare(&result, &target, I32F32::from_num(0.000001)); - let old: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; + let alphas = vec_to_mat_fixed(&[0.; 12], 4, false); + let result = mat_ema_alpha(&new, &old, &alphas); + assert_mat_compare(&result, &target, I32F32::from_num(1e-4)); + let old: Vec = vec![ + 0.001, 0.002, 0.003, 0.004, 0.05, 0.006, 0.007, 0.008, 0.009, 0.010, 0.011, 0.012, + ]; let new: Vec = vec![ - 10., 20., 30., 40., 50., 60., 70., 80., 90., 100., 110., 120., + 0.1, 0.2, 3., 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.10, 0.11, 0.12, ]; let target: Vec = vec![ - 10., 20., 30., 40., 50., 60., 70., 80., 90., 100., 110., 120., + 0.10, 0.2, 1., 0.0399, 0.05, 0.0599, 0.07, 0.07999, 0.09, 0.1, 0.10999, 0.11999, ]; + let old = vec_to_mat_fixed(&old, 4, false); let new = vec_to_mat_fixed(&new, 4, false); let target = vec_to_mat_fixed(&target, 4, false); - let result = mat_ema(&new, &old, I32F32::from_num(1)); - assert_mat_compare(&result, &target, I32F32::from_num(0.000001)); + let alphas = vec_to_mat_fixed(&[1.; 12], 4, false); + let result = mat_ema_alpha(&new, &old, &alphas); + assert_mat_compare(&result, &target, I32F32::from_num(1e-4)); } #[test] -fn test_math_sparse_mat_ema() { - let old: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; - let new: Vec = vec![ - 10., 20., 30., 40., 50., 60., 70., 80., 90., 100., 110., 120., +fn test_math_sparse_mat_ema_alpha() { + let old: Vec = vec![ + 0.1, 0.2, 3., 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.10, 0.11, 0.12, ]; + let new: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; let target: Vec = vec![ - 1.9, 3.8, 5.7, 7.6, 9.5, 11.4, 13.3, 15.2, 17.1, 19., 20.9, 22.8, + 0.19, 0.38, 1., 0.43599, 0.545, 0.65399, 0.763, 0.87199, 0.981, 1., 
1., 1., ]; let old = vec_to_sparse_mat_fixed(&old, 4, false); let new = vec_to_sparse_mat_fixed(&new, 4, false); let target = vec_to_sparse_mat_fixed(&target, 4, false); - let result = mat_ema_sparse(&new, &old, I32F32::from_num(0.1)); - assert_sparse_mat_compare(&result, &target, I32F32::from_num(0.000001)); - let old: Vec = vec![0., 2., 3., 4., 0., 6., 7., 8., 0., 10., 11., 12.]; - let new: Vec = vec![10., 20., 0., 40., 0., 60., 0., 80., 90., 100., 110., 120.]; - let target: Vec = vec![1., 3.8, 2.7, 7.6, 0., 11.4, 6.3, 15.2, 9., 19., 20.9, 22.8]; + let alphas = vec_to_mat_fixed(&[0.1; 12], 4, false); + let result = mat_ema_alpha_sparse(&new, &old, &alphas); + assert_sparse_mat_compare(&result, &target, I32F32::from_num(1e-4)); + let old: Vec = vec![ + 0.001, 0.002, 0.003, 0.004, 0.05, 0.006, 0.007, 0.008, 0.009, 0.010, 0.011, 0.012, + ]; + let new: Vec = vec![ + 0.1, 0.2, 3., 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.10, 0.11, 0.12, + ]; + let target: Vec = vec![ + 0.0109, 0.0218, 0.30270, 0.007599, 0.05, 0.01139, 0.0133, 0.01519, 0.017, 0.01899, 0.02089, + 0.0227, + ]; let old = vec_to_sparse_mat_fixed(&old, 4, false); let new = vec_to_sparse_mat_fixed(&new, 4, false); let target = vec_to_sparse_mat_fixed(&target, 4, false); - let result = mat_ema_sparse(&new, &old, I32F32::from_num(0.1)); - assert_sparse_mat_compare(&result, &target, I32F32::from_num(0.000001)); + let alphas = vec_to_mat_fixed(&[0.1; 12], 4, false); + let result = mat_ema_alpha_sparse(&new, &old, &alphas); + assert_sparse_mat_compare(&result, &target, I32F32::from_num(1e-4)); let old: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let new: Vec = vec![10., 20., 0., 40., 0., 60., 0., 80., 90., 100., 110., 120.]; - let target: Vec = vec![1., 2., 0., 4., 0., 6., 0., 8., 9., 10., 11., 12.]; + let new: Vec = vec![ + 0.1, 0.2, 3., 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.10, 0.11, 0.12, + ]; + let target: Vec = vec![ + 0.01, 0.02, 0.3, 0.00399, 0.005, 0.00599, 0.007, 0.00799, 0.009, 0.01, 0.011, 0.01199, + ]; let old = vec_to_sparse_mat_fixed(&old, 4, false); let new = vec_to_sparse_mat_fixed(&new, 4, false); let target = vec_to_sparse_mat_fixed(&target, 4, false); - let result = mat_ema_sparse(&new, &old, I32F32::from_num(0.1)); - assert_sparse_mat_compare(&result, &target, I32F32::from_num(0.000001)); + let alphas = vec_to_mat_fixed(&[0.1; 12], 4, false); + let result = mat_ema_alpha_sparse(&new, &old, &alphas); + assert_sparse_mat_compare(&result, &target, I32F32::from_num(1e-4)); let old: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; let new: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; let target: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; let old = vec_to_sparse_mat_fixed(&old, 4, false); let new = vec_to_sparse_mat_fixed(&new, 4, false); let target = vec_to_sparse_mat_fixed(&target, 4, false); - let result = mat_ema_sparse(&new, &old, I32F32::from_num(0.1)); - assert_sparse_mat_compare(&result, &target, I32F32::from_num(0.000001)); + let alphas = vec_to_mat_fixed(&[0.1; 12], 4, false); + let result = mat_ema_alpha_sparse(&new, &old, &alphas); + assert_sparse_mat_compare(&result, &target, I32F32::from_num(1e-4)); let old: Vec = vec![1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; let new: Vec = vec![0., 0., 0., 0., 2., 0., 0., 0., 0., 0., 0., 0.]; - let target: Vec = vec![0.9, 0., 0., 0., 0.2, 0., 0., 0., 0., 0., 0., 0.]; + let target: Vec = vec![0.0, 0., 0., 0., 0.2, 0., 0., 0., 0., 0., 0., 0.]; let old = vec_to_sparse_mat_fixed(&old, 4, false); let new = 
vec_to_sparse_mat_fixed(&new, 4, false); let target = vec_to_sparse_mat_fixed(&target, 4, false); - let result = mat_ema_sparse(&new, &old, I32F32::from_num(0.1)); - assert_sparse_mat_compare(&result, &target, I32F32::from_num(0.000001)); + let alphas = vec_to_mat_fixed(&[0.1; 12], 4, false); + let result = mat_ema_alpha_sparse(&new, &old, &alphas); + assert_sparse_mat_compare(&result, &target, I32F32::from_num(1e-1)); } #[test] @@ -2505,25 +2549,25 @@ fn test_checked_sum() { } #[test] -fn test_mat_ema_alpha_vec_sparse_empty() { +fn test_mat_ema_alpha_sparse_empty() { let new: Vec> = Vec::new(); let old: Vec> = Vec::new(); - let alpha: Vec = Vec::new(); - let result = mat_ema_alpha_vec_sparse(&new, &old, &alpha); + let alpha: Vec> = Vec::new(); + let result = mat_ema_alpha_sparse(&new, &old, &alpha); assert_eq!(result, Vec::>::new()); } #[test] -fn test_mat_ema_alpha_vec_sparse_single_element() { +fn test_mat_ema_alpha_sparse_single_element() { let new: Vec> = vec![vec![(0, I32F32::from_num(1.0))]]; let old: Vec> = vec![vec![(0, I32F32::from_num(2.0))]]; - let alpha: Vec = vec![I32F32::from_num(0.5)]; - let result = mat_ema_alpha_vec_sparse(&new, &old, &alpha); - assert_eq!(result, vec![vec![(0, I32F32::from_num(1.5))]]); + let alpha = vec![vec![I32F32::from_num(0.5)]]; + let result = mat_ema_alpha_sparse(&new, &old, &alpha); + assert_eq!(result, vec![vec![(0, I32F32::from_num(1.0))]]); } #[test] -fn test_mat_ema_alpha_vec_sparse_multiple_elements() { +fn test_mat_ema_alpha_sparse_multiple_elements() { let new: Vec> = vec![ vec![(0, I32F32::from_num(1.0)), (1, I32F32::from_num(2.0))], vec![(0, I32F32::from_num(3.0)), (1, I32F32::from_num(4.0))], @@ -2532,35 +2576,35 @@ fn test_mat_ema_alpha_vec_sparse_multiple_elements() { vec![(0, I32F32::from_num(5.0)), (1, I32F32::from_num(6.0))], vec![(0, I32F32::from_num(7.0)), (1, I32F32::from_num(8.0))], ]; - let alpha: Vec = vec![I32F32::from_num(0.1), I32F32::from_num(0.2)]; - let result = mat_ema_alpha_vec_sparse(&new, &old, &alpha); + let alpha = vec![vec![I32F32::from_num(0.1), I32F32::from_num(0.2)]; 2]; + let result = mat_ema_alpha_sparse(&new, &old, &alpha); let expected = vec![ - vec![(0, I32F32::from_num(4.6)), (1, I32F32::from_num(5.2))], - vec![(0, I32F32::from_num(6.6)), (1, I32F32::from_num(7.2))], + vec![(0, I32F32::from_num(1.0)), (1, I32F32::from_num(1.0))], + vec![(0, I32F32::from_num(1.0)), (1, I32F32::from_num(1.0))], ]; assert_sparse_mat_compare(&result, &expected, I32F32::from_num(0.000001)); } #[test] -fn test_mat_ema_alpha_vec_sparse_zero_alpha() { +fn test_mat_ema_alpha_sparse_zero_alpha() { let new: Vec> = vec![vec![(0, I32F32::from_num(1.0))]]; let old: Vec> = vec![vec![(0, I32F32::from_num(2.0))]]; - let alpha: Vec = vec![I32F32::from_num(0.0)]; - let result = mat_ema_alpha_vec_sparse(&new, &old, &alpha); - assert_eq!(result, vec![vec![(0, I32F32::from_num(2.0))]]); + let alpha = vec![vec![I32F32::from_num(0.1), I32F32::from_num(0.0)]]; + let result = mat_ema_alpha_sparse(&new, &old, &alpha); + assert_eq!(result, vec![vec![(0, I32F32::from_num(1.0))]]); } #[test] -fn test_mat_ema_alpha_vec_sparse_one_alpha() { +fn test_mat_ema_alpha_sparse_one_alpha() { let new: Vec> = vec![vec![(0, I32F32::from_num(1.0))]]; let old: Vec> = vec![vec![(0, I32F32::from_num(2.0))]]; - let alpha: Vec = vec![I32F32::from_num(1.0)]; - let result = mat_ema_alpha_vec_sparse(&new, &old, &alpha); + let alpha = vec![vec![I32F32::from_num(1.0), I32F32::from_num(0.0)]]; + let result = mat_ema_alpha_sparse(&new, &old, &alpha); assert_eq!(result, 
vec![vec![(0, I32F32::from_num(1.0))]]); } #[test] -fn test_mat_ema_alpha_vec_sparse_mixed_alpha() { +fn test_mat_ema_alpha_sparse_mixed_alpha() { let new: Vec> = vec![ vec![(0, I32F32::from_num(1.0)), (1, I32F32::from_num(2.0))], vec![(0, I32F32::from_num(3.0)), (1, I32F32::from_num(4.0))], @@ -2569,20 +2613,20 @@ fn test_mat_ema_alpha_vec_sparse_mixed_alpha() { vec![(0, I32F32::from_num(5.0)), (1, I32F32::from_num(6.0))], vec![(0, I32F32::from_num(7.0)), (1, I32F32::from_num(8.0))], ]; - let alpha: Vec = vec![I32F32::from_num(0.3), I32F32::from_num(0.7)]; - let result = mat_ema_alpha_vec_sparse(&new, &old, &alpha); + let alpha = vec![vec![I32F32::from_num(0.3), I32F32::from_num(0.7)]; 2]; + let result = mat_ema_alpha_sparse(&new, &old, &alpha); assert_sparse_mat_compare( &result, &[ - vec![(0, I32F32::from_num(3.8)), (1, I32F32::from_num(3.2))], - vec![(0, I32F32::from_num(5.8)), (1, I32F32::from_num(5.2))], + vec![(0, I32F32::from_num(1.0)), (1, I32F32::from_num(1.0))], + vec![(0, I32F32::from_num(1.0)), (1, I32F32::from_num(1.0))], ], I32F32::from_num(0.000001), ); } #[test] -fn test_mat_ema_alpha_vec_sparse_sparse_matrix() { +fn test_mat_ema_alpha_sparse_sparse_matrix() { let new: Vec> = vec![ vec![(0, I32F32::from_num(1.0))], vec![(1, I32F32::from_num(4.0))], @@ -2591,77 +2635,114 @@ fn test_mat_ema_alpha_vec_sparse_sparse_matrix() { vec![(0, I32F32::from_num(5.0))], vec![(1, I32F32::from_num(8.0))], ]; - let alpha: Vec = vec![I32F32::from_num(0.5), I32F32::from_num(0.5)]; - let result = mat_ema_alpha_vec_sparse(&new, &old, &alpha); + let alpha = vec![vec![I32F32::from_num(0.5), I32F32::from_num(0.5)]; 2]; + let result = mat_ema_alpha_sparse(&new, &old, &alpha); assert_eq!( result, vec![ - vec![(0, I32F32::from_num(3.0))], - vec![(1, I32F32::from_num(6.0))] + vec![(0, I32F32::from_num(1.0))], + vec![(1, I32F32::from_num(1.0))] ] ); } #[test] -fn test_mat_ema_alpha_vec_basic() { +fn test_mat_ema_alpha_basic() { let new = mat_to_fixed(&[vec![1.0, 2.0, 3.0], vec![4.0, 5.0, 6.0]]); let old = mat_to_fixed(&[vec![0.5, 1.5, 2.5], vec![3.5, 4.5, 5.5]]); let alpha = vec![ - I32F32::from_num(0.5), - I32F32::from_num(0.5), - I32F32::from_num(0.5), + vec![ + I32F32::from_num(0.5), + I32F32::from_num(0.5), + I32F32::from_num(0.5), + ]; + 2 ]; - let expected = mat_to_fixed(&[vec![0.75, 1.75, 2.75], vec![3.75, 4.75, 5.75]]); - let result = mat_ema_alpha_vec(&new, &old, &alpha); + let expected = mat_to_fixed(&[vec![0.75, 1.0, 1.0], vec![1.0, 1.0, 1.0]]); + let result = mat_ema_alpha(&new, &old, &alpha); assert_eq!(result, expected); } #[test] -fn test_mat_ema_alpha_vec_varying_alpha() { +fn test_mat_ema_alpha_varying_alpha() { let new = mat_to_fixed(&[vec![1.0, 2.0, 3.0], vec![4.0, 5.0, 6.0]]); let old = mat_to_fixed(&[vec![0.5, 1.5, 2.5], vec![3.5, 4.5, 5.5]]); let alpha = vec![ - I32F32::from_num(0.2), - I32F32::from_num(0.5), - I32F32::from_num(0.8), + vec![ + I32F32::from_num(0.2), + I32F32::from_num(0.5), + I32F32::from_num(0.8), + ]; + 2 ]; - let expected = mat_to_fixed(&[vec![0.6, 1.75, 2.9], vec![3.6, 4.75, 5.9]]); - let result = mat_ema_alpha_vec(&new, &old, &alpha); + let expected = mat_to_fixed(&[vec![0.6, 1.0, 1.0], vec![1.0, 1.0, 1.0]]); + let result = mat_ema_alpha(&new, &old, &alpha); assert_mat_approx_eq(&result, &expected, I32F32::from_num(1e-6)); } #[test] -fn test_mat_ema_alpha_vec_empty_matrices() { +fn test_mat_ema_alpha_sparse_varying_alpha() { + let weights = vec![ + vec![(0, I32F32::from_num(0.1)), (1, I32F32::from_num(0.2))], + vec![(0, I32F32::from_num(0.3)), (1, 
I32F32::from_num(0.4))], + ]; + let bonds = vec![ + vec![(0, I32F32::from_num(0.5)), (1, I32F32::from_num(0.6))], + vec![(0, I32F32::from_num(0.7)), (1, I32F32::from_num(0.8))], + ]; + let alpha = vec![ + vec![I32F32::from_num(0.9), I32F32::from_num(0.8)], + vec![I32F32::from_num(0.5), I32F32::from_num(0.7)], + ]; + + let expected = vec![ + vec![(0, I32F32::from_num(0.14)), (1, I32F32::from_num(0.28))], + vec![ + (0, I32F32::from_num(0.499999)), + (1, I32F32::from_num(0.519999)), + ], + ]; + + let result = mat_ema_alpha_sparse(&weights, &bonds, &alpha); + // Assert the results with an epsilon for approximate equality + assert_sparse_mat_compare(&result, &expected, I32F32::from_num(1e-6)); +} + +#[test] +fn test_mat_ema_alpha_empty_matrices() { let new: Vec> = vec![]; let old: Vec> = vec![]; - let alpha: Vec = vec![]; + let alpha = vec![]; let expected: Vec> = vec![vec![]; 1]; - let result = mat_ema_alpha_vec(&new, &old, &alpha); + let result = mat_ema_alpha(&new, &old, &alpha); assert_eq!(result, expected); } #[test] -fn test_mat_ema_alpha_vec_single_element() { +fn test_mat_ema_alpha_single_element() { let new = mat_to_fixed(&[vec![1.0]]); let old = mat_to_fixed(&[vec![0.5]]); - let alpha = vec![I32F32::from_num(0.5)]; + let alpha = vec![vec![I32F32::from_num(0.5)]]; let expected = mat_to_fixed(&[vec![0.75]]); - let result = mat_ema_alpha_vec(&new, &old, &alpha); + let result = mat_ema_alpha(&new, &old, &alpha); assert_eq!(result, expected); } // TODO: (@sd): Should these be non panicking? #[test] #[should_panic(expected = "assertion failed")] -fn test_mat_ema_alpha_vec_mismatched_dimensions() { +fn test_mat_ema_alpha_mismatched_dimensions() { let new = mat_to_fixed(&[vec![1.0, 2.0], vec![3.0, 4.0]]); let old = mat_to_fixed(&[vec![1.0, 2.0, 3.0], vec![4.0, 5.0, 6.0]]); let alpha = vec![ - I32F32::from_num(0.5), - I32F32::from_num(0.5), - I32F32::from_num(0.5), + vec![ + I32F32::from_num(0.5), + I32F32::from_num(0.5), + I32F32::from_num(0.5), + ]; + 2 ]; - let _result = mat_ema_alpha_vec(&new, &old, &alpha); + let _result = mat_ema_alpha(&new, &old, &alpha); } #[test] diff --git a/pallets/subtensor/src/tests/migration.rs b/pallets/subtensor/src/tests/migration.rs index 5efc4f152a..1dfac06ad5 100644 --- a/pallets/subtensor/src/tests/migration.rs +++ b/pallets/subtensor/src/tests/migration.rs @@ -12,6 +12,7 @@ use frame_support::{ weights::Weight, }; +use crate::migrations::migrate_storage; use frame_system::Config; use sp_core::{H256, U256, crypto::Ss58Codec}; use sp_io::hashing::twox_128; @@ -439,6 +440,32 @@ fn test_migrate_set_first_emission_block_number() { }); } +#[test] +fn test_migrate_set_subtoken_enable() { + new_test_ext(1).execute_with(|| { + let netuids: [u16; 3] = [1, 2, 3]; + let block_number = 100; + for netuid in netuids.iter() { + add_network(*netuid, 1, 0); + } + + let new_netuid = 4; + add_network_without_emission_block(new_netuid, 1, 0); + + let weight = + crate::migrations::migrate_set_subtoken_enabled::migrate_set_subtoken_enabled::(); + + let expected_weight: Weight = ::DbWeight::get().reads(1) + + ::DbWeight::get().writes(netuids.len() as u64 + 2); + assert_eq!(weight, expected_weight); + + for netuid in netuids.iter() { + assert!(SubtokenEnabled::::get(netuid)); + } + assert!(!SubtokenEnabled::::get(new_netuid)); + }); +} + #[test] fn test_migrate_remove_zero_total_hotkey_alpha() { new_test_ext(1).execute_with(|| { @@ -555,3 +582,241 @@ fn test_migrate_revealed_commitments() { assert!(!weight.is_zero(), "Migration weight should be non-zero"); }); } + +#[test] +fn 
test_migrate_remove_total_hotkey_coldkey_stakes_this_interval() {
+    new_test_ext(1).execute_with(|| {
+        const MIGRATION_NAME: &str = "migrate_remove_total_hotkey_coldkey_stakes_this_interval";
+
+        let pallet_name = twox_128(b"SubtensorModule");
+        let storage_name = twox_128(b"TotalHotkeyColdkeyStakesThisInterval");
+        let prefix = [pallet_name, storage_name].concat();
+
+        // Set up 200,000 entries to be deleted.
+        for i in 0..200_000 {
+            let hotkey = U256::from(i as u64);
+            let coldkey = U256::from(i as u64);
+            let key = [prefix.clone(), hotkey.encode(), coldkey.encode()].concat();
+            let value = (100 + i, 200 + i);
+            put_raw(&key, &value.encode());
+        }
+
+        assert!(frame_support::storage::unhashed::contains_prefixed_key(&prefix), "Entries should exist before migration.");
+        assert!(
+            !HasMigrationRun::<Test>::get(MIGRATION_NAME.as_bytes().to_vec()),
+            "Migration should not have run yet."
+        );
+
+        // Run migration
+        let weight = crate::migrations::migrate_remove_total_hotkey_coldkey_stakes_this_interval::migrate_remove_total_hotkey_coldkey_stakes_this_interval::<Test>();
+
+        assert!(!frame_support::storage::unhashed::contains_prefixed_key(&prefix), "All entries should have been removed.");
+        assert!(
+            HasMigrationRun::<Test>::get(MIGRATION_NAME.as_bytes().to_vec()),
+            "Migration should be marked as run."
+        );
+        assert!(!weight.is_zero(), "Migration weight should be non-zero.");
+    });
+}
+#[test]
+fn test_migrate_remove_last_hotkey_coldkey_emission_on_netuid() {
+    const MIGRATION_NAME: &str = "migrate_remove_last_hotkey_coldkey_emission_on_netuid";
+    let pallet_name = "SubtensorModule";
+    let storage_name = "LastHotkeyColdkeyEmissionOnNetuid";
+    let migration = crate::migrations::migrate_orphaned_storage_items::remove_last_hotkey_coldkey_emission_on_netuid::<Test>;
+
+    test_remove_storage_item(
+        MIGRATION_NAME,
+        pallet_name,
+        storage_name,
+        migration,
+        200_000,
+    );
+}
+#[test]
+fn test_migrate_remove_subnet_alpha_emission_sell() {
+    const MIGRATION_NAME: &str = "migrate_remove_subnet_alpha_emission_sell";
+    let pallet_name = "SubtensorModule";
+    let storage_name = "SubnetAlphaEmissionSell";
+    let migration =
+        crate::migrations::migrate_orphaned_storage_items::remove_subnet_alpha_emission_sell::<Test>;
+
+    test_remove_storage_item(
+        MIGRATION_NAME,
+        pallet_name,
+        storage_name,
+        migration,
+        200_000,
+    );
+}
+
+#[test]
+fn test_migrate_remove_neurons_to_prune_at_next_epoch() {
+    const MIGRATION_NAME: &str = "migrate_remove_neurons_to_prune_at_next_epoch";
+    let pallet_name = "SubtensorModule";
+    let storage_name = "NeuronsToPruneAtNextEpoch";
+    let migration =
+        crate::migrations::migrate_orphaned_storage_items::remove_neurons_to_prune_at_next_epoch::<
+            Test,
+        >;
+
+    test_remove_storage_item(
+        MIGRATION_NAME,
+        pallet_name,
+        storage_name,
+        migration,
+        200_000,
+    );
+}
+
+#[test]
+fn test_migrate_remove_total_stake_at_dynamic() {
+    const MIGRATION_NAME: &str = "migrate_remove_total_stake_at_dynamic";
+    let pallet_name = "SubtensorModule";
+    let storage_name = "TotalStakeAtDynamic";
+    let migration =
+        crate::migrations::migrate_orphaned_storage_items::remove_total_stake_at_dynamic::<Test>;
+
+    test_remove_storage_item(
+        MIGRATION_NAME,
+        pallet_name,
+        storage_name,
+        migration,
+        200_000,
+    );
+}
+
+#[test]
+fn test_migrate_remove_subnet_name() {
+    const MIGRATION_NAME: &str = "migrate_remove_subnet_name";
+    let pallet_name = "SubtensorModule";
+    let storage_name = "SubnetName";
+    let migration = crate::migrations::migrate_orphaned_storage_items::remove_subnet_name::<Test>;
+
+    test_remove_storage_item(
+        MIGRATION_NAME,
+        pallet_name,
+        storage_name,
+        migration,
+        200_000,
+    );
+}
+
+#[test]
+fn test_migrate_remove_network_min_allowed_uids() {
+    const MIGRATION_NAME: &str = "migrate_remove_network_min_allowed_uids";
+    let pallet_name = "SubtensorModule";
+    let storage_name = "NetworkMinAllowedUids";
+    let migration =
+        crate::migrations::migrate_orphaned_storage_items::remove_network_min_allowed_uids::<Test>;
+
+    test_remove_storage_item(MIGRATION_NAME, pallet_name, storage_name, migration, 1);
+}
+
+#[test]
+fn test_migrate_remove_dynamic_block() {
+    const MIGRATION_NAME: &str = "migrate_remove_dynamic_block";
+    let pallet_name = "SubtensorModule";
+    let storage_name = "DynamicBlock";
+    let migration = crate::migrations::migrate_orphaned_storage_items::remove_dynamic_block::<Test>;
+
+    test_remove_storage_item(MIGRATION_NAME, pallet_name, storage_name, migration, 1);
+}
+
+#[allow(clippy::arithmetic_side_effects)]
+fn test_remove_storage_item<F: Fn() -> Weight>(
+    migration_name: &'static str,
+    pallet_name: &'static str,
+    storage_name: &'static str,
+    migration: F,
+    test_entries_number: i32,
+) {
+    new_test_ext(1).execute_with(|| {
+        let pallet_name = twox_128(pallet_name.as_bytes());
+        let storage_name = twox_128(storage_name.as_bytes());
+        let prefix = [pallet_name, storage_name].concat();
+
+        // Set up entries to be deleted.
+        for i in 0..test_entries_number {
+            let hotkey = U256::from(i as u64);
+            let coldkey = U256::from(i as u64);
+            let key = [prefix.clone(), hotkey.encode(), coldkey.encode()].concat();
+            let value = (100 + i, 200 + i);
+            put_raw(&key, &value.encode());
+        }
+
+        assert!(
+            frame_support::storage::unhashed::contains_prefixed_key(&prefix),
+            "Entries should exist before migration."
+        );
+        assert!(
+            !HasMigrationRun::<Test>::get(migration_name.as_bytes().to_vec()),
+            "Migration should not have run yet."
+        );
+
+        // Run migration
+        let weight = migration();
+
+        assert!(
+            !frame_support::storage::unhashed::contains_prefixed_key(&prefix),
+            "All entries should have been removed."
+        );
+        assert!(
+            HasMigrationRun::<Test>::get(migration_name.as_bytes().to_vec()),
+            "Migration should be marked as run."
+        );
+        assert!(!weight.is_zero(), "Migration weight should be non-zero.");
+    });
+}
+
+#[test]
+fn test_migrate_remove_commitments_rate_limit() {
+    new_test_ext(1).execute_with(|| {
+        // ------------------------------
+        // Step 1: Simulate Old Storage Entry
+        // ------------------------------
+        const MIGRATION_NAME: &str = "migrate_remove_commitments_rate_limit";
+
+        // Build the raw storage key: twox128("Commitments") ++ twox128("RateLimit")
+        let pallet_prefix = twox_128("Commitments".as_bytes());
+        let storage_prefix = twox_128("RateLimit".as_bytes());
+
+        let mut key = Vec::new();
+        key.extend_from_slice(&pallet_prefix);
+        key.extend_from_slice(&storage_prefix);
+
+        let original_value: u64 = 123;
+        put_raw(&key, &original_value.encode());
+
+        let stored_before = get_raw(&key).expect("Expected RateLimit to exist");
+        assert_eq!(
+            u64::decode(&mut &stored_before[..]).expect("Failed to decode RateLimit"),
+            original_value
+        );
+
+        assert!(
+            !HasMigrationRun::<Test>::get(MIGRATION_NAME.as_bytes().to_vec()),
+            "Migration should not have run yet"
+        );
+
+        // ------------------------------
+        // Step 2: Run the Migration
+        // ------------------------------
+        let weight = crate::migrations::migrate_remove_commitments_rate_limit::
+            migrate_remove_commitments_rate_limit::<Test>();
+
+        assert!(
+            HasMigrationRun::<Test>::get(MIGRATION_NAME.as_bytes().to_vec()),
+            "Migration should be marked as completed"
+        );
+
+        // ------------------------------
+        // Step 3: Verify Migration Effects
+        // ------------------------------
+        assert!(
+            get_raw(&key).is_none(),
+            "RateLimit storage should have been cleared"
+        );
+
+        assert!(!weight.is_zero(), "Migration weight should be non-zero");
+    });
+}
diff --git a/pallets/subtensor/src/tests/mock.rs b/pallets/subtensor/src/tests/mock.rs
index 4cc16ce40a..d5d302d5c1 100644
--- a/pallets/subtensor/src/tests/mock.rs
+++ b/pallets/subtensor/src/tests/mock.rs
@@ -155,6 +155,7 @@ parameter_types! {
     pub const TransactionByteFee: Balance = 100;
     pub const SDebug:u64 = 1;
     pub const InitialRho: u16 = 30;
+    pub const InitialAlphaSigmoidSteepness: u16 = 10;
     pub const InitialKappa: u16 = 32_767;
     pub const InitialTempo: u16 = 360;
     pub const SelfOwnership: u64 = 2;
@@ -162,6 +163,7 @@
     pub const InitialMaxAllowedUids: u16 = 2;
     pub const InitialBondsMovingAverage: u64 = 900_000;
     pub const InitialBondsPenalty:u16 = u16::MAX;
+    pub const InitialBondsResetOn: bool = false;
     pub const InitialStakePruningMin: u16 = 0;
     pub const InitialFoundationDistribution: u64 = 0;
     pub const InitialDefaultDelegateTake: u16 = 11_796; // 18%, same as in production
@@ -203,8 +205,10 @@
     pub const InitialAlphaHigh: u16 = 58982; // Represents 0.9 as per the production default
     pub const InitialAlphaLow: u16 = 45875; // Represents 0.7 as per the production default
     pub const InitialLiquidAlphaOn: bool = false; // Default value for LiquidAlphaOn
+    pub const InitialYuma3On: bool = false; // Default value for Yuma3On
     // pub const InitialNetworkMaxStake: u64 = u64::MAX; // (DEPRECATED)
     pub const InitialColdkeySwapScheduleDuration: u64 = 5 * 24 * 60 * 60 / 12; // Default as 5 days
+    pub const InitialColdkeySwapRescheduleDuration: u64 = 24 * 60 * 60 / 12; // Default as 1 day
     pub const InitialDissolveNetworkScheduleDuration: u64 = 5 * 24 * 60 * 60 / 12; // Default as 5 days
     pub const InitialTaoWeight: u64 = 0; // 100% global weight.
pub const InitialEmaPriceHalvingPeriod: u64 = 201_600_u64; // 4 weeks @@ -389,6 +393,7 @@ impl crate::Config for Test { type InitialAdjustmentAlpha = InitialAdjustmentAlpha; type InitialTargetRegistrationsPerInterval = InitialTargetRegistrationsPerInterval; type InitialRho = InitialRho; + type InitialAlphaSigmoidSteepness = InitialAlphaSigmoidSteepness; type InitialKappa = InitialKappa; type InitialMaxAllowedUids = InitialMaxAllowedUids; type InitialValidatorPruneLen = InitialValidatorPruneLen; @@ -399,6 +404,7 @@ impl crate::Config for Test { type InitialPruningScore = InitialPruningScore; type InitialBondsMovingAverage = InitialBondsMovingAverage; type InitialBondsPenalty = InitialBondsPenalty; + type InitialBondsResetOn = InitialBondsResetOn; type InitialMaxAllowedValidators = InitialMaxAllowedValidators; type InitialDefaultDelegateTake = InitialDefaultDelegateTake; type InitialMinDelegateTake = InitialMinDelegateTake; @@ -427,8 +433,10 @@ impl crate::Config for Test { type AlphaHigh = InitialAlphaHigh; type AlphaLow = InitialAlphaLow; type LiquidAlphaOn = InitialLiquidAlphaOn; + type Yuma3On = InitialYuma3On; type Preimages = Preimage; type InitialColdkeySwapScheduleDuration = InitialColdkeySwapScheduleDuration; + type InitialColdkeySwapRescheduleDuration = InitialColdkeySwapRescheduleDuration; type InitialDissolveNetworkScheduleDuration = InitialDissolveNetworkScheduleDuration; type InitialTaoWeight = InitialTaoWeight; type InitialEmaPriceHalvingPeriod = InitialEmaPriceHalvingPeriod; @@ -528,7 +536,6 @@ where impl pallet_drand::Config for Test { type RuntimeEvent = RuntimeEvent; - type WeightInfo = pallet_drand::weights::SubstrateWeight; type AuthorityId = TestAuthId; type Verifier = pallet_drand::verifier::QuicknetVerifier; type UnsignedPriority = ConstU64<{ 1 << 20 }>; @@ -602,16 +609,23 @@ pub(crate) fn step_block(n: u16) { #[allow(dead_code)] pub(crate) fn run_to_block(n: u64) { + run_to_block_ext(n, false) +} + +#[allow(dead_code)] +pub(crate) fn run_to_block_ext(n: u64, enable_events: bool) { while System::block_number() < n { Scheduler::on_finalize(System::block_number()); SubtensorModule::on_finalize(System::block_number()); System::on_finalize(System::block_number()); System::set_block_number(System::block_number() + 1); System::on_initialize(System::block_number()); - System::events().iter().for_each(|event| { - log::info!("Event: {:?}", event.event); - }); - System::reset_events(); + if !enable_events { + System::events().iter().for_each(|event| { + log::info!("Event: {:?}", event.event); + }); + System::reset_events(); + } SubtensorModule::on_initialize(System::block_number()); Scheduler::on_initialize(System::block_number()); } @@ -712,6 +726,7 @@ pub fn add_network(netuid: u16, tempo: u16, _modality: u16) { SubtensorModule::set_network_registration_allowed(netuid, true); SubtensorModule::set_network_pow_registration_allowed(netuid, true); FirstEmissionBlockNumber::::insert(netuid, 1); + SubtokenEnabled::::insert(netuid, true); } #[allow(dead_code)] @@ -721,6 +736,14 @@ pub fn add_network_without_emission_block(netuid: u16, tempo: u16, _modality: u1 SubtensorModule::set_network_pow_registration_allowed(netuid, true); } +#[allow(dead_code)] +pub fn add_network_disable_subtoken(netuid: u16, tempo: u16, _modality: u16) { + SubtensorModule::init_new_network(netuid, tempo); + SubtensorModule::set_network_registration_allowed(netuid, true); + SubtensorModule::set_network_pow_registration_allowed(netuid, true); + SubtokenEnabled::::insert(netuid, false); +} + #[allow(dead_code)] 
pub fn add_dynamic_network(hotkey: &U256, coldkey: &U256) -> u16 { let netuid = SubtensorModule::get_next_netuid(); @@ -734,6 +757,7 @@ pub fn add_dynamic_network(hotkey: &U256, coldkey: &U256) -> u16 { NetworkRegistrationAllowed::::insert(netuid, true); NetworkPowRegistrationAllowed::::insert(netuid, true); FirstEmissionBlockNumber::::insert(netuid, 0); + SubtokenEnabled::::insert(netuid, true); netuid } diff --git a/pallets/subtensor/src/tests/mod.rs b/pallets/subtensor/src/tests/mod.rs index ce891e5615..161749a923 100644 --- a/pallets/subtensor/src/tests/mod.rs +++ b/pallets/subtensor/src/tests/mod.rs @@ -1,6 +1,7 @@ mod batch_tx; mod children; mod coinbase; +mod consensus; mod delegate_info; mod difficulty; mod emission; diff --git a/pallets/subtensor/src/tests/move_stake.rs b/pallets/subtensor/src/tests/move_stake.rs index 0b7584a4f0..dd85ab9075 100644 --- a/pallets/subtensor/src/tests/move_stake.rs +++ b/pallets/subtensor/src/tests/move_stake.rs @@ -566,11 +566,11 @@ fn test_do_move_wrong_origin() { }); } -// 14. test_do_move_same_hotkey -// Description: Attempt to move stake to the same hotkey, which should fail or have no effect -// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test move -- test_do_move_same_hotkey --exact --nocapture +// 14. test_do_move_same_hotkey_fails +// Description: Attempt to move stake to the same hotkey, which should fail +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test move -- test_do_move_same_hotkey_fails --exact --nocapture #[test] -fn test_do_move_same_hotkey() { +fn test_do_move_same_hotkey_fails() { new_test_ext(1).execute_with(|| { let subnet_owner_coldkey = U256::from(1001); let subnet_owner_hotkey = U256::from(1002); @@ -587,20 +587,22 @@ fn test_do_move_same_hotkey() { SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, netuid); // Attempt to move stake to the same hotkey - assert_ok!(SubtensorModule::do_move_stake( - RuntimeOrigin::signed(coldkey), - hotkey, - hotkey, - netuid, - netuid, - alpha, - )); + assert_eq!( + SubtensorModule::do_move_stake( + RuntimeOrigin::signed(coldkey), + hotkey, + hotkey, + netuid, + netuid, + alpha, + ), + Err(Error::::SameNetuid.into()) + ); // Check that stake remains unchanged - assert_abs_diff_eq!( + assert_eq!( SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, netuid), - alpha - fee, - epsilon = alpha / 1000 + alpha, ); }); } @@ -1151,7 +1153,8 @@ fn test_do_swap_nonexistent_subnet() { new_test_ext(1).execute_with(|| { let coldkey = U256::from(1); let hotkey = U256::from(2); - let nonexistent_netuid: u16 = 9999; + let nonexistent_netuid1: u16 = 9998; + let nonexistent_netuid2: u16 = 9999; let stake_amount = 1_000_000; SubtensorModule::create_account_if_non_existent(&coldkey, &hotkey); @@ -1160,8 +1163,8 @@ fn test_do_swap_nonexistent_subnet() { SubtensorModule::do_swap_stake( RuntimeOrigin::signed(coldkey), hotkey, - nonexistent_netuid, - nonexistent_netuid, + nonexistent_netuid1, + nonexistent_netuid2, stake_amount ), Error::::SubnetNotExists @@ -1257,7 +1260,8 @@ fn test_do_swap_minimum_stake_check() { new_test_ext(1).execute_with(|| { let subnet_owner_coldkey = U256::from(1001); let subnet_owner_hotkey = U256::from(1002); - let netuid = add_dynamic_network(&subnet_owner_hotkey, &subnet_owner_coldkey); + let netuid1 = add_dynamic_network(&subnet_owner_hotkey, &subnet_owner_coldkey); + let netuid2 = add_dynamic_network(&subnet_owner_hotkey, &subnet_owner_coldkey); let coldkey = U256::from(1); let hotkey = U256::from(3); @@ -1265,14 +1269,14 @@ 
fn test_do_swap_minimum_stake_check() { let swap_amount = 1; SubtensorModule::create_account_if_non_existent(&coldkey, &hotkey); - SubtensorModule::stake_into_subnet(&hotkey, &coldkey, netuid, total_stake, 0); + SubtensorModule::stake_into_subnet(&hotkey, &coldkey, netuid1, total_stake, 0); assert_err!( SubtensorModule::do_swap_stake( RuntimeOrigin::signed(coldkey), hotkey, - netuid, - netuid, + netuid1, + netuid2, swap_amount ), Error::::AmountTooLow @@ -1290,30 +1294,28 @@ fn test_do_swap_same_subnet() { let coldkey = U256::from(1); let hotkey = U256::from(2); let stake_amount = DefaultMinStake::::get() * 10; - let fee = DefaultStakingFee::::get(); SubtensorModule::create_account_if_non_existent(&coldkey, &hotkey); SubtensorModule::stake_into_subnet(&hotkey, &coldkey, netuid, stake_amount, 0); let alpha_before = SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, netuid); - let fee_as_alpha = SubtensorModule::swap_tao_for_alpha(netuid, fee); - assert_ok!(SubtensorModule::do_swap_stake( - RuntimeOrigin::signed(coldkey), - hotkey, - netuid, - netuid, - alpha_before - )); + assert_eq!( + SubtensorModule::do_swap_stake( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + netuid, + alpha_before + ), + Err(Error::::SameNetuid.into()) + ); let alpha_after = SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, netuid); - assert_abs_diff_eq!( - alpha_after, - alpha_before - fee_as_alpha, - epsilon = alpha_after / 10000 - ); + + assert_eq!(alpha_after, alpha_before,); }); } diff --git a/pallets/subtensor/src/tests/registration.rs b/pallets/subtensor/src/tests/registration.rs index 1ae16d95c0..3a8ce39ba3 100644 --- a/pallets/subtensor/src/tests/registration.rs +++ b/pallets/subtensor/src/tests/registration.rs @@ -37,7 +37,7 @@ fn test_registration_subscribe_ok_dispatch_info_ok() { assert_eq!( call.get_dispatch_info(), DispatchInfo { - weight: frame_support::weights::Weight::from_parts(2_992_000_000, 0), + weight: frame_support::weights::Weight::from_parts(3_166_200_000, 0), class: DispatchClass::Normal, pays_fee: Pays::No } diff --git a/pallets/subtensor/src/tests/senate.rs b/pallets/subtensor/src/tests/senate.rs index 5592b303be..52b0c240c2 100644 --- a/pallets/subtensor/src/tests/senate.rs +++ b/pallets/subtensor/src/tests/senate.rs @@ -397,6 +397,7 @@ fn test_senate_leave_vote_removal() { add_network(netuid, tempo, 0); // Give it some $$$ in his coldkey balance SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id, stake); + SubtokenEnabled::::insert(netuid, true); // Subscribe and check extrinsic output assert_ok!(SubtensorModule::burned_register( @@ -479,6 +480,9 @@ fn test_senate_leave_vote_removal() { SubtensorModule::set_target_registrations_per_interval(other_netuid, 1000); SubtensorModule::set_max_registrations_per_block(root_netuid, 1000); SubtensorModule::set_target_registrations_per_interval(root_netuid, 1000); + SubtokenEnabled::::insert(root_netuid, true); + SubtokenEnabled::::insert(other_netuid, true); + for i in 0..200 { let hot: U256 = U256::from(i + 100); let cold: U256 = U256::from(i + 100); @@ -715,6 +719,8 @@ fn test_adjust_senate_events() { SubtensorModule::set_target_registrations_per_interval(netuid, max_senate_size + 1); SubtensorModule::set_max_registrations_per_block(root_netuid, max_senate_size + 1); SubtensorModule::set_target_registrations_per_interval(root_netuid, max_senate_size + 1); + SubtokenEnabled::::insert(netuid, true); + SubtokenEnabled::::insert(root_netuid, true); // Subscribe and 
check extrinsic output assert_ok!(SubtensorModule::burned_register( diff --git a/pallets/subtensor/src/tests/serving.rs b/pallets/subtensor/src/tests/serving.rs index 1d8202a242..251dde2078 100644 --- a/pallets/subtensor/src/tests/serving.rs +++ b/pallets/subtensor/src/tests/serving.rs @@ -53,7 +53,7 @@ fn test_serving_subscribe_ok_dispatch_info_ok() { assert_eq!( call.get_dispatch_info(), DispatchInfo { - weight: frame_support::weights::Weight::from_parts(246_000_000, 0), + weight: frame_support::weights::Weight::from_parts(235_670_000, 0), class: DispatchClass::Normal, pays_fee: Pays::No } @@ -355,7 +355,7 @@ fn test_prometheus_serving_subscribe_ok_dispatch_info_ok() { assert_eq!( call.get_dispatch_info(), DispatchInfo { - weight: frame_support::weights::Weight::from_parts(245_000_000, 0), + weight: frame_support::weights::Weight::from_parts(231_170_000, 0), class: DispatchClass::Normal, pays_fee: Pays::No } diff --git a/pallets/subtensor/src/tests/staking.rs b/pallets/subtensor/src/tests/staking.rs index 69e0c7ada1..9220fa7296 100644 --- a/pallets/subtensor/src/tests/staking.rs +++ b/pallets/subtensor/src/tests/staking.rs @@ -13,7 +13,6 @@ use frame_support::dispatch::{DispatchClass, DispatchInfo, GetDispatchInfo, Pays use frame_support::sp_runtime::DispatchError; use sp_core::{Get, H256, U256}; use substrate_fixed::types::{I96F32, I110F18, U64F64, U96F32}; - /*********************************************************** staking::add_stake() tests ************************************************************/ @@ -32,7 +31,7 @@ fn test_add_stake_dispatch_info_ok() { assert_eq!( call.get_dispatch_info(), DispatchInfo { - weight: frame_support::weights::Weight::from_parts(1_074_000_000, 0), + weight: frame_support::weights::Weight::from_parts(1_501_000_000, 0), class: DispatchClass::Normal, pays_fee: Pays::No } @@ -90,6 +89,873 @@ fn test_add_stake_ok_no_emission() { ); }); } +// #[test] +// fn test_add_stake_aggregate_ok_no_emission() { +// new_test_ext(1).execute_with(|| { +// let hotkey_account_id = U256::from(533453); +// let coldkey_account_id = U256::from(55453); +// let amount = DefaultMinStake::::get() * 10; +// let fee = DefaultStakingFee::::get(); +// +// //add network +// let netuid: u16 = add_dynamic_network(&hotkey_account_id, &coldkey_account_id); +// +// // Give it some $$$ in his coldkey balance +// SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id, amount); +// +// // Check we have zero staked before transfer +// assert_eq!( +// SubtensorModule::get_total_stake_for_hotkey(&hotkey_account_id), +// 0 +// ); +// +// // Also total stake should be equal to the network initial lock +// assert_eq!( +// SubtensorModule::get_total_stake(), +// SubtensorModule::get_network_min_lock() +// ); +// +// // Transfer to hotkey account, and check if the result is ok +// assert_ok!(SubtensorModule::add_stake_aggregate( +// RuntimeOrigin::signed(coldkey_account_id), +// hotkey_account_id, +// netuid, +// amount +// )); +// +// // Ensure that extrinsic call doesn't change the stake. +// assert_eq!( +// SubtensorModule::get_total_stake(), +// SubtensorModule::get_network_min_lock() +// ); +// +// // Check for the block delay +// run_to_block_ext(2, true); +// +// // Check that event was not emitted. 
+// assert!(System::events().iter().all(|e| { +// !matches!( +// &e.event, +// RuntimeEvent::SubtensorModule(Event::AggregatedStakeAdded(..)) +// ) +// })); +// +// // Enable on_finalize code to run +// run_to_block_ext(3, true); +// +// // Check if stake has increased +// assert_abs_diff_eq!( +// SubtensorModule::get_total_stake_for_hotkey(&hotkey_account_id), +// amount - fee, +// epsilon = amount / 1000, +// ); +// +// // Check if balance has decreased +// assert_eq!(SubtensorModule::get_coldkey_balance(&coldkey_account_id), 1); +// +// // Check if total stake has increased accordingly. +// assert_eq!( +// SubtensorModule::get_total_stake(), +// amount + SubtensorModule::get_network_min_lock() +// ); +// +// // Check that event was emitted. +// assert!(System::events().iter().any(|e| { +// matches!( +// &e.event, +// RuntimeEvent::SubtensorModule(Event::StakeAdded(..)) +// ) +// })); +// +// // Check that event was emitted. +// assert!(System::events().iter().any(|e| { +// matches!( +// &e.event, +// RuntimeEvent::SubtensorModule(Event::AggregatedStakeAdded(..)) +// ) +// })); +// }); +// } +// +// #[test] +// fn test_add_stake_aggregate_failed() { +// new_test_ext(1).execute_with(|| { +// let hotkey_account_id = U256::from(533453); +// let coldkey_account_id = U256::from(55453); +// let amount = DefaultMinStake::::get() * 100; +// //add network +// let netuid: u16 = add_dynamic_network(&hotkey_account_id, &coldkey_account_id); +// +// // Transfer to hotkey account, and check if the result is ok +// assert_ok!(SubtensorModule::add_stake_aggregate( +// RuntimeOrigin::signed(coldkey_account_id), +// hotkey_account_id, +// netuid, +// amount +// )); +// +// // Check for the block delay +// run_to_block_ext(2, true); +// +// // Check that event was not emitted. +// assert!(System::events().iter().all(|e| { +// !matches!( +// &e.event, +// RuntimeEvent::SubtensorModule(Event::FailedToAddAggregatedStake(..)) +// ) +// })); +// +// // Enable on_finalize code to run +// run_to_block_ext(3, true); +// +// // Check that event was emitted. 
+// assert!(System::events().iter().any(|e| { +// matches!( +// &e.event, +// RuntimeEvent::SubtensorModule(Event::FailedToAddAggregatedStake(..)) +// ) +// })); +// }); +// } +// +// #[test] +// fn test_verify_aggregated_stake_order() { +// new_test_ext(1).execute_with(|| { +// let hotkey_account_id = U256::from(533453); +// let coldkey_account_id = U256::from(55453); +// let amount = 1_000_000_000_000u64; +// let limit_price = 6_000_000_000u64; +// let unstake_amount = 150_000_000_000u64; +// let limit_price2 = 1_350_000_000; +// +// // add network +// let netuid1: u16 = add_dynamic_network(&hotkey_account_id, &coldkey_account_id); +// let netuid2: u16 = add_dynamic_network(&hotkey_account_id, &coldkey_account_id); +// let netuid3: u16 = add_dynamic_network(&hotkey_account_id, &coldkey_account_id); +// let netuid4: u16 = add_dynamic_network(&hotkey_account_id, &coldkey_account_id); +// let netuid5: u16 = add_dynamic_network(&hotkey_account_id, &coldkey_account_id); +// let netuid6: u16 = add_dynamic_network(&hotkey_account_id, &coldkey_account_id); +// +// let tao_reserve: U96F32 = U96F32::from_num(1_500_000_000_000_u64); +// let alpha_in: U96F32 = U96F32::from_num(1_000_000_000_000_u64); +// +// for netuid in [netuid1, netuid3, netuid3, netuid4, netuid5, netuid6] { +// SubnetTAO::::insert(netuid, tao_reserve.to_num::()); +// SubnetAlphaIn::::insert(netuid, alpha_in.to_num::()); +// } +// +// // Give it some $$$ in his coldkey balance +// SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id, 6 * amount); +// // Give the neuron some stake to remove +// SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( +// &hotkey_account_id, +// &coldkey_account_id, +// netuid3, +// amount, +// ); +// SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( +// &hotkey_account_id, +// &coldkey_account_id, +// netuid4, +// amount, +// ); +// +// // Add stake with slippage safety and check if the result is ok +// assert_ok!(SubtensorModule::remove_stake_aggregate( +// RuntimeOrigin::signed(coldkey_account_id), +// hotkey_account_id, +// netuid3, +// amount +// )); +// +// assert_ok!(SubtensorModule::remove_stake_limit_aggregate( +// RuntimeOrigin::signed(coldkey_account_id), +// hotkey_account_id, +// netuid4, +// unstake_amount, +// limit_price2, +// true +// )); +// +// assert_ok!(SubtensorModule::add_stake_aggregate( +// RuntimeOrigin::signed(coldkey_account_id), +// hotkey_account_id, +// netuid1, +// amount, +// )); +// +// assert_ok!(SubtensorModule::add_stake_limit_aggregate( +// RuntimeOrigin::signed(coldkey_account_id), +// hotkey_account_id, +// netuid2, +// amount, +// limit_price, +// true +// )); +// +// assert_ok!(SubtensorModule::unstake_all_aggregate( +// RuntimeOrigin::signed(coldkey_account_id), +// hotkey_account_id, +// )); +// +// assert_ok!(SubtensorModule::unstake_all_alpha_aggregate( +// RuntimeOrigin::signed(coldkey_account_id), +// hotkey_account_id, +// )); +// +// // Enable on_finalize code to run +// run_to_block_ext(3, true); +// +// let add_stake_position = System::events() +// .iter() +// .position(|e| { +// if let RuntimeEvent::SubtensorModule(Event::AggregatedStakeAdded(.., netuid, _)) = +// e.event +// { +// netuid == netuid1 +// } else { +// false +// } +// }) +// .expect("Stake event must be present in the event log."); +// +// let add_stake_limit_position = System::events() +// .iter() +// .position(|e| { +// if let RuntimeEvent::SubtensorModule(Event::AggregatedLimitedStakeAdded( +// _, +// _, +// netuid, +// _, +// _, +// _, +// 
)) = e.event +// { +// netuid == netuid2 +// } else { +// false +// } +// }) +// .expect("Stake event must be present in the event log."); +// +// let remove_stake_position = System::events() +// .iter() +// .position(|e| { +// if let RuntimeEvent::SubtensorModule(Event::AggregatedStakeRemoved(.., netuid, _)) = +// e.event +// { +// netuid == netuid3 +// } else { +// false +// } +// }) +// .expect("Stake event must be present in the event log."); +// +// let remove_stake_limit_position = System::events() +// .iter() +// .position(|e| { +// if let RuntimeEvent::SubtensorModule(Event::AggregatedLimitedStakeRemoved( +// .., +// netuid, +// _, +// _, +// _, +// )) = e.event +// { +// netuid == netuid4 +// } else { +// false +// } +// }) +// .expect("Stake event must be present in the event log."); +// +// let unstake_all_position = System::events() +// .iter() +// .position(|e| { +// matches!( +// e.event, +// RuntimeEvent::SubtensorModule(Event::AggregatedUnstakeAllSucceeded(..)) +// ) +// }) +// .expect("Stake event must be present in the event log."); +// +// let unstake_all_alpha_position = System::events() +// .iter() +// .position(|e| { +// matches!( +// e.event, +// RuntimeEvent::SubtensorModule(Event::AggregatedUnstakeAllAlphaSucceeded(..)) +// ) +// }) +// .expect("Stake event must be present in the event log."); +// +// // Check events order +// assert!(remove_stake_limit_position < remove_stake_position); +// assert!(remove_stake_position < unstake_all_position); +// assert!(unstake_all_position < unstake_all_alpha_position); +// assert!(add_stake_position > unstake_all_alpha_position); +// assert!(add_stake_limit_position < add_stake_position); +// }); +// } +// +// #[test] +// #[allow(clippy::indexing_slicing)] +// fn test_verify_aggregated_stake_order_reversed() { +// new_test_ext(1).execute_with(|| { +// let amount = 1_000_000_000_000u64; +// let limit_price = 6_000_000_000u64; +// let unstake_amount = 150_000_000_000u64; +// let limit_price2 = 1_350_000_000; +// +// // Coldkeys and hotkeys +// let coldkeys = vec![ +// U256::from(100), // add_stake +// U256::from(200), // add_stake_limit +// U256::from(300), // remove_stake +// U256::from(400), // remove_stake_limit +// U256::from(500), // unstake_all +// U256::from(600), // unstake_all_alpha +// ]; +// +// let hotkeys = (1..=6).map(U256::from).collect::>(); +// +// let netuids: Vec<_> = hotkeys +// .iter() +// .zip(coldkeys.iter()) +// .map(|(h, c)| add_dynamic_network(h, c)) +// .collect(); +// +// let tao_reserve = U96F32::from_num(1_500_000_000_000u64); +// let alpha_in = U96F32::from_num(1_000_000_000_000u64); +// +// for netuid in &netuids { +// SubnetTAO::::insert(*netuid, tao_reserve.to_num::()); +// SubnetAlphaIn::::insert(*netuid, alpha_in.to_num::()); +// } +// +// for coldkey in &coldkeys { +// SubtensorModule::add_balance_to_coldkey_account(coldkey, amount); +// } +// +// for ((hotkey, coldkey), netuid) in hotkeys.iter().zip(coldkeys.iter()).zip(netuids.iter()) { +// SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( +// hotkey, coldkey, *netuid, amount, +// ); +// } +// +// // Add stake with slippage safety and check if the result is ok +// assert_ok!(SubtensorModule::remove_stake_aggregate( +// RuntimeOrigin::signed(coldkeys[2]), +// hotkeys[2], +// netuids[2], +// amount +// )); +// +// assert_ok!(SubtensorModule::remove_stake_limit_aggregate( +// RuntimeOrigin::signed(coldkeys[3]), +// hotkeys[3], +// netuids[3], +// unstake_amount, +// limit_price2, +// true +// )); +// +// 
assert_ok!(SubtensorModule::add_stake_aggregate( +// RuntimeOrigin::signed(coldkeys[0]), +// hotkeys[0], +// netuids[0], +// amount, +// )); +// +// assert_ok!(SubtensorModule::add_stake_limit_aggregate( +// RuntimeOrigin::signed(coldkeys[1]), +// hotkeys[1], +// netuids[1], +// amount, +// limit_price, +// true +// )); +// +// assert_ok!(SubtensorModule::unstake_all_aggregate( +// RuntimeOrigin::signed(coldkeys[4]), +// hotkeys[4], +// )); +// +// assert_ok!(SubtensorModule::unstake_all_alpha_aggregate( +// RuntimeOrigin::signed(coldkeys[5]), +// hotkeys[5], +// )); +// +// // Enable on_finalize code to run +// run_to_block_ext(2, false); +// // Reorder jobs based on the previous block hash +// let mut parent_hash = >::parent_hash(); +// parent_hash.as_mut()[0] = 0b10000000; +// >::set_parent_hash(parent_hash); +// +// // Enable on_finalize code to run +// run_to_block_ext(3, true); +// +// let add_stake_position = System::events() +// .iter() +// .position(|e| { +// if let RuntimeEvent::SubtensorModule(Event::AggregatedStakeAdded(.., netuid, _)) = +// e.event +// { +// netuid == netuids[0] +// } else { +// false +// } +// }) +// .expect("Stake event must be present in the event log."); +// +// let add_stake_limit_position = System::events() +// .iter() +// .position(|e| { +// if let RuntimeEvent::SubtensorModule(Event::AggregatedLimitedStakeAdded( +// _, +// _, +// netuid, +// _, +// _, +// _, +// )) = e.event +// { +// netuid == netuids[1] +// } else { +// false +// } +// }) +// .expect("Stake event must be present in the event log."); +// +// let remove_stake_position = System::events() +// .iter() +// .position(|e| { +// if let RuntimeEvent::SubtensorModule(Event::AggregatedStakeRemoved(.., netuid, _)) = +// e.event +// { +// netuid == netuids[2] +// } else { +// false +// } +// }) +// .expect("Stake event must be present in the event log."); +// +// let remove_stake_limit_position = System::events() +// .iter() +// .position(|e| { +// if let RuntimeEvent::SubtensorModule(Event::AggregatedLimitedStakeRemoved( +// .., +// netuid, +// _, +// _, +// _, +// )) = e.event +// { +// netuid == netuids[3] +// } else { +// false +// } +// }) +// .expect("Stake event must be present in the event log."); +// +// let unstake_all_position = System::events() +// .iter() +// .position(|e| { +// matches!( +// e.event, +// RuntimeEvent::SubtensorModule(Event::AggregatedUnstakeAllSucceeded(..)) +// ) +// }) +// .expect("Stake event must be present in the event log."); +// +// let unstake_all_alpha_position = System::events() +// .iter() +// .position(|e| { +// matches!( +// e.event, +// RuntimeEvent::SubtensorModule(Event::AggregatedUnstakeAllAlphaSucceeded(..)) +// ) +// }) +// .expect("Stake event must be present in the event log."); +// +// // Check events order +// assert!(add_stake_limit_position > add_stake_position); +// assert!(add_stake_position < unstake_all_alpha_position); +// assert!(unstake_all_position > unstake_all_alpha_position); +// assert!(remove_stake_position > unstake_all_position); +// assert!(remove_stake_limit_position > remove_stake_position); +// }); +// } +// +// #[test] +// #[allow(clippy::indexing_slicing)] +// fn test_verify_all_job_type_sort_by_coldkey() { +// new_test_ext(1).execute_with(|| { +// let amount = 1_000_000_000_000u64; +// let limit_price = 6_000_000_000u64; +// let unstake_amount = 150_000_000_000u64; +// let limit_price2 = 1_350_000_000; +// +// // Coldkeys and hotkeys +// let coldkeys = vec![ +// U256::from(100), // add_stake +// U256::from(200), // 
add_stake +// U256::from(300), // add_stake_limit +// U256::from(400), // add_stake_limit +// U256::from(500), // remove_stake +// U256::from(600), // remove_stake +// U256::from(700), // remove_stake_limit +// U256::from(800), // remove_stake_limit +// U256::from(900), // unstake_all +// U256::from(1000), // unstake_all +// U256::from(1100), // unstake_all_alpha +// U256::from(1200), // unstake_all_alpha +// ]; +// +// let hotkeys = (1..=12).map(U256::from).collect::>(); +// +// let netuids: Vec<_> = hotkeys +// .iter() +// .zip(coldkeys.iter()) +// .map(|(h, c)| add_dynamic_network(h, c)) +// .collect(); +// +// let tao_reserve = U96F32::from_num(1_500_000_000_000u64); +// let alpha_in = U96F32::from_num(1_000_000_000_000u64); +// +// for netuid in &netuids { +// SubnetTAO::::insert(*netuid, tao_reserve.to_num::()); +// SubnetAlphaIn::::insert(*netuid, alpha_in.to_num::()); +// } +// +// for coldkey in &coldkeys { +// SubtensorModule::add_balance_to_coldkey_account(coldkey, amount); +// } +// +// for ((hotkey, coldkey), netuid) in hotkeys.iter().zip(coldkeys.iter()).zip(netuids.iter()) { +// SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( +// hotkey, coldkey, *netuid, amount, +// ); +// } +// +// // === Submit all job types === +// +// assert_ok!(SubtensorModule::add_stake_aggregate( +// RuntimeOrigin::signed(coldkeys[0]), +// hotkeys[0], +// netuids[0], +// amount +// )); +// assert_ok!(SubtensorModule::add_stake_aggregate( +// RuntimeOrigin::signed(coldkeys[1]), +// hotkeys[1], +// netuids[1], +// amount +// )); +// +// assert_ok!(SubtensorModule::add_stake_limit_aggregate( +// RuntimeOrigin::signed(coldkeys[2]), +// hotkeys[2], +// netuids[2], +// amount, +// limit_price, +// true +// )); +// assert_ok!(SubtensorModule::add_stake_limit_aggregate( +// RuntimeOrigin::signed(coldkeys[3]), +// hotkeys[3], +// netuids[3], +// amount, +// limit_price, +// true +// )); +// +// assert_ok!(SubtensorModule::remove_stake_aggregate( +// RuntimeOrigin::signed(coldkeys[4]), +// hotkeys[4], +// netuids[4], +// amount +// )); +// assert_ok!(SubtensorModule::remove_stake_aggregate( +// RuntimeOrigin::signed(coldkeys[5]), +// hotkeys[5], +// netuids[5], +// amount +// )); +// +// assert_ok!(SubtensorModule::remove_stake_limit_aggregate( +// RuntimeOrigin::signed(coldkeys[6]), +// hotkeys[6], +// netuids[6], +// unstake_amount, +// limit_price2, +// true +// )); +// assert_ok!(SubtensorModule::remove_stake_limit_aggregate( +// RuntimeOrigin::signed(coldkeys[7]), +// hotkeys[7], +// netuids[7], +// unstake_amount, +// limit_price2, +// true +// )); +// +// assert_ok!(SubtensorModule::unstake_all_aggregate( +// RuntimeOrigin::signed(coldkeys[8]), +// hotkeys[8], +// )); +// assert_ok!(SubtensorModule::unstake_all_aggregate( +// RuntimeOrigin::signed(coldkeys[9]), +// hotkeys[9], +// )); +// +// assert_ok!(SubtensorModule::unstake_all_alpha_aggregate( +// RuntimeOrigin::signed(coldkeys[10]), +// hotkeys[10], +// )); +// assert_ok!(SubtensorModule::unstake_all_alpha_aggregate( +// RuntimeOrigin::signed(coldkeys[11]), +// hotkeys[11], +// )); +// +// // Finalize block +// run_to_block_ext(3, true); +// +// // === Collect coldkeys by event type === +// let mut add_coldkeys = vec![]; +// let mut add_limit_coldkeys = vec![]; +// let mut remove_coldkeys = vec![]; +// let mut remove_limit_coldkeys = vec![]; +// let mut unstake_all_coldkeys = vec![]; +// let mut unstake_all_alpha_coldkeys = vec![]; +// +// for event in System::events().iter().map(|e| &e.event) { +// match event { +// 
RuntimeEvent::SubtensorModule(Event::AggregatedStakeAdded(coldkey, ..)) => { +// add_coldkeys.push(*coldkey); +// } +// RuntimeEvent::SubtensorModule(Event::AggregatedLimitedStakeAdded(coldkey, ..)) => { +// add_limit_coldkeys.push(*coldkey); +// } +// RuntimeEvent::SubtensorModule(Event::AggregatedStakeRemoved(coldkey, ..)) => { +// remove_coldkeys.push(*coldkey); +// } +// RuntimeEvent::SubtensorModule(Event::AggregatedLimitedStakeRemoved( +// coldkey, +// .., +// )) => { +// remove_limit_coldkeys.push(*coldkey); +// } +// RuntimeEvent::SubtensorModule(Event::AggregatedUnstakeAllSucceeded(coldkey, _)) => { +// unstake_all_coldkeys.push(*coldkey); +// } +// RuntimeEvent::SubtensorModule(Event::AggregatedUnstakeAllAlphaSucceeded( +// coldkey, +// _, +// )) => { +// unstake_all_alpha_coldkeys.push(*coldkey); +// } +// _ => {} +// } +// } +// +// // === Assertions === +// assert_eq!(add_coldkeys, vec![coldkeys[1], coldkeys[0]]); // descending +// assert_eq!(add_limit_coldkeys, vec![coldkeys[3], coldkeys[2]]); // descending +// assert_eq!(remove_coldkeys, vec![coldkeys[4], coldkeys[5]]); // ascending +// assert_eq!(remove_limit_coldkeys, vec![coldkeys[6], coldkeys[7]]); // ascending +// assert_eq!(unstake_all_coldkeys, vec![coldkeys[8], coldkeys[9]]); // ascending +// assert_eq!(unstake_all_alpha_coldkeys, vec![coldkeys[10], coldkeys[11]]); // ascending +// }); +// } +// +// #[test] +// #[allow(clippy::indexing_slicing)] +// fn test_verify_all_job_type_sort_by_coldkey_reverse_order() { +// new_test_ext(1).execute_with(|| { +// let amount = 1_000_000_000_000u64; +// let limit_price = 6_000_000_000u64; +// let unstake_amount = 150_000_000_000u64; +// let limit_price2 = 1_350_000_000; +// +// // Coldkeys and hotkeys +// let coldkeys = vec![ +// U256::from(100), // add_stake +// U256::from(200), // add_stake +// U256::from(300), // add_stake_limit +// U256::from(400), // add_stake_limit +// U256::from(500), // remove_stake +// U256::from(600), // remove_stake +// U256::from(700), // remove_stake_limit +// U256::from(800), // remove_stake_limit +// U256::from(900), // unstake_all +// U256::from(1000), // unstake_all +// U256::from(1100), // unstake_all_alpha +// U256::from(1200), // unstake_all_alpha +// ]; +// +// let hotkeys = (1..=12).map(U256::from).collect::>(); +// +// let netuids: Vec<_> = hotkeys +// .iter() +// .zip(coldkeys.iter()) +// .map(|(h, c)| add_dynamic_network(h, c)) +// .collect(); +// +// let tao_reserve = U96F32::from_num(1_500_000_000_000u64); +// let alpha_in = U96F32::from_num(1_000_000_000_000u64); +// +// for netuid in &netuids { +// SubnetTAO::::insert(*netuid, tao_reserve.to_num::()); +// SubnetAlphaIn::::insert(*netuid, alpha_in.to_num::()); +// } +// +// for coldkey in &coldkeys { +// SubtensorModule::add_balance_to_coldkey_account(coldkey, amount); +// } +// +// for ((hotkey, coldkey), netuid) in hotkeys.iter().zip(coldkeys.iter()).zip(netuids.iter()) { +// SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( +// hotkey, coldkey, *netuid, amount, +// ); +// } +// +// // === Submit all job types === +// +// assert_ok!(SubtensorModule::add_stake_aggregate( +// RuntimeOrigin::signed(coldkeys[0]), +// hotkeys[0], +// netuids[0], +// amount +// )); +// assert_ok!(SubtensorModule::add_stake_aggregate( +// RuntimeOrigin::signed(coldkeys[1]), +// hotkeys[1], +// netuids[1], +// amount +// )); +// +// assert_ok!(SubtensorModule::add_stake_limit_aggregate( +// RuntimeOrigin::signed(coldkeys[2]), +// hotkeys[2], +// netuids[2], +// amount, +// limit_price, +// 
true +// )); +// assert_ok!(SubtensorModule::add_stake_limit_aggregate( +// RuntimeOrigin::signed(coldkeys[3]), +// hotkeys[3], +// netuids[3], +// amount, +// limit_price, +// true +// )); +// +// assert_ok!(SubtensorModule::remove_stake_aggregate( +// RuntimeOrigin::signed(coldkeys[4]), +// hotkeys[4], +// netuids[4], +// amount +// )); +// assert_ok!(SubtensorModule::remove_stake_aggregate( +// RuntimeOrigin::signed(coldkeys[5]), +// hotkeys[5], +// netuids[5], +// amount +// )); +// +// assert_ok!(SubtensorModule::remove_stake_limit_aggregate( +// RuntimeOrigin::signed(coldkeys[6]), +// hotkeys[6], +// netuids[6], +// unstake_amount, +// limit_price2, +// true +// )); +// assert_ok!(SubtensorModule::remove_stake_limit_aggregate( +// RuntimeOrigin::signed(coldkeys[7]), +// hotkeys[7], +// netuids[7], +// unstake_amount, +// limit_price2, +// true +// )); +// +// assert_ok!(SubtensorModule::unstake_all_aggregate( +// RuntimeOrigin::signed(coldkeys[8]), +// hotkeys[8], +// )); +// assert_ok!(SubtensorModule::unstake_all_aggregate( +// RuntimeOrigin::signed(coldkeys[9]), +// hotkeys[9], +// )); +// +// assert_ok!(SubtensorModule::unstake_all_alpha_aggregate( +// RuntimeOrigin::signed(coldkeys[10]), +// hotkeys[10], +// )); +// assert_ok!(SubtensorModule::unstake_all_alpha_aggregate( +// RuntimeOrigin::signed(coldkeys[11]), +// hotkeys[11], +// )); +// +// // Reorder jobs based on the previous block hash +// let mut parent_hash = >::parent_hash(); +// parent_hash.as_mut()[0] = 0b10000000; +// >::set_parent_hash(parent_hash); +// +// // Finalize block +// run_to_block_ext(3, true); +// +// // === Collect coldkeys by event type === +// let mut add_coldkeys = vec![]; +// let mut add_limit_coldkeys = vec![]; +// let mut remove_coldkeys = vec![]; +// let mut remove_limit_coldkeys = vec![]; +// let mut unstake_all_coldkeys = vec![]; +// let mut unstake_all_alpha_coldkeys = vec![]; +// +// for event in System::events().iter().map(|e| &e.event) { +// match event { +// RuntimeEvent::SubtensorModule(Event::AggregatedStakeAdded(coldkey, ..)) => { +// add_coldkeys.push(*coldkey); +// } +// RuntimeEvent::SubtensorModule(Event::AggregatedLimitedStakeAdded(coldkey, ..)) => { +// add_limit_coldkeys.push(*coldkey); +// } +// RuntimeEvent::SubtensorModule(Event::AggregatedStakeRemoved(coldkey, ..)) => { +// remove_coldkeys.push(*coldkey); +// } +// RuntimeEvent::SubtensorModule(Event::AggregatedLimitedStakeRemoved( +// coldkey, +// .., +// )) => { +// remove_limit_coldkeys.push(*coldkey); +// } +// RuntimeEvent::SubtensorModule(Event::AggregatedUnstakeAllSucceeded(coldkey, _)) => { +// unstake_all_coldkeys.push(*coldkey); +// } +// RuntimeEvent::SubtensorModule(Event::AggregatedUnstakeAllAlphaSucceeded( +// coldkey, +// _, +// )) => { +// unstake_all_alpha_coldkeys.push(*coldkey); +// } +// _ => {} +// } +// } +// +// // === Assertions === +// assert_eq!(add_coldkeys, vec![coldkeys[0], coldkeys[1]]); // ascending (reversed) +// assert_eq!(add_limit_coldkeys, vec![coldkeys[2], coldkeys[3]]); // ascending (reversed) +// assert_eq!(remove_coldkeys, vec![coldkeys[5], coldkeys[4]]); // descending (reversed) +// assert_eq!(remove_limit_coldkeys, vec![coldkeys[7], coldkeys[6]]); // descending (reversed) +// assert_eq!(unstake_all_coldkeys, vec![coldkeys[9], coldkeys[8]]); // descending (reversed) +// assert_eq!(unstake_all_alpha_coldkeys, vec![coldkeys[11], coldkeys[10]]); // descending (reversed) +// }); +// } #[test] fn test_dividends_with_run_to_block() { @@ -344,8 +1210,8 @@ fn 
test_remove_stake_dispatch_info_ok() { assert_eq!( call.get_dispatch_info(), DispatchInfo { - weight: frame_support::weights::Weight::from_parts(1_061_000_000, 0) - .add_proof_size(43991), + weight: frame_support::weights::Weight::from_parts(1_671_800_000, 0) + .add_proof_size(0), class: DispatchClass::Normal, pays_fee: Pays::No } @@ -421,6 +1287,145 @@ fn test_remove_stake_ok_no_emission() { ); }); } +// +// #[test] +// fn test_remove_stake_aggregate_ok_no_emission() { +// new_test_ext(1).execute_with(|| { +// let subnet_owner_coldkey = U256::from(1); +// let subnet_owner_hotkey = U256::from(2); +// let coldkey_account_id = U256::from(4343); +// let hotkey_account_id = U256::from(4968585); +// let amount = DefaultMinStake::::get() * 10; +// let netuid: u16 = add_dynamic_network(&subnet_owner_hotkey, &subnet_owner_coldkey); +// register_ok_neuron(netuid, hotkey_account_id, coldkey_account_id, 192213123); +// +// // Some basic assertions +// assert_eq!( +// SubtensorModule::get_total_stake(), +// SubtensorModule::get_network_min_lock() +// ); +// assert_eq!( +// SubtensorModule::get_total_stake_for_hotkey(&hotkey_account_id), +// 0 +// ); +// assert_eq!(SubtensorModule::get_coldkey_balance(&coldkey_account_id), 0); +// +// // Give the neuron some stake to remove +// SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( +// &hotkey_account_id, +// &coldkey_account_id, +// netuid, +// amount, +// ); +// assert_eq!( +// SubtensorModule::get_total_stake_for_hotkey(&hotkey_account_id), +// amount +// ); +// +// // Add subnet TAO for the equivalent amount added at price +// let amount_tao = +// U96F32::saturating_from_num(amount) * SubtensorModule::get_alpha_price(netuid); +// SubnetTAO::::mutate(netuid, |v| *v += amount_tao.saturating_to_num::()); +// TotalStake::::mutate(|v| *v += amount_tao.saturating_to_num::()); +// +// // Do the magic +// assert_ok!(SubtensorModule::remove_stake_aggregate( +// RuntimeOrigin::signed(coldkey_account_id), +// hotkey_account_id, +// netuid, +// amount +// )); +// +// // Check for the block delay +// run_to_block_ext(2, true); +// +// // Check that event was not emitted. +// assert!(System::events().iter().all(|e| { +// !matches!( +// &e.event, +// RuntimeEvent::SubtensorModule(Event::AggregatedStakeRemoved(..)) +// ) +// })); +// +// // Enable on_finalize code to run +// run_to_block_ext(3, true); +// +// let fee = SubtensorModule::calculate_staking_fee( +// Some((&hotkey_account_id, netuid)), +// &coldkey_account_id, +// None, +// &coldkey_account_id, +// U96F32::saturating_from_num(amount), +// ); +// +// // we do not expect the exact amount due to slippage +// assert!(SubtensorModule::get_coldkey_balance(&coldkey_account_id) > amount / 10 * 9 - fee); +// assert_eq!( +// SubtensorModule::get_total_stake_for_hotkey(&hotkey_account_id), +// 0 +// ); +// assert_eq!( +// SubtensorModule::get_total_stake(), +// SubtensorModule::get_network_min_lock() + fee +// ); +// +// // Check that event was emitted. +// assert!(System::events().iter().any(|e| { +// matches!( +// &e.event, +// RuntimeEvent::SubtensorModule(Event::StakeRemoved(..)) +// ) +// })); +// // Check that event was emitted. 
+// assert!(System::events().iter().any(|e| { +// matches!( +// &e.event, +// RuntimeEvent::SubtensorModule(Event::AggregatedStakeRemoved(..)) +// ) +// })); +// }); +// } +// #[test] +// fn test_remove_stake_aggregate_fail() { +// new_test_ext(1).execute_with(|| { +// let subnet_owner_coldkey = U256::from(1); +// let subnet_owner_hotkey = U256::from(2); +// let coldkey_account_id = U256::from(4343); +// let hotkey_account_id = U256::from(4968585); +// let amount = DefaultMinStake::::get() * 10; +// let netuid: u16 = add_dynamic_network(&subnet_owner_hotkey, &subnet_owner_coldkey); +// register_ok_neuron(netuid, hotkey_account_id, coldkey_account_id, 192213123); +// +// assert_ok!(SubtensorModule::remove_stake_aggregate( +// RuntimeOrigin::signed(coldkey_account_id), +// hotkey_account_id, +// netuid, +// amount +// )); +// +// // Check for the block delay +// run_to_block_ext(2, true); +// +// // Check that event was not emitted. +// assert!(System::events().iter().all(|e| { +// !matches!( +// &e.event, +// RuntimeEvent::SubtensorModule(Event::FailedToRemoveAggregatedStake(..)) +// ) +// })); +// +// // Enable on_finalize code to run +// run_to_block_ext(3, true); +// +// // Check that event was emitted. +// assert!(System::events().iter().any(|e| { +// matches!( +// &e.event, +// RuntimeEvent::SubtensorModule(Event::FailedToRemoveAggregatedStake(..)) +// ) +// })); +// }); +// } #[test] fn test_remove_stake_amount_too_low() { @@ -2891,27 +3896,33 @@ fn test_unstake_all_validate() { fn test_max_amount_add_root() { new_test_ext(0).execute_with(|| { // 0 price on root => max is 0 - assert_eq!(SubtensorModule::get_max_amount_add(0, 0), 0); + assert_eq!( + SubtensorModule::get_max_amount_add(0, 0), + Err(Error::::ZeroMaxStakeAmount) + ); // 0.999999... price on root => max is 0 - assert_eq!(SubtensorModule::get_max_amount_add(0, 999_999_999), 0); + assert_eq!( + SubtensorModule::get_max_amount_add(0, 999_999_999), + Err(Error::::ZeroMaxStakeAmount) + ); // 1.0 price on root => max is u64::MAX assert_eq!( SubtensorModule::get_max_amount_add(0, 1_000_000_000), - u64::MAX + Ok(u64::MAX) ); // 1.000...001 price on root => max is u64::MAX assert_eq!( SubtensorModule::get_max_amount_add(0, 1_000_000_001), - u64::MAX + Ok(u64::MAX) ); // 2.0 price on root => max is u64::MAX assert_eq!( SubtensorModule::get_max_amount_add(0, 2_000_000_000), - u64::MAX + Ok(u64::MAX) ); }); } @@ -2923,27 +3934,33 @@ fn test_max_amount_add_stable() { add_network(netuid, 1, 0); // 0 price => max is 0 - assert_eq!(SubtensorModule::get_max_amount_add(netuid, 0), 0); + assert_eq!( + SubtensorModule::get_max_amount_add(netuid, 0), + Err(Error::::ZeroMaxStakeAmount) + ); // 0.999999... 
price => max is 0 - assert_eq!(SubtensorModule::get_max_amount_add(netuid, 999_999_999), 0); + assert_eq!( + SubtensorModule::get_max_amount_add(netuid, 999_999_999), + Err(Error::::ZeroMaxStakeAmount) + ); // 1.0 price => max is u64::MAX assert_eq!( SubtensorModule::get_max_amount_add(netuid, 1_000_000_000), - u64::MAX + Ok(u64::MAX) ); // 1.000...001 price => max is u64::MAX assert_eq!( SubtensorModule::get_max_amount_add(netuid, 1_000_000_001), - u64::MAX + Ok(u64::MAX) ); // 2.0 price => max is u64::MAX assert_eq!( SubtensorModule::get_max_amount_add(netuid, 2_000_000_000), - u64::MAX + Ok(u64::MAX) ); }); } @@ -2968,101 +3985,148 @@ fn test_max_amount_add_dynamic() { // tao_in, alpha_in, limit_price, expected_max_swappable [ // Zero handling (no panics) - (0, 1_000_000_000, 100, 0), - (1_000_000_000, 0, 100, 0), - (1_000_000_000, 1_000_000_000, 0, 0), + ( + 0, + 1_000_000_000, + 100, + Err(Error::::ZeroMaxStakeAmount), + ), + ( + 1_000_000_000, + 0, + 100, + Err(Error::::ZeroMaxStakeAmount), + ), + ( + 1_000_000_000, + 1_000_000_000, + 0, + Err(Error::::ZeroMaxStakeAmount), + ), // Low bounds - (1, 1, 0, 0), - (1, 1, 1, 0), - (1, 1, 2, 0), - (1, 1, 50_000_000_000, 49), + (1, 1, 0, Err(Error::::ZeroMaxStakeAmount)), + (1, 1, 1, Err(Error::::ZeroMaxStakeAmount)), + (1, 1, 2, Err(Error::::ZeroMaxStakeAmount)), + (1, 1, 50_000_000_000, Ok(49)), // Basic math - (1_000, 1_000, 2_000_000_000, 1_000), - (1_000, 1_000, 4_000_000_000, 3_000), - (1_000, 1_000, 16_000_000_000, 15_000), + (1_000, 1_000, 2_000_000_000, Ok(1_000)), + (1_000, 1_000, 4_000_000_000, Ok(3_000)), + (1_000, 1_000, 16_000_000_000, Ok(15_000)), ( 1_000_000_000_000, 1_000_000_000_000, 16_000_000_000, - 15_000_000_000_000, + Ok(15_000_000_000_000), ), // Normal range values with edge cases - (150_000_000_000, 100_000_000_000, 0, 0), - (150_000_000_000, 100_000_000_000, 100_000_000, 0), - (150_000_000_000, 100_000_000_000, 500_000_000, 0), - (150_000_000_000, 100_000_000_000, 1_499_999_999, 0), - (150_000_000_000, 100_000_000_000, 1_500_000_000, 0), - (150_000_000_000, 100_000_000_000, 1_500_000_001, 100), ( 150_000_000_000, 100_000_000_000, - 3_000_000_000, + 0, + Err(Error::::ZeroMaxStakeAmount), + ), + ( 150_000_000_000, + 100_000_000_000, + 100_000_000, + Err(Error::::ZeroMaxStakeAmount), + ), + ( + 150_000_000_000, + 100_000_000_000, + 500_000_000, + Err(Error::::ZeroMaxStakeAmount), + ), + ( + 150_000_000_000, + 100_000_000_000, + 1_499_999_999, + Err(Error::::ZeroMaxStakeAmount), + ), + ( + 150_000_000_000, + 100_000_000_000, + 1_500_000_000, + Err(Error::::ZeroMaxStakeAmount), + ), + (150_000_000_000, 100_000_000_000, 1_500_000_001, Ok(100)), + ( + 150_000_000_000, + 100_000_000_000, + 3_000_000_000, + Ok(150_000_000_000), ), // Miscellaneous overflows and underflows - (150_000_000_000, 100_000_000_000, u64::MAX, u64::MAX), - (150_000_000_000, 100_000_000_000, u64::MAX / 2, u64::MAX), - (1_000_000, 1_000_000_000_000_000_000_u64, 1, 999_000_000), - (1_000_000, 1_000_000_000_000_000_000_u64, 2, 1_999_000_000), + (150_000_000_000, 100_000_000_000, u64::MAX, Ok(u64::MAX)), + (150_000_000_000, 100_000_000_000, u64::MAX / 2, Ok(u64::MAX)), + (1_000_000, 1_000_000_000_000_000_000_u64, 1, Ok(999_000_000)), + ( + 1_000_000, + 1_000_000_000_000_000_000_u64, + 2, + Ok(1_999_000_000), + ), ( 1_000_000, 1_000_000_000_000_000_000_u64, 10_000, - 9_999_999_000_000, + Ok(9_999_999_000_000), ), ( 1_000_000, 1_000_000_000_000_000_000_u64, 100_000, - 99_999_999_000_000, + Ok(99_999_999_000_000), ), ( 1_000_000, 
1_000_000_000_000_000_000_u64, 1_000_000, - 999_999_999_000_000, + Ok(999_999_999_000_000), ), ( 1_000_000, 1_000_000_000_000_000_000_u64, 1_000_000_000, - 999_999_999_999_000_000, + Ok(999_999_999_999_000_000), ), ( 21_000_000_000_000_000, 10_000_000, 4_200_000_000_000_000_000, - 21_000_000_000_000_000, + Ok(21_000_000_000_000_000), ), ( 21_000_000_000_000_000, 1_000_000_000_000_000_000_u64, u64::MAX, - u64::MAX, + Ok(u64::MAX), ), ( 21_000_000_000_000_000, 1_000_000_000_000_000_000_u64, 42_000_000, - 21_000_000_000_000_000, + Ok(21_000_000_000_000_000), ), ] .iter() - .for_each(|&(tao_in, alpha_in, limit_price, expected_max_swappable)| { - // Forse-set alpha in and tao reserve to achieve relative price of subnets - SubnetTAO::::insert(netuid, tao_in); - SubnetAlphaIn::::insert(netuid, alpha_in); - - if alpha_in != 0 { - let expected_price = I96F32::from_num(tao_in) / I96F32::from_num(alpha_in); - assert_eq!(SubtensorModule::get_alpha_price(netuid), expected_price); - } + .for_each( + |&(tao_in, alpha_in, limit_price, ref expected_max_swappable)| { + // Forse-set alpha in and tao reserve to achieve relative price of subnets + SubnetTAO::::insert(netuid, tao_in); + SubnetAlphaIn::::insert(netuid, alpha_in); - assert_eq!( - SubtensorModule::get_max_amount_add(netuid, limit_price), - expected_max_swappable, - ); - }); + if alpha_in != 0 { + let expected_price = I96F32::from_num(tao_in) / I96F32::from_num(alpha_in); + assert_eq!(SubtensorModule::get_alpha_price(netuid), expected_price); + } + + assert_eq!( + SubtensorModule::get_max_amount_add(netuid, limit_price), + *expected_max_swappable, + ); + }, + ); }); } @@ -3070,31 +4134,37 @@ fn test_max_amount_add_dynamic() { fn test_max_amount_remove_root() { new_test_ext(0).execute_with(|| { // 0 price on root => max is u64::MAX - assert_eq!(SubtensorModule::get_max_amount_remove(0, 0), u64::MAX); + assert_eq!(SubtensorModule::get_max_amount_remove(0, 0), Ok(u64::MAX)); // 0.5 price on root => max is u64::MAX assert_eq!( SubtensorModule::get_max_amount_remove(0, 500_000_000), - u64::MAX + Ok(u64::MAX) ); // 0.999999... price on root => max is u64::MAX assert_eq!( SubtensorModule::get_max_amount_remove(0, 999_999_999), - u64::MAX + Ok(u64::MAX) ); // 1.0 price on root => max is u64::MAX assert_eq!( SubtensorModule::get_max_amount_remove(0, 1_000_000_000), - u64::MAX + Ok(u64::MAX) ); // 1.000...001 price on root => max is 0 - assert_eq!(SubtensorModule::get_max_amount_remove(0, 1_000_000_001), 0); + assert_eq!( + SubtensorModule::get_max_amount_remove(0, 1_000_000_001), + Err(Error::::ZeroMaxStakeAmount) + ); // 2.0 price on root => max is 0 - assert_eq!(SubtensorModule::get_max_amount_remove(0, 2_000_000_000), 0); + assert_eq!( + SubtensorModule::get_max_amount_remove(0, 2_000_000_000), + Err(Error::::ZeroMaxStakeAmount) + ); }); } @@ -3105,30 +4175,33 @@ fn test_max_amount_remove_stable() { add_network(netuid, 1, 0); // 0 price => max is u64::MAX - assert_eq!(SubtensorModule::get_max_amount_remove(netuid, 0), u64::MAX); + assert_eq!( + SubtensorModule::get_max_amount_remove(netuid, 0), + Ok(u64::MAX) + ); // 0.999999... 
price => max is u64::MAX assert_eq!( SubtensorModule::get_max_amount_remove(netuid, 999_999_999), - u64::MAX + Ok(u64::MAX) ); // 1.0 price => max is u64::MAX assert_eq!( SubtensorModule::get_max_amount_remove(netuid, 1_000_000_000), - u64::MAX + Ok(u64::MAX) ); // 1.000...001 price => max is 0 assert_eq!( SubtensorModule::get_max_amount_remove(netuid, 1_000_000_001), - 0 + Err(Error::::ZeroMaxStakeAmount) ); // 2.0 price => max is 0 assert_eq!( SubtensorModule::get_max_amount_remove(netuid, 2_000_000_000), - 0 + Err(Error::::ZeroMaxStakeAmount) ); }); } @@ -3153,85 +4226,142 @@ fn test_max_amount_remove_dynamic() { // tao_in, alpha_in, limit_price, expected_max_swappable [ // Zero handling (no panics) - (0, 1_000_000_000, 100, 0), - (1_000_000_000, 0, 100, 0), - (1_000_000_000, 1_000_000_000, 0, u64::MAX), + ( + 0, + 1_000_000_000, + 100, + Err(Error::::ZeroMaxStakeAmount), + ), + ( + 1_000_000_000, + 0, + 100, + Err(Error::::ZeroMaxStakeAmount), + ), + (1_000_000_000, 1_000_000_000, 0, Ok(u64::MAX)), // Low bounds - (1, 1, 0, u64::MAX), - (1, 1, 1, 999_999_999), - (1, 1, 2, 499_999_999), - (1, 1, 250_000_000, 3), + (1, 1, 0, Ok(u64::MAX)), + (1, 1, 1, Ok(999_999_999)), + (1, 1, 2, Ok(499_999_999)), + (1, 1, 250_000_000, Ok(3)), // Basic math - (1_000, 1_000, 250_000_000, 3_000), - (1_000, 1_000, 62_500_000, 15_000), + (1_000, 1_000, 250_000_000, Ok(3_000)), + (1_000, 1_000, 62_500_000, Ok(15_000)), ( 1_000_000_000_000, 1_000_000_000_000, 62_500_000, - 15_000_000_000_000, + Ok(15_000_000_000_000), ), // Normal range values with edge cases - (200_000_000_000, 100_000_000_000, 0, u64::MAX), + (200_000_000_000, 100_000_000_000, 0, Ok(u64::MAX)), ( 200_000_000_000, 100_000_000_000, 1_000_000_000, - 100_000_000_000, + Ok(100_000_000_000), ), ( 200_000_000_000, 100_000_000_000, 500_000_000, - 300_000_000_000, + Ok(300_000_000_000), + ), + ( + 200_000_000_000, + 100_000_000_000, + 2_000_000_000, + Err(Error::::ZeroMaxStakeAmount), ), - (200_000_000_000, 100_000_000_000, 2_000_000_000, 0), - (200_000_000_000, 100_000_000_000, 2_000_000_001, 0), - (200_000_000_000, 100_000_000_000, 1_999_999_999, 50), - (200_000_000_000, 100_000_000_000, 1_999_999_990, 500), + ( + 200_000_000_000, + 100_000_000_000, + 2_000_000_001, + Err(Error::::ZeroMaxStakeAmount), + ), + (200_000_000_000, 100_000_000_000, 1_999_999_999, Ok(50)), + (200_000_000_000, 100_000_000_000, 1_999_999_990, Ok(500)), // Miscellaneous overflows and underflows - (2_000_000_000_000, 100_000_000_000, u64::MAX, 0), - (200_000_000_000, 100_000_000_000, u64::MAX / 2, 0), - (1_000_000, 1_000_000_000_000_000_000_u64, 1, 0), - (1_000_000, 1_000_000_000_000_000_000_u64, 10, 0), - (1_000_000, 1_000_000_000_000_000_000_u64, 100, 0), - (1_000_000, 1_000_000_000_000_000_000_u64, 1_000, 0), - (1_000_000, 1_000_000_000_000_000_000_u64, u64::MAX, 0), + ( + 2_000_000_000_000, + 100_000_000_000, + u64::MAX, + Err(Error::::ZeroMaxStakeAmount), + ), + ( + 200_000_000_000, + 100_000_000_000, + u64::MAX / 2, + Err(Error::::ZeroMaxStakeAmount), + ), + ( + 1_000_000, + 1_000_000_000_000_000_000_u64, + 1, + Err(Error::::ZeroMaxStakeAmount), + ), + ( + 1_000_000, + 1_000_000_000_000_000_000_u64, + 10, + Err(Error::::ZeroMaxStakeAmount), + ), + ( + 1_000_000, + 1_000_000_000_000_000_000_u64, + 100, + Err(Error::::ZeroMaxStakeAmount), + ), + ( + 1_000_000, + 1_000_000_000_000_000_000_u64, + 1_000, + Err(Error::::ZeroMaxStakeAmount), + ), + ( + 1_000_000, + 1_000_000_000_000_000_000_u64, + u64::MAX, + Err(Error::::ZeroMaxStakeAmount), + ), ( 21_000_000_000_000_000, 
1_000_000, 21_000_000_000_000_000, - 999_000_000, + Ok(999_000_000), ), - (21_000_000_000_000_000, 1_000_000, u64::MAX, 138_412), + (21_000_000_000_000_000, 1_000_000, u64::MAX, Ok(138_412)), ( 21_000_000_000_000_000, 1_000_000_000_000_000_000_u64, u64::MAX, - 0, + Err(Error::::ZeroMaxStakeAmount), ), ( 21_000_000_000_000_000, 1_000_000_000_000_000_000_u64, 20_000_000, - 50_000_000_000_000_000, + Ok(50_000_000_000_000_000), ), ] .iter() - .for_each(|&(tao_in, alpha_in, limit_price, expected_max_swappable)| { - // Forse-set alpha in and tao reserve to achieve relative price of subnets - SubnetTAO::::insert(netuid, tao_in); - SubnetAlphaIn::::insert(netuid, alpha_in); - - if alpha_in != 0 { - let expected_price = I96F32::from_num(tao_in) / I96F32::from_num(alpha_in); - assert_eq!(SubtensorModule::get_alpha_price(netuid), expected_price); - } + .for_each( + |&(tao_in, alpha_in, limit_price, ref expected_max_swappable)| { + // Forse-set alpha in and tao reserve to achieve relative price of subnets + SubnetTAO::::insert(netuid, tao_in); + SubnetAlphaIn::::insert(netuid, alpha_in); - assert_eq!( - SubtensorModule::get_max_amount_remove(netuid, limit_price), - expected_max_swappable, - ); - }); + if alpha_in != 0 { + let expected_price = I96F32::from_num(tao_in) / I96F32::from_num(alpha_in); + assert_eq!(SubtensorModule::get_alpha_price(netuid), expected_price); + } + + assert_eq!( + SubtensorModule::get_max_amount_remove(netuid, limit_price), + *expected_max_swappable, + ); + }, + ); }); } @@ -3240,31 +4370,37 @@ fn test_max_amount_remove_dynamic() { fn test_max_amount_move_root_root() { new_test_ext(0).execute_with(|| { // 0 price on (root, root) exchange => max is u64::MAX - assert_eq!(SubtensorModule::get_max_amount_move(0, 0, 0), u64::MAX); + assert_eq!(SubtensorModule::get_max_amount_move(0, 0, 0), Ok(u64::MAX)); // 0.5 price on (root, root) => max is u64::MAX assert_eq!( SubtensorModule::get_max_amount_move(0, 0, 500_000_000), - u64::MAX + Ok(u64::MAX) ); // 0.999999... price on (root, root) => max is u64::MAX assert_eq!( SubtensorModule::get_max_amount_move(0, 0, 999_999_999), - u64::MAX + Ok(u64::MAX) ); // 1.0 price on (root, root) => max is u64::MAX assert_eq!( SubtensorModule::get_max_amount_move(0, 0, 1_000_000_000), - u64::MAX + Ok(u64::MAX) ); // 1.000...001 price on (root, root) => max is 0 - assert_eq!(SubtensorModule::get_max_amount_move(0, 0, 1_000_000_001), 0); + assert_eq!( + SubtensorModule::get_max_amount_move(0, 0, 1_000_000_001), + Err(Error::::ZeroMaxStakeAmount) + ); // 2.0 price on (root, root) => max is 0 - assert_eq!(SubtensorModule::get_max_amount_move(0, 0, 2_000_000_000), 0); + assert_eq!( + SubtensorModule::get_max_amount_move(0, 0, 2_000_000_000), + Err(Error::::ZeroMaxStakeAmount) + ); }); } @@ -3276,36 +4412,39 @@ fn test_max_amount_move_root_stable() { add_network(netuid, 1, 0); // 0 price on (root, stable) exchange => max is u64::MAX - assert_eq!(SubtensorModule::get_max_amount_move(0, netuid, 0), u64::MAX); + assert_eq!( + SubtensorModule::get_max_amount_move(0, netuid, 0), + Ok(u64::MAX) + ); // 0.5 price on (root, stable) => max is u64::MAX assert_eq!( SubtensorModule::get_max_amount_move(0, netuid, 500_000_000), - u64::MAX + Ok(u64::MAX) ); // 0.999999... 
price on (root, stable) => max is u64::MAX assert_eq!( SubtensorModule::get_max_amount_move(0, netuid, 999_999_999), - u64::MAX + Ok(u64::MAX) ); // 1.0 price on (root, stable) => max is u64::MAX assert_eq!( SubtensorModule::get_max_amount_move(0, netuid, 1_000_000_000), - u64::MAX + Ok(u64::MAX) ); // 1.000...001 price on (root, stable) => max is 0 assert_eq!( SubtensorModule::get_max_amount_move(0, netuid, 1_000_000_001), - 0 + Err(Error::::ZeroMaxStakeAmount) ); // 2.0 price on (root, stable) => max is 0 assert_eq!( SubtensorModule::get_max_amount_move(0, netuid, 2_000_000_000), - 0 + Err(Error::::ZeroMaxStakeAmount) ); }); } @@ -3337,24 +4476,25 @@ fn test_max_amount_move_stable_dynamic() { // 0 price => max is u64::MAX assert_eq!( SubtensorModule::get_max_amount_move(stable_netuid, dynamic_netuid, 0), - u64::MAX + Ok(u64::MAX) ); // 2.0 price => max is 0 assert_eq!( SubtensorModule::get_max_amount_move(stable_netuid, dynamic_netuid, 2_000_000_000), - 0 + Err(Error::::ZeroMaxStakeAmount) ); // 3.0 price => max is 0 assert_eq!( SubtensorModule::get_max_amount_move(stable_netuid, dynamic_netuid, 3_000_000_000), - 0 + Err(Error::::ZeroMaxStakeAmount) ); // 2x price => max is 1x TAO assert_abs_diff_eq!( - SubtensorModule::get_max_amount_move(stable_netuid, dynamic_netuid, 1_000_000_000), + SubtensorModule::get_max_amount_move(stable_netuid, dynamic_netuid, 1_000_000_000) + .unwrap(), 50_000_000_000, epsilon = 10_000, ); @@ -3362,21 +4502,23 @@ fn test_max_amount_move_stable_dynamic() { // Precision test: // 1.99999..9000 price => max > 0 assert!( - SubtensorModule::get_max_amount_move(stable_netuid, dynamic_netuid, 1_999_999_000) > 0 + SubtensorModule::get_max_amount_move(stable_netuid, dynamic_netuid, 1_999_999_000) + .unwrap() + > 0 ); // Max price doesn't panic and returns something meaningful assert_eq!( SubtensorModule::get_max_amount_move(stable_netuid, dynamic_netuid, u64::MAX), - 0 + Err(Error::::ZeroMaxStakeAmount) ); assert_eq!( SubtensorModule::get_max_amount_move(stable_netuid, dynamic_netuid, u64::MAX - 1), - 0 + Err(Error::::ZeroMaxStakeAmount) ); assert_eq!( SubtensorModule::get_max_amount_move(stable_netuid, dynamic_netuid, u64::MAX / 2), - 0 + Err(Error::::ZeroMaxStakeAmount) ); }); } @@ -3408,30 +4550,38 @@ fn test_max_amount_move_dynamic_stable() { // 0 price => max is u64::MAX assert_eq!( SubtensorModule::get_max_amount_move(dynamic_netuid, stable_netuid, 0), - u64::MAX + Ok(u64::MAX) ); // Low price values don't blow things up - assert!(SubtensorModule::get_max_amount_move(dynamic_netuid, stable_netuid, 1) > 0); - assert!(SubtensorModule::get_max_amount_move(dynamic_netuid, stable_netuid, 2) > 0); - assert!(SubtensorModule::get_max_amount_move(dynamic_netuid, stable_netuid, 3) > 0); + assert!( + SubtensorModule::get_max_amount_move(dynamic_netuid, stable_netuid, 1).unwrap() > 0 + ); + assert!( + SubtensorModule::get_max_amount_move(dynamic_netuid, stable_netuid, 2).unwrap() > 0 + ); + assert!( + SubtensorModule::get_max_amount_move(dynamic_netuid, stable_netuid, 3).unwrap() > 0 + ); // 1.5000...1 price => max is 0 assert_eq!( SubtensorModule::get_max_amount_move(dynamic_netuid, stable_netuid, 1_500_000_001), - 0 + Err(Error::::ZeroMaxStakeAmount) ); // 1.5 price => max is 0 because of non-zero slippage assert_abs_diff_eq!( - SubtensorModule::get_max_amount_move(dynamic_netuid, stable_netuid, 1_500_000_000), + SubtensorModule::get_max_amount_move(dynamic_netuid, stable_netuid, 1_500_000_000) + .unwrap_or(0), 0, epsilon = 10_000 ); // 1/2 price => max is 1x Alpha 
assert_abs_diff_eq!( - SubtensorModule::get_max_amount_move(dynamic_netuid, stable_netuid, 750_000_000), + SubtensorModule::get_max_amount_move(dynamic_netuid, stable_netuid, 750_000_000) + .unwrap(), 100_000_000_000, epsilon = 10_000, ); @@ -3439,20 +4589,25 @@ fn test_max_amount_move_dynamic_stable() { // Precision test: // 1.499999.. price => max > 0 assert!( - SubtensorModule::get_max_amount_move(dynamic_netuid, stable_netuid, 1_499_999_999) > 0 + SubtensorModule::get_max_amount_move(dynamic_netuid, stable_netuid, 1_499_999_999) + .unwrap() + > 0 ); // Max price doesn't panic and returns something meaningful assert!( SubtensorModule::get_max_amount_move(dynamic_netuid, stable_netuid, u64::MAX) + .unwrap_or(0) < 21_000_000_000_000_000 ); assert!( SubtensorModule::get_max_amount_move(dynamic_netuid, stable_netuid, u64::MAX - 1) + .unwrap_or(0) < 21_000_000_000_000_000 ); assert!( SubtensorModule::get_max_amount_move(dynamic_netuid, stable_netuid, u64::MAX / 2) + .unwrap_or(0) < 21_000_000_000_000_000 ); }); @@ -3676,7 +4831,8 @@ fn test_max_amount_move_dynamic_dynamic() { origin_netuid, destination_netuid, limit_price - ), + ) + .unwrap_or(0u64), expected_max_swappable, epsilon = precision ); @@ -3751,6 +4907,145 @@ fn test_add_stake_limit_ok() { ); }); } +// +// #[test] +// fn test_add_stake_limit_aggregate_ok() { +// new_test_ext(1).execute_with(|| { +// let hotkey_account_id = U256::from(533453); +// let coldkey_account_id = U256::from(55453); +// let amount = 900_000_000_000; // over the maximum +// let fee = DefaultStakingFee::::get(); +// +// // add network +// let netuid: u16 = add_dynamic_network(&hotkey_account_id, &coldkey_account_id); +// +// // Forse-set alpha in and tao reserve to make price equal 1.5 +// let tao_reserve: U96F32 = U96F32::from_num(150_000_000_000_u64); +// let alpha_in: U96F32 = U96F32::from_num(100_000_000_000_u64); +// SubnetTAO::::insert(netuid, tao_reserve.to_num::()); +// SubnetAlphaIn::::insert(netuid, alpha_in.to_num::()); +// let current_price: U96F32 = U96F32::from_num(SubtensorModule::get_alpha_price(netuid)); +// assert_eq!(current_price, U96F32::from_num(1.5)); +// +// // Give it some $$$ in his coldkey balance +// SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id, amount); +// +// // Setup limit price so that it doesn't peak above 4x of current price +// // The amount that can be executed at this price is 450 TAO only +// // Alpha produced will be equal to 75 = 450*100/(450+150) +// let limit_price = 6_000_000_000; +// let expected_executed_stake = 75_000_000_000; +// +// // Add stake with slippage safety and check if the result is ok +// assert_ok!(SubtensorModule::add_stake_limit_aggregate( +// RuntimeOrigin::signed(coldkey_account_id), +// hotkey_account_id, +// netuid, +// amount, +// limit_price, +// true +// )); +// +// // Check for the block delay +// run_to_block_ext(2, true); +// +// // Check that event was not emitted. 
+// assert!(System::events().iter().all(|e| { +// !matches!( +// &e.event, +// RuntimeEvent::SubtensorModule(Event::AggregatedLimitedStakeAdded(..)) +// ) +// })); +// +// // Enable on_finalize code to run +// run_to_block_ext(3, true); +// +// // Check if stake has increased only by 75 Alpha +// assert_abs_diff_eq!( +// SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet( +// &hotkey_account_id, +// &coldkey_account_id, +// netuid +// ), +// expected_executed_stake - fee, +// epsilon = expected_executed_stake / 1000, +// ); +// +// // Check that 450 TAO balance still remains free on coldkey +// assert_abs_diff_eq!( +// SubtensorModule::get_coldkey_balance(&coldkey_account_id), +// 450_000_000_000, +// epsilon = 10_000 +// ); +// +// // Check that price has updated to ~24 = (150+450) / (100 - 75) +// let exp_price = U96F32::from_num(24.0); +// let current_price: U96F32 = U96F32::from_num(SubtensorModule::get_alpha_price(netuid)); +// assert_abs_diff_eq!( +// exp_price.to_num::(), +// current_price.to_num::(), +// epsilon = 0.0001, +// ); +// +// // Check that event was emitted. +// assert!(System::events().iter().any(|e| { +// matches!( +// &e.event, +// RuntimeEvent::SubtensorModule(Event::StakeAdded(..)) +// ) +// })); +// // Check that event was emitted. +// assert!(System::events().iter().any(|e| { +// matches!( +// &e.event, +// RuntimeEvent::SubtensorModule(Event::AggregatedLimitedStakeAdded(..)) +// ) +// })); +// }); +// } +// +// #[test] +// fn test_add_stake_limit_aggregate_fail() { +// new_test_ext(1).execute_with(|| { +// let hotkey_account_id = U256::from(533453); +// let coldkey_account_id = U256::from(55453); +// let amount = 900_000_000_000; +// let limit_price = 6_000_000_000; +// // add network +// let netuid: u16 = add_dynamic_network(&hotkey_account_id, &coldkey_account_id); +// +// assert_ok!(SubtensorModule::add_stake_limit_aggregate( +// RuntimeOrigin::signed(coldkey_account_id), +// hotkey_account_id, +// netuid, +// amount, +// limit_price, +// true +// )); +// +// // Check for the block delay +// run_to_block_ext(2, true); +// +// // Check that event was not emitted. +// assert!(System::events().iter().all(|e| { +// !matches!( +// &e.event, +// RuntimeEvent::SubtensorModule(Event::FailedToAddAggregatedLimitedStake(..)) +// ) +// })); +// +// // Enable on_finalize code to run +// run_to_block_ext(3, true); +// +// // Check that event was emitted. 
+// assert!(System::events().iter().any(|e| { +// matches!( +// &e.event, +// RuntimeEvent::SubtensorModule(Event::FailedToAddAggregatedLimitedStake(..)) +// ) +// })); +// }); +// } #[test] fn test_add_stake_limit_fill_or_kill() { @@ -3804,6 +5099,39 @@ fn test_add_stake_limit_fill_or_kill() { }); } +#[test] +fn test_add_stake_limit_partial_zero_max_stake_amount_error() { + new_test_ext(1).execute_with(|| { + let hotkey_account_id = U256::from(533453); + let coldkey_account_id = U256::from(55453); + + // Exact values from the error: + // https://taostats.io/extrinsic/5338471-0009?network=finney + let amount = 19980000000; + let limit_price = 26953618; + let tao_reserve: U96F32 = U96F32::from_num(5_032_494_439_940_u64); + let alpha_in: U96F32 = U96F32::from_num(186_268_425_402_874_u64); + + let netuid: u16 = add_dynamic_network(&hotkey_account_id, &coldkey_account_id); + SubnetTAO::::insert(netuid, tao_reserve.to_num::()); + SubnetAlphaIn::::insert(netuid, alpha_in.to_num::()); + + SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id, amount); + + assert_noop!( + SubtensorModule::add_stake_limit( + RuntimeOrigin::signed(coldkey_account_id), + hotkey_account_id, + netuid, + amount, + limit_price, + true + ), + Error::::ZeroMaxStakeAmount + ); + }); +} + #[test] fn test_remove_stake_limit_ok() { new_test_ext(1).execute_with(|| { @@ -3865,6 +5193,149 @@ fn test_remove_stake_limit_ok() { ); }); } +// +// #[test] +// fn test_remove_stake_limit_aggregate_ok() { +// new_test_ext(1).execute_with(|| { +// let hotkey_account_id = U256::from(533453); +// let coldkey_account_id = U256::from(55453); +// let stake_amount = 300_000_000_000; +// let unstake_amount = 150_000_000_000; +// let fee = DefaultStakingFee::::get(); +// +// // add network +// let netuid: u16 = add_dynamic_network(&hotkey_account_id, &coldkey_account_id); +// +// // Give the neuron some stake to remove +// SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( +// &hotkey_account_id, +// &coldkey_account_id, +// netuid, +// stake_amount, +// ); +// let alpha_before = SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet( +// &hotkey_account_id, +// &coldkey_account_id, +// netuid, +// ); +// +// // Forse-set alpha in and tao reserve to make price equal 1.5 +// let tao_reserve: U96F32 = U96F32::from_num(150_000_000_000_u64); +// let alpha_in: U96F32 = U96F32::from_num(100_000_000_000_u64); +// SubnetTAO::::insert(netuid, tao_reserve.to_num::()); +// SubnetAlphaIn::::insert(netuid, alpha_in.to_num::()); +// let current_price: U96F32 = U96F32::from_num(SubtensorModule::get_alpha_price(netuid)); +// assert_eq!(current_price, U96F32::from_num(1.5)); +// +// // Setup limit price so resulting average price doesn't drop by more than 10% from current price +// let limit_price = 1_350_000_000; +// +// // Alpha unstaked = 150 / 1.35 - 100 ~ 11.1 +// let expected_alpha_reduction = 11_111_111_111; +// +// // Remove stake with slippage safety +// assert_ok!(SubtensorModule::remove_stake_limit_aggregate( +// RuntimeOrigin::signed(coldkey_account_id), +// hotkey_account_id, +// netuid, +// unstake_amount, +// limit_price, +// true +// )); +// +// // Check for the block delay +// run_to_block_ext(2, true); +// +// // Check that event was not emitted. 
+// assert!(System::events().iter().all(|e| { +// !matches!( +// &e.event, +// RuntimeEvent::SubtensorModule(Event::AggregatedLimitedStakeRemoved(..)) +// ) +// })); +// +// // Enable on_finalize code to run +// run_to_block_ext(3, true); +// +// // Check if stake has decreased only by +// assert_abs_diff_eq!( +// SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet( +// &hotkey_account_id, +// &coldkey_account_id, +// netuid +// ), +// alpha_before - expected_alpha_reduction - fee, +// epsilon = expected_alpha_reduction / 1_000, +// ); +// +// // Check that event was emitted. +// assert!(System::events().iter().any(|e| { +// matches!( +// &e.event, +// RuntimeEvent::SubtensorModule(Event::StakeRemoved(..)) +// ) +// })); +// // Check that event was emitted. +// assert!(System::events().iter().any(|e| { +// matches!( +// &e.event, +// RuntimeEvent::SubtensorModule(Event::AggregatedLimitedStakeRemoved(..)) +// ) +// })); +// }); +// } +// +// #[test] +// fn test_remove_stake_limit_aggregate_fail() { +// new_test_ext(1).execute_with(|| { +// let hotkey_account_id = U256::from(533453); +// let coldkey_account_id = U256::from(55453); +// let stake_amount = 300_000_000; +// let unstake_amount = 150_000_000_000; +// let limit_price = 1_350_000_000; +// // add network +// let netuid: u16 = add_dynamic_network(&hotkey_account_id, &coldkey_account_id); +// +// // Give the neuron some stake to remove +// SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( +// &hotkey_account_id, +// &coldkey_account_id, +// netuid, +// stake_amount, +// ); +// +// assert_ok!(SubtensorModule::remove_stake_limit_aggregate( +// RuntimeOrigin::signed(coldkey_account_id), +// hotkey_account_id, +// netuid, +// unstake_amount, +// limit_price, +// true +// )); +// +// // Check for the block delay +// run_to_block_ext(2, true); +// +// // Check that event was not emitted. +// assert!(System::events().iter().all(|e| { +// !matches!( +// &e.event, +// RuntimeEvent::SubtensorModule(Event::FailedToRemoveAggregatedLimitedStake(..)) +// ) +// })); +// +// // Enable on_finalize code to run +// run_to_block_ext(3, true); +// +// // Check that event was emitted. 
+// assert!(System::events().iter().any(|e| { +// matches!( +// &e.event, +// RuntimeEvent::SubtensorModule(Event::FailedToRemoveAggregatedLimitedStake(..)) +// ) +// })); +// }); +// } #[test] fn test_remove_stake_limit_fill_or_kill() { @@ -4435,6 +5906,110 @@ fn test_unstake_all_alpha_works() { assert!(new_root > 100_000); }); } +// #[test] +// fn test_unstake_all_alpha_aggregate_works() { +// new_test_ext(1).execute_with(|| { +// let subnet_owner_coldkey = U256::from(1001); +// let subnet_owner_hotkey = U256::from(1002); +// let coldkey = U256::from(1); +// let hotkey = U256::from(2); +// +// let stake_amount = 190_000_000_000; // 190 Alpha +// +// let netuid: u16 = add_dynamic_network(&subnet_owner_hotkey, &subnet_owner_coldkey); +// register_ok_neuron(netuid, hotkey, coldkey, 192213123); +// // Give the neuron some stake to remove +// SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( +// &hotkey, +// &coldkey, +// netuid, +// stake_amount, +// ); +// +// // Setup the Alpha pool so that removing all the Alpha will keep liq above min +// let remaining_tao: I96F32 = +// DefaultMinimumPoolLiquidity::::get().saturating_add(I96F32::from(10_000_000)); +// let alpha_reserves: I110F18 = I110F18::from(stake_amount + 10_000_000); +// let alpha = stake_amount; +// +// let k: I110F18 = I110F18::from_fixed(remaining_tao) +// .saturating_mul(alpha_reserves.saturating_add(I110F18::from(alpha))); +// let tao_reserves: I110F18 = k.safe_div(alpha_reserves); +// +// SubnetTAO::::insert(netuid, tao_reserves.to_num::()); +// SubnetAlphaIn::::insert(netuid, alpha_reserves.to_num::()); +// +// // Unstake all alpha to root +// assert_ok!(SubtensorModule::unstake_all_alpha_aggregate( +// RuntimeOrigin::signed(coldkey), +// hotkey, +// )); +// +// // Check for the block delay +// run_to_block_ext(2, true); +// +// // Check that event was not emitted. +// assert!(System::events().iter().all(|e| { +// !matches!( +// &e.event, +// RuntimeEvent::SubtensorModule(Event::AggregatedUnstakeAllAlphaSucceeded(..)) +// ) +// })); +// +// // Enable on_finalize code to run +// run_to_block_ext(3, true); +// +// let new_alpha = +// SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, netuid); +// assert_abs_diff_eq!(new_alpha, 0, epsilon = 1_000,); +// let new_root = +// SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, 0); +// assert!(new_root > 100_000); +// +// // Check that event was emitted. +// assert!(System::events().iter().any(|e| { +// matches!( +// &e.event, +// RuntimeEvent::SubtensorModule(Event::AggregatedUnstakeAllAlphaSucceeded(..)) +// ) +// })); +// }); +// } +// +// #[test] +// fn test_unstake_all_alpha_aggregate_fails() { +// new_test_ext(1).execute_with(|| { +// let coldkey = U256::from(1); +// let hotkey = U256::from(2); +// +// assert_ok!(SubtensorModule::unstake_all_alpha_aggregate( +// RuntimeOrigin::signed(coldkey), +// hotkey, +// )); +// +// // Check for the block delay +// run_to_block_ext(2, true); +// +// // Check that event was not emitted. +// assert!(System::events().iter().all(|e| { +// !matches!( +// &e.event, +// RuntimeEvent::SubtensorModule(Event::AggregatedUnstakeAllAlphaFailed(..)) +// ) +// })); +// +// // Enable on_finalize code to run +// run_to_block_ext(3, true); +// +// // Check that event was emitted. 
+// assert!(System::events().iter().any(|e| { +// matches!( +// &e.event, +// RuntimeEvent::SubtensorModule(Event::AggregatedUnstakeAllAlphaFailed(..)) +// ) +// })); +// }); +// } #[test] fn test_unstake_all_works() { @@ -4482,3 +6057,155 @@ fn test_unstake_all_works() { assert!(new_balance > 100_000); }); } + +// #[test] +// fn test_unstake_all_aggregate_works() { +// new_test_ext(1).execute_with(|| { +// let subnet_owner_coldkey = U256::from(1001); +// let subnet_owner_hotkey = U256::from(1002); +// let coldkey = U256::from(1); +// let hotkey = U256::from(2); +// +// let stake_amount = 190_000_000_000; // 190 Alpha +// +// let netuid: u16 = add_dynamic_network(&subnet_owner_hotkey, &subnet_owner_coldkey); +// register_ok_neuron(netuid, hotkey, coldkey, 192213123); +// // Give the neuron some stake to remove +// SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( +// &hotkey, +// &coldkey, +// netuid, +// stake_amount, +// ); +// +// // Setup the Alpha pool so that removing all the Alpha will keep liq above min +// let remaining_tao: I96F32 = +// DefaultMinimumPoolLiquidity::::get().saturating_add(I96F32::from(10_000_000)); +// let alpha_reserves: I110F18 = I110F18::from(stake_amount + 10_000_000); +// let alpha = stake_amount; +// +// let k: I110F18 = I110F18::from_fixed(remaining_tao) +// .saturating_mul(alpha_reserves.saturating_add(I110F18::from(alpha))); +// let tao_reserves: I110F18 = k.safe_div(alpha_reserves); +// +// SubnetTAO::::insert(netuid, tao_reserves.to_num::()); +// SubnetAlphaIn::::insert(netuid, alpha_reserves.to_num::()); +// +// // Unstake all alpha to root +// assert_ok!(SubtensorModule::unstake_all_aggregate( +// RuntimeOrigin::signed(coldkey), +// hotkey, +// )); +// +// // Check for the block delay +// run_to_block_ext(2, true); +// +// // Check that event was not emitted. +// assert!(System::events().iter().all(|e| { +// !matches!( +// &e.event, +// RuntimeEvent::SubtensorModule(Event::AggregatedUnstakeAllSucceeded(..)) +// ) +// })); +// +// // Enable on_finalize code to run +// run_to_block_ext(3, true); +// +// let new_alpha = +// SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, netuid); +// assert_abs_diff_eq!(new_alpha, 0, epsilon = 1_000,); +// let new_balance = SubtensorModule::get_coldkey_balance(&coldkey); +// assert!(new_balance > 100_000); +// +// // Check that event was emitted. +// assert!(System::events().iter().any(|e| { +// matches!( +// &e.event, +// RuntimeEvent::SubtensorModule(Event::AggregatedUnstakeAllSucceeded(..)) +// ) +// })); +// }); +// } +// +// #[test] +// fn test_unstake_all_aggregate_fails() { +// new_test_ext(1).execute_with(|| { +// let coldkey = U256::from(1); +// let hotkey = U256::from(2); +// +// // Unstake all alpha to root +// assert_ok!(SubtensorModule::unstake_all_aggregate( +// RuntimeOrigin::signed(coldkey), +// hotkey, +// )); +// +// // Check for the block delay +// run_to_block_ext(2, true); +// +// // Check that event was not emitted. +// assert!(System::events().iter().all(|e| { +// !matches!( +// &e.event, +// RuntimeEvent::SubtensorModule(Event::AggregatedUnstakeAllFailed(..)) +// ) +// })); +// +// // Enable on_finalize code to run +// run_to_block_ext(3, true); +// +// // Check that event was emitted. 
+// assert!(System::events().iter().any(|e| { +// matches!( +// &e.event, +// RuntimeEvent::SubtensorModule(Event::AggregatedUnstakeAllFailed(..)) +// ) +// })); +// }); +// } + +#[test] +fn test_increase_stake_for_hotkey_and_coldkey_on_subnet_adds_to_staking_hotkeys_map() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let coldkey1 = U256::from(2); + let hotkey = U256::from(3); + + let netuid = 1; + let stake_amount = 100_000_000_000; + + // Check no entry in the staking hotkeys map + assert!(!StakingHotkeys::::contains_key(coldkey)); + // insert manually + StakingHotkeys::::insert(coldkey, Vec::::new()); + // check entry has no hotkey + assert!(!StakingHotkeys::::get(coldkey).contains(&hotkey)); + + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &coldkey, + netuid, + stake_amount, + ); + + // Check entry exists in the staking hotkeys map + assert!(StakingHotkeys::::contains_key(coldkey)); + // check entry has hotkey + assert!(StakingHotkeys::::get(coldkey).contains(&hotkey)); + + // Check no entry in the staking hotkeys map for coldkey1 + assert!(!StakingHotkeys::::contains_key(coldkey1)); + + // Run increase stake for hotkey and coldkey1 on subnet + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &coldkey1, + netuid, + stake_amount, + ); + + // Check entry exists in the staking hotkeys map for coldkey1 + assert!(StakingHotkeys::::contains_key(coldkey1)); + // check entry has hotkey + assert!(StakingHotkeys::::get(coldkey1).contains(&hotkey)); + }); +} diff --git a/pallets/subtensor/src/tests/subnet.rs b/pallets/subtensor/src/tests/subnet.rs index 999c02dd65..ec737f8601 100644 --- a/pallets/subtensor/src/tests/subnet.rs +++ b/pallets/subtensor/src/tests/subnet.rs @@ -14,23 +14,11 @@ fn test_do_start_call_ok() { let netuid: u16 = 1; let tempo: u16 = 13; let coldkey_account_id = U256::from(0); - let hotkey_account_id = U256::from(1); - let burn_cost = 1000; - //add network - SubtensorModule::set_burn(netuid, burn_cost); + add_network_without_emission_block(netuid, tempo, 0); assert_eq!(FirstEmissionBlockNumber::::get(netuid), None); - // Give it some $$$ in his coldkey balance - SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id, 10000); - - // Subscribe and check extrinsic output - assert_ok!(SubtensorModule::burned_register( - <::RuntimeOrigin>::signed(coldkey_account_id), - netuid, - hotkey_account_id - )); - + // account 0 is the default owner for any subnet assert_eq!(SubnetOwner::::get(netuid), coldkey_account_id); let block_number = System::block_number() + DurationOfStartCall::get(); @@ -69,21 +57,9 @@ fn test_do_start_call_fail_not_owner() { let netuid: u16 = 1; let tempo: u16 = 13; let coldkey_account_id = U256::from(0); - let hotkey_account_id = U256::from(1); - let wrong_owner_account_id = U256::from(2); - let burn_cost = 1000; - //add network - SubtensorModule::set_burn(netuid, burn_cost); - add_network_without_emission_block(netuid, tempo, 0); - // Give it some $$$ in his coldkey balance - SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id, 10000); + let wrong_owner_account_id = U256::from(1); - // Subscribe and check extrinsic output - assert_ok!(SubtensorModule::burned_register( - <::RuntimeOrigin>::signed(coldkey_account_id), - netuid, - hotkey_account_id - )); + add_network_without_emission_block(netuid, tempo, 0); assert_eq!(SubnetOwner::::get(netuid), coldkey_account_id); @@ -105,20 +81,8 @@ fn test_do_start_call_fail_with_cannot_start_call_now() { let 
netuid: u16 = 1; let tempo: u16 = 13; let coldkey_account_id = U256::from(0); - let hotkey_account_id = U256::from(1); - let burn_cost = 1000; - //add network - SubtensorModule::set_burn(netuid, burn_cost); - add_network_without_emission_block(netuid, tempo, 0); - // Give it some $$$ in his coldkey balance - SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id, 10000); - // Subscribe and check extrinsic output - assert_ok!(SubtensorModule::burned_register( - <::RuntimeOrigin>::signed(coldkey_account_id), - netuid, - hotkey_account_id - )); + add_network_without_emission_block(netuid, tempo, 0); assert_eq!(SubnetOwner::::get(netuid), coldkey_account_id); @@ -138,23 +102,10 @@ fn test_do_start_call_fail_for_set_again() { let netuid: u16 = 1; let tempo: u16 = 13; let coldkey_account_id = U256::from(0); - let hotkey_account_id = U256::from(1); - let burn_cost = 1000; - //add network - SubtensorModule::set_burn(netuid, burn_cost); + add_network_without_emission_block(netuid, tempo, 0); assert_eq!(FirstEmissionBlockNumber::::get(netuid), None); - // Give it some $$$ in his coldkey balance - SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id, 10000); - - // Subscribe and check extrinsic output - assert_ok!(SubtensorModule::burned_register( - <::RuntimeOrigin>::signed(coldkey_account_id), - netuid, - hotkey_account_id - )); - assert_eq!(SubnetOwner::::get(netuid), coldkey_account_id); let block_number = System::block_number() + DurationOfStartCall::get(); @@ -181,23 +132,10 @@ fn test_do_start_call_ok_with_same_block_number_after_coinbase() { let netuid: u16 = 1; let tempo: u16 = 13; let coldkey_account_id = U256::from(0); - let hotkey_account_id = U256::from(1); - let burn_cost = 1000; - //add network - SubtensorModule::set_burn(netuid, burn_cost); + add_network_without_emission_block(netuid, tempo, 0); assert_eq!(FirstEmissionBlockNumber::::get(netuid), None); - // Give it some $$$ in his coldkey balance - SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id, 10000); - - // Subscribe and check extrinsic output - assert_ok!(SubtensorModule::burned_register( - <::RuntimeOrigin>::signed(coldkey_account_id), - netuid, - hotkey_account_id - )); - assert_eq!(SubnetOwner::::get(netuid), coldkey_account_id); let block_number = System::block_number() + DurationOfStartCall::get(); @@ -248,7 +186,7 @@ fn test_register_network_min_burn_at_default() { RuntimeEvent::SubtensorModule(Event::::NetworkAdded(..)) ) }) - .last(); + .next_back(); let netuid = match min_burn_event.map(|event| event.event.clone()) { Some(RuntimeEvent::SubtensorModule(Event::::NetworkAdded(netuid, _))) => netuid, @@ -276,3 +214,275 @@ fn test_no_duplicates_in_get_symbol_for_subnet() { ); } } + +// cargo test --package pallet-subtensor --lib -- tests::subnet::test_subtoken_enable --exact --show-output + +#[test] +fn test_subtoken_enable() { + // ensure_subtoken_enabled + new_test_ext(1).execute_with(|| { + let account = U256::from(0); + let netuid: u16 = 1; + // let to_be_set: u64 = 10 + add_network_disable_subtoken(netuid, 10, 0); + assert!(!SubtokenEnabled::::get(netuid)); + + let block_number = System::block_number() + DurationOfStartCall::get(); + System::set_block_number(block_number); + + assert_ok!(SubtensorModule::start_call( + <::RuntimeOrigin>::signed(account), + netuid + )); + + assert!(SubtokenEnabled::::get(netuid)); + }); +} + +// cargo test --package pallet-subtensor --lib -- tests::subnet::test_subtoken_enable_reject_trading_before_enable --exact --show-output +#[test] +fn 
test_subtoken_enable_reject_trading_before_enable() { + // ensure_subtoken_enabled + new_test_ext(1).execute_with(|| { + let netuid: u16 = 1; + let netuid2: u16 = 2; + let hotkey_account_id: U256 = U256::from(1); + let coldkey_account_id = U256::from(2); + let hotkey_account_2_id: U256 = U256::from(3); + let amount = DefaultMinStake::::get() * 10; + + add_network_disable_subtoken(netuid, 10, 0); + add_network_disable_subtoken(netuid2, 10, 0); + + // Register so staking *could* work + register_ok_neuron(netuid, hotkey_account_id, coldkey_account_id, 0); + register_ok_neuron(netuid2, hotkey_account_id, coldkey_account_id, 100); + register_ok_neuron(netuid, hotkey_account_2_id, coldkey_account_id, 0); + register_ok_neuron(netuid2, hotkey_account_2_id, coldkey_account_id, 100); + + SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id, 10_000); + + // all trading extrinsic should be rejected. + assert_noop!( + SubtensorModule::add_stake( + RuntimeOrigin::signed(coldkey_account_id), + hotkey_account_id, + netuid, + amount + ), + Error::::SubtokenDisabled + ); + + assert_noop!( + SubtensorModule::remove_stake( + RuntimeOrigin::signed(coldkey_account_id), + hotkey_account_id, + netuid, + amount + ), + Error::::SubtokenDisabled + ); + + assert_noop!( + SubtensorModule::recycle_alpha( + RuntimeOrigin::signed(coldkey_account_id), + hotkey_account_id, + amount, + netuid + ), + Error::::SubtokenDisabled + ); + + assert_noop!( + SubtensorModule::burn_alpha( + RuntimeOrigin::signed(coldkey_account_id), + hotkey_account_id, + amount, + netuid + ), + Error::::SubtokenDisabled + ); + + assert_noop!( + SubtensorModule::move_stake( + RuntimeOrigin::signed(coldkey_account_id), + hotkey_account_id, + hotkey_account_2_id, + netuid, + netuid2, + amount, + ), + Error::::SubtokenDisabled + ); + + assert_noop!( + SubtensorModule::transfer_stake( + RuntimeOrigin::signed(coldkey_account_id), + hotkey_account_id, + hotkey_account_2_id, + netuid, + netuid2, + amount, + ), + Error::::SubtokenDisabled + ); + + assert_noop!( + SubtensorModule::swap_stake( + RuntimeOrigin::signed(coldkey_account_id), + hotkey_account_id, + netuid, + netuid2, + amount, + ), + Error::::SubtokenDisabled + ); + }); +} + +// cargo test --package pallet-subtensor --lib -- tests::subnet::test_subtoken_enable_trading_ok_with_enable --exact --show-output +#[test] +fn test_subtoken_enable_trading_ok_with_enable() { + new_test_ext(1).execute_with(|| { + let netuid: u16 = 1; + let netuid2: u16 = 2; + let hotkey_account_id: U256 = U256::from(1); + let coldkey_account_id = U256::from(2); + let hotkey_account_2_id: U256 = U256::from(3); + // stake big enough + let stake_amount = DefaultMinStake::::get() * 10000; + // unstake, transfer, swap just very little + let unstake_amount = DefaultMinStake::::get() * 10; + + add_network(netuid, 10, 0); + add_network(netuid2, 10, 0); + // Register so staking works + register_ok_neuron(netuid, hotkey_account_id, coldkey_account_id, 0); + register_ok_neuron(netuid2, hotkey_account_id, coldkey_account_id, 100); + register_ok_neuron(netuid, hotkey_account_2_id, coldkey_account_id, 0); + register_ok_neuron(netuid2, hotkey_account_2_id, coldkey_account_id, 100); + + SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id, stake_amount * 10); + + // all trading extrinsic should be possible now that subtoken is enabled. 
+ assert_ok!(SubtensorModule::add_stake( + RuntimeOrigin::signed(coldkey_account_id), + hotkey_account_id, + netuid, + stake_amount + )); + + assert_ok!(SubtensorModule::add_stake( + RuntimeOrigin::signed(coldkey_account_id), + hotkey_account_id, + netuid2, + stake_amount + )); + + assert_ok!(SubtensorModule::add_stake( + RuntimeOrigin::signed(coldkey_account_id), + hotkey_account_2_id, + netuid, + stake_amount + )); + + assert_ok!(SubtensorModule::add_stake( + RuntimeOrigin::signed(coldkey_account_id), + hotkey_account_2_id, + netuid2, + stake_amount + )); + + assert_ok!(SubtensorModule::remove_stake( + RuntimeOrigin::signed(coldkey_account_id), + hotkey_account_id, + netuid, + unstake_amount + )); + + assert_ok!(SubtensorModule::recycle_alpha( + RuntimeOrigin::signed(coldkey_account_id), + hotkey_account_id, + unstake_amount, + netuid + )); + + assert_ok!(SubtensorModule::burn_alpha( + RuntimeOrigin::signed(coldkey_account_id), + hotkey_account_id, + unstake_amount, + netuid + )); + + assert_ok!(SubtensorModule::move_stake( + RuntimeOrigin::signed(coldkey_account_id), + hotkey_account_id, + hotkey_account_2_id, + netuid, + netuid2, + unstake_amount, + )); + + assert_ok!(SubtensorModule::transfer_stake( + RuntimeOrigin::signed(coldkey_account_id), + hotkey_account_id, + hotkey_account_2_id, + netuid, + netuid2, + unstake_amount, + )); + + assert_ok!(SubtensorModule::swap_stake( + RuntimeOrigin::signed(coldkey_account_id), + hotkey_account_id, + netuid, + netuid2, + unstake_amount, + )); + + assert_ok!(SubtensorModule::unstake_all( + RuntimeOrigin::signed(coldkey_account_id), + hotkey_account_id, + )); + + assert_ok!(SubtensorModule::unstake_all_alpha( + RuntimeOrigin::signed(coldkey_account_id), + hotkey_account_id, + )); + }); +} + +// cargo test --package pallet-subtensor --lib -- tests::subnet::test_subtoken_enable_ok_for_burn_register_before_enable --exact --show-output +#[test] +fn test_subtoken_enable_ok_for_burn_register_before_enable() { + // ensure_subtoken_enabled + new_test_ext(1).execute_with(|| { + let netuid: u16 = 1; + let netuid2: u16 = 2; + let hotkey_account_id: U256 = U256::from(1); + let coldkey_account_id = U256::from(2); + let hotkey_account_2_id: U256 = U256::from(3); + + let burn_cost = 1000; + // Set the burn cost + SubtensorModule::set_burn(netuid, burn_cost); + // Add the networks with subtoken disabled + add_network_disable_subtoken(netuid, 10, 0); + add_network_disable_subtoken(netuid2, 10, 0); + // Give enough to burned register + SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id, burn_cost * 2 + 5_000); + + // Should be possible to burned register before enable is activated + assert_ok!(SubtensorModule::burned_register( + <::RuntimeOrigin>::signed(coldkey_account_id), + netuid, + hotkey_account_id + )); + + assert_ok!(SubtensorModule::burned_register( + <::RuntimeOrigin>::signed(coldkey_account_id), + netuid2, + hotkey_account_2_id + )); + }); +} diff --git a/pallets/subtensor/src/tests/swap_coldkey.rs b/pallets/subtensor/src/tests/swap_coldkey.rs index beb4df59a5..385830904c 100644 --- a/pallets/subtensor/src/tests/swap_coldkey.rs +++ b/pallets/subtensor/src/tests/swap_coldkey.rs @@ -1883,6 +1883,71 @@ fn test_schedule_swap_coldkey_with_pending_swap() { }); } +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test swap_coldkey -- test_schedule_swap_coldkey_failure_and_reschedule --exact --nocapture +#[test] +fn test_schedule_swap_coldkey_failure_and_reschedule() { + new_test_ext(1).execute_with(|| { + let old_coldkey = U256::from(1); + let 
new_coldkey1 = U256::from(2); + let new_coldkey2 = U256::from(3); + + let swap_cost = SubtensorModule::get_key_swap_cost(); + + // Two swaps + SubtensorModule::add_balance_to_coldkey_account(&old_coldkey, swap_cost + 1_000 * 2); + + assert_ok!(SubtensorModule::schedule_swap_coldkey( + <::RuntimeOrigin>::signed(old_coldkey), + new_coldkey1 + )); + + let current_block = >::block_number(); + let duration = ColdkeySwapScheduleDuration::::get(); + let when = current_block.saturating_add(duration); + + // Setup first key to fail + // -- will fail if the new coldkey is already a hotkey (has an Owner) + Owner::::insert(new_coldkey1, U256::from(4)); + + // First swap fails + run_to_block(when - 1); + next_block(); + + // Check the failure + next_block(); // Still in the scheduled-swap map + assert!(ColdkeySwapScheduled::::contains_key(old_coldkey)); + + // Try to schedule the second swap + assert_noop!( + SubtensorModule::schedule_swap_coldkey( + <::RuntimeOrigin>::signed(old_coldkey), + new_coldkey2 + ), + Error::::SwapAlreadyScheduled + ); + + // Wait for correct duration after first swap fails + let fail_duration = ColdkeySwapRescheduleDuration::::get(); + run_to_block(when + fail_duration); + + // Schedule the second swap + assert_ok!(SubtensorModule::schedule_swap_coldkey( + <::RuntimeOrigin>::signed(old_coldkey), + new_coldkey2 + )); + + let current_block = >::block_number(); + let duration = ColdkeySwapScheduleDuration::::get(); + let when = current_block.saturating_add(duration); + run_to_block(when - 1); + next_block(); + + // Check the success + next_block(); // Now in the scheduled-swap map + assert!(!ColdkeySwapScheduled::::contains_key(old_coldkey)); + }); +} + #[test] fn test_coldkey_swap_delegate_identity_updated() { new_test_ext(1).execute_with(|| { diff --git a/pallets/subtensor/src/tests/weights.rs b/pallets/subtensor/src/tests/weights.rs index 14b80a0310..3d240750cf 100644 --- a/pallets/subtensor/src/tests/weights.rs +++ b/pallets/subtensor/src/tests/weights.rs @@ -677,6 +677,13 @@ fn test_weights_err_setting_weights_too_fast() { SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey_account_id) .expect("Not registered."); SubtensorModule::set_validator_permit_for_uid(netuid, neuron_uid, true); + SubtensorModule::add_balance_to_coldkey_account(&U256::from(66), 1); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey_account_id, + &(U256::from(66)), + netuid, + 1, + ); SubtensorModule::set_weights_set_rate_limit(netuid, 10); assert_eq!(SubtensorModule::get_weights_set_rate_limit(netuid), 10); @@ -753,6 +760,7 @@ fn test_weights_err_has_duplicate_ids() { let salt: Vec = vec![1, 2, 3, 4, 5, 6, 7, 8]; add_network(netuid, tempo, 0); + SubtensorModule::set_stake_threshold(0); SubtensorModule::set_max_allowed_uids(netuid, 100); // Allow many registrations per block. SubtensorModule::set_max_registrations_per_block(netuid, 100); // Allow many registrations per block. SubtensorModule::set_target_registrations_per_interval(netuid, 100); // Allow many registrations per block. 
@@ -762,6 +770,13 @@ fn test_weights_err_has_duplicate_ids() { SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey_account_id) .expect("Not registered."); SubtensorModule::set_validator_permit_for_uid(netuid, neuron_uid, true); + SubtensorModule::add_balance_to_coldkey_account(&U256::from(77), 1); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey_account_id, + &(U256::from(77)), + netuid, + 1, + ); // uid 1 register_ok_neuron(netuid, U256::from(1), U256::from(1), 100_000); @@ -933,7 +948,15 @@ fn test_set_weights_err_invalid_uid() { let neuron_uid: u16 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey_account_id) .expect("Not registered."); + SubtensorModule::set_stake_threshold(0); SubtensorModule::set_validator_permit_for_uid(netuid, neuron_uid, true); + SubtensorModule::add_balance_to_coldkey_account(&U256::from(66), 1); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey_account_id, + &(U256::from(66)), + netuid, + 1, + ); let weight_keys: Vec = vec![9999]; // Does not exist let weight_values: Vec = vec![88]; // random value let result = @@ -958,6 +981,13 @@ fn test_set_weight_not_enough_values() { .expect("Not registered."); SubtensorModule::set_validator_permit_for_uid(netuid, neuron_uid, true); SubtensorModule::set_max_weight_limit(netuid, u16::MAX); + SubtensorModule::add_balance_to_coldkey_account(&U256::from(2), 1); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &account_id, + &(U256::from(2)), + netuid, + 1, + ); register_ok_neuron(1, U256::from(3), U256::from(4), 300000); SubtensorModule::set_min_allowed_weights(netuid, 2); @@ -1059,8 +1089,16 @@ fn test_set_weights_sum_larger_than_u16_max() { register_ok_neuron(1, U256::from(1), U256::from(2), 100_000); let neuron_uid: u16 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &U256::from(1)) .expect("Not registered."); + SubtensorModule::set_stake_threshold(0); SubtensorModule::set_validator_permit_for_uid(netuid, neuron_uid, true); SubtensorModule::set_max_weight_limit(netuid, u16::MAX); + SubtensorModule::add_balance_to_coldkey_account(&U256::from(2), 1); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &(U256::from(1)), + &(U256::from(2)), + netuid, + 1, + ); register_ok_neuron(1, U256::from(3), U256::from(4), 300_000); SubtensorModule::set_min_allowed_weights(1, 2); @@ -1548,10 +1586,25 @@ fn test_commit_reveal_weights_ok() { // Register neurons and set up configurations register_ok_neuron(netuid, U256::from(3), U256::from(4), 300_000); register_ok_neuron(netuid, U256::from(1), U256::from(2), 100_000); + SubtensorModule::set_stake_threshold(0); SubtensorModule::set_weights_set_rate_limit(netuid, 5); SubtensorModule::set_validator_permit_for_uid(netuid, 0, true); SubtensorModule::set_validator_permit_for_uid(netuid, 1, true); SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); + SubtensorModule::add_balance_to_coldkey_account(&U256::from(0), 1); + SubtensorModule::add_balance_to_coldkey_account(&U256::from(1), 1); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &(U256::from(0)), + &(U256::from(0)), + netuid, + 1, + ); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &(U256::from(1)), + &(U256::from(1)), + netuid, + 1, + ); // Commit at block 0 assert_ok!(SubtensorModule::commit_weights( @@ -1601,10 +1654,25 @@ fn test_commit_reveal_tempo_interval() { register_ok_neuron(netuid, U256::from(3), U256::from(4), 300_000); register_ok_neuron(netuid, 
U256::from(1), U256::from(2), 100_000); + SubtensorModule::set_stake_threshold(0); SubtensorModule::set_weights_set_rate_limit(netuid, 5); SubtensorModule::set_validator_permit_for_uid(netuid, 0, true); SubtensorModule::set_validator_permit_for_uid(netuid, 1, true); SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); + SubtensorModule::add_balance_to_coldkey_account(&U256::from(0), 1); + SubtensorModule::add_balance_to_coldkey_account(&U256::from(1), 1); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &(U256::from(0)), + &(U256::from(0)), + netuid, + 1, + ); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &(U256::from(1)), + &(U256::from(1)), + netuid, + 1, + ); // Commit at block 0 assert_ok!(SubtensorModule::commit_weights( @@ -1722,9 +1790,24 @@ fn test_commit_reveal_hash() { register_ok_neuron(netuid, U256::from(3), U256::from(4), 300_000); register_ok_neuron(netuid, U256::from(1), U256::from(2), 100_000); + SubtensorModule::set_stake_threshold(0); SubtensorModule::set_weights_set_rate_limit(netuid, 5); SubtensorModule::set_validator_permit_for_uid(netuid, 0, true); SubtensorModule::set_validator_permit_for_uid(netuid, 1, true); + SubtensorModule::add_balance_to_coldkey_account(&U256::from(0), 1); + SubtensorModule::add_balance_to_coldkey_account(&U256::from(1), 1); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &(U256::from(0)), + &(U256::from(0)), + netuid, + 1, + ); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &(U256::from(1)), + &(U256::from(1)), + netuid, + 1, + ); SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); @@ -1807,9 +1890,24 @@ fn test_commit_reveal_disabled_or_enabled() { register_ok_neuron(netuid, U256::from(3), U256::from(4), 300_000); register_ok_neuron(netuid, U256::from(1), U256::from(2), 100_000); + SubtensorModule::set_stake_threshold(0); SubtensorModule::set_weights_set_rate_limit(netuid, 5); SubtensorModule::set_validator_permit_for_uid(netuid, 0, true); SubtensorModule::set_validator_permit_for_uid(netuid, 1, true); + SubtensorModule::add_balance_to_coldkey_account(&U256::from(0), 1); + SubtensorModule::add_balance_to_coldkey_account(&U256::from(1), 1); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &(U256::from(0)), + &(U256::from(0)), + netuid, + 1, + ); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &(U256::from(1)), + &(U256::from(1)), + netuid, + 1, + ); // Disable commit/reveal SubtensorModule::set_commit_reveal_weights_enabled(netuid, false); @@ -1869,9 +1967,24 @@ fn test_toggle_commit_reveal_weights_and_set_weights() { register_ok_neuron(netuid, U256::from(3), U256::from(4), 300_000); register_ok_neuron(netuid, U256::from(1), U256::from(2), 100_000); + SubtensorModule::set_stake_threshold(0); SubtensorModule::set_validator_permit_for_uid(netuid, 0, true); SubtensorModule::set_validator_permit_for_uid(netuid, 1, true); SubtensorModule::set_weights_set_rate_limit(netuid, 5); + SubtensorModule::add_balance_to_coldkey_account(&U256::from(0), 1); + SubtensorModule::add_balance_to_coldkey_account(&U256::from(1), 1); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &(U256::from(0)), + &(U256::from(0)), + netuid, + 1, + ); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &(U256::from(1)), + &(U256::from(1)), + netuid, + 1, + ); // Enable commit/reveal SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); @@ -1939,10 +2052,25 @@ fn 
test_tempo_change_during_commit_reveal_process() { register_ok_neuron(netuid, U256::from(3), U256::from(4), 300_000); register_ok_neuron(netuid, U256::from(1), U256::from(2), 100_000); + SubtensorModule::set_stake_threshold(0); SubtensorModule::set_weights_set_rate_limit(netuid, 5); SubtensorModule::set_validator_permit_for_uid(netuid, 0, true); SubtensorModule::set_validator_permit_for_uid(netuid, 1, true); SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); + SubtensorModule::add_balance_to_coldkey_account(&U256::from(0), 1); + SubtensorModule::add_balance_to_coldkey_account(&U256::from(1), 1); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &(U256::from(0)), + &(U256::from(0)), + netuid, + 1, + ); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &(U256::from(1)), + &(U256::from(1)), + netuid, + 1, + ); assert_ok!(SubtensorModule::commit_weights( RuntimeOrigin::signed(hotkey), @@ -2073,10 +2201,25 @@ fn test_commit_reveal_multiple_commits() { // Setup the network and neurons register_ok_neuron(netuid, U256::from(3), U256::from(4), 300_000); register_ok_neuron(netuid, U256::from(1), U256::from(2), 100_000); + SubtensorModule::set_stake_threshold(0); SubtensorModule::set_weights_set_rate_limit(netuid, 0); SubtensorModule::set_validator_permit_for_uid(netuid, 0, true); SubtensorModule::set_validator_permit_for_uid(netuid, 1, true); SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); + SubtensorModule::add_balance_to_coldkey_account(&U256::from(0), 1); + SubtensorModule::add_balance_to_coldkey_account(&U256::from(1), 1); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &(U256::from(0)), + &(U256::from(0)), + netuid, + 1, + ); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &(U256::from(1)), + &(U256::from(1)), + netuid, + 1, + ); // 1. Commit 10 times successfully let mut commit_info = Vec::new(); @@ -2463,8 +2606,23 @@ fn test_expired_commits_handling_in_commit_and_reveal() { // Register neurons register_ok_neuron(netuid, U256::from(3), U256::from(4), 300_000); register_ok_neuron(netuid, U256::from(1), U256::from(2), 100_000); + SubtensorModule::set_stake_threshold(0); SubtensorModule::set_validator_permit_for_uid(netuid, 0, true); SubtensorModule::set_validator_permit_for_uid(netuid, 1, true); + SubtensorModule::add_balance_to_coldkey_account(&U256::from(0), 1); + SubtensorModule::add_balance_to_coldkey_account(&U256::from(1), 1); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &(U256::from(0)), + &(U256::from(0)), + netuid, + 1, + ); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &(U256::from(1)), + &(U256::from(1)), + netuid, + 1, + ); // 1. 
Commit 5 times in epoch 0 let mut commit_info = Vec::new(); @@ -2646,8 +2804,23 @@ fn test_reveal_at_exact_epoch() { register_ok_neuron(netuid, U256::from(3), U256::from(4), 300_000); register_ok_neuron(netuid, U256::from(1), U256::from(2), 100_000); + SubtensorModule::set_stake_threshold(0); SubtensorModule::set_validator_permit_for_uid(netuid, 0, true); SubtensorModule::set_validator_permit_for_uid(netuid, 1, true); + SubtensorModule::add_balance_to_coldkey_account(&U256::from(0), 1); + SubtensorModule::add_balance_to_coldkey_account(&U256::from(1), 1); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &(U256::from(0)), + &(U256::from(0)), + netuid, + 1, + ); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &(U256::from(1)), + &(U256::from(1)), + netuid, + 1, + ); let reveal_periods: Vec = vec![0, 1, 2, 7, 40, 86, 100]; @@ -2795,8 +2968,23 @@ fn test_tempo_and_reveal_period_change_during_commit_reveal_process() { register_ok_neuron(netuid, U256::from(3), U256::from(4), 300_000); register_ok_neuron(netuid, U256::from(1), U256::from(2), 100_000); + SubtensorModule::set_stake_threshold(0); SubtensorModule::set_validator_permit_for_uid(netuid, 0, true); SubtensorModule::set_validator_permit_for_uid(netuid, 1, true); + SubtensorModule::add_balance_to_coldkey_account(&U256::from(0), 1); + SubtensorModule::add_balance_to_coldkey_account(&U256::from(1), 1); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &(U256::from(0)), + &(U256::from(0)), + netuid, + 1, + ); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &(U256::from(1)), + &(U256::from(1)), + netuid, + 1, + ); // Step 1: Commit weights assert_ok!(SubtensorModule::commit_weights( @@ -2970,8 +3158,23 @@ fn test_commit_reveal_order_enforcement() { register_ok_neuron(netuid, U256::from(3), U256::from(4), 300_000); register_ok_neuron(netuid, U256::from(1), U256::from(2), 100_000); + SubtensorModule::set_stake_threshold(0); SubtensorModule::set_validator_permit_for_uid(netuid, 0, true); SubtensorModule::set_validator_permit_for_uid(netuid, 1, true); + SubtensorModule::add_balance_to_coldkey_account(&U256::from(0), 1); + SubtensorModule::add_balance_to_coldkey_account(&U256::from(1), 1); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &(U256::from(0)), + &(U256::from(0)), + netuid, + 1, + ); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &(U256::from(1)), + &(U256::from(1)), + netuid, + 1, + ); // Commit three times: A, B, C let mut commit_info = Vec::new(); @@ -3227,8 +3430,23 @@ fn test_successful_batch_reveal() { register_ok_neuron(netuid, U256::from(3), U256::from(4), 300_000); register_ok_neuron(netuid, hotkey, U256::from(2), 100_000); + SubtensorModule::set_stake_threshold(0); SubtensorModule::set_validator_permit_for_uid(netuid, 0, true); SubtensorModule::set_validator_permit_for_uid(netuid, 1, true); + SubtensorModule::add_balance_to_coldkey_account(&U256::from(0), 1); + SubtensorModule::add_balance_to_coldkey_account(&U256::from(1), 1); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &(U256::from(0)), + &(U256::from(0)), + netuid, + 1, + ); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &(U256::from(1)), + &(U256::from(1)), + netuid, + 1, + ); // 1. 
Commit multiple times let mut commit_info = Vec::new(); @@ -3290,8 +3508,23 @@ fn test_batch_reveal_with_expired_commits() { register_ok_neuron(netuid, U256::from(3), U256::from(4), 300_000); register_ok_neuron(netuid, hotkey, U256::from(2), 100_000); + SubtensorModule::set_stake_threshold(0); SubtensorModule::set_validator_permit_for_uid(netuid, 0, true); SubtensorModule::set_validator_permit_for_uid(netuid, 1, true); + SubtensorModule::add_balance_to_coldkey_account(&U256::from(0), 1); + SubtensorModule::add_balance_to_coldkey_account(&U256::from(1), 1); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &(U256::from(0)), + &(U256::from(0)), + netuid, + 1, + ); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &(U256::from(1)), + &(U256::from(1)), + netuid, + 1, + ); let mut commit_info = Vec::new(); @@ -3692,8 +3925,23 @@ fn test_batch_reveal_with_out_of_order_commits() { register_ok_neuron(netuid, U256::from(3), U256::from(4), 300_000); register_ok_neuron(netuid, hotkey, U256::from(2), 100_000); + SubtensorModule::set_stake_threshold(0); SubtensorModule::set_validator_permit_for_uid(netuid, 0, true); SubtensorModule::set_validator_permit_for_uid(netuid, 1, true); + SubtensorModule::add_balance_to_coldkey_account(&U256::from(0), 1); + SubtensorModule::add_balance_to_coldkey_account(&U256::from(1), 1); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &(U256::from(0)), + &(U256::from(0)), + netuid, + 1, + ); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &(U256::from(1)), + &(U256::from(1)), + netuid, + 1, + ); // 1. Commit multiple times (A, B, C) let mut commit_info = Vec::new(); @@ -4080,10 +4328,25 @@ fn test_get_reveal_blocks() { // **5. Register Neurons and Configure the Network** register_ok_neuron(netuid, U256::from(3), U256::from(4), 300_000); register_ok_neuron(netuid, U256::from(1), U256::from(2), 100_000); + SubtensorModule::set_stake_threshold(0); SubtensorModule::set_weights_set_rate_limit(netuid, 5); SubtensorModule::set_validator_permit_for_uid(netuid, 0, true); SubtensorModule::set_validator_permit_for_uid(netuid, 1, true); SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); + SubtensorModule::add_balance_to_coldkey_account(&U256::from(0), 1); + SubtensorModule::add_balance_to_coldkey_account(&U256::from(1), 1); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &(U256::from(0)), + &(U256::from(0)), + netuid, + 1, + ); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &(U256::from(1)), + &(U256::from(1)), + netuid, + 1, + ); // **6. 
Commit Weights at Block 0** assert_ok!(SubtensorModule::commit_weights( @@ -4199,10 +4462,25 @@ fn test_commit_weights_rate_limit() { register_ok_neuron(netuid, U256::from(3), U256::from(4), 300_000); register_ok_neuron(netuid, U256::from(1), U256::from(2), 100_000); + SubtensorModule::set_stake_threshold(0); SubtensorModule::set_weights_set_rate_limit(netuid, 10); // Rate limit is 10 blocks SubtensorModule::set_validator_permit_for_uid(netuid, 0, true); SubtensorModule::set_validator_permit_for_uid(netuid, 1, true); SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); + SubtensorModule::add_balance_to_coldkey_account(&U256::from(0), 1); + SubtensorModule::add_balance_to_coldkey_account(&U256::from(1), 1); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &(U256::from(0)), + &(U256::from(0)), + netuid, + 1, + ); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &(U256::from(1)), + &(U256::from(1)), + netuid, + 1, + ); let neuron_uid = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey).expect("expected uid"); @@ -4367,6 +4645,7 @@ fn test_reveal_crv3_commits_success() { add_network(netuid, 5, 0); register_ok_neuron(netuid, hotkey1, U256::from(3), 100_000); register_ok_neuron(netuid, hotkey2, U256::from(4), 100_000); + SubtensorModule::set_stake_threshold(0); SubtensorModule::set_weights_set_rate_limit(netuid, 0); SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); SubtensorModule::set_reveal_period(netuid, 3); @@ -4378,6 +4657,20 @@ fn test_reveal_crv3_commits_success() { SubtensorModule::set_validator_permit_for_uid(netuid, neuron_uid1, true); SubtensorModule::set_validator_permit_for_uid(netuid, neuron_uid2, true); + SubtensorModule::add_balance_to_coldkey_account(&U256::from(3), 1); + SubtensorModule::add_balance_to_coldkey_account(&U256::from(4), 1); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey1, + &(U256::from(3)), + netuid, + 1, + ); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey2, + &(U256::from(4)), + netuid, + 1, + ); let version_key = SubtensorModule::get_weights_version_key(netuid); diff --git a/pallets/subtensor/src/utils/evm.rs b/pallets/subtensor/src/utils/evm.rs index a34f6afc80..6877739f69 100644 --- a/pallets/subtensor/src/utils/evm.rs +++ b/pallets/subtensor/src/utils/evm.rs @@ -1,10 +1,26 @@ use super::*; +use alloc::string::ToString; use frame_support::ensure; use frame_system::ensure_signed; use sp_core::{H160, ecdsa::Signature, hashing::keccak_256}; +use sp_std::vec::Vec; + +const MESSAGE_PREFIX: &str = "\x19Ethereum Signed Message:\n"; impl Pallet { + pub(crate) fn hash_message_eip191>(message: M) -> [u8; 32] { + let msg_len = message.as_ref().len().to_string(); + keccak_256( + &[ + MESSAGE_PREFIX.as_bytes(), + msg_len.as_bytes(), + message.as_ref(), + ] + .concat(), + ) + } + /// Associate an EVM key with a hotkey. 
/// /// This function accepts a Signature, which is a signed message containing the hotkey concatenated with @@ -30,7 +46,7 @@ impl<T: Config> Pallet<T> { hotkey: T::AccountId, evm_key: H160, block_number: u64, - signature: Signature, + mut signature: Signature, ) -> dispatch::DispatchResult { let coldkey = ensure_signed(origin)?; @@ -39,15 +55,18 @@ impl<T: Config> Pallet<T> { Error::<T>::NonAssociatedColdKey ); + // Normalize the v value to 0 or 1 + if signature.0[64] >= 27 { + signature.0[64] = signature.0[64].saturating_sub(27); + } + let uid = Self::get_uid_for_net_and_hotkey(netuid, &hotkey)?; - let mut message = [0u8; 64]; let block_hash = keccak_256(block_number.encode().as_ref()); - message[..32].copy_from_slice(&hotkey.encode()[..]); - message[32..].copy_from_slice(block_hash.as_ref()); + let message = [hotkey.encode().as_ref(), block_hash.as_ref()].concat(); let public = signature - .recover_prehashed(&keccak_256(message.as_ref())) - .ok_or(Error::<T>::UnableToRecoverPublicKey)?; + .recover_prehashed(&Self::hash_message_eip191(message)) + .ok_or(Error::<T>::InvalidIdentity)?; let secp_pubkey = libsecp256k1::PublicKey::parse_compressed(&public.0) .map_err(|_| Error::<T>::UnableToRecoverPublicKey)?; let uncompressed = secp_pubkey.serialize(); @@ -71,4 +90,19 @@ impl<T: Config> Pallet<T> { Ok(()) } + + pub fn uid_lookup(netuid: u16, evm_key: H160, limit: u16) -> Vec<(u16, u64)> { + let mut ret_val = AssociatedEvmAddress::<T>::iter_prefix(netuid) + .take(limit as usize) + .filter_map(|(uid, (stored_evm_key, block_associated))| { + if stored_evm_key != evm_key { + return None; + } + + Some((uid, block_associated)) + }) + .collect::<Vec<_>>(); + ret_val.sort_by(|(_, block1), (_, block2)| block1.cmp(block2)); + ret_val + } } diff --git a/pallets/subtensor/src/utils/misc.rs b/pallets/subtensor/src/utils/misc.rs index b375cc66e4..899fa83646 100644 --- a/pallets/subtensor/src/utils/misc.rs +++ b/pallets/subtensor/src/utils/misc.rs @@ -582,6 +582,14 @@ impl<T: Config> Pallet<T> { Self::deposit_event(Event::BondsPenaltySet(netuid, bonds_penalty)); } + pub fn get_bonds_reset(netuid: u16) -> bool { + BondsResetOn::<T>::get(netuid) + } + pub fn set_bonds_reset(netuid: u16, bonds_reset: bool) { + BondsResetOn::<T>::insert(netuid, bonds_reset); + Self::deposit_event(Event::BondsResetOnSet(netuid, bonds_reset)); + } + pub fn get_max_registrations_per_block(netuid: u16) -> u16 { MaxRegistrationsPerBlock::<T>::get(netuid) } @@ -671,6 +679,12 @@ impl<T: Config> Pallet<T> { AlphaValues::<T>::get(netuid) } + pub fn set_alpha_values_32(netuid: u16, low: I32F32, high: I32F32) { + let low = (low.saturating_mul(I32F32::saturating_from_num(u16::MAX))).to_num::<u16>(); + let high = (high.saturating_mul(I32F32::saturating_from_num(u16::MAX))).to_num::<u16>(); + AlphaValues::<T>::insert(netuid, (low, high)); + } + pub fn get_alpha_values_32(netuid: u16) -> (I32F32, I32F32) { let (alpha_low, alpha_high): (u16, u16) = AlphaValues::<T>::get(netuid); let converted_low = @@ -681,6 +695,14 @@ impl<T: Config> Pallet<T> { (converted_low, converted_high) } + pub fn set_alpha_sigmoid_steepness(netuid: u16, steepness: u16) { + AlphaSigmoidSteepness::<T>::insert(netuid, steepness); + } + pub fn get_alpha_sigmoid_steepness(netuid: u16) -> I32F32 { + let alpha = AlphaSigmoidSteepness::<T>::get(netuid); + I32F32::saturating_from_num(alpha) + } + pub fn set_liquid_alpha_enabled(netuid: u16, enabled: bool) { LiquidAlphaOn::<T>::set(netuid, enabled); } @@ -689,6 +711,14 @@ impl<T: Config> Pallet<T> { LiquidAlphaOn::<T>::get(netuid) } + pub fn set_yuma3_enabled(netuid: u16, enabled: bool) { + Yuma3On::<T>::set(netuid, enabled); + } + + pub fn get_yuma3_enabled(netuid: u16) -> bool { + 
Yuma3On::<T>::get(netuid) + } + /// Set the duration for coldkey swap /// /// # Arguments diff --git a/pallets/subtensor/src/utils/rate_limiting.rs b/pallets/subtensor/src/utils/rate_limiting.rs index c37a78d2e4..7edaebc98a 100644 --- a/pallets/subtensor/src/utils/rate_limiting.rs +++ b/pallets/subtensor/src/utils/rate_limiting.rs @@ -8,6 +8,7 @@ pub enum TransactionType { Unknown, RegisterNetwork, SetWeightsVersionKey, + SetSNOwnerHotkey, } /// Implement conversion from TransactionType to u16 @@ -19,6 +20,7 @@ impl From<TransactionType> for u16 { TransactionType::Unknown => 2, TransactionType::RegisterNetwork => 3, TransactionType::SetWeightsVersionKey => 4, + TransactionType::SetSNOwnerHotkey => 5, } } } @@ -31,6 +33,7 @@ impl From<u16> for TransactionType { 1 => TransactionType::SetChildkeyTake, 3 => TransactionType::RegisterNetwork, 4 => TransactionType::SetWeightsVersionKey, + 5 => TransactionType::SetSNOwnerHotkey, _ => TransactionType::Unknown, } } @@ -56,6 +59,8 @@ impl<T: Config> Pallet<T> { match tx_type { TransactionType::SetWeightsVersionKey => (Tempo::<T>::get(netuid) as u64) .saturating_mul(WeightsVersionKeyRateLimit::<T>::get()), + TransactionType::SetSNOwnerHotkey => DefaultSetSNOwnerHotkeyRateLimit::<T>::get(), + _ => Self::get_rate_limit(tx_type), } } @@ -102,6 +107,9 @@ impl<T: Config> Pallet<T> { ) -> u64 { match tx_type { TransactionType::RegisterNetwork => Self::get_network_last_lock_block(), + TransactionType::SetSNOwnerHotkey => { + Self::get_rate_limited_last_block(&RateLimitKey::SetSNOwnerHotkey(netuid)) + } _ => { let tx_as_u16: u16 = (*tx_type).into(); TransactionKeyLastBlock::<T>::get((hotkey, netuid, tx_as_u16)) @@ -126,6 +134,9 @@ impl<T: Config> Pallet<T> { ) { match tx_type { TransactionType::RegisterNetwork => Self::set_network_last_lock_block(block), + TransactionType::SetSNOwnerHotkey => { + Self::set_rate_limited_last_block(&RateLimitKey::SetSNOwnerHotkey(netuid), block) + } _ => { let tx_as_u16: u16 = (*tx_type).into(); TransactionKeyLastBlock::<T>::insert((key, netuid, tx_as_u16), block); diff --git a/precompiles/src/extensions.rs b/precompiles/src/extensions.rs index 2d3d65a41c..1c90922c57 100644 --- a/precompiles/src/extensions.rs +++ b/precompiles/src/extensions.rs @@ -6,8 +6,8 @@ use frame_support::dispatch::{GetDispatchInfo, Pays, PostDispatchInfo}; use frame_system::RawOrigin; use pallet_admin_utils::{PrecompileEnable, PrecompileEnum}; use pallet_evm::{ - AddressMapping, BalanceConverter, ExitError, GasWeightMapping, Precompile, PrecompileFailure, - PrecompileHandle, PrecompileResult, + AddressMapping, BalanceConverter, EvmBalance, ExitError, GasWeightMapping, Precompile, + PrecompileFailure, PrecompileHandle, PrecompileResult, }; use precompile_utils::EvmResult; use sp_core::{H160, U256, blake2_256}; @@ -27,14 +27,14 @@ pub(crate) trait PrecompileHandleExt: PrecompileHandle { where R: pallet_evm::Config, { - let amount = self.context().apparent_value; - <R as pallet_evm::Config>::BalanceConverter::into_substrate_balance(amount).ok_or( - PrecompileFailure::Error { + let amount = EvmBalance::new(self.context().apparent_value); + let result = <R as pallet_evm::Config>::BalanceConverter::into_substrate_balance(amount) + .ok_or(PrecompileFailure::Error { exit_status: ExitError::Other( "error converting balance from ETH to subtensor".into(), ), - }, - ) + })?; + Ok(result.into()) } /// Dispatches a runtime call, but also checks and records the gas costs. 
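
The `associate_evm_key` change above stops hashing a raw 64-byte buffer directly and instead verifies an EIP-191 ("personal_sign") digest, normalizing the recovery byte so signatures with v = 27/28 from ordinary Ethereum tooling are accepted. The sketch below shows how an off-chain signer could produce a matching signature; the helper names, the use of the raw 32-byte account id for `hotkey.encode()`, and the little-endian bytes standing in for the SCALE-encoded block number are illustrative assumptions, not part of this patch.

// Sketch only: signs the (hotkey ++ keccak(block_number)) message the pallet expects,
// using the same "\x19Ethereum Signed Message:\n" prefix as hash_message_eip191 above.
use libsecp256k1::{Message, SecretKey, sign};
use sp_core::hashing::keccak_256;

fn eip191_hash(message: &[u8]) -> [u8; 32] {
    let prefix = format!("\x19Ethereum Signed Message:\n{}", message.len());
    keccak_256(&[prefix.as_bytes(), message].concat())
}

fn sign_association(hotkey: [u8; 32], block_number: u64, evm_secret: &SecretKey) -> [u8; 65] {
    // Assumption: SCALE encodes a u64 as its little-endian bytes and an AccountId32 as its raw 32 bytes.
    let block_hash = keccak_256(&block_number.to_le_bytes());
    let message = [hotkey.as_slice(), block_hash.as_slice()].concat();
    let (sig, recovery_id) = sign(&Message::parse(&eip191_hash(&message)), evm_secret);
    let mut out = [0u8; 65];
    out[..64].copy_from_slice(&sig.serialize());
    out[64] = recovery_id.serialize(); // wallets often add 27 here; the pallet now strips it
    out
}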
diff --git a/precompiles/src/lib.rs b/precompiles/src/lib.rs index ed0c2222a2..4c6824e07b 100644 --- a/precompiles/src/lib.rs +++ b/precompiles/src/lib.rs @@ -26,6 +26,7 @@ use crate::metagraph::*; use crate::neuron::*; use crate::staking::*; use crate::subnet::*; +use crate::uid_lookup::*; mod balance_transfer; mod ed25519; @@ -34,6 +35,7 @@ mod metagraph; mod neuron; mod staking; mod subnet; +mod uid_lookup; pub struct Precompiles(PhantomData); @@ -84,7 +86,7 @@ where Self(Default::default()) } - pub fn used_addresses() -> [H160; 14] { + pub fn used_addresses() -> [H160; 15] { [ hash(1), hash(2), @@ -100,6 +102,7 @@ where hash(MetagraphPrecompile::::INDEX), hash(NeuronPrecompile::::INDEX), hash(StakingPrecompileV2::::INDEX), + hash(UidLookupPrecompile::::INDEX), ] } } @@ -158,6 +161,9 @@ where a if a == hash(NeuronPrecompile::::INDEX) => { NeuronPrecompile::::try_execute::(handle, PrecompileEnum::Neuron) } + a if a == hash(UidLookupPrecompile::::INDEX) => { + UidLookupPrecompile::::try_execute::(handle, PrecompileEnum::UidLookup) + } _ => None, } } diff --git a/precompiles/src/solidity/stakingV2.abi b/precompiles/src/solidity/stakingV2.abi index 16adb1d8a8..20cc9c90fe 100644 --- a/precompiles/src/solidity/stakingV2.abi +++ b/precompiles/src/solidity/stakingV2.abi @@ -251,5 +251,71 @@ "outputs": [], "stateMutability": "nonpayable", "type": "function" - } + }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "hotkey", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "limit_price", + "type": "uint256" + }, + { + "internalType": "bool", + "name": "allow_partial", + "type": "bool" + }, + { + "internalType": "uint256", + "name": "netuid", + "type": "uint256" + } + ], + "name": "addStakeLimit", + "outputs": [], + "stateMutability": "payable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "hotkey", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "limit_price", + "type": "uint256" + }, + { + "internalType": "bool", + "name": "allow_partial", + "type": "bool" + }, + { + "internalType": "uint256", + "name": "netuid", + "type": "uint256" + } + ], + "name": "removeStakeLimit", + "outputs": [], + "stateMutability": "payable", + "type": "function" + }, ] diff --git a/precompiles/src/solidity/stakingV2.sol b/precompiles/src/solidity/stakingV2.sol index dd033cfca8..202615af62 100644 --- a/precompiles/src/solidity/stakingV2.sol +++ b/precompiles/src/solidity/stakingV2.sol @@ -51,12 +51,12 @@ interface IStaking { ) external; /** - * @dev Moves a subtensor stake `amount` associated with the `hotkey` to a different hotkey + * @dev Moves a subtensor stake `amount` associated with the `hotkey` to a different hotkey * `destination_hotkey`. 
* * This function allows external accounts and contracts to move staked TAO from one hotkey to another, - * which effectively calls `move_stake` on the subtensor pallet with specified origin and destination - * hotkeys as parameters being the hashed address mappings of H160 sender address to Substrate ss58 + * which effectively calls `move_stake` on the subtensor pallet with specified origin and destination + * hotkeys as parameters being the hashed address mappings of H160 sender address to Substrate ss58 * address as implemented in Frontier HashedAddressMapping: * https://github.com/polkadot-evm/frontier/blob/2e219e17a526125da003e64ef22ec037917083fa/frame/evm/src/lib.rs#L739 * @@ -67,7 +67,7 @@ interface IStaking { * @param amount The amount to move in rao. * * Requirements: - * - `origin_hotkey` and `destination_hotkey` must be valid hotkeys registered on the network, ensuring + * - `origin_hotkey` and `destination_hotkey` must be valid hotkeys registered on the network, ensuring * that the stake is correctly attributed. */ function moveStake( @@ -79,12 +79,12 @@ interface IStaking { ) external; /** - * @dev Transfer a subtensor stake `amount` associated with the transaction signer to a different coldkey + * @dev Transfer a subtensor stake `amount` associated with the transaction signer to a different coldkey * `destination_coldkey`. * * This function allows external accounts and contracts to transfer staked TAO to another coldkey, - * which effectively calls `transfer_stake` on the subtensor pallet with specified destination - * coldkey as a parameter being the hashed address mapping of H160 sender address to Substrate ss58 + * which effectively calls `transfer_stake` on the subtensor pallet with specified destination + * coldkey as a parameter being the hashed address mapping of H160 sender address to Substrate ss58 * address as implemented in Frontier HashedAddressMapping: * https://github.com/polkadot-evm/frontier/blob/2e219e17a526125da003e64ef22ec037917083fa/frame/evm/src/lib.rs#L739 * @@ -95,7 +95,7 @@ interface IStaking { * @param amount The amount to move in rao. * * Requirements: - * - `origin_hotkey` and `destination_hotkey` must be valid hotkeys registered on the network, ensuring + * - `origin_hotkey` and `destination_hotkey` must be valid hotkeys registered on the network, ensuring * that the stake is correctly attributed. */ function transferStake( @@ -194,4 +194,59 @@ interface IStaking { bytes32 hotkey, uint256 netuid ) external view returns (uint256); + + /** + * @dev Adds a subtensor stake `amount` associated with the `hotkey` within a price limit. + * + * This function allows external accounts and contracts to stake TAO into the subtensor pallet, + * which effectively calls `add_stake_limit` on the subtensor pallet with specified hotkey as a parameter + * and coldkey being the hashed address mapping of H160 sender address to Substrate ss58 address as + * implemented in Frontier HashedAddressMapping: + * https://github.com/polkadot-evm/frontier/blob/2e219e17a526125da003e64ef22ec037917083fa/frame/evm/src/lib.rs#L739 + * + * @param hotkey The hotkey public key (32 bytes). + * @param amount The amount to stake in rao. + * @param limit_price The price limit to stake at in rao. Number of rao per alpha. + * @param allow_partial Whether to allow partial stake. + * @param netuid The subnet to stake to (uint256). + * + * Requirements: + * - `hotkey` must be a valid hotkey registered on the network, ensuring that the stake is + * correctly attributed. 
+ */ + function addStakeLimit( + bytes32 hotkey, + uint256 amount, + uint256 limit_price, + bool allow_partial, + uint256 netuid + ) external payable; + + /** + * @dev Removes a subtensor stake `amount` from the specified `hotkey` within a price limit. + * + * This function allows external accounts and contracts to unstake TAO from the subtensor pallet, + * which effectively calls `remove_stake_limit` on the subtensor pallet with specified hotkey as a parameter + * and coldkey being the hashed address mapping of H160 sender address to Substrate ss58 address as + * implemented in Frontier HashedAddressMapping: + * https://github.com/polkadot-evm/frontier/blob/2e219e17a526125da003e64ef22ec037917083fa/frame/evm/src/lib.rs#L739 + * + * @param hotkey The hotkey public key (32 bytes). + * @param amount The amount to unstake in alpha. + * @param limit_price The price limit to unstake at in rao. Number of rao per alpha. + * @param allow_partial Whether to allow partial unstake. + * @param netuid The subnet to stake to (uint256). + * + * Requirements: + * - `hotkey` must be a valid hotkey registered on the network, ensuring that the stake is + * correctly attributed. + * - The existing stake amount must be not lower than specified amount + */ + function removeStakeLimit( + bytes32 hotkey, + uint256 amount, + uint256 limit_price, + bool allow_partial, + uint256 netuid + ) external; } diff --git a/precompiles/src/solidity/subnet.abi b/precompiles/src/solidity/subnet.abi index e2a3e569da..a2849a0cbe 100644 --- a/precompiles/src/solidity/subnet.abi +++ b/precompiles/src/solidity/subnet.abi @@ -194,6 +194,25 @@ "stateMutability": "view", "type": "function" }, + { + "inputs": [ + { + "internalType": "uint16", + "name": "netuid", + "type": "uint16" + } + ], + "name": "getYuma3Enabled", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, { "inputs": [ { @@ -668,6 +687,24 @@ "stateMutability": "payable", "type": "function" }, + { + "inputs": [ + { + "internalType": "uint16", + "name": "netuid", + "type": "uint16" + }, + { + "internalType": "bool", + "name": "yuma3Enabled", + "type": "bool" + } + ], + "name": "setYuma3Enabled", + "outputs": [], + "stateMutability": "payable", + "type": "function" + }, { "inputs": [ { diff --git a/precompiles/src/solidity/subnet.sol b/precompiles/src/solidity/subnet.sol index d5ef0916d9..2fa9d3f550 100644 --- a/precompiles/src/solidity/subnet.sol +++ b/precompiles/src/solidity/subnet.sol @@ -152,6 +152,13 @@ interface ISubnet { bool liquidAlphaEnabled ) external payable; + function getYuma3Enabled(uint16 netuid) external view returns (bool); + + function setYuma3Enabled( + uint16 netuid, + bool yuma3Enabled + ) external payable; + function getAlphaValues( uint16 netuid ) external view returns (uint16, uint16); diff --git a/precompiles/src/solidity/uidLookup.abi b/precompiles/src/solidity/uidLookup.abi new file mode 100644 index 0000000000..558358dcaa --- /dev/null +++ b/precompiles/src/solidity/uidLookup.abi @@ -0,0 +1,43 @@ +[ + { + "inputs": [ + { + "internalType": "uint16", + "name": "netuid", + "type": "uint16" + }, + { + "internalType": "address", + "name": "evm_address", + "type": "address" + }, + { + "internalType": "uint16", + "name": "limit", + "type": "uint16" + } + ], + "name":"uidLookup", + "outputs": [ + { + "components": [ + { + "internalType": "uint16", + "name": "uid", + "type": "uint16" + }, + { + "internalType": "uint64", + "name": "block_associated", + "type": "uint64" + 
} + ], + "internalType": "struct LookupItem[]", + "name": "", + "type": "tuple[]" + } + ], + "stateMutability": "view", + "type": "function" + } +] \ No newline at end of file diff --git a/precompiles/src/solidity/uidLookup.sol b/precompiles/src/solidity/uidLookup.sol new file mode 100644 index 0000000000..4eae98899c --- /dev/null +++ b/precompiles/src/solidity/uidLookup.sol @@ -0,0 +1,16 @@ +pragma solidity ^0.8.0; + +address constant IUID_LOOKUP_ADDRESS = 0x0000000000000000000000000000000000000806; + +struct LookupItem { + uint16 uid; + uint64 block_associated; +} + +interface IUidLookup { + function uidLookup( + uint16 netuid, + address evm_address, + uint16 limit + ) external view returns (LookupItem[] memory); +} diff --git a/precompiles/src/staking.rs b/precompiles/src/staking.rs index 8f797a7476..21f50bc917 100644 --- a/precompiles/src/staking.rs +++ b/precompiles/src/staking.rs @@ -30,7 +30,8 @@ use core::marker::PhantomData; use frame_support::dispatch::{GetDispatchInfo, PostDispatchInfo}; use frame_system::RawOrigin; use pallet_evm::{ - AddressMapping, BalanceConverter, ExitError, PrecompileFailure, PrecompileHandle, + AddressMapping, BalanceConverter, EvmBalance, ExitError, PrecompileFailure, PrecompileHandle, + SubstrateBalance, }; use precompile_utils::EvmResult; use sp_core::{H256, U256}; @@ -276,6 +277,56 @@ where handle.try_dispatch_runtime_call::(call, RawOrigin::Signed(account_id)) } + + #[precompile::public("addStakeLimit(bytes32,uint256,uint256,bool,uint256)")] + fn add_stake_limit( + handle: &mut impl PrecompileHandle, + address: H256, + amount_rao: U256, + limit_price_rao: U256, + allow_partial: bool, + netuid: U256, + ) -> EvmResult<()> { + let account_id = handle.caller_account_id::(); + let amount_staked = amount_rao.unique_saturated_into(); + let limit_price = limit_price_rao.unique_saturated_into(); + let hotkey = R::AccountId::from(address.0); + let netuid = try_u16_from_u256(netuid)?; + let call = pallet_subtensor::Call::::add_stake_limit { + hotkey, + netuid, + amount_staked, + limit_price, + allow_partial, + }; + + handle.try_dispatch_runtime_call::(call, RawOrigin::Signed(account_id)) + } + + #[precompile::public("removeStakeLimit(bytes32,uint256,uint256,bool,uint256)")] + fn remove_stake_limit( + handle: &mut impl PrecompileHandle, + address: H256, + amount_alpha: U256, + limit_price_rao: U256, + allow_partial: bool, + netuid: U256, + ) -> EvmResult<()> { + let account_id = handle.caller_account_id::(); + let hotkey = R::AccountId::from(address.0); + let netuid = try_u16_from_u256(netuid)?; + let amount_unstaked = amount_alpha.unique_saturated_into(); + let limit_price = limit_price_rao.unique_saturated_into(); + let call = pallet_subtensor::Call::::remove_stake_limit { + hotkey, + netuid, + amount_unstaked, + limit_price, + allow_partial, + }; + + handle.try_dispatch_runtime_call::(call, RawOrigin::Signed(account_id)) + } } // Deprecated, exists for backward compatibility. 
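
The staking precompile hunks that follow move amounts between the EVM's 18-decimal U256 representation and on-chain rao (9 decimals) through `into_substrate_balance`/`into_evm_balance`, which is a plain 10^9 scaling. A minimal self-contained sketch of that scaling, using the `EVM_TO_SUBSTRATE_DECIMALS` factor the runtime defines; the `EvmBalance`/`SubstrateBalance` wrapper types introduced by this patch are omitted here for brevity.

use sp_core::U256;

const EVM_TO_SUBSTRATE_DECIMALS: u64 = 1_000_000_000; // 10^9: 18 EVM decimals vs 9 rao decimals

// EVM -> rao: divide, truncating sub-rao precision; reject anything above u64::MAX rao.
fn into_substrate_balance(evm: U256) -> Option<u64> {
    let rao = evm.checked_div(U256::from(EVM_TO_SUBSTRATE_DECIMALS))?;
    if rao <= U256::from(u64::MAX) { Some(rao.as_u64()) } else { None }
}

// rao -> EVM: scale back up; cannot overflow U256 for any u64 input.
fn into_evm_balance(rao: u64) -> U256 {
    U256::from(rao) * U256::from(EVM_TO_SUBSTRATE_DECIMALS)
}

fn main() {
    // 1 TAO is 10^9 rao on-chain and 10^18 on the EVM side; extra EVM-side precision is truncated.
    assert_eq!(into_substrate_balance(U256::from(1_000_000_000_123_456_789u128)), Some(1_000_000_000));
    assert_eq!(into_evm_balance(1_000_000_000), U256::from(1_000_000_000_000_000_000u128));
}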
@@ -351,10 +402,11 @@ where let account_id = handle.caller_account_id::(); let hotkey = R::AccountId::from(address.0); let netuid = try_u16_from_u256(netuid)?; + let amount = EvmBalance::new(amount); let amount_unstaked = ::BalanceConverter::into_substrate_balance(amount) + .map(|amount| amount.into_u64_saturating()) .ok_or(ExitError::OutOfFund)?; - let amount_unstaked = amount_unstaked.unique_saturated_into(); let call = pallet_subtensor::Call::::remove_stake { hotkey, netuid, @@ -375,8 +427,9 @@ where // get total stake of coldkey let total_stake = pallet_subtensor::Pallet::::get_total_stake_for_coldkey(&coldkey); // Convert to EVM decimals - let stake_u256 = U256::from(total_stake); + let stake_u256: SubstrateBalance = total_stake.into(); let stake_eth = ::BalanceConverter::into_evm_balance(stake_u256) + .map(|amount| amount.into_u256()) .ok_or(ExitError::InvalidRange)?; Ok(stake_eth) @@ -393,8 +446,9 @@ where // get total stake of hotkey let total_stake = pallet_subtensor::Pallet::::get_total_stake_for_hotkey(&hotkey); // Convert to EVM decimals - let stake_u256 = U256::from(total_stake); + let stake_u256: SubstrateBalance = total_stake.into(); let stake_eth = ::BalanceConverter::into_evm_balance(stake_u256) + .map(|amount| amount.into_u256()) .ok_or(ExitError::InvalidRange)?; Ok(stake_eth) @@ -414,8 +468,9 @@ where let stake = pallet_subtensor::Pallet::::get_stake_for_hotkey_and_coldkey_on_subnet( &hotkey, &coldkey, netuid, ); - let stake = U256::from(stake); + let stake: SubstrateBalance = stake.into(); let stake = ::BalanceConverter::into_evm_balance(stake) + .map(|amount| amount.into_u256()) .ok_or(ExitError::InvalidRange)?; Ok(stake) @@ -453,15 +508,20 @@ where account_id: &::AccountId, amount: U256, ) -> Result<(), PrecompileFailure> { + let amount = EvmBalance::new(amount); let amount_sub = ::BalanceConverter::into_substrate_balance(amount) .ok_or(ExitError::OutOfFund)?; // Create a transfer call from the smart contract to the caller + let value = amount_sub + .into_u64_saturating() + .try_into() + .map_err(|_| ExitError::Other("Failed to convert u64 to Balance".into()))?; let transfer_call = ::RuntimeCall::from( pallet_balances::Call::::transfer_allow_death { dest: account_id.clone().into(), - value: amount_sub.unique_saturated_into(), + value, }, ); diff --git a/precompiles/src/subnet.rs b/precompiles/src/subnet.rs index e9bfc0c5f9..7d4dd175e3 100644 --- a/precompiles/src/subnet.rs +++ b/precompiles/src/subnet.rs @@ -327,6 +327,12 @@ where Ok(pallet_subtensor::Rho::::get(netuid)) } + #[precompile::public("getAlphaSigmoidSteepness(uint16)")] + #[precompile::view] + fn get_alpha_sigmoid_steepness(_: &mut impl PrecompileHandle, netuid: u16) -> EvmResult { + Ok(pallet_subtensor::AlphaSigmoidSteepness::::get(netuid)) + } + #[precompile::public("setRho(uint16,uint16)")] #[precompile::payable] fn set_rho(handle: &mut impl PrecompileHandle, netuid: u16, rho: u16) -> EvmResult<()> { @@ -338,6 +344,22 @@ where ) } + #[precompile::public("setAlphaSigmoidSteepness(uint16,uint16)")] + #[precompile::payable] + fn set_alpha_sigmoid_steepness( + handle: &mut impl PrecompileHandle, + netuid: u16, + steepness: u16, + ) -> EvmResult<()> { + let call = + pallet_admin_utils::Call::::sudo_set_alpha_sigmoid_steepness { netuid, steepness }; + + handle.try_dispatch_runtime_call::( + call, + RawOrigin::Signed(handle.caller_account_id::()), + ) + } + #[precompile::public("getActivityCutoff(uint16)")] #[precompile::view] fn get_activity_cutoff(_: &mut impl PrecompileHandle, netuid: u16) -> EvmResult { @@ 
-446,16 +468,12 @@ where #[precompile::public("setMaxBurn(uint16,uint64)")] #[precompile::payable] fn set_max_burn( - handle: &mut impl PrecompileHandle, - netuid: u16, - max_burn: u64, + _handle: &mut impl PrecompileHandle, + _netuid: u16, + _max_burn: u64, ) -> EvmResult<()> { - let call = pallet_admin_utils::Call::::sudo_set_max_burn { netuid, max_burn }; - - handle.try_dispatch_runtime_call::( - call, - RawOrigin::Signed(handle.caller_account_id::()), - ) + // DEPRECATED. The subnet owner cannot set the max burn anymore. + Ok(()) } #[precompile::public("getDifficulty(uint16)")] @@ -553,6 +571,27 @@ where ) } + #[precompile::public("getYuma3Enabled(uint16)")] + #[precompile::view] + fn get_yuma3_enabled(_: &mut impl PrecompileHandle, netuid: u16) -> EvmResult { + Ok(pallet_subtensor::Yuma3On::::get(netuid)) + } + + #[precompile::public("setYuma3Enabled(uint16,bool)")] + #[precompile::payable] + fn set_yuma3_enabled( + handle: &mut impl PrecompileHandle, + netuid: u16, + enabled: bool, + ) -> EvmResult<()> { + let call = pallet_admin_utils::Call::::sudo_set_yuma3_enabled { netuid, enabled }; + + handle.try_dispatch_runtime_call::( + call, + RawOrigin::Signed(handle.caller_account_id::()), + ) + } + #[precompile::public("getAlphaValues(uint16)")] #[precompile::view] fn get_alpha_values(_: &mut impl PrecompileHandle, netuid: u16) -> EvmResult<(u16, u16)> { diff --git a/precompiles/src/uid_lookup.rs b/precompiles/src/uid_lookup.rs new file mode 100644 index 0000000000..61fb9d6d7f --- /dev/null +++ b/precompiles/src/uid_lookup.rs @@ -0,0 +1,53 @@ +use core::marker::PhantomData; + +use frame_support::dispatch::{GetDispatchInfo, PostDispatchInfo}; +use pallet_evm::PrecompileHandle; +use precompile_utils::{EvmResult, prelude::Address}; +use sp_runtime::traits::{Dispatchable, StaticLookup}; +use sp_std::vec::Vec; + +use crate::PrecompileExt; + +pub(crate) struct UidLookupPrecompile(PhantomData); + +impl PrecompileExt for UidLookupPrecompile +where + R: frame_system::Config + pallet_subtensor::Config + pallet_evm::Config, + R::AccountId: From<[u8; 32]>, + ::RuntimeCall: + GetDispatchInfo + Dispatchable, + ::RuntimeCall: From> + + GetDispatchInfo + + Dispatchable, + <::Lookup as StaticLookup>::Source: From, +{ + const INDEX: u64 = 2054; +} + +#[precompile_utils::precompile] +impl UidLookupPrecompile +where + R: frame_system::Config + pallet_subtensor::Config + pallet_evm::Config, + R::AccountId: From<[u8; 32]>, + ::RuntimeCall: + GetDispatchInfo + Dispatchable, + ::RuntimeCall: From> + + GetDispatchInfo + + Dispatchable, + <::Lookup as StaticLookup>::Source: From, +{ + #[precompile::public("uidLookup(uint16,address,uint16)")] + #[precompile::view] + fn uid_lookup( + _handle: &mut impl PrecompileHandle, + netuid: u16, + evm_address: Address, + limit: u16, + ) -> EvmResult> { + Ok(pallet_subtensor::Pallet::::uid_lookup( + netuid, + evm_address.0, + limit, + )) + } +} diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml index 67add266a4..f417a18afc 100644 --- a/runtime/Cargo.toml +++ b/runtime/Cargo.toml @@ -122,6 +122,9 @@ w3f-bls = { workspace = true } sha2 = { workspace = true } ark-serialize = { workspace = true } +# Crowdloan +pallet-crowdloan = { workspace = true } + [dev-dependencies] frame-metadata = { workspace = true } sp-io = { workspace = true } @@ -191,6 +194,7 @@ std = [ "sp-genesis-builder/std", "subtensor-precompiles/std", "subtensor-runtime-common/std", + "pallet-crowdloan/std", # Frontier "fp-evm/std", "fp-rpc/std", @@ -236,6 +240,7 @@ runtime-benchmarks = [ 
"pallet-preimage/runtime-benchmarks", "pallet-scheduler/runtime-benchmarks", "pallet-sudo/runtime-benchmarks", + "pallet-crowdloan/runtime-benchmarks", # EVM + Frontier "pallet-ethereum/runtime-benchmarks", @@ -269,6 +274,7 @@ try-runtime = [ "pallet-admin-utils/try-runtime", "pallet-commitments/try-runtime", "pallet-registry/try-runtime", + "pallet-crowdloan/try-runtime", # EVM + Frontier "fp-self-contained/try-runtime", diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index cbe8a84a68..0b64ee3cde 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -14,6 +14,7 @@ mod migrations; use codec::{Compact, Decode, Encode}; use frame_support::traits::{Imbalance, InsideBoth}; use frame_support::{ + PalletId, dispatch::DispatchResultWithPostInfo, genesis_builder_helper::{build_state, get_preset}, pallet_prelude::Get, @@ -25,7 +26,7 @@ use frame_support::{ }, }; use frame_system::{EnsureNever, EnsureRoot, EnsureRootWithSuccess, RawOrigin}; -use pallet_commitments::CanCommit; +use pallet_commitments::{CanCommit, OnMetadataCommitment}; use pallet_grandpa::{ AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList, fg_primitives, }; @@ -94,12 +95,13 @@ use scale_info::TypeInfo; // Frontier use fp_rpc::TransactionStatus; use pallet_ethereum::{Call::transact, PostLogContent, Transaction as EthereumTransaction}; -use pallet_evm::{Account as EVMAccount, BalanceConverter, FeeCalculator, Runner}; +use pallet_evm::{ + Account as EVMAccount, BalanceConverter, EvmBalance, FeeCalculator, Runner, SubstrateBalance, +}; // Drand impl pallet_drand::Config for Runtime { type RuntimeEvent = RuntimeEvent; - type WeightInfo = pallet_drand::weights::SubstrateWeight; type AuthorityId = pallet_drand::crypto::TestAuthId; type Verifier = pallet_drand::verifier::QuicknetVerifier; type UnsignedPriority = ConstU64<{ 1 << 20 }>; @@ -207,7 +209,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // `spec_version`, and `authoring_version` are the same between Wasm and native. // This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use // the compatible custom types. - spec_version: 265, + spec_version: 273, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, @@ -695,6 +697,18 @@ impl InstanceFilter for ProxyType { | RuntimeCall::SubtensorModule( pallet_subtensor::Call::remove_stake_limit { .. } ) + // | RuntimeCall::SubtensorModule( + // pallet_subtensor::Call::add_stake_aggregate { .. } + // ) + // | RuntimeCall::SubtensorModule( + // pallet_subtensor::Call::add_stake_limit_aggregate { .. } + // ) + // | RuntimeCall::SubtensorModule( + // pallet_subtensor::Call::remove_stake_aggregate { .. } + // ) + // | RuntimeCall::SubtensorModule( + // pallet_subtensor::Call::remove_stake_limit_aggregate { .. } + // ) | RuntimeCall::SubtensorModule(pallet_subtensor::Call::unstake_all { .. }) | RuntimeCall::SubtensorModule( pallet_subtensor::Call::unstake_all_alpha { .. } @@ -732,7 +746,15 @@ impl InstanceFilter for ProxyType { }) => *alpha_amount < SMALL_TRANSFER_LIMIT, _ => false, }, - ProxyType::Owner => matches!(c, RuntimeCall::AdminUtils(..)), + ProxyType::Owner => { + matches!(c, RuntimeCall::AdminUtils(..)) + && !matches!( + c, + RuntimeCall::AdminUtils( + pallet_admin_utils::Call::sudo_set_sn_owner_hotkey { .. } + ) + ) + } ProxyType::NonCritical => !matches!( c, RuntimeCall::SubtensorModule(pallet_subtensor::Call::dissolve_network { .. 
}) @@ -767,7 +789,18 @@ impl InstanceFilter for ProxyType { | RuntimeCall::SubtensorModule(pallet_subtensor::Call::add_stake_limit { .. }) | RuntimeCall::SubtensorModule( pallet_subtensor::Call::remove_stake_limit { .. } - ) + ) // | RuntimeCall::SubtensorModule( + // pallet_subtensor::Call::add_stake_aggregate { .. } + // ) + // | RuntimeCall::SubtensorModule( + // pallet_subtensor::Call::add_stake_limit_aggregate { .. } + // ) + // | RuntimeCall::SubtensorModule( + // pallet_subtensor::Call::remove_stake_aggregate { .. } + // ) + // | RuntimeCall::SubtensorModule( + // pallet_subtensor::Call::remove_stake_limit_aggregate { .. } + // ) ), ProxyType::Registration => matches!( c, @@ -942,10 +975,9 @@ impl pallet_registry::Config for Runtime { } parameter_types! { - pub const MaxCommitFieldsInner: u32 = 1; + pub const MaxCommitFieldsInner: u32 = 3; pub const CommitmentInitialDeposit: Balance = 0; // Free pub const CommitmentFieldDeposit: Balance = 0; // Free - pub const CommitmentRateLimit: BlockNumber = 100; // Allow commitment every 100 blocks } #[subtensor_macros::freeze_struct("7c76bd954afbb54e")] @@ -971,17 +1003,28 @@ impl CanCommit for AllowCommitments { } } +pub struct ResetBondsOnCommit; +impl OnMetadataCommitment for ResetBondsOnCommit { + #[cfg(not(feature = "runtime-benchmarks"))] + fn on_metadata_commitment(netuid: u16, address: &AccountId) { + let _ = SubtensorModule::do_reset_bonds(netuid, address); + } + + #[cfg(feature = "runtime-benchmarks")] + fn on_metadata_commitment(_: u16, _: &AccountId) {} +} + impl pallet_commitments::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Currency = Balances; type WeightInfo = pallet_commitments::weights::SubstrateWeight; type CanCommit = AllowCommitments; + type OnMetadataCommitment = ResetBondsOnCommit; type MaxFields = MaxCommitFields; type InitialDeposit = CommitmentInitialDeposit; type FieldDeposit = CommitmentFieldDeposit; - type DefaultRateLimit = CommitmentRateLimit; type TempoInterface = TempoInterface; } @@ -1013,6 +1056,7 @@ pub const INITIAL_CHILDKEY_TAKE_RATELIMIT: u64 = 5; // Configure the pallet subtensor. parameter_types! { pub const SubtensorInitialRho: u16 = 10; + pub const SubtensorInitialAlphaSigmoidSteepness: u16 = 1000; pub const SubtensorInitialKappa: u16 = 32_767; // 0.5 = 65535/2 pub const SubtensorInitialMaxAllowedUids: u16 = 4096; pub const SubtensorInitialIssuance: u64 = 0; @@ -1033,6 +1077,7 @@ parameter_types! { pub const SubtensorInitialPruningScore : u16 = u16::MAX; pub const SubtensorInitialBondsMovingAverage: u64 = 900_000; pub const SubtensorInitialBondsPenalty: u16 = u16::MAX; + pub const SubtensorInitialBondsResetOn: bool = false; pub const SubtensorInitialDefaultTake: u16 = 11_796; // 18% honest number. pub const SubtensorInitialMinDelegateTake: u16 = 0; // Allow 0% delegate take pub const SubtensorInitialDefaultChildKeyTake: u16 = 0; // Allow 0% childkey take @@ -1061,8 +1106,10 @@ parameter_types! 
{ pub const InitialAlphaHigh: u16 = 58982; // Represents 0.9 as per the production default pub const InitialAlphaLow: u16 = 45875; // Represents 0.7 as per the production default pub const InitialLiquidAlphaOn: bool = false; // Default value for LiquidAlphaOn + pub const InitialYuma3On: bool = false; // Default value for Yuma3On // pub const SubtensorInitialNetworkMaxStake: u64 = u64::MAX; // (DEPRECATED) pub const InitialColdkeySwapScheduleDuration: BlockNumber = 5 * 24 * 60 * 60 / 12; // 5 days + pub const InitialColdkeySwapRescheduleDuration: BlockNumber = 24 * 60 * 60 / 12; // 1 day pub const InitialDissolveNetworkScheduleDuration: BlockNumber = 5 * 24 * 60 * 60 / 12; // 5 days pub const SubtensorInitialTaoWeight: u64 = 971_718_665_099_567_868; // 0.05267697438728329% tao weight. pub const InitialEmaPriceHalvingPeriod: u64 = 201_600_u64; // 4 weeks @@ -1083,10 +1130,12 @@ impl pallet_subtensor::Config for Runtime { type TriumvirateInterface = TriumvirateVotes; type Scheduler = Scheduler; type InitialRho = SubtensorInitialRho; + type InitialAlphaSigmoidSteepness = SubtensorInitialAlphaSigmoidSteepness; type InitialKappa = SubtensorInitialKappa; type InitialMaxAllowedUids = SubtensorInitialMaxAllowedUids; type InitialBondsMovingAverage = SubtensorInitialBondsMovingAverage; type InitialBondsPenalty = SubtensorInitialBondsPenalty; + type InitialBondsResetOn = SubtensorInitialBondsResetOn; type InitialIssuance = SubtensorInitialIssuance; type InitialMinAllowedWeights = SubtensorInitialMinAllowedWeights; type InitialEmissionValue = SubtensorInitialEmissionValue; @@ -1130,9 +1179,11 @@ impl pallet_subtensor::Config for Runtime { type AlphaHigh = InitialAlphaHigh; type AlphaLow = InitialAlphaLow; type LiquidAlphaOn = InitialLiquidAlphaOn; + type Yuma3On = InitialYuma3On; type InitialTaoWeight = SubtensorInitialTaoWeight; type Preimages = Preimage; type InitialColdkeySwapScheduleDuration = InitialColdkeySwapScheduleDuration; + type InitialColdkeySwapRescheduleDuration = InitialColdkeySwapRescheduleDuration; type InitialDissolveNetworkScheduleDuration = InitialDissolveNetworkScheduleDuration; type InitialEmaPriceHalvingPeriod = InitialEmaPriceHalvingPeriod; type DurationOfStartCall = DurationOfStartCall; @@ -1165,7 +1216,6 @@ impl pallet_admin_utils::Config for Runtime { type Aura = AuraPalletIntrf; type Grandpa = GrandpaInterfaceImpl; type Balance = Balance; - type WeightInfo = pallet_admin_utils::weights::SubstrateWeight; } /// Define the ChainId @@ -1228,11 +1278,12 @@ pub struct SubtensorEvmBalanceConverter; impl BalanceConverter for SubtensorEvmBalanceConverter { /// Convert from Substrate balance (u64) to EVM balance (U256) - fn into_evm_balance(value: U256) -> Option { + fn into_evm_balance(value: SubstrateBalance) -> Option { + let value = value.into_u256(); if let Some(evm_value) = value.checked_mul(U256::from(EVM_TO_SUBSTRATE_DECIMALS)) { // Ensure the result fits within the maximum U256 value if evm_value <= U256::MAX { - Some(evm_value) + Some(EvmBalance::new(evm_value)) } else { // Log value too large log::debug!( @@ -1252,11 +1303,12 @@ impl BalanceConverter for SubtensorEvmBalanceConverter { } /// Convert from EVM balance (U256) to Substrate balance (u64) - fn into_substrate_balance(value: U256) -> Option { + fn into_substrate_balance(value: EvmBalance) -> Option { + let value = value.into_u256(); if let Some(substrate_value) = value.checked_div(U256::from(EVM_TO_SUBSTRATE_DECIMALS)) { // Ensure the result fits within the TAO balance type (u64) if substrate_value <= 
U256::from(u64::MAX) { - Some(substrate_value) + Some(SubstrateBalance::new(substrate_value)) } else { // Log value too large log::debug!( @@ -1420,6 +1472,40 @@ impl fp_self_contained::SelfContainedCall for RuntimeCall { } } +// Crowdloan +parameter_types! { + pub const CrowdloanPalletId: PalletId = PalletId(*b"bt/cloan"); + pub const MinimumDeposit: Balance = 10_000_000_000; // 10 TAO + pub const AbsoluteMinimumContribution: Balance = 100_000_000; // 0.1 TAO + pub const MinimumBlockDuration: BlockNumber = if cfg!(feature = "fast-blocks") { + 50 + } else { + 50400 // 7 days minimum (7 * 24 * 60 * 60 / 12) + }; + pub const MaximumBlockDuration: BlockNumber = if cfg!(feature = "fast-blocks") { + 20000 + } else { + 432000 // 60 days maximum (60 * 24 * 60 * 60 / 12) + }; + pub const RefundContributorsLimit: u32 = 50; + pub const MaxContributors: u32 = 500; +} + +impl pallet_crowdloan::Config for Runtime { + type PalletId = CrowdloanPalletId; + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; + type Currency = Balances; + type WeightInfo = pallet_crowdloan::weights::SubstrateWeight; + type Preimages = Preimage; + type MinimumDeposit = MinimumDeposit; + type AbsoluteMinimumContribution = AbsoluteMinimumContribution; + type MinimumBlockDuration = MinimumBlockDuration; + type MaximumBlockDuration = MaximumBlockDuration; + type RefundContributorsLimit = RefundContributorsLimit; + type MaxContributors = MaxContributors; +} + // Create the runtime by composing the FRAME pallets that were previously configured. construct_runtime!( pub struct Runtime @@ -1454,6 +1540,8 @@ construct_runtime!( BaseFee: pallet_base_fee = 25, Drand: pallet_drand = 26, + + Crowdloan: pallet_crowdloan = 27, } ); @@ -1523,9 +1611,49 @@ mod benches { [pallet_admin_utils, AdminUtils] [pallet_subtensor, SubtensorModule] [pallet_drand, Drand] + [pallet_crowdloan, Crowdloan] ); } +fn generate_genesis_json() -> Vec { + let json_str = r#"{ + "aura": { + "authorities": [ + "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY" + ] + }, + "balances": { + "balances": [ + [ + "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", + 1000000000000000 + ], + [ + "5FHneW46xGXgs5mUiveU4sbTyGBzmstUspZC92UhjJM694ty", + 1000000000000000 + ] + ] + }, + "grandpa": { + "authorities": [ + [ + "5FA9nQDVg267DEd8m1ZypXLBnvN7SFxYwV7ndqSYGiN9TTpu", + 1 + ] + ] + }, + "sudo": { + "key": "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY" + }, + "subtensorModule": { + "balancesIssuance": 0, + "stakes": [] + } + }"#; + + json_str.as_bytes().to_vec() +} + impl_runtime_apis! { impl sp_api::Core for Runtime { fn version() -> RuntimeVersion { @@ -1582,11 +1710,18 @@ impl_runtime_apis! 
{ } fn get_preset(id: &Option) -> Option> { - get_preset::(id, |_| None) + get_preset::(id, |preset_id| { + let benchmark_id: sp_genesis_builder::PresetId = "benchmark".into(); + if *preset_id == benchmark_id { + Some(generate_genesis_json()) + } else { + None + } + }) } fn preset_names() -> Vec { - vec![] + vec!["benchmark".into()] } } @@ -2172,8 +2307,8 @@ fn check_whitelist() { #[test] fn test_into_substrate_balance_valid() { // Valid conversion within u64 range - let evm_balance = U256::from(1_000_000_000_000_000_000u128); // 1 TAO in EVM - let expected_substrate_balance = U256::from(1_000_000_000u128); // 1 TAO in Substrate + let evm_balance: EvmBalance = 1_000_000_000_000_000_000u128.into(); // 1 TAO in EVM + let expected_substrate_balance: SubstrateBalance = 1_000_000_000u128.into(); // 1 TAO in Substrate let result = SubtensorEvmBalanceConverter::into_substrate_balance(evm_balance); assert_eq!(result, Some(expected_substrate_balance)); @@ -2182,8 +2317,8 @@ fn test_into_substrate_balance_valid() { #[test] fn test_into_substrate_balance_large_value() { // Maximum valid balance for u64 - let evm_balance = U256::from(u64::MAX) * U256::from(EVM_TO_SUBSTRATE_DECIMALS); // Max u64 TAO in EVM - let expected_substrate_balance = U256::from(u64::MAX); + let evm_balance = EvmBalance::new(U256::from(u64::MAX) * U256::from(EVM_TO_SUBSTRATE_DECIMALS)); // Max u64 TAO in EVM + let expected_substrate_balance = SubstrateBalance::new(U256::from(u64::MAX)); let result = SubtensorEvmBalanceConverter::into_substrate_balance(evm_balance); assert_eq!(result, Some(expected_substrate_balance)); @@ -2192,8 +2327,9 @@ fn test_into_substrate_balance_large_value() { #[test] fn test_into_substrate_balance_exceeds_u64() { // EVM balance that exceeds u64 after conversion - let evm_balance = - (U256::from(u64::MAX) + U256::from(1)) * U256::from(EVM_TO_SUBSTRATE_DECIMALS); + let evm_balance = EvmBalance::new( + (U256::from(u64::MAX) + U256::from(1)) * U256::from(EVM_TO_SUBSTRATE_DECIMALS), + ); let result = SubtensorEvmBalanceConverter::into_substrate_balance(evm_balance); assert_eq!(result, None); // Exceeds u64, should return None @@ -2202,8 +2338,8 @@ fn test_into_substrate_balance_exceeds_u64() { #[test] fn test_into_substrate_balance_precision_loss() { // EVM balance with precision loss - let evm_balance = U256::from(1_000_000_000_123_456_789u128); // 1 TAO + extra precision in EVM - let expected_substrate_balance = U256::from(1_000_000_000u128); // Truncated to 1 TAO in Substrate + let evm_balance = EvmBalance::new(U256::from(1_000_000_000_123_456_789u128)); // 1 TAO + extra precision in EVM + let expected_substrate_balance = SubstrateBalance::new(U256::from(1_000_000_000u128)); // Truncated to 1 TAO in Substrate let result = SubtensorEvmBalanceConverter::into_substrate_balance(evm_balance); assert_eq!(result, Some(expected_substrate_balance)); @@ -2212,8 +2348,8 @@ fn test_into_substrate_balance_precision_loss() { #[test] fn test_into_substrate_balance_zero_value() { // Zero balance should convert to zero - let evm_balance = U256::from(0); - let expected_substrate_balance = U256::from(0); + let evm_balance = EvmBalance::new(U256::from(0)); + let expected_substrate_balance = SubstrateBalance::new(U256::from(0)); let result = SubtensorEvmBalanceConverter::into_substrate_balance(evm_balance); assert_eq!(result, Some(expected_substrate_balance)); @@ -2222,8 +2358,8 @@ fn test_into_substrate_balance_zero_value() { #[test] fn test_into_evm_balance_valid() { // Valid conversion from Substrate to EVM - let 
substrate_balance = U256::from(1_000_000_000u128); // 1 TAO in Substrate - let expected_evm_balance = U256::from(1_000_000_000_000_000_000u128); // 1 TAO in EVM + let substrate_balance: SubstrateBalance = 1_000_000_000u128.into(); // 1 TAO in Substrate + let expected_evm_balance = EvmBalance::new(U256::from(1_000_000_000_000_000_000u128)); // 1 TAO in EVM let result = SubtensorEvmBalanceConverter::into_evm_balance(substrate_balance); assert_eq!(result, Some(expected_evm_balance)); @@ -2232,8 +2368,9 @@ fn test_into_evm_balance_valid() { #[test] fn test_into_evm_balance_overflow() { // Substrate balance larger than u64::MAX but valid within U256 - let substrate_balance = U256::from(u64::MAX) + U256::from(1); // Large balance - let expected_evm_balance = substrate_balance * U256::from(EVM_TO_SUBSTRATE_DECIMALS); + let substrate_balance = SubstrateBalance::new(U256::from(u64::MAX) + U256::from(1)); // Large balance + let expected_evm_balance = + EvmBalance::new(substrate_balance.into_u256() * U256::from(EVM_TO_SUBSTRATE_DECIMALS)); let result = SubtensorEvmBalanceConverter::into_evm_balance(substrate_balance); assert_eq!(result, Some(expected_evm_balance)); // Should return the scaled value diff --git a/runtime/tests/pallet_proxy.rs b/runtime/tests/pallet_proxy.rs index 563c274bb9..1fcb36dec5 100644 --- a/runtime/tests/pallet_proxy.rs +++ b/runtime/tests/pallet_proxy.rs @@ -68,6 +68,15 @@ fn call_owner_util() -> RuntimeCall { }) } +// sn owner hotkey call +fn call_sn_owner_hotkey() -> RuntimeCall { + let netuid = 1; + RuntimeCall::AdminUtils(pallet_admin_utils::Call::sudo_set_sn_owner_hotkey { + netuid, + hotkey: AccountId::from(ACCOUNT).into(), + }) +} + // critical call for Subtensor fn call_propose() -> RuntimeCall { let proposal = call_remark(); @@ -230,3 +239,30 @@ fn test_non_transfer_cannot_transfer() { ); }); } + +#[test] +fn test_owner_type_cannot_set_sn_owner_hotkey() { + new_test_ext().execute_with(|| { + assert_ok!(Proxy::add_proxy( + RuntimeOrigin::signed(AccountId::from(ACCOUNT)), + AccountId::from(DELEGATE).into(), + ProxyType::Owner, + 0 + )); + + let call = call_sn_owner_hotkey(); + assert_ok!(Proxy::proxy( + RuntimeOrigin::signed(AccountId::from(DELEGATE)), + AccountId::from(ACCOUNT).into(), + None, + Box::new(call.clone()), + )); + + System::assert_last_event( + pallet_proxy::Event::ProxyExecuted { + result: Err(SystemError::CallFiltered.into()), + } + .into(), + ); + }); +} diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 49405bc648..c741364b2a 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,5 +1,5 @@ [toolchain] -channel = "1.85.0" +channel = "1.86" components = [ "cargo", "clippy", diff --git a/scripts/benchmark.sh b/scripts/benchmark.sh index 8f54fa54a3..4a1c99a62c 100755 --- a/scripts/benchmark.sh +++ b/scripts/benchmark.sh @@ -1,46 +1,21 @@ #!/usr/bin/env bash +set -e -DEFAULT_BIN_PATH='./target/production/node-subtensor' -BIN_PATH=$DEFAULT_BIN_PATH -TMP_SPEC='temp.json' -OUTPUT_FILE='benchmarking.txt' +EXTRINSIC="${1:-register}" -# Getting arguments from user -while [[ $# -gt 0 ]]; do - case $1 in - -p | --bin-path) - BIN_PATH="$2" - shift - shift - ;; - -* | --*) - echo "Unknown option $1" - exit 1 - ;; - *) - POSITIONAL_ARGS+=("$1") - shift - ;; - esac -done +cargo build \ + --profile production \ + -p node-subtensor \ + --features runtime-benchmarks -# Ensure binary exists before node-subtensor executions -if [ ! 
-f $BIN_PATH ]; then - if [[ "$DEFAULT_BIN_PATH" == "$BIN_PATH" ]]; then - cargo build --profile production --features runtime-benchmarks - else - echo "Binary '$BIN_PATH' does not exist. You can use -p or --bin-path to specify a different location." - exit 1 - fi -fi +RUNTIME_WASM=./target/production/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm -# Build Temporary Spec -$BIN_PATH build-spec --disable-default-bootnode --raw --chain local >$TMP_SPEC - -# Run benchmark -$BIN_PATH benchmark pallet \ ---chain=$TMP_SPEC \ ---pallet pallet-subtensor --extrinsic 'schedule_coldkey_swap' \ ---output $OUTPUT_FILE - -rm $TMP_SPEC +./target/production/node-subtensor benchmark pallet \ + --runtime "$RUNTIME_WASM" \ + --genesis-builder=runtime \ + --genesis-builder-preset=benchmark \ + --wasm-execution=compiled \ + --pallet=pallet_subtensor \ + --extrinsic="$EXTRINSIC" \ + --steps 50 \ + --repeat 5 \ diff --git a/scripts/benchmark_action.sh b/scripts/benchmark_action.sh new file mode 100755 index 0000000000..34043957de --- /dev/null +++ b/scripts/benchmark_action.sh @@ -0,0 +1,288 @@ +#!/usr/bin/env bash +set -euo pipefail + +# A list of pallets we wish to benchmark +PALLETS=(subtensor admin_utils commitments drand) + +# Map of pallet -> dispatch path (relative to this script's directory) +declare -A DISPATCH_PATHS=( + [subtensor]="../pallets/subtensor/src/macros/dispatches.rs" + [admin_utils]="../pallets/admin-utils/src/lib.rs" + [commitments]="../pallets/commitments/src/lib.rs" + [drand]="../pallets/drand/src/lib.rs" +) + +# Max allowed drift (%) +THRESHOLD=15 +MAX_RETRIES=3 + +# We'll build once for runtime-benchmarks +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +RUNTIME_WASM="$SCRIPT_DIR/../target/production/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm" + +echo "Building runtime-benchmarks…" +cargo build --profile production -p node-subtensor --features runtime-benchmarks + +echo +echo "──────────────────────────────────────────" +echo " Will benchmark pallets: ${PALLETS[*]}" +echo "──────────────────────────────────────────" + +################################################################################ +# Helper to "finalize" an extrinsic. We look up code-side reads/writes/weight +# in the dispatch file, then compare them to measured values. 
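The code-side lookup described here walks the pallet's dispatch file and, for each pub fn, lifts the static weight out of Weight::from_parts(...) and the declared DB accesses out of .reads()/.writes() or reads_writes(...). A rough Python equivalent of that extraction, assuming a weight expression shaped like the ones the awk below matches (the sample values are invented):

import re

# Invented example of a declared weight expression attached to a dispatchable.
weight_expr = "Weight::from_parts(28_000_000, 0).saturating_add(T::DbWeight::get().reads_writes(5, 3))"

weight = int(re.search(r"Weight::from_parts\(\s*([0-9_]+)", weight_expr).group(1).replace("_", ""))
reads, writes = map(int, re.search(r"reads_writes\((\d+),\s*(\d+)\)", weight_expr).groups())
print(weight, reads, writes)   # 28000000 5 3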
+################################################################################ + +function process_extr() { + local e="$1" + local us="$2" + local rd="$3" + local wr="$4" + local dispatch_file="$5" + + # If any piece is empty, skip + if [[ -z "$e" || -z "$us" || -z "$rd" || -z "$wr" ]]; then + return + fi + + # Convert microseconds to picoseconds + local meas_ps + meas_ps=$(awk -v x="$us" 'BEGIN{printf("%.0f", x * 1000000)}') + + # --------------------------------------------------------------------------- + # Code-side lookup from dispatch_file + # --------------------------------------------------------------------------- + local code_record + code_record=$(awk -v extr="$e" ' + /^\s*#\[pallet::call_index\(/ { next } + + /Weight::from_parts/ { + lw = $0 + sub(/.*Weight::from_parts\(\s*/, "", lw) + sub(/[^0-9_].*$/, "", lw) + gsub(/_/, "", lw) + w = lw + } + + /reads_writes\(/ { + lw = $0 + sub(/.*reads_writes\(/, "", lw) + sub(/\).*/, "", lw) + split(lw, io, ",") + gsub(/^[ \t]+|[ \t]+$/, "", io[1]) + gsub(/^[ \t]+|[ \t]+$/, "", io[2]) + r = io[1] + wri = io[2] + next + } + + /\.reads\(/ { + lw = $0 + sub(/.*\.reads\(/, "", lw) + sub(/\).*/, "", lw) + r = lw + next + } + + /\.writes\(/ { + lw = $0 + sub(/.*\.writes\(/, "", lw) + sub(/\).*/, "", lw) + wri = lw + next + } + + # main condition: function name must match "pub fn (" + $0 ~ ("pub fn[[:space:]]+" extr "\\(") { + print w, r, wri + exit + } + ' "$dispatch_file") + + local code_w code_reads code_writes + read code_w code_reads code_writes <<<"$code_record" + + # strip underscores or non-digits + code_w="${code_w//_/}" + code_w="${code_w%%[^0-9]*}" + code_reads="${code_reads//_/}" + code_reads="${code_reads%%[^0-9]*}" + code_writes="${code_writes//_/}" + code_writes="${code_writes%%[^0-9]*}" + + # default them if empty + [[ -z "$code_w" ]] && code_w="0" + [[ -z "$code_reads" ]] && code_reads="0" + [[ -z "$code_writes" ]] && code_writes="0" + + # compute drift + local drift + drift=$(awk -v a="$meas_ps" -v b="$code_w" 'BEGIN { + if (b == "" || b == 0) { + print 99999 + exit + } + printf("%.1f", (a - b) / b * 100) + }') + + # produce summary line + summary_lines+=("$(printf "%-30s | reads code=%3s measured=%3s | writes code=%3s measured=%3s | weight code=%12s measured=%12s | drift %6s%%" \ + "$e" \ + "$code_reads" \ + "$rd" \ + "$code_writes" \ + "$wr" \ + "$code_w" \ + "$meas_ps" \ + "$drift")") + + # validations + if (( rd != code_reads )); then + failures+=("[${e}] reads mismatch code=${code_reads}, measured=${rd}") + fail=1 + fi + if (( wr != code_writes )); then + failures+=("[${e}] writes mismatch code=${code_writes}, measured=${wr}") + fail=1 + fi + if [[ "$code_w" == "0" ]]; then + failures+=("[${e}] zero code weight") + fail=1 + fi + + local abs_drift=${drift#-} + local drift_int=${abs_drift%%.*} + if (( drift_int > THRESHOLD )); then + failures+=("[${e}] weight code=${code_w}, measured=${meas_ps}, drift=${drift}%") + fail=1 + fi +} + +################################################################################ +# We'll do the standard "attempt" logic for each pallet +################################################################################ + +for pallet_name in "${PALLETS[@]}"; do + # ensure the dispatch path is defined + if [[ -z "${DISPATCH_PATHS[$pallet_name]:-}" ]]; then + echo "❌ ERROR: dispatch path not defined for pallet '$pallet_name'" + exit 1 + fi + + # Prepend $SCRIPT_DIR to the path + DISPATCH="$SCRIPT_DIR/${DISPATCH_PATHS[$pallet_name]}" + if [[ ! 
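Everything process_extr checks reduces to a few comparisons: the measured time (reported in microseconds) is rescaled to picoseconds so it shares the unit of Weight::from_parts, reads and writes must match the declared values exactly, a zero declared weight is rejected outright, and the relative drift between measured and declared weight must stay within THRESHOLD. The same validation as a small Python sketch (names are illustrative):

THRESHOLD = 15   # percent, as in the script

def validate(measured_us, measured_reads, measured_writes, code_weight_ps, code_reads, code_writes):
    failures = []
    measured_ps = round(measured_us * 1_000_000)   # microseconds -> picoseconds
    if measured_reads != code_reads:
        failures.append("reads mismatch")
    if measured_writes != code_writes:
        failures.append("writes mismatch")
    if code_weight_ps == 0:
        failures.append("zero code weight")
    else:
        drift = (measured_ps - code_weight_ps) / code_weight_ps * 100
        if abs(drift) > THRESHOLD:
            failures.append("weight drift %.1f%% exceeds threshold" % drift)
    return failures

# A 30.1 us measurement against a declared 28_000_000 ps weight drifts ~7.5% and passes:
print(validate(30.1, 5, 3, 28_000_000, 5, 3))   # []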
-f "$DISPATCH" ]]; then + echo "❌ ERROR: dispatch file not found at $DISPATCH" + exit 1 + fi + + attempt=1 + pallet_success=0 + + while (( attempt <= MAX_RETRIES )); do + echo + echo "══════════════════════════════════════" + echo "Benchmarking pallet: $pallet_name (attempt #$attempt)" + echo "Dispatch file: $DISPATCH" + echo "══════════════════════════════════════" + + TMP="$(mktemp)" + trap "rm -f \"$TMP\"" EXIT + + # Run benchmark for just this pallet + ./target/production/node-subtensor benchmark pallet \ + --runtime "$RUNTIME_WASM" \ + --genesis-builder=runtime \ + --genesis-builder-preset=benchmark \ + --wasm-execution=compiled \ + --pallet "pallet_${pallet_name}" \ + --extrinsic "*" \ + --steps 50 \ + --repeat 5 \ + | tee "$TMP" + + # now parse results + summary_lines=() + failures=() + fail=0 + + extr="" + meas_us="" + meas_reads="" + meas_writes="" + + function finalize_extr() { + process_extr "$extr" "$meas_us" "$meas_reads" "$meas_writes" "$DISPATCH" + extr="" + meas_us="" + meas_reads="" + meas_writes="" + } + + while IFS= read -r line; do + if [[ $line =~ Extrinsic:\ \"([[:alnum:]_]+)\" ]]; then + finalize_extr + extr="${BASH_REMATCH[1]}" + continue + fi + + if [[ $line =~ Time\ ~=\ *([0-9]+(\.[0-9]+)?) ]]; then + meas_us="${BASH_REMATCH[1]}" + continue + fi + + if [[ $line =~ Reads[[:space:]]*=[[:space:]]*([0-9]+) ]]; then + meas_reads="${BASH_REMATCH[1]}" + continue + fi + + if [[ $line =~ Writes[[:space:]]*=[[:space:]]*([0-9]+) ]]; then + meas_writes="${BASH_REMATCH[1]}" + continue + fi + done < "$TMP" + + finalize_extr + + echo + echo "Benchmark Summary for pallet '$pallet_name' (attempt #$attempt):" + for l in "${summary_lines[@]}"; do + echo " $l" + done + + if (( fail )); then + echo + echo "❌ Issues detected on attempt #$attempt (pallet '$pallet_name'):" + for e in "${failures[@]}"; do + echo " • $e" + done + + if (( attempt < MAX_RETRIES )); then + echo "→ Retrying…" + (( attempt++ )) + continue + else + echo + echo "❌ Benchmarks for pallet '$pallet_name' failed after $MAX_RETRIES attempts." + exit 1 + fi + else + echo + echo "✅ Pallet '$pallet_name' benchmarks all good within ±${THRESHOLD}% drift." + pallet_success=1 + break + fi + done + + # If we never succeeded for this pallet, exit + if (( pallet_success == 0 )); then + echo "❌ Could not benchmark pallet '$pallet_name' successfully." + exit 1 + fi +done + +echo +echo "══════════════════════════════════════" +echo "All requested pallets benchmarked successfully!" +echo "══════════════════════════════════════" +exit 0 diff --git a/scripts/benchmark_all.sh b/scripts/benchmark_all.sh index 580e5425eb..22d23483f3 100755 --- a/scripts/benchmark_all.sh +++ b/scripts/benchmark_all.sh @@ -1,24 +1,32 @@ -#!/bin/sh -set -ex +#!/usr/bin/env bash +set -e -# List of pallets you want to benchmark -pallets=("pallet_subtensor" "pallet_collective" "pallet_commitments" "pallet_registry" "pallet_admin_utils") +pallets=( + "pallet_subtensor" + "pallet_commitments" + "pallet_drand" + "pallet_admin_utils" +) -# Chain spec and output directory -chain_spec="finney" # or your specific chain spec +RUNTIME_WASM=./target/production/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm -for pallet in "${pallets[@]}" -do - echo "Benchmarking $pallet..." 
- cargo run --profile=production --features=runtime-benchmarks,try-runtime --bin node-subtensor -- benchmark pallet \ - --chain $chain_spec \ +cargo build \ + --profile production \ + -p node-subtensor \ + --features runtime-benchmarks + +for pallet in "${pallets[@]}"; do + echo "--------------------------------------------------------" + echo " Benchmarking all extrinsics for $pallet..." + echo "--------------------------------------------------------" + + ./target/production/node-subtensor benchmark pallet \ + --runtime "$RUNTIME_WASM" \ + --genesis-builder=runtime \ + --genesis-builder-preset=benchmark \ --wasm-execution=compiled \ - --pallet $pallet \ - --extrinsic '*' \ + --pallet "$pallet" \ + --extrinsic "*" \ --steps 50 \ - --repeat 5 \ - --output "pallets/$pallet/src/weights.rs" \ - --template ./.maintain/frame-weight-template.hbs # Adjust this path to your template file -done - -echo "All pallets have been benchmarked and weights updated." + --repeat 5 +done \ No newline at end of file diff --git a/scripts/localnet_patch.sh b/scripts/localnet_patch.sh new file mode 100755 index 0000000000..ef9d1959bf --- /dev/null +++ b/scripts/localnet_patch.sh @@ -0,0 +1,32 @@ +#!/bin/bash +# This file patches the code in the repository to create a docker image with the ability to run tests in non-fast-blocks +# mode. + +set -e + +DurationOfStartCall="runtime/src/lib.rs" +DefaultPendingCooldown="pallets/subtensor/src/lib.rs" +SetChildren="pallets/subtensor/src/utils/rate_limiting.rs" + +# Checkers +if ! grep -q '7 \* 24 \* 60 \* 60 / 12 // 7 days' "$DurationOfStartCall"; then + echo "Error: Target string not found in $DurationOfStartCall" + exit 1 +fi + +if ! grep -q 'pub fn DefaultPendingCooldown() -> u64 {' "$DefaultPendingCooldown"; then + echo "Error: Target function not found in $DefaultPendingCooldown" + exit 1 +fi + +if ! grep -q 'TransactionType::SetChildren => 150, // 30 minutes' "$SetChildren"; then + echo "Error: Target string not found in $SetChildren" + exit 1 +fi + +# replace +perl -0777 -i -pe 's|7 \* 24 \* 60 \* 60 / 12 // 7 days|5 // Only 5 blocks for tests|' "$DurationOfStartCall" +perl -0777 -i -pe 's|pub fn DefaultPendingCooldown\(\) -> u64 \{\s*if cfg!\(feature = "fast-blocks"\) \{\s*return 15;\s*\}\s*7_200\s*\}|pub fn DefaultPendingCooldown() -> u64 {\n 15\n }|g' "$DefaultPendingCooldown" +perl -0777 -i -pe 's|TransactionType::SetChildren => 150, // 30 minutes|TransactionType::SetChildren => 15, // 3 min|' "$SetChildren" + +echo "Patch applied successfully." diff --git a/scripts/map_consensus.py b/scripts/map_consensus.py new file mode 100644 index 0000000000..1d09207bf3 --- /dev/null +++ b/scripts/map_consensus.py @@ -0,0 +1,144 @@ +import re +import sys +import numpy as np +import matplotlib.pyplot as plt +from matplotlib.pyplot import cm + + +def extract_data(filepath): + """ + Extracts the emission data from a text file. + + Args: + filepath: Path to the data file. + + Returns: + A list of lists containing the numerical data, or None if an error occurs. + """ + try: + with open(filepath, "r") as f: + content = f.read() + except FileNotFoundError: + print(f"Error: File not found at {filepath}") + return None + + # Regular expression to extract data rows. Matches strings like "[0.51, 1.00, 1.00, ...]" + # Explanation: + # \[ Matches the opening square bracket. + # (?: ... ) Non-capturing group. + # [0-9.]+ Matches one or more digits or decimal points. + # ,\s* Matches a comma followed by zero or more whitespace characters. 
+ # + Matches the previous group (number and comma) one or more times. + # [0-9.]+ Matches the last number in the list. + # \] Matches the closing square bracket. + + list_pattern = ( + r"\[(?:[0-9.]+,\s*)+[0-9.]+\]" + ) # Regular expression to match data rows + matches = re.findall(list_pattern, content) + + if not matches: + print("Error: No matching data found in the file.") + return None + + data = [] + for match in matches: + try: + # Extract numerical values from the matched string. + # 1. match[1:-1]: Removes the square brackets from the beginning and end. + # 2. .split(','): Splits the string into a list of strings at each comma. + # 3. [float(x.strip()) for x in ...]: Converts each string to a float + # after removing leading/trailing whitespace. + + row = [float(x.strip()) for x in match[1:-1].split(",")] + data.append(row) + except ValueError: + print(f"Warning: Skipping invalid data row: {match}") + + return data + + +def visualize_data(emission_data, output_filename="consensus_plot.svg"): + """ + Generates and saves a contour plot of the retention map. + + Args: + emission_data: The extracted emission data. + output_filename: The name of the output SVG file. + """ + major_ratios = {} + avg_weight_devs = {} + + # Process the data to organize it by major stake + for ( + major_stake, + major_weight, + minor_weight, + avg_weight_dev, + major_ratio, + ) in emission_data: + major_stake_str = f"{major_stake:.2f}" + maj_idx, min_idx = int(round(50 * major_weight)), int(round(50 * minor_weight)) + + avg_weight_devs.setdefault(major_stake_str, np.zeros((51, 51))) + avg_weight_devs[major_stake_str][maj_idx][min_idx] = avg_weight_dev + + major_ratios.setdefault(major_stake_str, np.zeros((51, 51))) + major_ratios[major_stake_str][maj_idx][min_idx] = major_ratio + + # Create the meshgrid for the contour plot + x = np.linspace(0, 1, 51) + y = np.linspace(0, 1, 51) + x, y = np.meshgrid(x, y, indexing="ij") + + # Set up the plot + fig = plt.figure(figsize=(6, 6), dpi=70) + ax = fig.gca() + ax.set_xticks(np.arange(0, 1, 0.05)) + ax.set_yticks(np.arange(0, 1.0, 0.05)) + ax.set_xticklabels([f"{_:.2f}"[1:] for _ in np.arange(0, 1.0, 0.05)]) + plt.grid(linestyle="dotted", color=[0.85, 0.85, 0.85]) + + # Define stakes and colors for contour lines + isolate = ["0.60"] # Stakes to highlight + stakes = [0.51, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 0.99] + colors = cm.viridis(np.linspace(0, 1, len(stakes) + 1)) + + # Create contour lines for each stake + for i, stake in enumerate(stakes): + contours = plt.contour( + x, + y, + major_ratios[f"{stake:.2f}"], + levels=[0.0, stake], + colors=[colors[i + 1]], + ) + if f"{stake:.2f}" in isolate: + contours.collections[1].set_linewidth(3) # Highlight isolated stake + plt.clabel(contours, inline=True, fontsize=10) + + # Add title and labels + plt.title(f"Major emission [$stake_{{maj}}=emission_{{maj}}$ retention lines]") + plt.ylabel("Minor self-weight") + plt.xlabel("Major self-weight") + + # Save the plot + plt.savefig(output_filename, format="svg") + print(f"Plot saved to {output_filename}") + + +if __name__ == "__main__": + if len(sys.argv) < 2: + print( + "Usage: python scripts/map_consensus.py [optional_output_filename]" + ) + sys.exit(1) + + filepath = sys.argv[1] + output_filename = "consensus_plot.svg" # Default output filename + if len(sys.argv) >= 3: + output_filename = sys.argv[2] # Optional output filename + + extracted_data = extract_data(filepath) + if extracted_data: + visualize_data(extracted_data, output_filename) diff --git 
a/scripts/run/subtensor.sh b/scripts/run/subtensor.sh index cdc37c9014..6d9f95766b 100755 --- a/scripts/run/subtensor.sh +++ b/scripts/run/subtensor.sh @@ -41,7 +41,7 @@ function run_command() { # Command to run subtensor $F_BIN_PATH \ --base-path /tmp/blockchain \ - --chain ./chainspecs/raw_spec_finney.json \ + --chain ./raw_spec_finney.json \ --rpc-external --rpc-cors all \ --no-mdns \ --rpc-max-connections 10000 \
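One note on scripts/map_consensus.py, added above: the data file it reads is expected to contain bracketed rows of five floats (major_stake, major_weight, minor_weight, avg_weight_dev and major_ratio, in that order), which extract_data pulls out with the list regex and visualize_data then snaps onto a 51x51 grid of self-weights. A tiny self-contained illustration of the row shape the regex accepts; the values are made up:

import re

content = "run 1: [0.60, 0.10, 0.90, 0.0123, 0.61] run 2: [0.60, 0.12, 0.90, 0.0119, 0.62]"

rows = [
    [float(x) for x in m[1:-1].split(",")]
    for m in re.findall(r"\[(?:[0-9.]+,\s*)+[0-9.]+\]", content)
]
print(rows)   # [[0.6, 0.1, 0.9, 0.0123, 0.61], [0.6, 0.12, 0.9, 0.0119, 0.62]]

The script itself takes the data file path as its first argument and an optional SVG output path as the second, matching the sys.argv handling at the bottom of the file.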